Merge branch 'master_upstream'

Resolved a number of conflicts and added the code required for the C++ target to generate header files, following the recent changes in the main repo.
Mike Lischke 2016-11-07 11:16:02 +01:00
commit 3b3fe3da57
123 changed files with 29219 additions and 289 deletions

View File

@ -17,6 +17,8 @@ before_install:
- sudo apt-get install -qq nodejs
- echo "deb http://download.mono-project.com/repo/debian wheezy/snapshots/3.12.1 main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list
- sudo apt-get install -qq mono-complete
- eval "$(sudo gimme 1.7.3)"
- go version ; go env
- python --version
- python3 --version

View File

@ -11,11 +11,11 @@ ANTLR project lead and supreme dictator for life
[University of San Francisco](http://www.usfca.edu/)
* [Sam Harwell](http://tunnelvisionlabs.com/) (Tool co-author, Java and C# target)
* Eric Vergnaud (Javascript, Python2, Python3 targets and significant work on C# target)
* [Peter Boyer](https://github.com/pboyer) (Go target)
* [Mike Lischke](http://www.soft-gems.net/) (C++ completed target)
* Dan McLaughlin (C++ initial target)
* David Sisson (C++ initial target and test)
## Useful information
* [Release notes](https://github.com/antlr/antlr4/releases)
@ -23,7 +23,8 @@ ANTLR project lead and supreme dictator for life
* [Official site](http://www.antlr.org/)
* [Documentation](https://github.com/antlr/antlr4/blob/master/doc/index.md)
* [FAQ](https://github.com/antlr/antlr4/blob/master/doc/faq/index.md)
* [API](http://www.antlr.org/api/Java/index.html)
* [ANTLR code generation targets](https://github.com/antlr/antlr4/blob/master/doc/targets.md) (Currently: Java, C#, Python2|3, JavaScript, Go)
* [Java API](http://www.antlr.org/api/Java/index.html)
* [ANTLR v3](http://www.antlr3.org/)
* [v3 to v4 Migration, differences](https://github.com/antlr/antlr4/blob/master/doc/faq/general.md)

View File

@ -34,7 +34,7 @@
<parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.4-SNAPSHOT</version>
<version>4.6-SNAPSHOT</version>
</parent>
<artifactId>antlr4-maven-plugin</artifactId>
<packaging>maven-plugin</packaging>

View File

@ -99,9 +99,12 @@ YYYY/MM/DD, github id, Full name, email
2016/07/20, kosl90, Li Liqiang, kos1990l@gmail.com
2016/07/27, timoc, Tim O'Callaghan, timo@linux.com
2016/07/26, nic30, Michal Orsák, michal.o.socials@gmail.com
2016/07/18, willfaught, Will Faught, will.faught@gmail.com
2016/08/08, wjkohnen, Wolfgang Johannes Kohnen, wjkohnen-go-antlr@ko-sys.com
2016/08/11, BurtHarris, Ralph "Burt" Harris, Burt_Harris_antlr4@azxs.33mail.com
2016/08/19, andjo403, Andreas Jonson, andjo403@hotmail.com
2016/10/13, cgudrian, Christian Gudrian, christian.gudrian@gmx.de
2016/10/13, nielsbasjes, Niels Basjes, niels@basjes.nl
2016/10/21, FloorGoddijn, Floor Goddijn, floor.goddijn[at]aimms.com
2016/11/01, RYDB3RG, Kai Stammerjohann, RYDB3RG@users.noreply.github.com
2016/11/05, runner-mei, meifakun, runner.mei@gmail.com

View File

@ -14,6 +14,13 @@ templates = /Users/parrt/antlr/code/antlr4/runtime-testsuite/resources/org/antlr
target = ALL
browsers = false
viz = false
INFO: Generating target Java
INFO: Generating target Go
INFO: Generating target CSharp
INFO: Generating target Python2
INFO: Generating target Python3
INFO: Generating target JavaScript/Node
...
```
It basically runs the Java program:
@ -74,7 +81,7 @@ Each `.stg` file describes the following mandatory elements for the test:
- the expected output
- the expected errors
The grammar can itself contain template expressions such as <something>.
The grammar can itself contain template expressions such as `<something>`.
The test generator replaces these with the corresponding values from the target language template (see below).
It then generates a unit test in which the grammar, the input and the expected output and errors are inlined.
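For example (a sketch using the Go target template added in this commit): a grammar action written in a test template as `{<writeln("\"alt 1\"")>}` is expanded through the Go template's `writeln(s) ::= <<fmt.Println(<s>)>>` definition, so the inlined grammar ends up carrying the Go statement shown below.
```go
package main

import "fmt"

func main() {
	// What <writeln("\"alt 1\"")> expands to for the Go target
	// (via writeln(s) ::= <<fmt.Println(<s>)>> in Go.test.stg).
	fmt.Println("alt 1")
}
```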

doc/go-target.md (new file, +14 lines)
View File

@ -0,0 +1,14 @@
# ANTLR4 Language Target, Runtime for Go
### Getting started
1. Get the runtime and install it on your GOPATH: `go get github.com/antlr/antlr4`
2. Generate the parser/lexer code: `antlr MyGrammar.g4 -Dlanguage=Go`
### Referencing in your code
Reference the go runtime package like this:
```go
import "github.com/antlr/antlr4/runtime/Go/antlr"
```
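A minimal usage sketch (assumptions: a combined grammar named `MyGrammar` with a start rule `prog`, generated with `-Dlanguage=Go` into the same package; the `NewMyGrammarLexer`, `NewMyGrammarParser`, and `Prog` names simply follow the usual ANTLR generated-code conventions and are not part of this commit):
```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// Standard ANTLR pipeline: char stream -> lexer -> token stream -> parser.
	input := antlr.NewInputStream("some input to parse")
	lexer := NewMyGrammarLexer(input) // generated by: antlr MyGrammar.g4 -Dlanguage=Go
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	p := NewMyGrammarParser(tokens)

	tree := p.Prog() // invoke the (assumed) start rule
	fmt.Println(tree.ToStringTree(nil, p))
}
```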

View File

@ -2,14 +2,11 @@
This page lists the available and upcoming ANTLR runtimes. Please note that you won't find language-specific code generators here. This is because there is only one tool, written in Java, which is able to generate lexer and parser code for all targets through command-line options. The tool can be invoked from the command line, or from any integration plugin for popular IDEs and build systems: Eclipse, IntelliJ, Visual Studio, Maven. So whatever your environment and target are, you should be able to run the tool and produce the code in the targeted language. As of this writing, the available targets are the following:
* [Java](java-target.md)<br>
The [ANTLR v4 book](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference) has a decent summary of the runtime library. We have added a useful XPath feature since the book was printed that lets you select bits of parse trees.
<br>[Runtime API](http://www.antlr.org/api/Java/index.html)
<br>See [Getting Started with ANTLR v4](getting-started.md)
* [Java](java-target.md). The [ANTLR v4 book](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference) has a decent summary of the runtime library. We have added a useful XPath feature since the book was printed that lets you select bits of parse trees. See [Runtime API](http://www.antlr.org/api/Java/index.html) and [Getting Started with ANTLR v4](getting-started.md)
* [C#](csharp-target.md)
* [Python](python-target.md) (2 and 3)
* [JavaScript](javascript-target.md)
* [Go](go-target.md)
* Swift (not yet available)
* C++ (not yet available)

View File

@ -7,7 +7,7 @@
</parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.4-SNAPSHOT</version>
<version>4.6-SNAPSHOT</version>
<packaging>pom</packaging>
<name>ANTLR 4</name>
@ -48,6 +48,12 @@
<role>Developer - JavaScript, C#, Python 2, Python 3</role>
</roles>
</developer>
<developer>
<name>Peter Boyer</name>
<roles>
<role>Developer - Go</role>
</roles>
</developer>
<developer>
<name>Jim Idle</name>
<email>jimi@idle.ws</email>

View File

@ -4,7 +4,7 @@
<parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.4-SNAPSHOT</version>
<version>4.6-SNAPSHOT</version>
</parent>
<artifactId>antlr4-runtime-testsuite</artifactId>
<name>ANTLR 4 Runtime Test Generator</name>
@ -72,8 +72,9 @@
<includes>
<include>**/csharp/Test*.java</include>
<include>**/java/Test*.java</include>
<include>**/go/Test*.java</include>
<include>**/javascript/node/Test*.java</include>
<include>**/python2/Test*.java</include>
<include>**/python3/Test*.java</include>
<include>**/cpp/Test*.java</include>
</includes>

View File

@ -218,6 +218,12 @@ RuleInvocationStack() ::= "Arrays::listToString(getRuleInvocationStack(), \", \"
LL_EXACT_AMBIG_DETECTION() ::= <<getInterpreter\<atn::ParserATNSimulator>()->setPredictionMode(atn::PredictionMode::LL_EXACT_AMBIG_DETECTION);>>
ParserToken(parser, token) ::= <%<parser>::<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
bool Property() {
@ -403,6 +409,14 @@ public:
}
>>
ImportVisitor(X) ::= ""
BasicVisitor(X) ::= ""
WalkVisitor(s) ::= ""
LRWithLabelsVisitor(X) ::= ""
RuleGetterVisitor(X) ::= ""
LRVisitor(x) ::= ""
TokenGetterVisitor(x) ::= ""
DeclareContextListGettersFunction() ::= <<
void foo() {
SContext *s;
@ -427,7 +441,6 @@ bool pred(bool v) {
Invoke_pred(v) ::= <<pred(<v>)>>
ParserTokenType(t) ::= "Parser::<t>"
ContextRuleFunction(ctx, rule) ::= "<ctx>-><rule>"
StringType() ::= "std::string"
ContextMember(ctx, subctx, member) ::= "<ctx>-><subctx>-><member>"

View File

@ -234,6 +234,12 @@ RuleInvocationStack() ::= "GetRuleInvocationStackAsString()"
LL_EXACT_AMBIG_DETECTION() ::= <<Interpreter.PredictionMode = PredictionMode.LlExactAmbigDetection;>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
bool Property() {
@ -416,6 +422,14 @@ public class LeafListener : TBaseListener {
}
>>
ImportVisitor(X) ::= ""
BasicVisitor(X) ::= ""
WalkVisitor(s) ::= ""
LRWithLabelsVisitor(X) ::= ""
RuleGetterVisitor(X) ::= ""
LRVisitor(x) ::= ""
TokenGetterVisitor(x) ::= ""
DeclareContextListGettersFunction() ::= <<
void foo() {
SContext s = null;

View File

@ -0,0 +1,490 @@
IgnoredTests ::= [
"Visitors.Basic": true,
"Visitors.LR": true,
"Visitors.LRWithLabels": true,
"Visitors.RuleGetters_1": true,
"Visitors.RuleGetters_2": true,
"Visitors.TokenGetters_1": true,
"Visitors.TokenGetters_2": true,
default: false
]
TestFile(file) ::= <<
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
<if(file.Options.("ImportGrammar"))>
import org.antlr.v4.tool.Grammar;
<endif>
public class Test<file.name> extends BaseTest {
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
}<\n>
>>
LexerTestMethod(test) ::= <<
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
<testAnnotations(test)>
public void test<test.name>() throws Exception {
mkdir(parserpkgdir);
<test.SlaveGrammars:{grammar |
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
writeFile(parserpkgdir, "<grammar>.g4", slave_<grammar>);
}; separator="\n">
<test.Grammar:{grammar |
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
<test.afterGrammar>
String input =<writeStringLiteral(test.Input)>;
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
assertEquals(<writeStringLiteral(test.Output)>, found);
<if(!isEmpty.(test.Errors))>
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
<else>
assertNull(this.stderrDuringParse);
<endif>
}>
}
>>
CompositeLexerTestMethod(test) ::= <<
<LexerTestMethod(test)>
>>
ParserTestMethod(test) ::= <<
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
<testAnnotations(test)>
public void test<test.name>() throws Exception {
mkdir(parserpkgdir);
<test.SlaveGrammars:{grammar |
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
<if(test.Options.("SlaveIsLexer"))>
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
<else>
writeFile(parserpkgdir, "<grammar>.g4", slave_<grammar>);
<endif>
}; separator="\n">
<test.Grammar:{grammar |
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
<test.afterGrammar>
String input =<writeStringLiteral(test.Input)>;
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer",
"<grammar>Listener", "<grammar>Visitor", "<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
assertEquals(<writeStringLiteral(test.Output)>, found);
<if(!isEmpty.(test.Errors))>
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
<else>
assertNull(this.stderrDuringParse);
<endif>
}>
}
>>
CompositeParserTestMethod(test) ::= <<
<ParserTestMethod(test)>
>>
AbstractParserTestMethod(test) ::= <<
String test<test.name>(String input) throws Exception {
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser", "<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
}
>>
ConcreteParserTestMethod(test) ::= <<
<testAnnotations(test)>
public void test<test.name>() throws Exception {
String found = test<test.baseName>("<test.input>");
assertEquals("<test.expectedOutput>", found);
<if(test.expectedErrors)>
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
<else>
assertNull(this.stderrDuringParse);
<endif>
}
>>
testAnnotations(test) ::= <%
@Test
<if(test.Options.("Timeout"))>
(timeout = <test.Options.("Timeout")>)
<endif>
<if(test.Options.("Ignore"))>
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
<endif>
%>
buildStringLiteral(text, variable) ::= <<
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
String <variable> = <variable>Builder.toString();
>>
writeStringLiteral(text) ::= <%
<if(isEmpty.(text))>
""
<else>
<writeLines(lines.(text))>
<endif>
%>
writeLines(textLines) ::= <%
<if(rest(textLines))>
<textLines:{line|
<\n> "<escape.(line)>}; separator="\" +">"
<else>
"<escape.(first(textLines))>"
<endif>
%>
string(text) ::= <<
"<escape.(text)>"
>>
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
writeln(s) ::= <<fmt.Println(<s>)>>
write(s) ::= <<fmt.Print(<s>)>>
writeList(s) ::= <<fmt.Print(<s; separator="+">);>>
False() ::= "false"
True() ::= "true"
Not(v) ::= "!<v>"
Assert(s) ::= ""
Cast(t,v) ::= "(<v>)"
Append(a,b) ::= "<a> + fmt.Sprint(<b>)"
Concat(a,b) ::= "<a><b>"
DeclareLocal(s, v) ::= "var <s> = <v>"
AssertIsList(v) ::= ""
AssignLocal(s, v) ::= "<s> = <v>;"
InitIntMember(n, v) ::= <%var <n> int = <v>; var _ int = <n>; %>
InitBooleanMember(n, v) ::= <%var <n> bool = <v>; var _ bool = <n>; %>
GetMember(n) ::= <%<n>%>
SetMember(n, v) ::= <%<n> = <v>;%>
AddMember(n, v) ::= <%<n> += <v>;%>
PlusMember(v, n) ::= <%<v> + fmt.Sprint(<n>)%>
MemberEquals(n, v) ::= <%<n> == <v>%>
ModMemberEquals(n, m, v) ::= <%<n> % <m> == <v>%>
ModMemberNotEquals(n, m, v) ::= <%<n> % <m> != <v>%>
DumpDFA() ::= "p.DumpDFA()"
Pass() ::= ""
StringList() ::= "[]string"
BuildParseTrees() ::= "p.BuildParseTrees = true"
BailErrorStrategy() ::= <%p.SetErrorHandler(antlr.NewBailErrorStrategy())%>
ToStringTree(s) ::= <%<s>.ToStringTree(nil, p)%>
Column() ::= "p.GetCharPositionInLine()"
Text() ::= "l.GetText()"
ValEquals(a, b) ::= <%<a> == <b>%>
TextEquals(a) ::= <%p.GetText() == "<a>"%>
PlusText(a) ::= <%"<a>" + l.GetText()%>
InputText() ::= "p.GetTokenStream().GetAllText()"
LTEquals(i, v) ::= <%p.GetTokenStream().LT(<i>).GetText() == <v>%>
LANotEquals(i, v) ::= <%p.GetTokenStream().LA(<i>) != <v>%>
TokenStartColumnEquals(i) ::= <%p.TokenStartColumn == <i>%>
ImportListener(X) ::= ""
GetExpectedTokenNames() ::= "p.GetExpectedTokens().StringVerbose(p.GetTokenNames(), nil, false)"
RuleInvocationStack() ::= "antlr.PrintArrayJavaStyle(p.GetRuleInvocationStack(nil))"
LL_EXACT_AMBIG_DETECTION() ::= <<p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);>>
ParserToken(parser, token) ::= <%<parser><token>%>
Production(p) ::= <%<p; format="cap">%>
Result(r) ::= <%Get<r; format="cap">()%>
ParserPropertyMember() ::= <<
@parser::members {
func (p *TParser) Property() bool {
return true
}
}
>>
ParserPropertyCall(p, call) ::= "<p>.<call>"
PositionAdjustingLexer() ::= <<
func (p *PositionAdjustingLexer) NextToken() antlr.Token {
if _, ok := p.Interpreter.(*PositionAdjustingLexerATNSimulator); !ok {
p.Interpreter = NewPositionAdjustingLexerATNSimulator(p, lexerAtn, p.Interpreter.DecisionToDFA(), p.Interpreter.SharedContextCache())
p.Virt = p
}
return p.BaseLexer.NextToken()
}
func (p *PositionAdjustingLexer) Emit() antlr.Token {
switch p.GetType() {
case PositionAdjustingLexerTOKENS:
p.HandleAcceptPositionForKeyword("tokens")
case PositionAdjustingLexerLABEL:
p.HandleAcceptPositionForIdentifier()
}
return p.BaseLexer.Emit()
}
func isIdentifierChar(c rune) bool {
return unicode.IsLetter(c) || unicode.IsDigit(c) || c == '_'
}
func (p *PositionAdjustingLexer) HandleAcceptPositionForIdentifier() bool {
var tokenText = p.GetText()
var identifierLength int
for identifierLength \< len(tokenText) && isIdentifierChar([]rune(tokenText)[identifierLength]) {
identifierLength++
}
if p.GetInputStream().Index() \<= p.TokenStartCharIndex + identifierLength {
return false
}
var offset = identifierLength - 1
p.GetInterpreter().(*PositionAdjustingLexerATNSimulator).ResetAcceptPosition(p.GetInputStream(), p.TokenStartCharIndex + offset, p.TokenStartLine, p.TokenStartColumn + offset)
return true
}
func (p *PositionAdjustingLexer) HandleAcceptPositionForKeyword(keyword string) bool {
if p.GetInputStream().Index() \<= p.TokenStartCharIndex + len(keyword) {
return false
}
var offset = len(keyword) - 1
p.GetInterpreter().(*PositionAdjustingLexerATNSimulator).ResetAcceptPosition(p.GetInputStream(), p.TokenStartCharIndex + offset, p.TokenStartLine, p.TokenStartColumn + offset)
return true
}
type PositionAdjustingLexerATNSimulator struct {
*antlr.LexerATNSimulator
}
func NewPositionAdjustingLexerATNSimulator(recog antlr.Lexer, atn *antlr.ATN, decisionToDFA []*antlr.DFA, sharedContextCache *antlr.PredictionContextCache) *PositionAdjustingLexerATNSimulator {
return &PositionAdjustingLexerATNSimulator{
LexerATNSimulator: antlr.NewLexerATNSimulator(recog, atn, decisionToDFA, sharedContextCache),
}
}
func (p *PositionAdjustingLexerATNSimulator) ResetAcceptPosition(input antlr.CharStream, index, line, charPositionInLine int) {
input.Seek(index)
p.Line = line
p.CharPositionInLine = charPositionInLine
p.Consume(input)
}
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::members {
type MyRuleNode struct {
*antlr.BaseParserRuleContext
altNum int
}
func NewMyRuleNode(parent antlr.ParserRuleContext, invokingStateNumber int) *MyRuleNode {
return &MyRuleNode{
BaseParserRuleContext : antlr.NewBaseParserRuleContext(parent, invokingStateNumber),
}
}
func (m *MyRuleNode) GetAltNumber() int {
return m.altNum
}
func (m *MyRuleNode) SetAltNumber(altNum int) {
m.altNum = altNum
}
}
>>
BasicListener(notused) ::= <<
type LeafListener struct {
*BaseTListener
}
func NewLeafListener() *LeafListener {
return &LeafListener{BaseTListener: &BaseTListener{}}
}
func (*LeafListener) VisitTerminal(node antlr.TerminalNode) {
fmt.Println(node.GetSymbol().GetText())
}
>>
WalkListener(s) ::= <<
var walker = antlr.NewParseTreeWalker()
walker.Walk(NewLeafListener(), <s>)
>>
TokenGetterListener(notused) ::= <<
type LeafListener struct {
*BaseTListener
}
func NewLeafListener() *LeafListener {
return &LeafListener{BaseTListener: &BaseTListener{}}
}
func (*LeafListener) ExitA(ctx *AContext) {
if ctx.GetChildCount() == 2 {
fmt.Printf("%s %s %s", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT())))
} else {
fmt.Println(ctx.ID().GetSymbol())
}
}
>>
RuleGetterListener(notused) ::= <<
type LeafListener struct {
*BaseTListener
}
func NewLeafListener() *LeafListener {
return &LeafListener{BaseTListener: &BaseTListener{}}
}
func (*LeafListener) ExitA(ctx *AContext) {
if ctx.GetChildCount() == 2 {
fmt.Printf("%s %s %s", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText())
} else {
fmt.Println(ctx.B(0).GetStart().GetText())
}
}
>>
LRListener(notused) ::= <<
type LeafListener struct {
*BaseTListener
}
func NewLeafListener() *LeafListener {
return &LeafListener{BaseTListener: &BaseTListener{}}
}
func (*LeafListener) ExitE(ctx *EContext) {
if ctx.GetChildCount() == 3 {
fmt.Printf("%s %s %s\n", ctx.E(0).GetStart().GetText(), ctx.E(1).GetStart().GetText(), ctx.AllE()[0].GetStart().GetText())
} else {
fmt.Println(ctx.INT().GetSymbol().GetText())
}
}
>>
LRWithLabelsListener(notused) ::= <<
type LeafListener struct {
*BaseTListener
}
func NewLeafListener() *LeafListener {
return &LeafListener{BaseTListener: &BaseTListener{}}
}
func (*LeafListener) ExitCall(ctx *CallContext) {
fmt.Printf("%s %s", ctx.E().GetStart().GetText(), ctx.EList().String(nil, nil))
}
func (*LeafListener) ExitInt(ctx *IntContext) {
fmt.Println(ctx.INT().GetSymbol().GetText())
}
>>
ImportVisitor(X) ::= ""
BasicVisitor(X) ::= ""
WalkVisitor(s) ::= ""
LRWithLabelsVisitor(X) ::= ""
RuleGetterVisitor(X) ::= ""
LRVisitor(x) ::= ""
TokenGetterVisitor(x) ::= ""
DeclareContextListGettersFunction() ::= <<
func foo() {
// TODO
// var s SContext
// var a = s.A()
// var b = s.B()
}
>>
Declare_foo() ::= <<
func foo() {
fmt.Println("foo")
}
>>
Invoke_foo() ::= "foo()"
Declare_pred() ::= <<
func pred(v bool) bool {
fmt.Println("eval=" + fmt.Sprint(v))
return v
}
>>
Invoke_pred(v) ::= <<pred(<v>)>>
ContextRuleFunction(ctx, rule) ::= "<ctx>.<rule>"
StringType() ::= "String"
ContextMember(ctx, subctx, member) ::= "<ctx>.<subctx>.<member>"
isEmpty ::= [
"": true,
default: false
]

View File

@ -1,4 +1,11 @@
IgnoredTests ::= [
"Visitors.Basic": true,
"Visitors.LR": true,
"Visitors.LRWithLabels": true,
"Visitors.RuleGetters_1": true,
"Visitors.RuleGetters_2": true,
"Visitors.TokenGetters_1": true,
"Visitors.TokenGetters_2": true,
default: false
]
@ -148,6 +155,9 @@ writeLines(textLines) ::= <%
<endif>
%>
string(text) ::= <<
"<escape.(text)>"
>>
@ -234,6 +244,12 @@ RuleInvocationStack() ::= "getRuleInvocationStack()"
LL_EXACT_AMBIG_DETECTION() ::= <<_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
boolean Property() {
@ -412,6 +428,14 @@ public static class LeafListener extends TBaseListener {
}
>>
ImportVisitor(X) ::= ""
BasicVisitor(X) ::= ""
WalkVisitor(s) ::= ""
LRWithLabelsVisitor(X) ::= ""
RuleGetterVisitor(X) ::= ""
LRVisitor(x) ::= ""
TokenGetterVisitor(x) ::= ""
DeclareContextListGettersFunction() ::= <<
void foo() {
SContext s = null;
@ -438,18 +462,6 @@ ParserTokenType(t) ::= "Parser.<t>"
ContextRuleFunction(ctx, rule) ::= "<ctx>.<rule>"
StringType() ::= "String"
ContextMember(ctx, subctx, member) ::= "<ctx>.<subctx>.<member>"
IgnoredTests ::= [
"Visitors.Basic": true,
"Visitors.LR": true,
"Visitors.LRWithLabels": true,
"Visitors.RuleGetters_1": true,
"Visitors.RuleGetters_2": true,
"Visitors.TokenGetters_1": true,
"Visitors.TokenGetters_2": true,
default: false
]
isEmpty ::= [
"": true,
default: false

View File

@ -233,6 +233,12 @@ RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStac
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
this.Property = function() {

View File

@ -237,6 +237,12 @@ RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStac
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
this.Property = function() {

View File

@ -239,6 +239,12 @@ RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStac
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
this.Property = function() {

View File

@ -239,6 +239,12 @@ RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStac
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
this.Property = function() {

View File

@ -237,6 +237,12 @@ RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStac
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
this.Property = function() {

View File

@ -244,6 +244,12 @@ RuleInvocationStack() ::= "str_list(self.getRuleInvocationStack())"
LL_EXACT_AMBIG_DETECTION() ::= <<self._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
def Property(self):
@ -399,6 +405,14 @@ class LeafListener(TListener):
}
>>
ImportVisitor(X) ::= ""
BasicVisitor(X) ::= ""
WalkVisitor(s) ::= ""
LRWithLabelsVisitor(X) ::= ""
RuleGetterVisitor(X) ::= ""
LRVisitor(x) ::= ""
TokenGetterVisitor(x) ::= ""
DeclareContextListGettersFunction() ::= <<
def foo():
s = SContext()

View File

@ -249,6 +249,12 @@ RuleInvocationStack() ::= "str_list(self.getRuleInvocationStack())"
LL_EXACT_AMBIG_DETECTION() ::= <<self._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION>>
ParserToken(parser, token) ::= <%<parser>.<token>%>
Production(p) ::= <%<p>%>
Result(r) ::= <%<r>%>
ParserPropertyMember() ::= <<
@members {
def Property(self):
@ -384,6 +390,14 @@ class LeafListener(MockListener):
}
>>
ImportVisitor(X) ::= ""
BasicVisitor(X) ::= ""
WalkVisitor(s) ::= ""
LRWithLabelsVisitor(X) ::= ""
RuleGetterVisitor(X) ::= ""
LRVisitor(x) ::= ""
TokenGetterVisitor(x) ::= ""
DeclareContextListGettersFunction() ::= <<
def foo():
s = SContext()

View File

@ -21,8 +21,8 @@ grammar(grammarName) ::= <<
grammar <grammarName>;
s : e {<writeln("$e.v")>};
e returns [int v]
: e '*' e {$v = <Cast("BinaryContext","$ctx"):ContextMember("e(0)", "v")> * <Cast("BinaryContext","$ctx"):ContextMember("e(1)", "v")>;} # binary
| e '+' e {$v = <Cast("BinaryContext","$ctx"):ContextMember("e(0)", "v")> + <Cast("BinaryContext","$ctx"):ContextMember("e(1)", "v")>;} # binary
: e '*' e {$v = <Cast("BinaryContext","$ctx"):ContextMember("e(0)", "v")> * <Cast("BinaryContext","$ctx"):ContextMember({<Production("e")>(1)}, {<Result("v")>})>;} # binary
| e '+' e {$v = <Cast("BinaryContext","$ctx"):ContextMember("e(0)", "v")> + <Cast("BinaryContext","$ctx"):ContextMember({<Production("e")>(1)}, {<Result("v")>})>;} # binary
| INT {$v = $INT.int;} # anInt
| '(' e ')' {$v = $e.v;} # parens
| left=e INC {<Cast("UnaryContext","$ctx"):Concat(".INC() != null"):Assert()>$v = $left.v + 1;} # unary

View File

@ -29,6 +29,6 @@ grammar <grammarName>;
<DeclareContextListGettersFunction()>
}
s : (a | b)+;
a : 'a' {<write("'a'")>};
b : 'b' {<write("'b'")>};
a : 'a' {<write("\"a\"")>};
b : 'b' {<write("\"b\"")>};
>>

View File

@ -25,7 +25,7 @@ grammar(grammarName) ::= <<
grammar <grammarName>;
s : stmt EOF ;
stmt : ifStmt | ID;
ifStmt : 'if' ID stmt ('else' stmt | { <LANotEquals("1", {<grammarName><ParserTokenType("ELSE")>})> }?);
ifStmt : 'if' ID stmt ('else' stmt | { <LANotEquals("1", {<grammarName><ParserToken("Parser", "ELSE")>})> }?);
ELSE : 'else';
ID : [a-zA-Z]+;
WS : [ \\n\\t]+ -> skip;

View File

@ -24,7 +24,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {<InitIntMember("i","0")>}
@parser::members {<InitIntMember("i","0")>}
s : a+ ;
a : {<SetMember("i","1")>} ID {<MemberEquals("i","1")>}? {<writeln("\"alt 1\"")>}
| {<SetMember("i","2")>} ID {<MemberEquals("i","2")>}? {<writeln("\"alt 2\"")>}

View File

@ -27,7 +27,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {
@parser::members {
<Declare_pred()>
}
s : e {} {<True():Invoke_pred()>}? {<writeln("\"parse\"")>} '!' ;

View File

@ -25,7 +25,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {
@parser::members {
<Declare_pred()>
}
s : a[99] ;

View File

@ -18,7 +18,7 @@ file_
@after {<ToStringTree("$ctx"):writeln()>}
: para para EOF ;
para: paraContent NL NL ;
paraContent : ('s'|'x'|{<LANotEquals("2", {<grammarName><ParserTokenType("NL")>})>}? NL)+ ;
paraContent : ('s'|'x'|{<LANotEquals("2",{<grammarName><ParserToken("Parser", "NL")>})>}? NL)+ ;
NL : '\n' ;
s : 's' ;
X : 'x' ;

View File

@ -12,7 +12,7 @@ Rule() ::= "primary"
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {<InitBooleanMember("enumKeyword",True())>}
@parser::members {<InitBooleanMember("enumKeyword",True())>}
primary
: ID {<writeln("\"ID \"+$ID.text")>}
| {<GetMember("enumKeyword"):Not()>}? 'enum' {<writeln("\"enum\"")>}

View File

@ -28,7 +28,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {<InitIntMember("i","0")>}
@parser::members {<InitIntMember("i","0")>}
s : a[2] a[1];
a[int i]
: {<ValEquals("$i","1")>}? ID {<writeln("\"alt 1\"")>}

View File

@ -30,7 +30,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {<InitIntMember("i","0")>}
@parser::members {<InitIntMember("i","0")>}
s : a[2] a[1];
a[int i]
: {<ValEquals("$i","1")>}? ID

View File

@ -26,7 +26,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {
@parser::members {
<Declare_pred()>
}
s : e {<True():Invoke_pred()>}? {<writeln("\"parse\"")>} '!' ;

View File

@ -31,7 +31,7 @@ Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
@members {<InitIntMember("i","0")>}
@parser::members {<InitIntMember("i","0")>}
s : ({<AddMember("i","1")>
<writeList(["\"i=\"", "i"])>} a)+ ;
a : {<ModMemberEquals("i","2","0")>}? ID {<writeln("\"alt 1\"")>}

View File

@ -29,6 +29,13 @@
*/
package org.antlr.v4.testgen;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
import org.stringtemplate.v4.STGroupFile;
import org.stringtemplate.v4.StringRenderer;
import org.stringtemplate.v4.gui.STViz;
import org.stringtemplate.v4.misc.ErrorBuffer;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@ -39,14 +46,12 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
import org.stringtemplate.v4.STGroupFile;
import org.stringtemplate.v4.gui.STViz;
public class TestGenerator {
public final static String[] targets = {"Cpp", "CSharp", "Java", "Python2", "Python3", "JavaScript/Node", "JavaScript/Safari", "JavaScript/Firefox", "JavaScript/Explorer", "JavaScript/Chrome"};
public final static String[] targets = {
"Cpp", "Java", "Go", "CSharp", "Python2", "Python3",
"JavaScript/Node", "JavaScript/Safari", "JavaScript/Firefox",
"JavaScript/Explorer", "JavaScript/Chrome"
};
/** Execute from antlr4 root dir:
* *
@ -91,38 +96,41 @@ public class TestGenerator {
}
i++;
}
System.out.println("rootDir = " + rootDir);
System.out.println("outputDir = " + outDir);
System.out.println("templates = " + templatesRoot);
System.out.println("target = " + target);
System.out.println("browsers = " + browsers);
System.out.println("viz = " + viz);
if(rootDir==null) {
System.out.println("rootDir is mandatory!" + rootDir);
return;
}
if(outDir==null)
outDir = rootDir + "/test";
if(templatesRoot==null)
templatesRoot = rootDir + "/resources/org/antlr/v4/test/runtime/templates";
if ( "ALL".equalsIgnoreCase(target)) {
genAllTargets(rootDir, outDir, templatesRoot, browsers, viz);
} else
}
else {
genTarget(rootDir, outDir, target, templatesRoot, viz);
}
}
public static void genAllTargets(String rootDir, String outDirRoot, String templatesRoot, boolean browsers, boolean viz) {
for(String target : targets) {
if(!browsers && "JavaScript/Safari".equals(target))
if(!browsers && "JavaScript/Safari".equals(target)) {
return;
}
genTarget(rootDir, outDirRoot, target, templatesRoot, viz);
}
}
public static void genTarget(final String rootDir, final String outDir, final String fullTarget, final String templatesDir, boolean viz) {
String[] parts = fullTarget.split("/");
String target = parts[0];
@ -169,6 +177,7 @@ public class TestGenerator {
public void execute() {
STGroup targetGroup = new STGroupFile(runtimeTemplate.getPath());
targetGroup.registerModelAdaptor(STGroup.class, new STGroupModelAdaptor());
targetGroup.registerRenderer(String.class, new StringRenderer(), true);
targetGroup.defineDictionary("escape", new JavaEscapeStringMap());
targetGroup.defineDictionary("lines", new LinesStringMap());
targetGroup.defineDictionary("strlen", new StrlenStringMap());
@ -201,9 +210,12 @@ public class TestGenerator {
Collection<String> testTemplates,
File targetFolder)
{
ErrorBuffer errors = new ErrorBuffer();
targetGroup.setListener(errors);
String testName = testDir.substring(testDir.lastIndexOf('/') + 1);
File targetFile = new File(targetFolder, "Test" + testName + ".java");
info("Generating file "+targetFile.getAbsolutePath());
// System.out.println("Generating file "+targetFile.getAbsolutePath());
List<ST> templates = new ArrayList<ST>();
for (String template : testTemplates) {
STGroup testGroup = new STGroupFile(testDir + "/" + template + STGroup.GROUP_FILE_EXTENSION);
@ -225,7 +237,10 @@ public class TestGenerator {
}
ST testFileTemplate = targetGroup.getInstanceOf("TestFile");
testFileTemplate.addAggr("file.{Options,name,tests}", index.rawGetDictionary("Options"), testName, templates);
testFileTemplate.addAggr("file.{Options,name,tests}",
index.rawGetDictionary("Options"),
testName,
templates);
if (visualize) {
STViz viz = testFileTemplate.inspect();
@ -236,7 +251,11 @@ public class TestGenerator {
}
try {
writeFile(targetFile, testFileTemplate.render());
String output = testFileTemplate.render();
if ( errors.errors.size()>0 ) {
System.err.println("errors in "+targetName+": "+errors);
}
writeFile(targetFile, output);
}
catch (IOException ex) {
error(String.format("Failed to write output file: %s", targetFile), ex);
@ -280,17 +299,19 @@ public class TestGenerator {
}
public File getOutputDir(String templateFolder) {
if(templateFolder.startsWith(rootDir))
if(templateFolder.startsWith(rootDir)) {
templateFolder = templateFolder.substring(rootDir.length());
if(templateFolder.startsWith("/resources"))
}
if(templateFolder.startsWith("/resources")) {
templateFolder = templateFolder.substring("/resources".length());
}
templateFolder = templateFolder.substring(0, templateFolder.indexOf("/templates"));
templateFolder += "/" + targetName.toLowerCase();
return new File(outputDir, templateFolder);
}
protected void info(String message) {
// System.out.println("INFO: " + message);
System.out.println("INFO: " + message);
}
protected void warn(String message) {

View File

@ -63,8 +63,8 @@ public class TestParserErrors extends BaseCppTest {
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {std::cout << 'a';};\n");
grammarBuilder.append("b : 'b' {std::cout << 'b';};");
grammarBuilder.append("a : 'a' {std::cout << \"a\";};\n");
grammarBuilder.append("b : 'b' {std::cout << \"b\";};");
String grammar = grammarBuilder.toString();

View File

@ -81,9 +81,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testActionHidesPreds() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(231);
StringBuilder grammarBuilder = new StringBuilder(239);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {i = 1;} ID {i == 1}? {std::cout << \"alt 1\" << std::endl;}\n");
grammarBuilder.append(" | {i = 2;} ID {i == 2}? {std::cout << \"alt 2\" << std::endl;}\n");
@ -110,9 +110,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(308);
StringBuilder grammarBuilder = new StringBuilder(316);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("bool pred(bool v) {\n");
grammarBuilder.append(" std::cout << \"eval=\" << std::boolalpha << v << std::endl;\n");
grammarBuilder.append(" return v;\n");
@ -164,9 +164,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(335);
StringBuilder grammarBuilder = new StringBuilder(343);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("bool pred(bool v) {\n");
grammarBuilder.append(" std::cout << \"eval=\" << std::boolalpha << v << std::endl;\n");
grammarBuilder.append(" return v;\n");
@ -392,9 +392,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(222);
StringBuilder grammarBuilder = new StringBuilder(230);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {bool enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {bool enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {std::cout << \"ID \"+$ID.text << std::endl;}\n");
grammarBuilder.append(" | {!enumKeyword}? 'enum' {std::cout << \"enum\" << std::endl;}\n");
@ -417,9 +417,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(222);
StringBuilder grammarBuilder = new StringBuilder(230);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {bool enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {bool enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {std::cout << \"ID \"+$ID.text << std::endl;}\n");
grammarBuilder.append(" | {!enumKeyword}? 'enum' {std::cout << \"enum\" << std::endl;}\n");
@ -443,9 +443,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testPredicateDependentOnArg() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(230);
StringBuilder grammarBuilder = new StringBuilder(238);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i == 1}? ID {std::cout << \"alt 1\" << std::endl;}\n");
@ -472,9 +472,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testPredicateDependentOnArg2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(158);
StringBuilder grammarBuilder = new StringBuilder(166);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i == 1}? ID \n");
@ -499,9 +499,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(302);
StringBuilder grammarBuilder = new StringBuilder(310);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("bool pred(bool v) {\n");
grammarBuilder.append(" std::cout << \"eval=\" << std::boolalpha << v << std::endl;\n");
grammarBuilder.append(" return v;\n");
@ -669,9 +669,9 @@ public class TestSemPredEvalParser extends BaseCppTest {
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(271);
StringBuilder grammarBuilder = new StringBuilder(279);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : ({i += 1;\n");
grammarBuilder.append(" std::cout << \"i=\" << i << std::endl;} a)+ ;\n");
grammarBuilder.append("a : {i % 2 == 0}? ID {std::cout << \"alt 1\" << std::endl;}\n");

View File

@ -51,8 +51,8 @@ public class TestParserErrors extends BaseTest {
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {Console.Write('a');};\n");
grammarBuilder.append("b : 'b' {Console.Write('b');};");
grammarBuilder.append("a : 'a' {Console.Write(\"a\");};\n");
grammarBuilder.append("b : 'b' {Console.Write(\"b\");};");
String grammar = grammarBuilder.toString();
String input ="abab";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true);

View File

@ -69,9 +69,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testActionHidesPreds() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(237);
StringBuilder grammarBuilder = new StringBuilder(245);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {this.i = 1;} ID {this.i == 1}? {Console.WriteLine(\"alt 1\");}\n");
grammarBuilder.append(" | {this.i = 2;} ID {this.i == 2}? {Console.WriteLine(\"alt 2\");}\n");
@ -93,9 +93,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(304);
StringBuilder grammarBuilder = new StringBuilder(312);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("bool pred(bool v) {\n");
grammarBuilder.append(" Console.WriteLine(\"eval=\"+v.ToString().ToLower());\n");
grammarBuilder.append(" return v;\n");
@ -137,9 +137,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(327);
StringBuilder grammarBuilder = new StringBuilder(335);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("bool pred(bool v) {\n");
grammarBuilder.append(" Console.WriteLine(\"eval=\"+v.ToString().ToLower());\n");
grammarBuilder.append(" return v;\n");
@ -325,9 +325,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(213);
StringBuilder grammarBuilder = new StringBuilder(221);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {bool enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {bool enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {Console.WriteLine(\"ID \"+$ID.text);}\n");
grammarBuilder.append(" | {!this.enumKeyword}? 'enum' {Console.WriteLine(\"enum\");}\n");
@ -345,9 +345,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(213);
StringBuilder grammarBuilder = new StringBuilder(221);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {bool enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {bool enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {Console.WriteLine(\"ID \"+$ID.text);}\n");
grammarBuilder.append(" | {!this.enumKeyword}? 'enum' {Console.WriteLine(\"enum\");}\n");
@ -366,9 +366,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredicateDependentOnArg() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(212);
StringBuilder grammarBuilder = new StringBuilder(220);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID {Console.WriteLine(\"alt 1\");}\n");
@ -390,9 +390,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredicateDependentOnArg2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(154);
StringBuilder grammarBuilder = new StringBuilder(162);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID \n");
@ -412,9 +412,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(298);
StringBuilder grammarBuilder = new StringBuilder(306);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("bool pred(bool v) {\n");
grammarBuilder.append(" Console.WriteLine(\"eval=\"+v.ToString().ToLower());\n");
grammarBuilder.append(" return v;\n");
@ -552,9 +552,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(261);
StringBuilder grammarBuilder = new StringBuilder(269);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : ({this.i += 1;\n");
grammarBuilder.append("Console.WriteLine(\"i=\"+i);} a)+ ;\n");
grammarBuilder.append("a : {this.i % 2 == 0}? ID {Console.WriteLine(\"alt 1\");}\n");

File diff suppressed because it is too large

View File

@ -0,0 +1,66 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestCompositeLexers extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerDelegatorInvokesDelegateRule() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"lexer grammar S;\n" +
"A : 'a' {fmt.Println(\"S.A\")};\n" +
"C : 'c' ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(61);
grammarBuilder.append("lexer grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("B : 'b';\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execLexer("M.g4", grammar, "M", input, false);
assertEquals(
"S.A\n" +
"[@0,0:0='a',<3>,1:0]\n" +
"[@1,1:1='b',<1>,1:1]\n" +
"[@2,2:2='c',<4>,1:2]\n" +
"[@3,3:2='<EOF>',<-1>,1:3]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerDelegatorRuleOverridesDelegate() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"lexer grammar S;\n" +
"A : 'a' {fmt.Println(\"S.A\")} ;\n" +
"B : 'b' {fmt.Println(\"S.B\")} ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(85);
grammarBuilder.append("lexer grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("A : 'a' B {fmt.Println(\"M.A\")} ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="ab";
String found = execLexer("M.g4", grammar, "M", input, false);
assertEquals(
"M.A\n" +
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:1='<EOF>',<-1>,1:2]\n", found);
assertNull(this.stderrDuringParse);
}
}

View File

@ -0,0 +1,412 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import org.antlr.v4.tool.Grammar;
public class TestCompositeParsers extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testBringInLiteralsFromDelegate() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"a : '=' 'a' {fmt.Print(\"S.a\")};";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(54);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : a ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="=a";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("S.a\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testCombinedImportsCombined() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"tokens { A, B, C }\n" +
"x : 'x' INT {fmt.Println(\"S.x\")};\n" +
"INT : '0'..'9'+ ;\n" +
"WS : (' '|'\\n') -> skip ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(31);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : x INT;");
String grammar = grammarBuilder.toString();
String input ="x 34 9";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("S.x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatesSeeSameTokenType() throws Exception {
mkdir(parserpkgdir);
String slave_T =
"parser grammar T;\n" +
"tokens { C, B, A } // reverse order\n" +
"y : A {fmt.Println(\"T.y\")};";
writeFile(parserpkgdir, "T.g4", slave_T);
String slave_S =
"parser grammar S;\n" +
"tokens { A, B, C }\n" +
"x : A {fmt.Println(\"S.x\")};";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(598);
grammarBuilder.append("// The lexer will create rules to match letters a, b, c.\n");
grammarBuilder.append("// The associated token types A, B, C must have the same value\n");
grammarBuilder.append("// and all import'd parsers. Since ANTLR regenerates all imports\n");
grammarBuilder.append("// for use with the delegator M, it can generate the same token type\n");
grammarBuilder.append("// mapping in each parser:\n");
grammarBuilder.append("// public static final int C=6;\n");
grammarBuilder.append("// public static final int EOF=-1;\n");
grammarBuilder.append("// public static final int B=5;\n");
grammarBuilder.append("// public static final int WS=7;\n");
grammarBuilder.append("// public static final int A=4;\n");
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S,T;\n");
grammarBuilder.append("s : x y ; // matches AA, which should be 'aa'\n");
grammarBuilder.append("B : 'b' ; // another order: B, A, C\n");
grammarBuilder.append("A : 'a' ; \n");
grammarBuilder.append("C : 'c' ; \n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="aa";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals(
"S.x\n" +
"T.y\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorAccessesDelegateMembers() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"@parser::members {\n" +
"func foo() {\n" +
" fmt.Println(\"foo\")\n" +
"}\n" +
"}\n" +
"a : B;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(121);
grammarBuilder.append("grammar M; // uses no rules from the import\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : 'b' {foo()} ; // gS is import pointer\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("foo\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorInvokesDelegateRule() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"a : B {fmt.Println(\"S.a\")};";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(104);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : a ;\n");
grammarBuilder.append("B : 'b' ; // defines B from inherited token space\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("S.a\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"a[int x] returns [int y] : B {fmt.Print(\"S.a\")} {$y=1000;} ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(137);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : label=a[3] {fmt.Println($label.y)} ;\n");
grammarBuilder.append("B : 'b' ; // defines B from inherited token space\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("S.a1000\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"a : B {fmt.Print(\"S.a\")} ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(125);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : a {fmt.Print($a.text)} ;\n");
grammarBuilder.append("B : 'b' ; // defines B from inherited token space\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("S.ab\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception {
mkdir(parserpkgdir);
String slave_T =
"parser grammar T;\n" +
"a : B {fmt.Println(\"T.a\")};";
writeFile(parserpkgdir, "T.g4", slave_T);
String slave_S =
"parser grammar S;\n" +
"a : b {fmt.Println(\"S.a\")};\n" +
"b : B;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(106);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S,T;\n");
grammarBuilder.append("s : a ;\n");
grammarBuilder.append("B : 'b' ; // defines B from inherited token space\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("S.a\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorRuleOverridesDelegate() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"a : b {fmt.Print(\"S.a\")};\n" +
"b : B ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(59);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("b : 'b'|'c';\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="c";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "a", input, false);
assertEquals("S.a\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorRuleOverridesDelegates() throws Exception {
mkdir(parserpkgdir);
String slave_T =
"parser grammar T;\n" +
"tokens { A }\n" +
"b : 'b' {fmt.Println(\"T.b\")};";
writeFile(parserpkgdir, "T.g4", slave_T);
String slave_S =
"parser grammar S;\n" +
"a : b {fmt.Println(\"S.a\")};\n" +
"b : 'b' ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(87);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S, T;\n");
grammarBuilder.append("b : 'b'|'c' {fmt.Println(\"M.b\")}|B|A;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="c";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "a", input, false);
assertEquals(
"M.b\n" +
"S.a\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"type_ : 'int' ;\n" +
"decl : type_ ID ';'\n" +
" | type_ ID init ';' {fmt.Print(\"JavaDecl: \" + $text)};\n" +
"init : '=' INT;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(121);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("prog : decl ;\n");
grammarBuilder.append("type_ : 'int' | 'float' ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="float x = 3;";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "prog", input, false);
assertEquals("JavaDecl: floatx=3;\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testImportLexerWithOnlyFragmentRules() throws Exception {
mkdir(parserpkgdir);
String slave_Unicode =
"lexer grammar Unicode;\n" +
"\n" +
"fragment\n" +
"UNICODE_CLASS_Zs : '\\u0020' | '\\u00A0' | '\\u1680' | '\\u180E'\n" +
" | '\\u2000'..'\\u200A'\n" +
" | '\\u202F' | '\\u205F' | '\\u3000'\n" +
" ;\n";
writeFile(parserpkgdir, "Unicode.g4", slave_Unicode);
StringBuilder grammarBuilder = new StringBuilder(91);
grammarBuilder.append("grammar Test;\n");
grammarBuilder.append("import Unicode;\n");
grammarBuilder.append("\n");
grammarBuilder.append("program : 'test' 'test';\n");
grammarBuilder.append("\n");
grammarBuilder.append("WS : (UNICODE_CLASS_Zs)+ -> skip;\n");
String grammar = grammarBuilder.toString();
String input ="test test";
String found = execParser("Test.g4", grammar, "TestParser", "TestLexer",
"TestListener", "TestVisitor", "program", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testImportedGrammarWithEmptyOptions() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"options {}\n" +
"a : B ;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(64);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : a ;\n");
grammarBuilder.append("B : 'b' ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testImportedRuleWithAction() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"parser grammar S;\n" +
"a @after {var x int = 0; var _ int = x; } : B;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(62);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("s : a;\n");
grammarBuilder.append("B : 'b';\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "s", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testKeywordVSIDOrder() throws Exception {
mkdir(parserpkgdir);
String slave_S =
"lexer grammar S;\n" +
"ID : 'a'..'z'+;";
writeFile(parserpkgdir, "S.g4", slave_S);
StringBuilder grammarBuilder = new StringBuilder(125);
grammarBuilder.append("grammar M;\n");
grammarBuilder.append("import S;\n");
grammarBuilder.append("a : A {fmt.Println(\"M.a: \" + fmt.Sprint($A))};\n");
grammarBuilder.append("A : 'abc' {fmt.Println(\"M.A\")};\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("M.g4", grammar, "MParser", "MLexer",
"MListener", "MVisitor", "a", input, false);
assertEquals(
"M.A\n" +
"M.a: [@0,0:2='abc',<1>,1:0]\n", found);
assertNull(this.stderrDuringParse);
}
}
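
The composite-grammar tests above drive the generated code only through the harness's execParser helper. As a rough illustration of what that amounts to for the Go target, the sketch below runs the parser generated from M.g4 (which imports S.g4) by hand. The package name and the NewMLexer / NewMParser / S() identifiers are assumptions about the Go target's usual generated names, not something taken from this commit.

package main

import (
    "github.com/antlr/antlr4/runtime/Go/antlr"

    "parser" // hypothetical package holding the generated MLexer and MParser
)

func main() {
    // Same input the tests feed to start rule 's'.
    input := antlr.NewInputStream("b")
    lexer := parser.NewMLexer(input)
    stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    p := parser.NewMParser(stream)
    p.S() // rule 's' of M.g4; the action inherited from S.g4 prints "S.a"
}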

View File

@@ -0,0 +1,446 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestFullContextParsing extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAmbigYieldsCtxSensitiveDFA() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(97);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {p.DumpDFA()}\n");
grammarBuilder.append(" : ID | ID {} ;\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 0:\n" +
"s0-ID->:s1^=>1\n", found);
assertEquals("line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAmbiguityNoLoop() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(217);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("prog\n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append(" : expr expr {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("expr: '@'\n");
grammarBuilder.append(" | ID '@'\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\r\\n\\t]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a@";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "prog", input, true);
assertEquals("alt 1\n", found);
assertEquals(
"line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" +
"line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" +
"line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" +
"line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testCtxSensitiveDFATwoDiffInput() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(161);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {p.DumpDFA()}\n");
grammarBuilder.append(" : ('$' a | '@' b)+ ;\n");
grammarBuilder.append("a : e ID ;\n");
grammarBuilder.append("b : e INT ID ;\n");
grammarBuilder.append("e : INT | ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="$ 34 abc @ 34 abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 2:\n" +
"s0-INT->s1\n" +
"s1-ID->:s2^=>1\n", found);
assertEquals(
"line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" +
"line 1:2 reportContextSensitivity d=2 (e), input='34'\n" +
"line 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\n" +
"line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testCtxSensitiveDFA_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(158);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {p.DumpDFA()}\n");
grammarBuilder.append(" : '$' a | '@' b ;\n");
grammarBuilder.append("a : e ID ;\n");
grammarBuilder.append("b : e INT ID ;\n");
grammarBuilder.append("e : INT | ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="$ 34 abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-INT->s1\n" +
"s1-ID->:s2^=>1\n", found);
assertEquals(
"line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" +
"line 1:2 reportContextSensitivity d=1 (e), input='34'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testCtxSensitiveDFA_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(158);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {p.DumpDFA()}\n");
grammarBuilder.append(" : '$' a | '@' b ;\n");
grammarBuilder.append("a : e ID ;\n");
grammarBuilder.append("b : e INT ID ;\n");
grammarBuilder.append("e : INT | ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="@ 34 abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-INT->s1\n" +
"s1-ID->:s2^=>1\n", found);
assertEquals(
"line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" +
"line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testExprAmbiguity_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(293);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append(": expr[0] {fmt.Println($expr.ctx.ToStringTree(nil, p))};\n");
grammarBuilder.append(" expr[int _p]\n");
grammarBuilder.append(" : ID \n");
grammarBuilder.append(" ( \n");
grammarBuilder.append(" {5 >= $_p}? '*' expr[6]\n");
grammarBuilder.append(" | {4 >= $_p}? '+' expr[5]\n");
grammarBuilder.append(" )*\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : [a-zA-Z]+ ;\n");
grammarBuilder.append("WS : [ \\r\\n\\t]+ -> skip ;\n");
String grammar = grammarBuilder.toString();
String input ="a+b";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals("(expr a + (expr b))\n", found);
assertEquals(
"line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" +
"line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testExprAmbiguity_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(293);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append(": expr[0] {fmt.Println($expr.ctx.ToStringTree(nil, p))};\n");
grammarBuilder.append(" expr[int _p]\n");
grammarBuilder.append(" : ID \n");
grammarBuilder.append(" ( \n");
grammarBuilder.append(" {5 >= $_p}? '*' expr[6]\n");
grammarBuilder.append(" | {4 >= $_p}? '+' expr[5]\n");
grammarBuilder.append(" )*\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : [a-zA-Z]+ ;\n");
grammarBuilder.append("WS : [ \\r\\n\\t]+ -> skip ;\n");
String grammar = grammarBuilder.toString();
String input ="a+b*c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals("(expr a + (expr b * (expr c)))\n", found);
assertEquals(
"line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" +
"line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" +
"line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" +
"line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testFullContextIF_THEN_ELSEParse_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(242);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s \n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append("@after {p.DumpDFA()}\n");
grammarBuilder.append(" : '{' stat* '}' ;\n");
grammarBuilder.append("stat: 'if' ID 'then' stat ('else' ID)?\n");
grammarBuilder.append(" | 'return'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="{ if x then return }";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-'}'->:s1=>2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testFullContextIF_THEN_ELSEParse_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(242);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s \n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append("@after {p.DumpDFA()}\n");
grammarBuilder.append(" : '{' stat* '}' ;\n");
grammarBuilder.append("stat: 'if' ID 'then' stat ('else' ID)?\n");
grammarBuilder.append(" | 'return'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="{ if x then return else foo }";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-'else'->:s1^=>1\n", found);
assertEquals(
"line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testFullContextIF_THEN_ELSEParse_3() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(242);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s \n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append("@after {p.DumpDFA()}\n");
grammarBuilder.append(" : '{' stat* '}' ;\n");
grammarBuilder.append("stat: 'if' ID 'then' stat ('else' ID)?\n");
grammarBuilder.append(" | 'return'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="{ if x then if y then return else foo }";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-'}'->:s2=>2\n" +
"s0-'else'->:s1^=>1\n", found);
assertEquals(
"line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testFullContextIF_THEN_ELSEParse_4() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(242);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s \n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append("@after {p.DumpDFA()}\n");
grammarBuilder.append(" : '{' stat* '}' ;\n");
grammarBuilder.append("stat: 'if' ID 'then' stat ('else' ID)?\n");
grammarBuilder.append(" | 'return'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="{ if x then if y then return else foo else bar }";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-'else'->:s1^=>1\n", found);
assertEquals(
"line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" +
"line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testFullContextIF_THEN_ELSEParse_5() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(242);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s \n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append("@after {p.DumpDFA()}\n");
grammarBuilder.append(" : '{' stat* '}' ;\n");
grammarBuilder.append("stat: 'if' ID 'then' stat ('else' ID)?\n");
grammarBuilder.append(" | 'return'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input =
"{ if x then return else foo\n" +
"if x then if y then return else foo }";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-'}'->:s2=>2\n" +
"s0-'else'->:s1^=>1\n", found);
assertEquals(
"line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" +
"line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testFullContextIF_THEN_ELSEParse_6() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(242);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s \n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append("@after {p.DumpDFA()}\n");
grammarBuilder.append(" : '{' stat* '}' ;\n");
grammarBuilder.append("stat: 'if' ID 'then' stat ('else' ID)?\n");
grammarBuilder.append(" | 'return'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input =
"{ if x then return else foo\n" +
"if x then if y then return else foo }";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 1:\n" +
"s0-'}'->:s2=>2\n" +
"s0-'else'->:s1^=>1\n", found);
assertEquals(
"line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" +
"line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" +
"line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLoopsSimulateTailRecursion() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(316);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("prog\n");
grammarBuilder.append("@init {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);}\n");
grammarBuilder.append(" : expr_or_assign*;\n");
grammarBuilder.append("expr_or_assign\n");
grammarBuilder.append(" : expr '++' {fmt.Println(\"fail.\")}\n");
grammarBuilder.append(" | expr {fmt.Println(\"pass: \"+$expr.text)}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("expr: expr_primary ('<-' ID)?;\n");
grammarBuilder.append("expr_primary\n");
grammarBuilder.append(" : '(' ID ')'\n");
grammarBuilder.append(" | ID '(' ID ')'\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : [a-z]+ ;");
String grammar = grammarBuilder.toString();
String input ="a(i)<-x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "prog", input, true);
assertEquals("pass: a(i)<-x\n", found);
assertEquals(
"line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" +
"line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSLLSeesEOFInLLGrammar() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(145);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {p.DumpDFA()}\n");
grammarBuilder.append(" : a;\n");
grammarBuilder.append("a : e ID ;\n");
grammarBuilder.append("b : e INT ID ;\n");
grammarBuilder.append("e : INT | ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\t'|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="34 abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"Decision 0:\n" +
"s0-INT->s1\n" +
"s1-ID->:s2^=>1\n", found);
assertEquals(
"line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" +
"line 1:0 reportContextSensitivity d=0 (e), input='34'\n", this.stderrDuringParse);
}
}
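
Every test in this class relies on two hooks that the embedded actions call on the generated parser: p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection), which forces exact ambiguity detection, and p.DumpDFA(), which prints the decision DFAs the assertions compare against; the reportAttemptingFullContext / reportAmbiguity lines checked on stderr come from the diagnostic error listener the harness attaches when execParser's final argument is true. The same calls can be issued from driver code instead of @init/@after actions. A minimal sketch, assuming a generated TParser and a token stream built as in the earlier example:

p := parser.NewTParser(stream) // hypothetical generated parser for grammar T
p.BuildParseTrees = true
// Force full-context (LL) prediction with exact ambiguity reporting.
p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)
p.S()       // start rule; ambiguity reports go to the attached error listeners
p.DumpDFA() // prints one "Decision N:" block per decision DFA, the text asserted above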

File diff suppressed because it is too large

View File

@@ -0,0 +1,234 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestLexerErrors extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDFAToATNThatFailsBackToDFA() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(39);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'ab' ;\n");
grammarBuilder.append("B : 'abc' ;");
String grammar = grammarBuilder.toString();
String input ="ababx";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:3='ab',<1>,1:2]\n" +
"[@2,5:4='<EOF>',<-1>,1:5]\n", found);
assertEquals("line 1:4 token recognition error at: 'x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDFAToATNThatMatchesThenFailsInATN() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(52);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'ab' ;\n");
grammarBuilder.append("B : 'abc' ;\n");
grammarBuilder.append("C : 'abcd' ;");
String grammar = grammarBuilder.toString();
String input ="ababcx";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:4='abc',<2>,1:2]\n" +
"[@2,6:5='<EOF>',<-1>,1:6]\n", found);
assertEquals("line 1:5 token recognition error at: 'x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testEnforcedGreedyNestedBrances_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(77);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ACTION : '{' (ACTION | ~[{}])* '}';\n");
grammarBuilder.append("WS : [ \\r\\n\\t]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="{ { } }";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"[@0,0:6='{ { } }',<1>,1:0]\n" +
"[@1,7:6='<EOF>',<-1>,1:7]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testEnforcedGreedyNestedBrances_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(77);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ACTION : '{' (ACTION | ~[{}])* '}';\n");
grammarBuilder.append("WS : [ \\r\\n\\t]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="{ { }";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals("[@0,5:4='<EOF>',<-1>,1:5]\n", found);
assertEquals("line 1:0 token recognition error at: '{ { }'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testErrorInMiddle() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(28);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'abc' ;");
String grammar = grammarBuilder.toString();
String input ="abx";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals("[@0,3:2='<EOF>',<-1>,1:3]\n", found);
assertEquals("line 1:0 token recognition error at: 'abx'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testInvalidCharAtStart() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(30);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'a' 'b' ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals("[@0,1:0='<EOF>',<-1>,1:1]\n", found);
assertEquals("line 1:0 token recognition error at: 'x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testInvalidCharAtStartAfterDFACache() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(30);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'a' 'b' ;");
String grammar = grammarBuilder.toString();
String input ="abx";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n", found);
assertEquals("line 1:2 token recognition error at: 'x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testInvalidCharInToken() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(30);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'a' 'b' ;");
String grammar = grammarBuilder.toString();
String input ="ax";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals("[@0,2:1='<EOF>',<-1>,1:2]\n", found);
assertEquals("line 1:0 token recognition error at: 'ax'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testInvalidCharInTokenAfterDFACache() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(30);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("A : 'a' 'b' ;");
String grammar = grammarBuilder.toString();
String input ="abax";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,4:3='<EOF>',<-1>,1:4]\n", found);
assertEquals("line 1:2 token recognition error at: 'ax'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerExecDFA() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(98);
grammarBuilder.append("grammar L;\n");
grammarBuilder.append("start : ID ':' expr;\n");
grammarBuilder.append("expr : primary expr? {} | expr '->' ID;\n");
grammarBuilder.append("primary : ID;\n");
grammarBuilder.append("ID : [a-z]+;");
String grammar = grammarBuilder.toString();
String input ="x : x";
String found = execLexer("L.g4", grammar, "LLexer", input, false);
assertEquals(
"[@0,0:0='x',<3>,1:0]\n" +
"[@1,2:2=':',<1>,1:2]\n" +
"[@2,4:4='x',<3>,1:4]\n" +
"[@3,5:4='<EOF>',<-1>,1:5]\n", found);
assertEquals(
"line 1:1 token recognition error at: ' '\n" +
"line 1:3 token recognition error at: ' '\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testStringsEmbeddedInActions_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(109);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ACTION2 : '[' (STRING | ~'\"')*? ']';\n");
grammarBuilder.append("STRING : '\"' ('\\\"' | .)*? '\"';\n");
grammarBuilder.append("WS : [ \\t\\r\\n]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="[\"foo\"]";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"[@0,0:6='[\"foo\"]',<1>,1:0]\n" +
"[@1,7:6='<EOF>',<-1>,1:7]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testStringsEmbeddedInActions_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(109);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ACTION2 : '[' (STRING | ~'\"')*? ']';\n");
grammarBuilder.append("STRING : '\"' ('\\\"' | .)*? '\"';\n");
grammarBuilder.append("WS : [ \\t\\r\\n]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="[\"foo]";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals("[@0,6:5='<EOF>',<-1>,1:6]\n", found);
assertEquals("line 1:0 token recognition error at: '[\"foo]'\n", this.stderrDuringParse);
}
}
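
The expected strings in these lexer tests are the String() form of each emitted token, [@index,start:stop='text',<type>,line:col], with recognition errors collected separately from stderr. A sketch of producing the same token dump for lexer grammar L is shown below; the NewL constructor name and printing the token via fmt.Println are assumptions about the generated Go code and the runtime's token stringer, not taken from this commit.

// dumpTokens lexes the given text with the (hypothetical) generated lexer L
// and prints each token in the [@...] form used by the assertions above.
// Assumes: import "fmt" and "github.com/antlr/antlr4/runtime/Go/antlr".
func dumpTokens(text string) {
    lexer := NewL(antlr.NewInputStream(text))
    for {
        t := lexer.NextToken()
        fmt.Println(t) // prints as [@i,start:stop='text',<type>,line:col]
        if t.GetTokenType() == antlr.TokenEOF {
            break
        }
    }
}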

File diff suppressed because it is too large

View File

@@ -0,0 +1,363 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestListeners extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testBasic() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(505);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) VisitTerminal(node antlr.TerminalNode) {\n");
grammarBuilder.append(" fmt.Println(node.GetSymbol().GetText())\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : INT INT\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1 2";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a 1 2)\n" +
"1\n" +
"2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLR() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(677);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitE(ctx *EContext) {\n");
grammarBuilder.append(" if ctx.GetChildCount() == 3 {\n");
grammarBuilder.append(" fmt.Printf(\"%s %s %s\\n\", ctx.E(0).GetStart().GetText(), ctx.E(1).GetStart().GetText(), ctx.AllE()[0].GetStart().GetText())\n");
grammarBuilder.append(" } else {\n");
grammarBuilder.append(" fmt.Println(ctx.INT().GetSymbol().GetText())\n");
grammarBuilder.append(" }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=e ;\n");
grammarBuilder.append("e : e op='*' e\n");
grammarBuilder.append(" | e op='+' e\n");
grammarBuilder.append(" | INT\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1+2*3";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(e (e 1) + (e (e 2) * (e 3)))\n" +
"1\n" +
"2\n" +
"3\n" +
"2 3 2\n" +
"1 2 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLRWithLabels() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(685);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitCall(ctx *CallContext) {\n");
grammarBuilder.append(" fmt.Printf(\"%s %s\", ctx.E().GetStart().GetText(), ctx.EList().String(nil, nil))\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitInt(ctx *IntContext) {\n");
grammarBuilder.append(" fmt.Println(ctx.INT().GetSymbol().GetText())\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=e ;\n");
grammarBuilder.append("e : e '(' eList ')' # Call\n");
grammarBuilder.append(" | INT # Int\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("eList : e (',' e)* ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1(2,3)";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(e (e 1) ( (eList (e 2) , (e 3)) ))\n" +
"1\n" +
"2\n" +
"3\n" +
"1 [13 6]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testRuleGetters_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(700);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitA(ctx *AContext) {\n");
grammarBuilder.append(" if ctx.GetChildCount() == 2 {\n");
grammarBuilder.append(" fmt.Printf(\"%s %s %s\", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText())\n");
grammarBuilder.append(" } else {\n");
grammarBuilder.append(" fmt.Println(ctx.B(0).GetStart().GetText())\n");
grammarBuilder.append(" }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : b b // forces list\n");
grammarBuilder.append(" | b // a list still\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : ID | INT;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1 2";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a (b 1) (b 2))\n" +
"1 2 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testRuleGetters_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(700);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitA(ctx *AContext) {\n");
grammarBuilder.append(" if ctx.GetChildCount() == 2 {\n");
grammarBuilder.append(" fmt.Printf(\"%s %s %s\", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText())\n");
grammarBuilder.append(" } else {\n");
grammarBuilder.append(" fmt.Println(ctx.B(0).GetStart().GetText())\n");
grammarBuilder.append(" }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : b b // forces list\n");
grammarBuilder.append(" | b // a list still\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : ID | INT;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a (b abc))\n" +
"abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testTokenGetters_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(693);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitA(ctx *AContext) {\n");
grammarBuilder.append(" if ctx.GetChildCount() == 2 {\n");
grammarBuilder.append(" fmt.Printf(\"%s %s %s\", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT())))\n");
grammarBuilder.append(" } else {\n");
grammarBuilder.append(" fmt.Println(ctx.ID().GetSymbol())\n");
grammarBuilder.append(" }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : INT INT\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1 2";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a 1 2)\n" +
"1 2 [1, 2]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testTokenGetters_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(693);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("type LeafListener struct {\n");
grammarBuilder.append(" *BaseTListener\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewLeafListener() *LeafListener {\n");
grammarBuilder.append(" return &LeafListener{BaseTListener: &BaseTListener{}}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (*LeafListener) ExitA(ctx *AContext) {\n");
grammarBuilder.append(" if ctx.GetChildCount() == 2 {\n");
grammarBuilder.append(" fmt.Printf(\"%s %s %s\", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT())))\n");
grammarBuilder.append(" } else {\n");
grammarBuilder.append(" fmt.Println(ctx.ID().GetSymbol())\n");
grammarBuilder.append(" }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("var walker = antlr.NewParseTreeWalker()\n");
grammarBuilder.append("\n");
grammarBuilder.append("walker.Walk(NewLeafListener(), $ctx.r)\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : INT INT\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a abc)\n" +
"[@0,0:2='abc',<4>,1:0]\n", found);
assertNull(this.stderrDuringParse);
}
}

View File

@@ -0,0 +1,277 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestParseTrees extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void test2AltLoop() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(134);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : ('x' | 'y')* 'z'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyyxyxz";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a x y y x y x z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void test2Alts() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(127);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : 'x' | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a y)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAltNum() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(640);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("options { contextSuperClass=MyRuleNode; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("\n");
grammarBuilder.append("type MyRuleNode struct {\n");
grammarBuilder.append(" *antlr.BaseParserRuleContext\n");
grammarBuilder.append("\n");
grammarBuilder.append(" altNum int\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func NewMyRuleNode(parent antlr.ParserRuleContext, invokingStateNumber int) *MyRuleNode {\n");
grammarBuilder.append(" return &MyRuleNode{\n");
grammarBuilder.append(" BaseParserRuleContext : antlr.NewBaseParserRuleContext(parent, invokingStateNumber),\n");
grammarBuilder.append(" }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (m *MyRuleNode) GetAltNumber() int {\n");
grammarBuilder.append(" return m.altNum\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("func (m *MyRuleNode) SetAltNumber(altNum int) {\n");
grammarBuilder.append(" m.altNum = altNum\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("a : 'f'\n");
grammarBuilder.append(" | 'g'\n");
grammarBuilder.append(" | 'x' b 'z'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'e' {} | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyz";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a:3 x (b:2 y) z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testExtraToken() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(140);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : 'x' 'y'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("Z : 'z' \n");
grammarBuilder.append(" ;\n");
grammarBuilder.append(" ");
String grammar = grammarBuilder.toString();
String input ="xzy";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a x z y)\n", found);
assertEquals("line 1:1 extraneous input 'z' expecting 'y'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNoViableAlt() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(142);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : 'x' | 'y'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("Z : 'z' \n");
grammarBuilder.append(" ;\n");
grammarBuilder.append(" ");
String grammar = grammarBuilder.toString();
String input ="z";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a z)\n", found);
assertEquals("line 1:0 mismatched input 'z' expecting {'x', 'y'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testRuleRef() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(136);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : b 'x'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'y' \n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="yx";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a (b y) x)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSync() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(143);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : 'x' 'y'* '!'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("Z : 'z' \n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xzyy!";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a x z y y !)\n", found);
assertEquals("line 1:1 extraneous input 'z' expecting {'y', '!'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testToken2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(125);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : 'x' 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xy";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("(a x y)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testTokenAndRuleContextString() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(194);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("p.BuildParseTrees = true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($r.ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : 'x' { \n");
grammarBuilder.append("fmt.Println(antlr.PrintArrayJavaStyle(p.GetRuleInvocationStack(nil)))\n");
grammarBuilder.append("} ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"[a, s]\n" +
"(a x)\n", found);
assertNull(this.stderrDuringParse);
}
}

View File

@@ -0,0 +1,592 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestParserErrors extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testConjuringUpToken() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(74);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' x='b' {fmt.Println(\"conjured=\" + fmt.Sprint($x))} 'c' ;");
String grammar = grammarBuilder.toString();
String input ="ac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("conjured=[@-1,-1:-1='<missing 'b'>',<2>,1:1]\n", found);
assertEquals("line 1:1 missing 'b' at 'c'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testConjuringUpTokenFromSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(80);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' x=('b'|'c') {fmt.Println(\"conjured=\" + fmt.Sprint($x))} 'd' ;");
String grammar = grammarBuilder.toString();
String input ="ad";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("conjured=[@-1,-1:-1='<missing 'b'>',<2>,1:1]\n", found);
assertEquals("line 1:1 missing {'b', 'c'} at 'd'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testContextListGetters() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(175);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members{\n");
grammarBuilder.append("func foo() {\n");
grammarBuilder.append(" // TODO\n");
grammarBuilder.append(" // var s SContext\n");
grammarBuilder.append(" // var a = s.A()\n");
grammarBuilder.append(" // var b = s.B()\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {fmt.Print(\"a\")};\n");
grammarBuilder.append("b : 'b' {fmt.Print(\"b\")};");
String grammar = grammarBuilder.toString();
String input ="abab";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals("abab\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDuplicatedLeftRecursiveCall_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(63);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : expr EOF;\n");
grammarBuilder.append("expr : 'x'\n");
grammarBuilder.append(" | expr expr\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, true);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDuplicatedLeftRecursiveCall_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(63);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : expr EOF;\n");
grammarBuilder.append("expr : 'x'\n");
grammarBuilder.append(" | expr expr\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xx";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, true);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDuplicatedLeftRecursiveCall_3() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(63);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : expr EOF;\n");
grammarBuilder.append("expr : 'x'\n");
grammarBuilder.append(" | expr expr\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xxx";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, true);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDuplicatedLeftRecursiveCall_4() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(63);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : expr EOF;\n");
grammarBuilder.append("expr : 'x'\n");
grammarBuilder.append(" | expr expr\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xxxx";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, true);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testInvalidATNStateRemoval() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(98);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : ID ':' expr;\n");
grammarBuilder.append("expr : primary expr? {} | expr '->' ID;\n");
grammarBuilder.append("primary : ID;\n");
grammarBuilder.append("ID : [a-z]+;");
String grammar = grammarBuilder.toString();
String input ="x:x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testInvalidEmptyInput() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(36);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : ID+;\n");
grammarBuilder.append("ID : [a-z]+;");
String grammar = grammarBuilder.toString();
String input ="";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, true);
assertEquals("", found);
assertEquals("line 1:0 missing ID at '<EOF>'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLL1ErrorInfo() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(314);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : animal (AND acClass)? service EOF;\n");
grammarBuilder.append("animal : (DOG | CAT );\n");
grammarBuilder.append("service : (HARDWARE | SOFTWARE) ;\n");
grammarBuilder.append("AND : 'and';\n");
grammarBuilder.append("DOG : 'dog';\n");
grammarBuilder.append("CAT : 'cat';\n");
grammarBuilder.append("HARDWARE: 'hardware';\n");
grammarBuilder.append("SOFTWARE: 'software';\n");
grammarBuilder.append("WS : ' ' -> skip ;\n");
grammarBuilder.append("acClass\n");
grammarBuilder.append("@init\n");
grammarBuilder.append("{fmt.Println(p.GetExpectedTokens().StringVerbose(p.GetTokenNames(), nil, false))}\n");
grammarBuilder.append(" : ;");
String grammar = grammarBuilder.toString();
String input ="dog and software";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals("{'hardware', 'software'}\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLL2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(46);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b'\n");
grammarBuilder.append(" | 'a' 'c'\n");
grammarBuilder.append(";\n");
grammarBuilder.append("q : 'e' ;");
String grammar = grammarBuilder.toString();
String input ="ae";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 no viable alternative at input 'ae'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLL3() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(55);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b'* 'c'\n");
grammarBuilder.append(" | 'a' 'b' 'd'\n");
grammarBuilder.append(";\n");
grammarBuilder.append("q : 'e' ;");
String grammar = grammarBuilder.toString();
String input ="abe";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:2 no viable alternative at input 'abe'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLLStar() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(48);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a'+ 'b'\n");
grammarBuilder.append(" | 'a'+ 'c'\n");
grammarBuilder.append(";\n");
grammarBuilder.append("q : 'e' ;");
String grammar = grammarBuilder.toString();
String input ="aaae";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:3 no viable alternative at input 'aaae'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testMultiTokenDeletionBeforeLoop() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(28);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b'* 'c';");
String grammar = grammarBuilder.toString();
String input ="aacabc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testMultiTokenDeletionBeforeLoop2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(36);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' ('b'|'z'{})* 'c';");
String grammar = grammarBuilder.toString();
String input ="aacabc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testMultiTokenDeletionDuringLoop() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(29);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b'* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="abaaababc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals(
"line 1:2 extraneous input 'a' expecting {'b', 'c'}\n" +
"line 1:6 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testMultiTokenDeletionDuringLoop2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(37);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' ('b'|'z'{})* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="abaaababc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals(
"line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n" +
"line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNoViableAltAvoidance() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(83);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : e '!' ;\n");
grammarBuilder.append("e : 'a' 'b'\n");
grammarBuilder.append(" | 'a'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("DOT : '.' ;\n");
grammarBuilder.append("WS : [ \\t\\r\\n]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="a.";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("", found);
assertEquals("line 1:1 mismatched input '.' expecting '!'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleSetInsertion() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(34);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' ('b'|'c') 'd' ;");
String grammar = grammarBuilder.toString();
String input ="ad";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 missing {'b', 'c'} at 'd'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleSetInsertionConsumption() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(93);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("myset: ('b'|'c') ;\n");
grammarBuilder.append("a: 'a' myset 'd' {fmt.Println(\"\" + fmt.Sprint($myset.stop))} ; ");
String grammar = grammarBuilder.toString();
String input ="ad";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("[@0,0:0='a',<3>,1:0]\n", found);
assertEquals("line 1:1 missing {'b', 'c'} at 'd'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletion() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(24);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b' ;");
String grammar = grammarBuilder.toString();
String input ="aab";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 extraneous input 'a' expecting 'b'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionBeforeAlt() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(38);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ('b' | 'c')\n");
grammarBuilder.append(";\n");
grammarBuilder.append("q : 'a'\n");
grammarBuilder.append(";");
String grammar = grammarBuilder.toString();
String input ="ac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:0 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionBeforeLoop() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(25);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b'* ;");
String grammar = grammarBuilder.toString();
String input ="aabc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals(
"line 1:1 extraneous input 'a' expecting {<EOF>, 'b'}\n" +
"line 1:3 token recognition error at: 'c'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionBeforeLoop2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(32);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' ('b'|'z'{})*;");
String grammar = grammarBuilder.toString();
String input ="aabc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals(
"line 1:1 extraneous input 'a' expecting {<EOF>, 'b', 'z'}\n" +
"line 1:3 token recognition error at: 'c'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionBeforePredict() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(48);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a'+ 'b'\n");
grammarBuilder.append(" | 'a'+ 'c'\n");
grammarBuilder.append(";\n");
grammarBuilder.append("q : 'e' ;");
String grammar = grammarBuilder.toString();
String input ="caaab";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:0 extraneous input 'c' expecting 'a'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionConsumption() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(93);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("myset: ('b'|'c') ;\n");
grammarBuilder.append("a: 'a' myset 'd' {fmt.Println(\"\" + fmt.Sprint($myset.stop))} ; ");
String grammar = grammarBuilder.toString();
String input ="aabd";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("[@2,2:2='b',<1>,1:2]\n", found);
assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionDuringLoop() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(29);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b'* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="ababbc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:2 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionDuringLoop2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(37);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' ('b'|'z'{})* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="ababbc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenDeletionExpectingSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(30);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' ('b'|'c') ;");
String grammar = grammarBuilder.toString();
String input ="aab";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSingleTokenInsertion() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(28);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b' 'c' ;");
String grammar = grammarBuilder.toString();
String input ="ac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 missing 'b' at 'c'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testTokenMismatch() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(24);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : 'a' 'b' ;");
String grammar = grammarBuilder.toString();
String input ="aa";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertEquals("line 1:1 mismatched input 'a' expecting 'b'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testTokenMismatch2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(165);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("stat: ( '(' expr? ')' )? EOF ;\n");
grammarBuilder.append("expr: ID '=' STR ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("ERR : '~FORCE_ERROR~' ;\n");
grammarBuilder.append("ID : [a-zA-Z]+ ;\n");
grammarBuilder.append("STR : '\"' ~[\"]* '\"' ;\n");
grammarBuilder.append("WS : [ \\t\\r\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="( ~FORCE_ERROR~ ";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "stat", input, false);
assertEquals("", found);
assertEquals("line 1:2 mismatched input '~FORCE_ERROR~' expecting ')'\n", this.stderrDuringParse);
}
}

View File

@@ -0,0 +1,661 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestParserExec extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAPlus() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(83);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ID+ {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="a b c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAStar_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(83);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ID* {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAStar_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(83);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ID* {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="a b c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorAPlus() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(88);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|ID)+ {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="a b c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorAStar_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(88);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|ID)* {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorAStar_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(88);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|ID)* {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="a b c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorB() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(134);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ID {\n");
grammarBuilder.append("fmt.Println(\"alt 1\")\n");
grammarBuilder.append("} | INT {\n");
grammarBuilder.append("fmt.Println(\"alt 2\")\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="34";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("alt 2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorBPlus() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(111);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|INT{\n");
grammarBuilder.append("})+ {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a 34 c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("a34c\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorBStar_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(111);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|INT{\n");
grammarBuilder.append("})* {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAorBStar_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(111);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|INT{\n");
grammarBuilder.append("})* {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a 34 c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("a34c\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testBasic() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(104);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ID INT {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="abc 34";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abc34\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testEOFInClosure() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(53);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("prog : stat EOF;\n");
grammarBuilder.append("stat : 'x' ('y' | EOF)*?;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "prog", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIfIfElseGreedyBinding1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : statement+ ;\n");
grammarBuilder.append("statement : 'x' | ifStatement;\n");
grammarBuilder.append("ifStatement : 'if' 'y' statement ('else' statement)? {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> channel(HIDDEN);");
String grammar = grammarBuilder.toString();
String input ="if y if y x else x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals(
"if y x else x\n" +
"if y if y x else x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIfIfElseGreedyBinding2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : statement+ ;\n");
grammarBuilder.append("statement : 'x' | ifStatement;\n");
grammarBuilder.append("ifStatement : 'if' 'y' statement ('else' statement|) {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> channel(HIDDEN);");
String grammar = grammarBuilder.toString();
String input ="if y if y x else x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals(
"if y x else x\n" +
"if y if y x else x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIfIfElseNonGreedyBinding1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(193);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : statement+ ;\n");
grammarBuilder.append("statement : 'x' | ifStatement;\n");
grammarBuilder.append("ifStatement : 'if' 'y' statement ('else' statement)?? {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> channel(HIDDEN);");
String grammar = grammarBuilder.toString();
String input ="if y if y x else x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals(
"if y x\n" +
"if y if y x else x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIfIfElseNonGreedyBinding2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : statement+ ;\n");
grammarBuilder.append("statement : 'x' | ifStatement;\n");
grammarBuilder.append("ifStatement : 'if' 'y' statement (|'else' statement) {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> channel(HIDDEN);");
String grammar = grammarBuilder.toString();
String input ="if y if y x else x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals(
"if y x\n" +
"if y if y x else x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLL1OptionalBlock_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(109);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|{}INT)? {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLL1OptionalBlock_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(109);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|{}INT)? {\n");
grammarBuilder.append("fmt.Println($text)\n");
grammarBuilder.append("};\n");
grammarBuilder.append("ID : 'a'..'z'+;\n");
grammarBuilder.append("INT : '0'..'9'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="a";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("a\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLabelAliasingAcrossLabeledAlternatives() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(169);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : a* EOF;\n");
grammarBuilder.append("a\n");
grammarBuilder.append(" : label=subrule {fmt.Println($label.text)} #One\n");
grammarBuilder.append(" | label='y' {fmt.Println($label.text)} #Two\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("subrule : 'x';\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="xy";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals(
"x\n" +
"y\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLabels() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(118);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : b1=b b2+=b* b3+=';' ;\n");
grammarBuilder.append("b : id_=ID val+=INT*;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc 34;";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testListLabelForClosureContext() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(420);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("ifStatement\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : 'if' expression\n");
grammarBuilder.append(" ( ( 'then'\n");
grammarBuilder.append(" executableStatement*\n");
grammarBuilder.append(" elseIfStatement* // <--- problem is here; should yield a list not node\n");
grammarBuilder.append(" elseStatement?\n");
grammarBuilder.append(" 'end' 'if'\n");
grammarBuilder.append(" ) | executableStatement )\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("elseIfStatement\n");
grammarBuilder.append(" : 'else' 'if' expression 'then' executableStatement*\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("expression : 'a' ;\n");
grammarBuilder.append("executableStatement : 'a' ;\n");
grammarBuilder.append("elseStatement : 'a' ;");
String grammar = grammarBuilder.toString();
String input ="a";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "expression", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testListLabelsOnSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(140);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : b b* ';' ;\n");
grammarBuilder.append("b : ID val+=(INT | FLOAT)*;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("FLOAT : [0-9]+ '.' [0-9]+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc 34;";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testMultipleEOFHandling() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(42);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("prog : ('x' | 'x' 'y') EOF EOF;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "prog", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptional_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(90);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("stat : ifstat | 'x';\n");
grammarBuilder.append("ifstat : 'if' stat ('else' stat)?;\n");
grammarBuilder.append("WS : [ \\n\\t]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "stat", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptional_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(90);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("stat : ifstat | 'x';\n");
grammarBuilder.append("ifstat : 'if' stat ('else' stat)?;\n");
grammarBuilder.append("WS : [ \\n\\t]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="if x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "stat", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptional_3() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(90);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("stat : ifstat | 'x';\n");
grammarBuilder.append("ifstat : 'if' stat ('else' stat)?;\n");
grammarBuilder.append("WS : [ \\n\\t]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="if x else x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "stat", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptional_4() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(90);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("stat : ifstat | 'x';\n");
grammarBuilder.append("ifstat : 'if' stat ('else' stat)?;\n");
grammarBuilder.append("WS : [ \\n\\t]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="if if x else x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "stat", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testParserProperty() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(181);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("func (p *TParser) Property() bool {\n");
grammarBuilder.append(" return true\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("a : {$parser.Property()}? ID {fmt.Println(\"valid\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("valid\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredicatedIfIfElse() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(183);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : stmt EOF ;\n");
grammarBuilder.append("stmt : ifStmt | ID;\n");
grammarBuilder.append("ifStmt : 'if' ID stmt ('else' stmt | { p.GetTokenStream().LA(1) != TParserELSE }?);\n");
grammarBuilder.append("ELSE : 'else';\n");
grammarBuilder.append("ID : [a-zA-Z]+;\n");
grammarBuilder.append("WS : [ \\n\\t]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="if x if x a else b";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredictionIssue334() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(255);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("file_ @init{\n");
grammarBuilder.append("p.SetErrorHandler(antlr.NewBailErrorStrategy())\n");
grammarBuilder.append("} \n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : item (SEMICOLON item)* SEMICOLON? EOF ;\n");
grammarBuilder.append("item : A B?;\n");
grammarBuilder.append("SEMICOLON: ';';\n");
grammarBuilder.append("A : 'a'|'A';\n");
grammarBuilder.append("B : 'b'|'B';\n");
grammarBuilder.append("WS : [ \\r\\t\\n]+ -> skip;");
String grammar = grammarBuilder.toString();
String input ="a";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "file_", input, false);
assertEquals("(file_ (item a) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testReferenceToATN_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(112);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|ATN)* ATN? {fmt.Println($text)} ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("ATN : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testReferenceToATN_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(112);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (ID|ATN)* ATN? {fmt.Println($text)} ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("ATN : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a 34 c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("a34c\n", found);
assertNull(this.stderrDuringParse);
}
}

View File

@@ -0,0 +1,209 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestPerformance extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test(timeout = 60000)
public void testExpressionGrammar_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(164);
grammarBuilder.append("grammar Expr;\n");
grammarBuilder.append("\n");
grammarBuilder.append("program: expr EOF;\n");
grammarBuilder.append("\n");
grammarBuilder.append("expr\n");
grammarBuilder.append(" : ID\n");
grammarBuilder.append(" | 'not' expr\n");
grammarBuilder.append(" | expr 'and' expr\n");
grammarBuilder.append(" | expr 'or' expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("ID: [a-zA-Z_][a-zA-Z_0-9]*;\n");
grammarBuilder.append("WS: [ \\t\\n\\r\\f]+ -> skip;\n");
grammarBuilder.append("ERROR: .;");
String grammar = grammarBuilder.toString();
String input =
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12";
String found = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer",
"ExprListener", "ExprVisitor", "program", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test(timeout = 60000)
public void testExpressionGrammar_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(164);
grammarBuilder.append("grammar Expr;\n");
grammarBuilder.append("\n");
grammarBuilder.append("program: expr EOF;\n");
grammarBuilder.append("\n");
grammarBuilder.append("expr\n");
grammarBuilder.append(" : ID\n");
grammarBuilder.append(" | 'not' expr\n");
grammarBuilder.append(" | expr 'and' expr\n");
grammarBuilder.append(" | expr 'or' expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("ID: [a-zA-Z_][a-zA-Z_0-9]*;\n");
grammarBuilder.append("WS: [ \\t\\n\\r\\f]+ -> skip;\n");
grammarBuilder.append("ERROR: .;");
String grammar = grammarBuilder.toString();
String input =
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
" X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" +
"not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12";
String found = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer",
"ExprListener", "ExprVisitor", "program", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
}


@ -0,0 +1,203 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestSemPredEvalLexer extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDisableRule() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(131);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("E1 : 'enum' { false }? ;\n");
grammarBuilder.append("E2 : 'enum' { true }? ; // winner not E1 or ID\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="enum abc";
String found = execLexer("L.g4", grammar, "L", input, true);
assertEquals(
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<3>,1:5]\n" +
"[@2,8:7='<EOF>',<-1>,1:8]\n" +
"s0-' '->:s5=>4\n" +
"s0-'a'->:s6=>3\n" +
"s0-'e'->:s1=>3\n" +
":s1=>3-'n'->:s2=>3\n" +
":s2=>3-'u'->:s3=>3\n" +
":s6=>3-'b'->:s6=>3\n" +
":s6=>3-'c'->:s6=>3\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testEnumNotID() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(100);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ENUM : [a-z]+ { p.GetText() == \"enum\" }? ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="enum abc enum";
String found = execLexer("L.g4", grammar, "L", input, true);
assertEquals(
"[@0,0:3='enum',<1>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<1>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s3=>3\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIDnotEnum() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(84);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ENUM : [a-z]+ { false }? ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="enum abc enum";
String found = execLexer("L.g4", grammar, "L", input, true);
assertEquals(
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<2>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s2=>3\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIDvsEnum() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(85);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ENUM : 'enum' { false }? ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input ="enum abc enum";
String found = execLexer("L.g4", grammar, "L", input, true);
assertEquals(
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<2>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s5=>3\n" +
"s0-'a'->:s4=>2\n" +
"s0-'e'->:s1=>2\n" +
":s1=>2-'n'->:s2=>2\n" +
":s2=>2-'u'->:s3=>2\n" +
":s4=>2-'b'->:s4=>2\n" +
":s4=>2-'c'->:s4=>2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIndent() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(139);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("INDENT : [ \\t]+ { p.TokenStartColumn == 0 }?\n");
grammarBuilder.append(" { fmt.Println(\"INDENT\") } ;\n");
grammarBuilder.append("NL : '\\n';\n");
grammarBuilder.append("WS : [ \\t]+ ;");
String grammar = grammarBuilder.toString();
String input =
"abc\n" +
" def \n";
String found = execLexer("L.g4", grammar, "L", input, true);
assertEquals(
"INDENT\n" +
"[@0,0:2='abc',<1>,1:0]\n" +
"[@1,3:3='\\n',<3>,1:3]\n" +
"[@2,4:5=' ',<2>,2:0]\n" +
"[@3,6:8='def',<1>,2:2]\n" +
"[@4,9:10=' ',<4>,2:5]\n" +
"[@5,11:11='\\n',<3>,2:7]\n" +
"[@6,12:11='<EOF>',<-1>,3:0]\n" +
"s0-'\n" +
"'->:s2=>3\n" +
"s0-'a'->:s1=>1\n" +
"s0-'d'->:s1=>1\n" +
":s1=>1-'b'->:s1=>1\n" +
":s1=>1-'c'->:s1=>1\n" +
":s1=>1-'e'->:s1=>1\n" +
":s1=>1-'f'->:s1=>1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerInputPositionSensitivePredicates() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(250);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("WORD1 : ID1+ { fmt.Println(l.GetText()) } ;\n");
grammarBuilder.append("WORD2 : ID2+ { fmt.Println(l.GetText()) } ;\n");
grammarBuilder.append("fragment ID1 : { p.GetCharPositionInLine() < 2 }? [a-zA-Z];\n");
grammarBuilder.append("fragment ID2 : { p.GetCharPositionInLine() >= 2 }? [a-zA-Z];\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip;");
String grammar = grammarBuilder.toString();
String input =
"a cde\n" +
"abcde\n";
String found = execLexer("L.g4", grammar, "L", input, true);
assertEquals(
"a\n" +
"cde\n" +
"ab\n" +
"cde\n" +
"[@0,0:0='a',<1>,1:0]\n" +
"[@1,2:4='cde',<2>,1:2]\n" +
"[@2,6:7='ab',<1>,2:0]\n" +
"[@3,8:10='cde',<2>,2:2]\n" +
"[@4,12:11='<EOF>',<-1>,3:0]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredicatedKeywords() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(160);
grammarBuilder.append("lexer grammar L;\n");
grammarBuilder.append("ENUM : [a-z]+ { p.GetText() == \"enum\" }? { fmt.Println(\"enum!\") } ;\n");
grammarBuilder.append("ID : [a-z]+ { fmt.Println(\"ID \" + l.GetText()) } ;\n");
grammarBuilder.append("WS : [ \\n] -> skip ;");
String grammar = grammarBuilder.toString();
String input ="enum enu a";
String found = execLexer("L.g4", grammar, "L", input, false);
assertEquals(
"enum!\n" +
"ID enu\n" +
"ID a\n" +
"[@0,0:3='enum',<1>,1:0]\n" +
"[@1,5:7='enu',<2>,1:5]\n" +
"[@2,9:9='a',<2>,1:9]\n" +
"[@3,10:9='<EOF>',<-1>,1:10]\n", found);
assertNull(this.stderrDuringParse);
}
}


@ -0,0 +1,660 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestSemPredEvalParser extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void test2UnpredicatedAlts() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(299);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);} a ';' a; // do 2x: once in ATN, next in DFA\n");
grammarBuilder.append("a : ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" | {false}? ID {fmt.Println(\"alt 3\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x; y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"alt 1\n" +
"alt 1\n", found);
assertEquals(
"line 1:0 reportAttemptingFullContext d=0 (a), input='x'\n" +
"line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\n" +
"line 1:3 reportAttemptingFullContext d=0 (a), input='y'\n" +
"line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void test2UnpredicatedAltsAndOneOrthogonalAlt() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(350);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : {p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection);} a ';' a ';' a;\n");
grammarBuilder.append("a : INT {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | ID {fmt.Println(\"alt 2\")} // must pick this one for ID since pred is false\n");
grammarBuilder.append(" | ID {fmt.Println(\"alt 3\")}\n");
grammarBuilder.append(" | {false}? ID {fmt.Println(\"alt 4\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="34; x; y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, true);
assertEquals(
"alt 1\n" +
"alt 2\n" +
"alt 2\n", found);
assertEquals(
"line 1:4 reportAttemptingFullContext d=0 (a), input='x'\n" +
"line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\n" +
"line 1:7 reportAttemptingFullContext d=0 (a), input='y'\n" +
"line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testActionHidesPreds() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(231);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {var i int = 0; var _ int = i; }\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {i = 1;} ID {i == 1}? {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {i = 2;} ID {i == 2}? {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x x y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 1\n" +
"alt 1\n" +
"alt 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(286);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("func pred(v bool) bool {\n");
grammarBuilder.append(" fmt.Println(\"eval=\" + fmt.Sprint(v))\n");
grammarBuilder.append("\n");
grammarBuilder.append(" return v\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : e {} {pred(true)}? {fmt.Println(\"parse\")} '!' ;\n");
grammarBuilder.append("t : e {} {pred(false)}? ID ;\n");
grammarBuilder.append("e : ID | ; // non-LL(1) so we use ATN\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a!";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"eval=true\n" +
"parse\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAtomWithClosureInTranslatedLRRule() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(94);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("start : e[0] EOF;\n");
grammarBuilder.append("e[int _p]\n");
grammarBuilder.append(" : ( 'a' | 'b'+ ) ( {3 >= $_p}? '+' e[4] )*\n");
grammarBuilder.append(" ;\n");
String grammar = grammarBuilder.toString();
String input ="a+b+a";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "start", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(313);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("func pred(v bool) bool {\n");
grammarBuilder.append(" fmt.Println(\"eval=\" + fmt.Sprint(v))\n");
grammarBuilder.append("\n");
grammarBuilder.append(" return v\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : a[99] ;\n");
grammarBuilder.append("a[int i] : e {pred($i == 99)}? {fmt.Println(\"parse\")} '!' ;\n");
grammarBuilder.append("b[int i] : e {pred($i == 99)}? ID ;\n");
grammarBuilder.append("e : ID | ; // non-LL(1) so we use ATN\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a!";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"eval=true\n" +
"parse\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDependentPredNotInOuterCtxShouldBeIgnored() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(272);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a;\n");
grammarBuilder.append("b[int i] : a[i] ;\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i == 1}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {$i == 2}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;\n");
String grammar = grammarBuilder.toString();
String input ="a;";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("alt 2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testDisabledAlternative() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(121);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("cppCompilationUnit : content+ EOF;\n");
grammarBuilder.append("content: anything | {false}? .;\n");
grammarBuilder.append("anything: ANY_CHAR;\n");
grammarBuilder.append("ANY_CHAR: [_a-zA-Z0-9];");
String grammar = grammarBuilder.toString();
String input ="hello";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "cppCompilationUnit", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testIndependentPredNotPassedOuterCtxToAvoidCastException() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(181);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : b ';' | b '.' ;\n");
grammarBuilder.append("b : a ;\n");
grammarBuilder.append("a\n");
grammarBuilder.append(" : {false}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a;";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("alt 2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNoTruePredsThrowsNoViableAlt() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(169);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a a;\n");
grammarBuilder.append("a : {false}? ID INT {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {false}? ID INT {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="y 3 x 4";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("", found);
assertEquals("line 1:0 no viable alternative at input 'y'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOrder() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(295);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a {} a; // do 2x: once in ATN, next in DFA;\n");
grammarBuilder.append("// action blocks lookahead from falling off of 'a'\n");
grammarBuilder.append("// and looking into 2nd 'a' ref. !ctx dependent pred\n");
grammarBuilder.append("a : ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 1\n" +
"alt 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredFromAltTestedInLoopBack_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(213);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("file_\n");
grammarBuilder.append("@after {fmt.Println($ctx.ToStringTree(nil, p))}\n");
grammarBuilder.append(" : para para EOF ;\n");
grammarBuilder.append("para: paraContent NL NL ;\n");
grammarBuilder.append("paraContent : ('s'|'x'|{p.GetTokenStream().LA(2) != TParserNL}? NL)+ ;\n");
grammarBuilder.append("NL : '\\n' ;\n");
grammarBuilder.append("s : 's' ;\n");
grammarBuilder.append("X : 'x' ;");
String grammar = grammarBuilder.toString();
String input =
"s\n" +
"\n" +
"\n" +
"x\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "file_", input, true);
assertEquals("(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) <EOF>)\n", found);
assertEquals(
"line 5:0 mismatched input '<EOF>' expecting '\n" +
"'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredFromAltTestedInLoopBack_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(213);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("file_\n");
grammarBuilder.append("@after {fmt.Println($ctx.ToStringTree(nil, p))}\n");
grammarBuilder.append(" : para para EOF ;\n");
grammarBuilder.append("para: paraContent NL NL ;\n");
grammarBuilder.append("paraContent : ('s'|'x'|{p.GetTokenStream().LA(2) != TParserNL}? NL)+ ;\n");
grammarBuilder.append("NL : '\\n' ;\n");
grammarBuilder.append("s : 's' ;\n");
grammarBuilder.append("X : 'x' ;");
String grammar = grammarBuilder.toString();
String input =
"s\n" +
"\n" +
"\n" +
"x\n" +
"\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "file_", input, true);
assertEquals("(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(233);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {var enumKeyword bool = true; var _ bool = enumKeyword; }\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {fmt.Println(\"ID \"+$ID.text)}\n");
grammarBuilder.append(" | {!enumKeyword}? 'enum' {fmt.Println(\"enum\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n\\r]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "primary", input, false);
assertEquals("ID abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(233);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {var enumKeyword bool = true; var _ bool = enumKeyword; }\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {fmt.Println(\"ID \"+$ID.text)}\n");
grammarBuilder.append(" | {!enumKeyword}? 'enum' {fmt.Println(\"enum\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n\\r]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="enum";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "primary", input, false);
assertEquals("", found);
assertEquals("line 1:0 no viable alternative at input 'enum'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredicateDependentOnArg() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(230);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {var i int = 0; var _ int = i; }\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i == 1}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {$i == 2}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a b";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 2\n" +
"alt 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredicateDependentOnArg2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(186);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {var i int = 0; var _ int = i; }\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i == 1}? ID \n");
grammarBuilder.append(" | {$i == 2}? ID \n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a b";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(280);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("func pred(v bool) bool {\n");
grammarBuilder.append(" fmt.Println(\"eval=\" + fmt.Sprint(v))\n");
grammarBuilder.append("\n");
grammarBuilder.append(" return v\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : e {pred(true)}? {fmt.Println(\"parse\")} '!' ;\n");
grammarBuilder.append("t : e {pred(false)}? ID ;\n");
grammarBuilder.append("e : ID | ; // non-LL(1) so we use ATN\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="a!";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"eval=true\n" +
"parse\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testRewindBeforePredEval() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(241);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a a;\n");
grammarBuilder.append("a : {p.GetTokenStream().LT(1).GetText() == \"x\"}? ID INT {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {p.GetTokenStream().LT(1).GetText() == \"y\"}? ID INT {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="y 3 x 4";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 2\n" +
"alt 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSimple() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(253);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a a a; // do 3x: once in ATN, next in DFA then INT in ATN\n");
grammarBuilder.append("a : {false}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" | INT {fmt.Println(\"alt 3\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x y 3";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 2\n" +
"alt 2\n" +
"alt 3\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSimpleValidate() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(162);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a ;\n");
grammarBuilder.append("a : {false}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? INT {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("", found);
assertEquals("line 1:0 no viable alternative at input 'x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSimpleValidate2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(165);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a a a;\n");
grammarBuilder.append("a : {false}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? INT {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="3 4 x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 2\n" +
"alt 2\n", found);
assertEquals("line 1:4 no viable alternative at input 'x'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testToLeft() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(162);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append(" s : a+ ;\n");
grammarBuilder.append("a : {false}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x x y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"alt 2\n" +
"alt 2\n" +
"alt 2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(252);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::members {var i int = 0; var _ int = i; }\n");
grammarBuilder.append("s : ({i += 1;\n");
grammarBuilder.append("fmt.Print(\"i=\"+i);} a)+ ;\n");
grammarBuilder.append("a : {i % 2 == 0}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {i % 2 != 0}? ID {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x x y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"i=1\n" +
"alt 2\n" +
"i=2\n" +
"alt 1\n" +
"i=3\n" +
"alt 2\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testUnpredicatedPathsInAlt() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(181);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | b {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("a : {false}? ID INT\n");
grammarBuilder.append(" | ID INT\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : ID ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x 4";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("alt 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testValidateInDFA() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(330);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s : a ';' a;\n");
grammarBuilder.append("// ';' helps us to resynchronize without consuming\n");
grammarBuilder.append("// 2nd 'a' reference. We our testing that the DFA also\n");
grammarBuilder.append("// throws an exception if the validating predicate fails\n");
grammarBuilder.append("a : {false}? ID {fmt.Println(\"alt 1\")}\n");
grammarBuilder.append(" | {true}? INT {fmt.Println(\"alt 2\")}\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("ID : 'a'..'z'+ ;\n");
grammarBuilder.append("INT : '0'..'9'+;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="x ; y";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals("", found);
assertEquals(
"line 1:0 no viable alternative at input 'x'\n" +
"line 1:4 no viable alternative at input 'y'\n", this.stderrDuringParse);
}
}


@ -0,0 +1,384 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestSets extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testCharSetLiteral() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(84);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : (A {fmt.Println($A.text)})+ ;\n");
grammarBuilder.append("A : [AaBb] ;\n");
grammarBuilder.append("WS : (' '|'\\n')+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="A a B b";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals(
"A\n" +
"a\n" +
"B\n" +
"b\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testComplementSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(51);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("parse : ~NEW_LINE;\n");
grammarBuilder.append("NEW_LINE: '\\r'? '\\n';");
String grammar = grammarBuilder.toString();
String input ="a";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "parse", input, false);
assertEquals("", found);
assertEquals(
"line 1:0 token recognition error at: 'a'\n" +
"line 1:1 missing {} at '<EOF>'\n", this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerOptionalSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(86);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : ('a'|'b')? 'c' ;");
String grammar = grammarBuilder.toString();
String input ="ac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("ac\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerPlusSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(86);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : ('a'|'b')+ 'c' ;");
String grammar = grammarBuilder.toString();
String input ="abaac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abaac\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testLexerStarSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(86);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : ('a'|'b')* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="abaac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abaac\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNotChar() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(52);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println($A.text)} ;\n");
grammarBuilder.append("A : ~'b' ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNotCharSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(58);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println($A.text)} ;\n");
grammarBuilder.append("A : ~('b'|'c') ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNotCharSetWithLabel() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(60);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println($A.text)} ;\n");
grammarBuilder.append("A : h=~('b'|'c') ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testNotCharSetWithRuleRef3() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(124);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println($A.text)} ;\n");
grammarBuilder.append("A : ('a'|B) ; // this doesn't collapse to set but works\n");
grammarBuilder.append("fragment\n");
grammarBuilder.append("B : ~('a'|'c') ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptionalLexerSingleElement() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(80);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : 'b'? 'c' ;");
String grammar = grammarBuilder.toString();
String input ="bc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("bc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptionalSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(78);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ('a'|'b')? 'c' {fmt.Println(p.GetTokenStream().GetAllText())} ;");
String grammar = grammarBuilder.toString();
String input ="ac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("ac\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testOptionalSingleElement() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(80);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A? 'c' {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : 'b' ;");
String grammar = grammarBuilder.toString();
String input ="bc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("bc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testParserNotSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(56);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : t=~('x'|'y') 'z' {fmt.Println($t.text)} ;");
String grammar = grammarBuilder.toString();
String input ="zz";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("z\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testParserNotToken() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(72);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ~'x' 'z' {fmt.Println(p.GetTokenStream().GetAllText())} ;");
String grammar = grammarBuilder.toString();
String input ="zz";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("zz\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testParserNotTokenWithLabel() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(50);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : t=~'x' 'z' {fmt.Println($t.text)} ;");
String grammar = grammarBuilder.toString();
String input ="zz";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("z\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testParserSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(51);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : t=('x'|'y') {fmt.Println($t.text)} ;");
String grammar = grammarBuilder.toString();
String input ="x";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("x\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPlusLexerSingleElement() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(80);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : 'b'+ 'c' ;");
String grammar = grammarBuilder.toString();
String input ="bbbbc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("bbbbc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPlusSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(78);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ('a'|'b')+ 'c' {fmt.Println(p.GetTokenStream().GetAllText())} ;");
String grammar = grammarBuilder.toString();
String input ="abaac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abaac\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testRuleAsSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(85);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a @after {fmt.Println(p.GetTokenStream().GetAllText())} : 'a' | 'b' |'c' ;");
String grammar = grammarBuilder.toString();
String input ="b";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("b\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testSeqDoesNotBecomeSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(122);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : C {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("fragment A : '1' | '2';\n");
grammarBuilder.append("fragment B : '3' '4';\n");
grammarBuilder.append("C : A | B;");
String grammar = grammarBuilder.toString();
String input ="34";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("34\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testStarLexerSingleElement_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(80);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : 'b'* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="bbbbc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("bbbbc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testStarLexerSingleElement_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(80);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : A {fmt.Println(p.GetTokenStream().GetAllText())} ;\n");
grammarBuilder.append("A : 'b'* 'c' ;");
String grammar = grammarBuilder.toString();
String input ="c";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("c\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testStarSet() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(78);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("a : ('a'|'b')* 'c' {fmt.Println(p.GetTokenStream().GetAllText())} ;");
String grammar = grammarBuilder.toString();
String input ="abaac";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "a", input, false);
assertEquals("abaac\n", found);
assertNull(this.stderrDuringParse);
}
}

View File

@ -0,0 +1,268 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.go;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestVisitors extends BaseTest {
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testBasic() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(218);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : INT INT\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1 2";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a 1 2)\n" +
"[ '1', '2' ]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testLR() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(233);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=e ;\n");
grammarBuilder.append("e : e op='*' e\n");
grammarBuilder.append(" | e op='+' e\n");
grammarBuilder.append(" | INT\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1+2*3";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(e (e 1) + (e (e 2) * (e 3)))\n" +
"1,,2,,32 3 21 2 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testLRWithLabels() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(273);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=e ;\n");
grammarBuilder.append("e : e '(' eList ')' # Call\n");
grammarBuilder.append(" | INT # Int\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("eList : e (',' e)* ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1(2,3)";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(e (e 1) ( (eList (e 2) , (e 3)) ))\n" +
"1,,2,,3,1 [13 6]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testRuleGetters_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(260);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : b b // forces list\n");
grammarBuilder.append(" | b // a list still\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : ID | INT;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1 2";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a (b 1) (b 2))\n" +
",1 2 1\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testRuleGetters_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(260);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : b b // forces list\n");
grammarBuilder.append(" | b // a list still\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : ID | INT;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a (b abc))\n" +
"abc\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testTokenGetters_1() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(218);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : INT INT\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="1 2";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a 1 2)\n" +
",1 2 [1, 2]\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
@Ignore("true")
public void testTokenGetters_2() throws Exception {
mkdir(parserpkgdir);
StringBuilder grammarBuilder = new StringBuilder(218);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("fmt.Println($ctx.r.ToStringTree(nil, p))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("a : INT INT\n");
grammarBuilder.append(" | ID\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("MULT: '*' ;\n");
grammarBuilder.append("ADD : '+' ;\n");
grammarBuilder.append("INT : [0-9]+ ;\n");
grammarBuilder.append("ID : [a-z]+ ;\n");
grammarBuilder.append("WS : [ \\t\\n]+ -> skip ;");
String grammar = grammarBuilder.toString();
String input ="abc";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor", "s", input, false);
assertEquals(
"(a abc)\n" +
"[@0,0:2='abc',<4>,1:0]\n", found);
assertNull(this.stderrDuringParse);
}
}

View File

@ -61,8 +61,8 @@ public class TestParserErrors extends BaseTest {
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {System.out.print('a');};\n");
grammarBuilder.append("b : 'b' {System.out.print('b');};");
grammarBuilder.append("a : 'a' {System.out.print(\"a\");};\n");
grammarBuilder.append("b : 'b' {System.out.print(\"b\");};");
String grammar = grammarBuilder.toString();

View File

@ -79,9 +79,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testActionHidesPreds() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(239);
StringBuilder grammarBuilder = new StringBuilder(247);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {this.i = 1;} ID {this.i == 1}? {System.out.println(\"alt 1\");}\n");
grammarBuilder.append(" | {this.i = 2;} ID {this.i == 2}? {System.out.println(\"alt 2\");}\n");
@ -107,9 +107,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(291);
StringBuilder grammarBuilder = new StringBuilder(299);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("boolean pred(boolean v) {\n");
grammarBuilder.append(" System.out.println(\"eval=\"+v);\n");
grammarBuilder.append(" return v;\n");
@ -159,9 +159,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(314);
StringBuilder grammarBuilder = new StringBuilder(322);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("boolean pred(boolean v) {\n");
grammarBuilder.append(" System.out.println(\"eval=\"+v);\n");
grammarBuilder.append(" return v;\n");
@ -379,9 +379,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(218);
StringBuilder grammarBuilder = new StringBuilder(226);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {boolean enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {boolean enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {System.out.println(\"ID \"+$ID.text);}\n");
grammarBuilder.append(" | {!this.enumKeyword}? 'enum' {System.out.println(\"enum\");}\n");
@ -403,9 +403,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(218);
StringBuilder grammarBuilder = new StringBuilder(226);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {boolean enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {boolean enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {System.out.println(\"ID \"+$ID.text);}\n");
grammarBuilder.append(" | {!this.enumKeyword}? 'enum' {System.out.println(\"enum\");}\n");
@ -428,9 +428,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testPredicateDependentOnArg() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(214);
StringBuilder grammarBuilder = new StringBuilder(222);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID {System.out.println(\"alt 1\");}\n");
@ -456,9 +456,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testPredicateDependentOnArg2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(154);
StringBuilder grammarBuilder = new StringBuilder(162);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID \n");
@ -482,9 +482,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(285);
StringBuilder grammarBuilder = new StringBuilder(293);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("boolean pred(boolean v) {\n");
grammarBuilder.append(" System.out.println(\"eval=\"+v);\n");
grammarBuilder.append(" return v;\n");
@ -646,9 +646,9 @@ public class TestSemPredEvalParser extends BaseTest {
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(264);
StringBuilder grammarBuilder = new StringBuilder(272);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {int i = 0;}\n");
grammarBuilder.append("@parser::members {int i = 0;}\n");
grammarBuilder.append("s : ({this.i += 1;\n");
grammarBuilder.append("System.out.println(\"i=\"+i);} a)+ ;\n");
grammarBuilder.append("a : {this.i % 2 == 0}? ID {System.out.println(\"alt 1\");}\n");

View File

@ -57,8 +57,8 @@ public class TestParserErrors extends BaseTest {
grammarBuilder.append(" };\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {process.stdout.write('a');};\n");
grammarBuilder.append("b : 'b' {process.stdout.write('b');};");
grammarBuilder.append("a : 'a' {process.stdout.write(\"a\");};\n");
grammarBuilder.append("b : 'b' {process.stdout.write(\"b\");};");
String grammar = grammarBuilder.toString();
String input ="abab";
String found = execParser("T.g4", grammar, "TParser", "TLexer",

View File

@ -75,9 +75,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testActionHidesPreds() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(228);
StringBuilder grammarBuilder = new StringBuilder(236);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {this.i = 0;}\n");
grammarBuilder.append("@parser::members {this.i = 0;}\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {this.i = 1;} ID {this.i === 1}? {console.log(\"alt 1\");}\n");
grammarBuilder.append(" | {this.i = 2;} ID {this.i === 2}? {console.log(\"alt 2\");}\n");
@ -101,9 +101,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(291);
StringBuilder grammarBuilder = new StringBuilder(299);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("this.pred = function(v) {\n");
grammarBuilder.append(" console.log(\"eval=\" + v.toString());\n");
grammarBuilder.append(" return v;\n");
@ -149,9 +149,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(316);
StringBuilder grammarBuilder = new StringBuilder(324);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("this.pred = function(v) {\n");
grammarBuilder.append(" console.log(\"eval=\" + v.toString());\n");
grammarBuilder.append(" return v;\n");
@ -353,9 +353,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(201);
StringBuilder grammarBuilder = new StringBuilder(209);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {this.enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {this.enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {console.log(\"ID \"+$ID.text);}\n");
grammarBuilder.append(" | {!this.enumKeyword}? 'enum' {console.log(\"enum\");}\n");
@ -375,9 +375,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(201);
StringBuilder grammarBuilder = new StringBuilder(209);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {this.enumKeyword = true;}\n");
grammarBuilder.append("@parser::members {this.enumKeyword = true;}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {console.log(\"ID \"+$ID.text);}\n");
grammarBuilder.append(" | {!this.enumKeyword}? 'enum' {console.log(\"enum\");}\n");
@ -398,9 +398,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredicateDependentOnArg() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(203);
StringBuilder grammarBuilder = new StringBuilder(211);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {this.i = 0;}\n");
grammarBuilder.append("@parser::members {this.i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i===1}? ID {console.log(\"alt 1\");}\n");
@ -424,9 +424,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredicateDependentOnArg2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(157);
StringBuilder grammarBuilder = new StringBuilder(165);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {this.i = 0;}\n");
grammarBuilder.append("@parser::members {this.i = 0;}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i===1}? ID \n");
@ -448,9 +448,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(285);
StringBuilder grammarBuilder = new StringBuilder(293);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("this.pred = function(v) {\n");
grammarBuilder.append(" console.log(\"eval=\" + v.toString());\n");
grammarBuilder.append(" return v;\n");
@ -600,9 +600,9 @@ public class TestSemPredEvalParser extends BaseTest {
@Test
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(245);
StringBuilder grammarBuilder = new StringBuilder(253);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {this.i = 0;}\n");
grammarBuilder.append("@parser::members {this.i = 0;}\n");
grammarBuilder.append("s : ({this.i += 1;\n");
grammarBuilder.append("console.log(\"i=\"+i);} a)+ ;\n");
grammarBuilder.append("a : {this.i % 2 === 0}? ID {console.log(\"alt 1\");}\n");

View File

@ -62,8 +62,8 @@ public class TestParserErrors extends BasePython2Test {
grammarBuilder.append(" b = s.b()\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {print('a',end='')};\n");
grammarBuilder.append("b : 'b' {print('b',end='')};");
grammarBuilder.append("a : 'a' {print(\"a\",end='')};\n");
grammarBuilder.append("b : 'b' {print(\"b\",end='')};");
String grammar = grammarBuilder.toString();

View File

@ -81,9 +81,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testActionHidesPreds() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(204);
StringBuilder grammarBuilder = new StringBuilder(212);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {self.i = 1} ID {self.i == 1}? {print(\"alt 1\")}\n");
grammarBuilder.append(" | {self.i = 2} ID {self.i == 2}? {print(\"alt 2\")}\n");
@ -110,9 +110,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(269);
StringBuilder grammarBuilder = new StringBuilder(277);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("def pred(self, v):\n");
grammarBuilder.append(" print('eval=' + str(v).lower())\n");
grammarBuilder.append(" return v\n");
@ -164,9 +164,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(292);
StringBuilder grammarBuilder = new StringBuilder(300);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("def pred(self, v):\n");
grammarBuilder.append(" print('eval=' + str(v).lower())\n");
grammarBuilder.append(" return v\n");
@ -392,9 +392,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(184);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {enumKeyword = True}\n");
grammarBuilder.append("@parser::members {enumKeyword = True}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {print(\"ID \"+$ID.text)}\n");
grammarBuilder.append(" | {not self.enumKeyword}? 'enum' {print(\"enum\")}\n");
@ -417,9 +417,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(184);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {enumKeyword = True}\n");
grammarBuilder.append("@parser::members {enumKeyword = True}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {print(\"ID \"+$ID.text)}\n");
grammarBuilder.append(" | {not self.enumKeyword}? 'enum' {print(\"enum\")}\n");
@ -443,9 +443,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testPredicateDependentOnArg() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(181);
StringBuilder grammarBuilder = new StringBuilder(189);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID {print(\"alt 1\")}\n");
@ -472,9 +472,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testPredicateDependentOnArg2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(149);
StringBuilder grammarBuilder = new StringBuilder(157);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID \n");
@ -499,9 +499,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(263);
StringBuilder grammarBuilder = new StringBuilder(271);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("def pred(self, v):\n");
grammarBuilder.append(" print('eval=' + str(v).lower())\n");
grammarBuilder.append(" return v\n");
@ -669,9 +669,9 @@ public class TestSemPredEvalParser extends BasePython2Test {
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(226);
StringBuilder grammarBuilder = new StringBuilder(234);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : ({self.i += 1\n");
grammarBuilder.append("print(str(\"i=\")+str(i))} a)+ ;\n");
grammarBuilder.append("a : {self.i % 2 == 0}? ID {print(\"alt 1\")}\n");

View File

@ -62,8 +62,8 @@ public class TestParserErrors extends BasePython3Test {
grammarBuilder.append(" b = s.b()\n");
grammarBuilder.append("}\n");
grammarBuilder.append("s : (a | b)+;\n");
grammarBuilder.append("a : 'a' {print('a',end='')};\n");
grammarBuilder.append("b : 'b' {print('b',end='')};");
grammarBuilder.append("a : 'a' {print(\"a\",end='')};\n");
grammarBuilder.append("b : 'b' {print(\"b\",end='')};");
String grammar = grammarBuilder.toString();

View File

@ -81,9 +81,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testActionHidesPreds() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(204);
StringBuilder grammarBuilder = new StringBuilder(212);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : a+ ;\n");
grammarBuilder.append("a : {self.i = 1} ID {self.i == 1}? {print(\"alt 1\")}\n");
grammarBuilder.append(" | {self.i = 2} ID {self.i == 2}? {print(\"alt 2\")}\n");
@ -110,9 +110,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testActionsHidePredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(269);
StringBuilder grammarBuilder = new StringBuilder(277);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("def pred(self, v):\n");
grammarBuilder.append(" print('eval=' + str(v).lower())\n");
grammarBuilder.append(" return v\n");
@ -164,9 +164,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testDepedentPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(292);
StringBuilder grammarBuilder = new StringBuilder(300);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("def pred(self, v):\n");
grammarBuilder.append(" print('eval=' + str(v).lower())\n");
grammarBuilder.append(" return v\n");
@ -392,9 +392,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testPredTestedEvenWhenUnAmbig_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(184);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {enumKeyword = True}\n");
grammarBuilder.append("@parser::members {enumKeyword = True}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {print(\"ID \"+$ID.text)}\n");
grammarBuilder.append(" | {not self.enumKeyword}? 'enum' {print(\"enum\")}\n");
@ -417,9 +417,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testPredTestedEvenWhenUnAmbig_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(184);
StringBuilder grammarBuilder = new StringBuilder(192);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {enumKeyword = True}\n");
grammarBuilder.append("@parser::members {enumKeyword = True}\n");
grammarBuilder.append("primary\n");
grammarBuilder.append(" : ID {print(\"ID \"+$ID.text)}\n");
grammarBuilder.append(" | {not self.enumKeyword}? 'enum' {print(\"enum\")}\n");
@ -443,9 +443,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testPredicateDependentOnArg() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(181);
StringBuilder grammarBuilder = new StringBuilder(189);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID {print(\"alt 1\")}\n");
@ -472,9 +472,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testPredicateDependentOnArg2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(149);
StringBuilder grammarBuilder = new StringBuilder(157);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : a[2] a[1];\n");
grammarBuilder.append("a[int i]\n");
grammarBuilder.append(" : {$i==1}? ID \n");
@ -499,9 +499,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testPredsInGlobalFOLLOW() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(263);
StringBuilder grammarBuilder = new StringBuilder(271);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("def pred(self, v):\n");
grammarBuilder.append(" print('eval=' + str(v).lower())\n");
grammarBuilder.append(" return v\n");
@ -669,9 +669,9 @@ public class TestSemPredEvalParser extends BasePython3Test {
public void testToLeftWithVaryingPredicate() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(226);
StringBuilder grammarBuilder = new StringBuilder(234);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("@members {i = 0}\n");
grammarBuilder.append("@parser::members {i = 0}\n");
grammarBuilder.append("s : ({self.i += 1\n");
grammarBuilder.append("print(str(\"i=\")+str(i))} a)+ ;\n");
grammarBuilder.append("a : {self.i % 2 == 0}? ID {print(\"alt 1\")}\n");

148 runtime/Go/antlr/atn.go Normal file
View File

@ -0,0 +1,148 @@
package antlr
var ATNInvalidAltNumber int
type ATN struct {
	// DecisionToState is the list of decision points for all rules, subrules,
	// optional blocks, ()+, ()*, etc. Used to build DFA predictors for them.
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
grammarType int
// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
lexerActions []LexerAction
// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
maxTokenType int
modeNameToStartState map[string]*TokensStartState
modeToStartState []*TokensStartState
// ruleToStartState maps from rule index to starting state number.
ruleToStartState []*RuleStartState
// ruleToStopState maps from rule index to stop state number.
ruleToStopState []*RuleStopState
// ruleToTokenType maps the rule index to the resulting token type for lexer
// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
// specified, and otherwise is nil.
ruleToTokenType []int
states []ATNState
}
func NewATN(grammarType int, maxTokenType int) *ATN {
return &ATN{
grammarType: grammarType,
maxTokenType: maxTokenType,
modeNameToStartState: make(map[string]*TokensStartState),
}
}
// NextTokensInContext computes the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
return NewLL1Analyzer(a).Look(s, nil, ctx)
}
// NextTokensNoContext computes the set of valid tokens that can occur starting
// in s while staying within the same rule. Token.EPSILON is in the set if we
// reach the end of the rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
if s.GetNextTokenWithinRule() != nil {
return s.GetNextTokenWithinRule()
}
s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
s.GetNextTokenWithinRule().readOnly = true
return s.GetNextTokenWithinRule()
}
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
if ctx == nil {
return a.NextTokensNoContext(s)
}
return a.NextTokensInContext(s, ctx)
}
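// Editorial sketch, not part of the runtime file: how the three NextTokens*
// methods above fit together. The function and variable names are illustrative
// assumptions; only ATN, ATNState, RuleContext and IntervalSet come from the
// runtime.
func exampleFollowSets(a *ATN, s ATNState, ctx RuleContext) (withinRule, withContext *IntervalSet) {
	// With a nil context the result is restricted to tokens reachable while
	// staying inside the rule of s (and is cached on the state).
	withinRule = a.NextTokens(s, nil)
	// With a rule context the result also includes what can follow the
	// surrounding rule invocation(s).
	withContext = a.NextTokens(s, ctx)
	return withinRule, withContext
}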
func (a *ATN) addState(state ATNState) {
if state != nil {
state.SetATN(a)
state.SetStateNumber(len(a.states))
}
a.states = append(a.states, state)
}
func (a *ATN) removeState(state ATNState) {
a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
}
func (a *ATN) defineDecisionState(s DecisionState) int {
a.DecisionToState = append(a.DecisionToState, s)
s.setDecision(len(a.DecisionToState) - 1)
return s.getDecision()
}
func (a *ATN) getDecisionState(decision int) DecisionState {
if len(a.DecisionToState) == 0 {
return nil
}
return a.DecisionToState[decision]
}
// getExpectedTokens computes the set of input symbols which could follow ATN
// state number stateNumber in the specified full parse context ctx and returns
// the set of potentially valid input symbols which could follow the specified
// state in the specified context. This method considers the complete parser
// context, but does not evaluate semantic predicates (i.e. all predicates
// encountered during the calculation are assumed true). If a path in the ATN
// exists from the starting state to the RuleStopState of the outermost context
// without Matching any symbols, Token.EOF is added to the returned set.
//
// A nil ctx defaults to ParserRuleContext.EMPTY.
//
// It panics if the ATN does not contain state stateNumber.
func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
if stateNumber < 0 || stateNumber >= len(a.states) {
panic("Invalid state number.")
}
s := a.states[stateNumber]
following := a.NextTokens(s, nil)
if !following.contains(TokenEpsilon) {
return following
}
expected := NewIntervalSet()
expected.addSet(following)
expected.removeOne(TokenEpsilon)
for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
invokingState := a.states[ctx.GetInvokingState()]
rt := invokingState.GetTransitions()[0]
following = a.NextTokens(rt.(*RuleTransition).followState, nil)
expected.addSet(following)
expected.removeOne(TokenEpsilon)
ctx = ctx.GetParent().(RuleContext)
}
if following.contains(TokenEpsilon) {
expected.addOne(TokenEOF)
}
return expected
}
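// Editorial sketch, not part of the runtime file: a minimal in-package use of
// getExpectedTokens for error reporting. The function name is an assumption;
// getExpectedTokens comes from the code above, and IntervalSet is assumed to
// expose a String method as in the rest of the runtime.
func exampleExpectedAt(a *ATN, stateNumber int, ctx RuleContext) string {
	// Panics if stateNumber is not a valid state of this ATN, as documented above.
	expected := a.getExpectedTokens(stateNumber, ctx)
	return expected.String()
}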

View File

@ -0,0 +1,290 @@
package antlr
import (
"fmt"
"strconv"
)
type Comparable interface {
equals(other interface{}) bool
}
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
Hasher
Comparable
GetState() ATNState
GetAlt() int
GetSemanticContext() SemanticContext
GetContext() PredictionContext
SetContext(PredictionContext)
GetReachesIntoOuterContext() int
SetReachesIntoOuterContext(int)
String() string
getPrecedenceFilterSuppressed() bool
setPrecedenceFilterSuppressed(bool)
shortHash() string
}
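// Editorial sketch, not part of the runtime file: building a configuration for
// the (state, alt, context, semantic context) tuple described above and reading
// it back through the ATNConfig interface. Names are illustrative only;
// NewBaseATNConfig6 is defined further down in this file.
func exampleConfigTuple(state ATNState, alt int, ctx PredictionContext) ATNConfig {
	var c ATNConfig = NewBaseATNConfig6(state, alt, ctx) // semantic context defaults to SemanticContextNone
	_ = c.GetState()           // the ATN state
	_ = c.GetAlt()             // the predicted alternative
	_ = c.GetContext()         // the graph-structured stack node (rule invocation chain)
	_ = c.GetSemanticContext() // the tree of predicates seen so far
	return c
}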
type BaseATNConfig struct {
precedenceFilterSuppressed bool
state ATNState
alt int
context PredictionContext
semanticContext SemanticContext
reachesIntoOuterContext int
}
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
return &BaseATNConfig{
state: old.state,
alt: old.alt,
context: old.context,
semanticContext: old.semanticContext,
reachesIntoOuterContext: old.reachesIntoOuterContext,
}
}
func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
}
func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
if semanticContext == nil {
panic("semanticContext cannot be nil") // TODO: Necessary?
}
return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
}
func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}
func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
}
func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}
func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
}
func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
if semanticContext == nil {
panic("semanticContext cannot be nil")
}
return &BaseATNConfig{
state: state,
alt: c.GetAlt(),
context: context,
semanticContext: semanticContext,
reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
}
}
func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
return b.precedenceFilterSuppressed
}
func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
b.precedenceFilterSuppressed = v
}
func (b *BaseATNConfig) GetState() ATNState {
return b.state
}
func (b *BaseATNConfig) GetAlt() int {
return b.alt
}
func (b *BaseATNConfig) SetContext(v PredictionContext) {
b.context = v
}
func (b *BaseATNConfig) GetContext() PredictionContext {
return b.context
}
func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
return b.semanticContext
}
func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
return b.reachesIntoOuterContext
}
func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
b.reachesIntoOuterContext = v
}
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) equals(o interface{}) bool {
if b == o {
return true
}
var other, ok = o.(*BaseATNConfig)
if !ok {
return false
}
var equal bool
if b.context == nil {
equal = other.context == nil
} else {
equal = b.context.equals(other.context)
}
var (
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
alts = b.alt == other.alt
cons = b.semanticContext.equals(other.semanticContext)
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
)
return nums && alts && cons && sups && equal
}
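// Editorial sketch, not part of the runtime file: the equality rule stated
// above in action. Two configurations built from the same state, alternative
// and context compare equal even though they are distinct values. The function
// name is an assumption for illustration.
func exampleConfigEquality(state ATNState, alt int, ctx PredictionContext) bool {
	first := NewBaseATNConfig6(state, alt, ctx)
	second := NewBaseATNConfig6(state, alt, ctx)
	return first.equals(second) // true: same state number, alt, context and semantic context
}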
func (b *BaseATNConfig) shortHash() string {
return strconv.Itoa(b.state.GetStateNumber()) + "/" + strconv.Itoa(b.alt) + "/" + b.semanticContext.String()
}
func (b *BaseATNConfig) Hash() string {
var c string
if b.context == nil {
c = ""
} else {
c = b.context.Hash()
}
return strconv.Itoa(b.state.GetStateNumber()) + "/" + strconv.Itoa(b.alt) + "/" + c + "/" + b.semanticContext.String()
}
func (b *BaseATNConfig) String() string {
var s1, s2, s3 string
if b.context != nil {
s1 = ",[" + fmt.Sprint(b.context) + "]"
}
if b.semanticContext != SemanticContextNone {
s2 = "," + fmt.Sprint(b.semanticContext)
}
if b.reachesIntoOuterContext > 0 {
s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
}
return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
}
type LexerATNConfig struct {
*BaseATNConfig
lexerActionExecutor *LexerActionExecutor
passedThroughNonGreedyDecision bool
}
func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}
func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
lexerActionExecutor: lexerActionExecutor,
}
}
func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
lexerActionExecutor: c.lexerActionExecutor,
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
}
}
func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
lexerActionExecutor: lexerActionExecutor,
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
}
}
func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
lexerActionExecutor: c.lexerActionExecutor,
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
}
}
func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}
func (l *LexerATNConfig) Hash() string {
var f string
if l.passedThroughNonGreedyDecision {
f = "1"
} else {
f = "0"
}
return fmt.Sprintf("%v%v%v%v%v%v", l.state.GetStateNumber(), l.alt, l.context, l.semanticContext, f, l.lexerActionExecutor)
}
func (l *LexerATNConfig) equals(other interface{}) bool {
var othert, ok = other.(*LexerATNConfig)
if l == other {
return true
} else if !ok {
return false
} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
return false
}
var b bool
if l.lexerActionExecutor != nil {
b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
} else {
b = othert.lexerActionExecutor != nil
}
if b {
return false
}
return l.BaseATNConfig.equals(othert.BaseATNConfig)
}
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
var ds, ok = target.(DecisionState)
return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}

View File

@ -0,0 +1,388 @@
package antlr
import "fmt"
type ATNConfigSet interface {
Hasher
Add(ATNConfig, *DoubleDict) bool
AddAll([]ATNConfig) bool
GetStates() *Set
GetPredicates() []SemanticContext
GetItems() []ATNConfig
OptimizeConfigs(interpreter *BaseATNSimulator)
Equals(other interface{}) bool
Length() int
IsEmpty() bool
Contains(ATNConfig) bool
ContainsFast(ATNConfig) bool
Clear()
String() string
HasSemanticContext() bool
SetHasSemanticContext(v bool)
ReadOnly() bool
SetReadOnly(bool)
GetConflictingAlts() *BitSet
SetConflictingAlts(*BitSet)
FullContext() bool
GetUniqueAlt() int
SetUniqueAlt(int)
GetDipsIntoOuterContext() bool
SetDipsIntoOuterContext(bool)
}
// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
// about its elements and can combine similar configurations using a
// graph-structured stack.
type BaseATNConfigSet struct {
cachedHashString string
	// configLookup is used to determine whether two BaseATNConfigSets are equal. We
	// need all configurations with the same (s, i, _, semctx) to be considered
	// equal. Keying the set this way effectively doubles the number of objects
	// associated with ATNConfigs. All keys are hashed by (s, i, _, pi), not
	// including the context. Wiped out when read-only because a set becomes a DFA
	// state.
configLookup *Set
// configs is the added elements.
configs []ATNConfig
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
// info together because it saves recomputation. Can we track conflicts as they
// are added to save scanning configs later?
conflictingAlts *BitSet
// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
// we hit a pred while computing a closure operation. Do not make a DFA state
// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
dipsIntoOuterContext bool
// fullCtx is whether it is part of a full context LL prediction. Used to
// determine how to merge $. It is a wildcard with SLL, but not for an LL
// context merge.
fullCtx bool
// hasSemanticContext is used in both the parser and the lexer. In the lexer it
// indicates we hit a pred while computing a closure operation; don't make a DFA
// state from this set in that case.
hasSemanticContext bool
// readOnly is whether it is read-only. When true, no code may manipulate the
// set, because DFA states will point at sets and those must not change. It does
// not protect the other fields; conflictingAlts in particular is assigned after
// readOnly.
readOnly bool
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
// info together because it saves recomputation. Can we track conflicts as they
// are added to save scanning configs later?
uniqueAlt int
}
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
return &BaseATNConfigSet{
cachedHashString: "-1",
configLookup: NewSet(hashATNConfig, equalATNConfigs),
fullCtx: fullCtx,
}
}
// Add merges contexts with existing configs for (s, i, pi, _), where s is the
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
// dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
if b.readOnly {
panic("set is read-only")
}
if config.GetSemanticContext() != SemanticContextNone {
b.hasSemanticContext = true
}
if config.GetReachesIntoOuterContext() > 0 {
b.dipsIntoOuterContext = true
}
existing := b.configLookup.add(config).(ATNConfig)
if existing == config {
b.cachedHashString = "-1"
b.configs = append(b.configs, config) // Track order here
return true
}
// Merge a previous (s, i, pi, _) with it and save the result
rootIsWildcard := !b.fullCtx
merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
// No need to check for existing.context because config.context is in the cache:
// the only ways to create new graphs are the "call rule" code path and here, and
// we cache at both places.
existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
// Preserve the precedence filter suppression during the merge
if config.getPrecedenceFilterSuppressed() {
existing.setPrecedenceFilterSuppressed(true)
}
// Replace the context because there is no need to do alt mapping
existing.SetContext(merged)
return true
}
func (b *BaseATNConfigSet) GetStates() *Set {
states := NewSet(nil, nil)
for i := 0; i < len(b.configs); i++ {
states.add(b.configs[i].GetState())
}
return states
}
func (b *BaseATNConfigSet) HasSemanticContext() bool {
return b.hasSemanticContext
}
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
b.hasSemanticContext = v
}
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
preds := make([]SemanticContext, 0)
for i := 0; i < len(b.configs); i++ {
c := b.configs[i].GetSemanticContext()
if c != SemanticContextNone {
preds = append(preds, c)
}
}
return preds
}
func (b *BaseATNConfigSet) GetItems() []ATNConfig {
return b.configs
}
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
if b.readOnly {
panic("set is read-only")
}
if b.configLookup.length() == 0 {
return
}
for i := 0; i < len(b.configs); i++ {
config := b.configs[i]
config.SetContext(interpreter.getCachedContext(config.GetContext()))
}
}
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
for i := 0; i < len(coll); i++ {
b.Add(coll[i], nil)
}
return false
}
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
if b == other {
return true
} else if _, ok := other.(*BaseATNConfigSet); !ok {
return false
}
other2 := other.(*BaseATNConfigSet)
return b.configs != nil &&
// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
b.fullCtx == other2.fullCtx &&
b.uniqueAlt == other2.uniqueAlt &&
b.conflictingAlts == other2.conflictingAlts &&
b.hasSemanticContext == other2.hasSemanticContext &&
b.dipsIntoOuterContext == other2.dipsIntoOuterContext
}
func (b *BaseATNConfigSet) Hash() string {
if b.readOnly {
if b.cachedHashString == "-1" {
b.cachedHashString = b.hashConfigs()
}
return b.cachedHashString
}
return b.hashConfigs()
}
func (b *BaseATNConfigSet) hashConfigs() string {
s := ""
for _, c := range b.configs {
s += fmt.Sprint(c)
}
return s
}
func (b *BaseATNConfigSet) Length() int {
return len(b.configs)
}
func (b *BaseATNConfigSet) IsEmpty() bool {
return len(b.configs) == 0
}
func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
if b.configLookup == nil {
panic("not implemented for read-only sets")
}
return b.configLookup.contains(item)
}
func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
if b.configLookup == nil {
panic("not implemented for read-only sets")
}
return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set
}
func (b *BaseATNConfigSet) Clear() {
if b.readOnly {
panic("set is read-only")
}
b.configs = make([]ATNConfig, 0)
b.cachedHashString = "-1"
b.configLookup = NewSet(hashATNConfig, equalATNConfigs)
}
func (b *BaseATNConfigSet) FullContext() bool {
return b.fullCtx
}
func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
return b.dipsIntoOuterContext
}
func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
b.dipsIntoOuterContext = v
}
func (b *BaseATNConfigSet) GetUniqueAlt() int {
return b.uniqueAlt
}
func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
b.uniqueAlt = v
}
func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
return b.conflictingAlts
}
func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
b.conflictingAlts = v
}
func (b *BaseATNConfigSet) ReadOnly() bool {
return b.readOnly
}
func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
b.readOnly = readOnly
if readOnly {
b.configLookup = nil // Read only, so no need for the lookup cache
}
}
func (b *BaseATNConfigSet) String() string {
s := "["
for i, c := range b.configs {
s += c.String()
if i != len(b.configs)-1 {
s += ", "
}
}
s += "]"
if b.hasSemanticContext {
s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
}
if b.uniqueAlt != ATNInvalidAltNumber {
s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
}
if b.conflictingAlts != nil {
s += ",conflictingAlts=" + b.conflictingAlts.String()
}
if b.dipsIntoOuterContext {
s += ",dipsIntoOuterContext"
}
return s
}
type OrderedATNConfigSet struct {
*BaseATNConfigSet
}
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
b := NewBaseATNConfigSet(false)
b.configLookup = NewSet(nil, nil)
return &OrderedATNConfigSet{BaseATNConfigSet: b}
}
func hashATNConfig(c interface{}) string {
return c.(ATNConfig).shortHash()
}
func equalATNConfigs(a, b interface{}) bool {
if a == nil || b == nil {
return false
}
if a == b {
return true
}
var ai, ok = a.(ATNConfig)
var bi, ok1 = b.(ATNConfig)
if !ok || !ok1 {
return false
}
nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber()
alts := ai.GetAlt() == bi.GetAlt()
cons := ai.GetSemanticContext().equals(bi.GetSemanticContext())
return nums && alts && cons
}

View File

@ -0,0 +1,21 @@
package antlr
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
type ATNDeserializationOptions struct {
readOnly bool
verifyATN bool
generateRuleBypassTransitions bool
}
func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
o := new(ATNDeserializationOptions)
if CopyFrom != nil {
o.readOnly = CopyFrom.readOnly
o.verifyATN = CopyFrom.verifyATN
o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
}
return o
}
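// A minimal sketch (within package antlr, not part of the runtime): copying the
// defaults and switching on rule-bypass transition generation, which the
// deserializer further below consumes. The field is unexported, so this only
// works from inside the package.
func exampleBypassOptions() *ATNDeserializationOptions {
opts := NewATNDeserializationOptions(ATNDeserializationOptionsdefaultOptions)
opts.generateRuleBypassTransitions = true
return opts
}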

View File

@ -0,0 +1,808 @@
package antlr
import (
"encoding/hex"
"fmt"
"strconv"
"strings"
"unicode/utf16"
)
// BaseSerializedUUID is the earliest supported serialized UUID. We stick to the
// serialized string form for now; we don't need a UUID instance.
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
// This list contains all of the currently supported UUIDs, ordered by when
// the feature first appeared in this branch.
var SupportedUUIDs = []string{BaseSerializedUUID}
var SerializedVersion = 3
// This is the current serialized UUID.
var SerializedUUID = BaseSerializedUUID
type LoopEndStateIntPair struct {
item0 *LoopEndState
item1 int
}
type BlockStartStateIntPair struct {
item0 BlockStartState
item1 int
}
type ATNDeserializer struct {
deserializationOptions *ATNDeserializationOptions
data []rune
pos int
uuid string
}
func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
if options == nil {
options = ATNDeserializationOptionsdefaultOptions
}
return &ATNDeserializer{deserializationOptions: options}
}
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
return i
}
}
return -1
}
// isFeatureSupported determines if a particular serialized representation of an
// ATN supports a particular feature, identified by the UUID used for
// serializing the ATN at the time the feature was first introduced. Feature is
// the UUID marking the first time the feature was supported in the serialized
// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently
// being deserialized. It returns true if actualUuid represents a serialized ATN
// at or after the feature identified by feature was introduced, and otherwise
// false.
func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
idx1 := stringInSlice(feature, SupportedUUIDs)
if idx1 < 0 {
return false
}
idx2 := stringInSlice(actualUUID, SupportedUUIDs)
return idx2 >= idx1
}
func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
a.reset(utf16.Decode(data))
a.checkVersion()
a.checkUUID()
atn := a.readATN()
a.readStates(atn)
a.readRules(atn)
a.readModes(atn)
sets := a.readSets(atn)
a.readEdges(atn, sets)
a.readDecisions(atn)
a.readLexerActions(atn)
a.markPrecedenceDecisions(atn)
a.verifyATN(atn)
if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
a.generateRuleBypassTransitions(atn)
// Re-verify after modification
a.verifyATN(atn)
}
return atn
}
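// An illustrative sketch of how a generated recognizer would typically turn its
// serialized ATN into a live *ATN. serializedATN stands in for the []uint16
// literal emitted by the code generator; passing nil selects the default options.
func exampleDeserializeATN(serializedATN []uint16) *ATN {
deserializer := NewATNDeserializer(nil)
return deserializer.DeserializeFromUInt16(serializedATN)
}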
func (a *ATNDeserializer) reset(data []rune) {
temp := make([]rune, len(data))
for i, c := range data {
// Don't adjust the first value since that's the version number
if i == 0 {
temp[i] = c
} else {
temp[i] = c - 2
}
}
a.data = temp
a.pos = 0
}
func (a *ATNDeserializer) checkVersion() {
version := a.readInt()
if version != SerializedVersion {
panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
}
}
func (a *ATNDeserializer) checkUUID() {
uuid := a.readUUID()
if stringInSlice(uuid, SupportedUUIDs) < 0 {
panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
}
a.uuid = uuid
}
func (a *ATNDeserializer) readATN() *ATN {
grammarType := a.readInt()
maxTokenType := a.readInt()
return NewATN(grammarType, maxTokenType)
}
func (a *ATNDeserializer) readStates(atn *ATN) {
loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
endStateNumbers := make([]BlockStartStateIntPair, 0)
nstates := a.readInt()
for i := 0; i < nstates; i++ {
stype := a.readInt()
// Ignore bad types of states
if stype == ATNStateInvalidType {
atn.addState(nil)
continue
}
ruleIndex := a.readInt()
if ruleIndex == 0xFFFF {
ruleIndex = -1
}
s := a.stateFactory(stype, ruleIndex)
if stype == ATNStateLoopEnd {
loopBackStateNumber := a.readInt()
loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
} else if s2, ok := s.(BlockStartState); ok {
endStateNumber := a.readInt()
endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
}
atn.addState(s)
}
// Delay the assignment of loop back and end states until we know all the state
// instances have been initialized
for j := 0; j < len(loopBackStateNumbers); j++ {
pair := loopBackStateNumbers[j]
pair.item0.loopBackState = atn.states[pair.item1]
}
for j := 0; j < len(endStateNumbers); j++ {
pair := endStateNumbers[j]
pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
}
numNonGreedyStates := a.readInt()
for j := 0; j < numNonGreedyStates; j++ {
stateNumber := a.readInt()
atn.states[stateNumber].(DecisionState).setNonGreedy(true)
}
numPrecedenceStates := a.readInt()
for j := 0; j < numPrecedenceStates; j++ {
stateNumber := a.readInt()
atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
}
}
func (a *ATNDeserializer) readRules(atn *ATN) {
nrules := a.readInt()
if atn.grammarType == ATNTypeLexer {
atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
}
atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)
for i := 0; i < nrules; i++ {
s := a.readInt()
startState := atn.states[s].(*RuleStartState)
atn.ruleToStartState[i] = startState
if atn.grammarType == ATNTypeLexer {
tokenType := a.readInt()
if tokenType == 0xFFFF {
tokenType = TokenEOF
}
atn.ruleToTokenType[i] = tokenType
}
}
atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0)
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if s2, ok := state.(*RuleStopState); ok {
atn.ruleToStopState[s2.ruleIndex] = s2
atn.ruleToStartState[s2.ruleIndex].stopState = s2
}
}
}
func (a *ATNDeserializer) readModes(atn *ATN) {
nmodes := a.readInt()
for i := 0; i < nmodes; i++ {
s := a.readInt()
atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
}
}
func (a *ATNDeserializer) readSets(atn *ATN) []*IntervalSet {
sets := make([]*IntervalSet, 0)
m := a.readInt()
for i := 0; i < m; i++ {
iset := NewIntervalSet()
sets = append(sets, iset)
n := a.readInt()
containsEOF := a.readInt()
if containsEOF != 0 {
iset.addOne(-1)
}
for j := 0; j < n; j++ {
i1 := a.readInt()
i2 := a.readInt()
iset.addRange(i1, i2)
}
}
return sets
}
func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
nedges := a.readInt()
for i := 0; i < nedges; i++ {
var (
src = a.readInt()
trg = a.readInt()
ttype = a.readInt()
arg1 = a.readInt()
arg2 = a.readInt()
arg3 = a.readInt()
trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
srcState = atn.states[src]
)
srcState.AddTransition(trans, -1)
}
// Edges for rule stop states can be derived, so they are not serialized
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
for j := 0; j < len(state.GetTransitions()); j++ {
var t, ok = state.GetTransitions()[j].(*RuleTransition)
if !ok {
continue
}
outermostPrecedenceReturn := -1
if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
if t.precedence == 0 {
outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
}
}
trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)
atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
}
}
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if s2, ok := state.(*BaseBlockStartState); ok {
// We need to know the end state to set its start state
if s2.endState == nil {
panic("IllegalState")
}
// Block end states can only be associated to a single block start state
if s2.endState.startState != nil {
panic("IllegalState")
}
s2.endState.startState = state
}
if s2, ok := state.(*PlusLoopbackState); ok {
for j := 0; j < len(s2.GetTransitions()); j++ {
target := s2.GetTransitions()[j].getTarget()
if t2, ok := target.(*PlusBlockStartState); ok {
t2.loopBackState = state
}
}
} else if s2, ok := state.(*StarLoopbackState); ok {
for j := 0; j < len(s2.GetTransitions()); j++ {
target := s2.GetTransitions()[j].getTarget()
if t2, ok := target.(*StarLoopEntryState); ok {
t2.loopBackState = state
}
}
}
}
}
func (a *ATNDeserializer) readDecisions(atn *ATN) {
ndecisions := a.readInt()
for i := 0; i < ndecisions; i++ {
s := a.readInt()
decState := atn.states[s].(DecisionState)
atn.DecisionToState = append(atn.DecisionToState, decState)
decState.setDecision(i)
}
}
func (a *ATNDeserializer) readLexerActions(atn *ATN) {
if atn.grammarType == ATNTypeLexer {
count := a.readInt()
atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)
for i := 0; i < count; i++ {
actionType := a.readInt()
data1 := a.readInt()
if data1 == 0xFFFF {
data1 = -1
}
data2 := a.readInt()
if data2 == 0xFFFF {
data2 = -1
}
lexerAction := a.lexerActionFactory(actionType, data1, data2)
atn.lexerActions[i] = lexerAction
}
}
}
func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
count := len(atn.ruleToStartState)
for i := 0; i < count; i++ {
atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
}
for i := 0; i < count; i++ {
a.generateRuleBypassTransition(atn, i)
}
}
func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart := NewBasicBlockStartState()
bypassStart.ruleIndex = idx
atn.addState(bypassStart)
bypassStop := NewBlockEndState()
bypassStop.ruleIndex = idx
atn.addState(bypassStop)
bypassStart.endState = bypassStop
atn.defineDecisionState(bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
var excludeTransition Transition
var endState ATNState
if atn.ruleToStartState[idx].isPrecedenceRule {
// Wrap from the beginning of the rule to the StarLoopEntryState
endState = nil
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if a.stateIsEndStateFor(state, idx) != nil {
endState = state
excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
break
}
}
if excludeTransition == nil {
panic("Couldn't identify final state of the precedence rule prefix section.")
}
} else {
endState = atn.ruleToStopState[idx]
}
// All non-excluded transitions that currently target end state need to target
// blockEnd instead
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
for j := 0; j < len(state.GetTransitions()); j++ {
transition := state.GetTransitions()[j]
if transition == excludeTransition {
continue
}
if transition.getTarget() == endState {
transition.setTarget(bypassStop)
}
}
}
// All transitions leaving the rule start state need to leave blockStart instead
ruleToStartState := atn.ruleToStartState[idx]
count := len(ruleToStartState.GetTransitions())
for count > 0 {
bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
count--
}
// Link the new states
atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
MatchState := NewBasicState()
atn.addState(MatchState)
MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
}
func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
if state.GetRuleIndex() != idx {
return nil
}
if _, ok := state.(*StarLoopEntryState); !ok {
return nil
}
maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
return nil
}
var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
return state
}
return nil
}
// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
// the correct value.
func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
for _, state := range atn.states {
if _, ok := state.(*StarLoopEntryState); !ok {
continue
}
// We analyze the ATN to determine if a ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
if s3.epsilonOnlyTransitions && ok2 {
state.(*StarLoopEntryState).precedenceRuleDecision = true
}
}
}
}
}
func (a *ATNDeserializer) verifyATN(atn *ATN) {
if !a.deserializationOptions.verifyATN {
return
}
// Verify assumptions
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if state == nil {
continue
}
a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
switch s2 := state.(type) {
case *PlusBlockStartState:
a.checkCondition(s2.loopBackState != nil, "")
case *StarLoopEntryState:
a.checkCondition(s2.loopBackState != nil, "")
a.checkCondition(len(s2.GetTransitions()) == 2, "")
switch s2.GetTransitions()[0].getTarget().(type) {
case *StarBlockStartState:
var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState)
a.checkCondition(ok2, "")
a.checkCondition(!s2.nonGreedy, "")
case *LoopEndState:
var _, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState)
a.checkCondition(ok2, "")
a.checkCondition(s2.nonGreedy, "")
default:
panic("IllegalState")
}
case *StarLoopbackState:
a.checkCondition(len(state.GetTransitions()) == 1, "")
var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
a.checkCondition(ok2, "")
case *LoopEndState:
a.checkCondition(s2.loopBackState != nil, "")
case *RuleStartState:
a.checkCondition(s2.stopState != nil, "")
case *BaseBlockStartState:
a.checkCondition(s2.endState != nil, "")
case *BlockEndState:
a.checkCondition(s2.startState != nil, "")
case DecisionState:
a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
default:
var _, ok = s2.(*RuleStopState)
a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
}
}
}
func (a *ATNDeserializer) checkCondition(condition bool, message string) {
if !condition {
if message == "" {
message = "IllegalState"
}
panic(message)
}
}
func (a *ATNDeserializer) readInt() int {
v := a.data[a.pos]
a.pos++
return int(v)
}
//TODO
//func (a *ATNDeserializer) readLong() int64 {
// panic("Not implemented")
// var low = a.readInt32()
// var high = a.readInt32()
// return (low & 0x00000000FFFFFFFF) | (high << int32)
//}
func createByteToHex() []string {
bth := make([]string, 256)
for i := 0; i < 256; i++ {
bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)}))
}
return bth
}
var byteToHex = createByteToHex()
func (a *ATNDeserializer) readUUID() string {
bb := make([]int, 16)
for i := 7; i >= 0; i-- {
integer := a.readInt()
bb[(2*i)+1] = integer & 0xFF
bb[2*i] = (integer >> 8) & 0xFF
}
return byteToHex[bb[0]] + byteToHex[bb[1]] +
byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
byteToHex[bb[10]] + byteToHex[bb[11]] +
byteToHex[bb[12]] + byteToHex[bb[13]] +
byteToHex[bb[14]] + byteToHex[bb[15]]
}
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
case TransitionEPSILON:
return NewEpsilonTransition(target, -1)
case TransitionRANGE:
if arg3 != 0 {
return NewRangeTransition(target, TokenEOF, arg2)
}
return NewRangeTransition(target, arg1, arg2)
case TransitionRULE:
return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
case TransitionPREDICATE:
return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
case TransitionPRECEDENCE:
return NewPrecedencePredicateTransition(target, arg1)
case TransitionATOM:
if arg3 != 0 {
return NewAtomTransition(target, TokenEOF)
}
return NewAtomTransition(target, arg1)
case TransitionACTION:
return NewActionTransition(target, arg1, arg2, arg3 != 0)
case TransitionSET:
return NewSetTransition(target, sets[arg1])
case TransitionNOTSET:
return NewNotSetTransition(target, sets[arg1])
case TransitionWILDCARD:
return NewWildcardTransition(target)
}
panic("The specified transition type is not valid.")
}
func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
var s ATNState
switch typeIndex {
case ATNStateInvalidType:
return nil
case ATNStateBasic:
s = NewBasicState()
case ATNStateRuleStart:
s = NewRuleStartState()
case ATNStateBlockStart:
s = NewBasicBlockStartState()
case ATNStatePlusBlockStart:
s = NewPlusBlockStartState()
case ATNStateStarBlockStart:
s = NewStarBlockStartState()
case ATNStateTokenStart:
s = NewTokensStartState()
case ATNStateRuleStop:
s = NewRuleStopState()
case ATNStateBlockEnd:
s = NewBlockEndState()
case ATNStateStarLoopBack:
s = NewStarLoopbackState()
case ATNStateStarLoopEntry:
s = NewStarLoopEntryState()
case ATNStatePlusLoopBack:
s = NewPlusLoopbackState()
case ATNStateLoopEnd:
s = NewLoopEndState()
default:
panic(fmt.Sprintf("state type %d is invalid", typeIndex))
}
s.SetRuleIndex(ruleIndex)
return s
}
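// A small sketch of the mapping above: each serialized state-type constant from
// atn_state.go yields the corresponding concrete ATNState implementation. The
// ruleIndex argument (0) is arbitrary and only for illustration.
func exampleStateFactory(a *ATNDeserializer) ATNState {
s := a.stateFactory(ATNStateRuleStart, 0)
_ = s.(*RuleStartState) // the returned value is a *RuleStartState
return s
}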
func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
switch typeIndex {
case LexerActionTypeChannel:
return NewLexerChannelAction(data1)
case LexerActionTypeCustom:
return NewLexerCustomAction(data1, data2)
case LexerActionTypeMode:
return NewLexerModeAction(data1)
case LexerActionTypeMore:
return LexerMoreActionINSTANCE
case LexerActionTypePopMode:
return LexerPopModeActionINSTANCE
case LexerActionTypePushMode:
return NewLexerPushModeAction(data1)
case LexerActionTypeSkip:
return LexerSkipActionINSTANCE
case LexerActionTypeType:
return NewLexerTypeAction(data1)
default:
panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
}
}

View File

@ -0,0 +1,46 @@
package antlr
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
type IATNSimulator interface {
SharedContextCache() *PredictionContextCache
ATN() *ATN
DecisionToDFA() []*DFA
}
type BaseATNSimulator struct {
atn *ATN
sharedContextCache *PredictionContextCache
decisionToDFA []*DFA
}
func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
b := new(BaseATNSimulator)
b.atn = atn
b.sharedContextCache = sharedContextCache
return b
}
func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
if b.sharedContextCache == nil {
return context
}
visited := make(map[PredictionContext]PredictionContext)
return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}
func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
return b.sharedContextCache
}
func (b *BaseATNSimulator) ATN() *ATN {
return b.atn
}
func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
return b.decisionToDFA
}

View File

@ -0,0 +1,377 @@
package antlr
import "strconv"
// Constants for serialization.
const (
ATNStateInvalidType = 0
ATNStateBasic = 1
ATNStateRuleStart = 2
ATNStateBlockStart = 3
ATNStatePlusBlockStart = 4
ATNStateStarBlockStart = 5
ATNStateTokenStart = 6
ATNStateRuleStop = 7
ATNStateBlockEnd = 8
ATNStateStarLoopBack = 9
ATNStateStarLoopEntry = 10
ATNStatePlusLoopBack = 11
ATNStateLoopEnd = 12
ATNStateInvalidStateNumber = -1
)
var ATNStateInitialNumTransitions = 4
type ATNState interface {
GetEpsilonOnlyTransitions() bool
GetRuleIndex() int
SetRuleIndex(int)
GetNextTokenWithinRule() *IntervalSet
SetNextTokenWithinRule(*IntervalSet)
GetATN() *ATN
SetATN(*ATN)
GetStateType() int
GetStateNumber() int
SetStateNumber(int)
GetTransitions() []Transition
SetTransitions([]Transition)
AddTransition(Transition, int)
String() string
}
type BaseATNState struct {
// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
NextTokenWithinRule *IntervalSet
// atn is the current ATN.
atn *ATN
epsilonOnlyTransitions bool
// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
ruleIndex int
stateNumber int
stateType int
// Track the transitions emanating from this ATN state.
transitions []Transition
}
func NewBaseATNState() *BaseATNState {
return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}
func (as *BaseATNState) GetRuleIndex() int {
return as.ruleIndex
}
func (as *BaseATNState) SetRuleIndex(v int) {
as.ruleIndex = v
}
func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
return as.epsilonOnlyTransitions
}
func (as *BaseATNState) GetATN() *ATN {
return as.atn
}
func (as *BaseATNState) SetATN(atn *ATN) {
as.atn = atn
}
func (as *BaseATNState) GetTransitions() []Transition {
return as.transitions
}
func (as *BaseATNState) SetTransitions(t []Transition) {
as.transitions = t
}
func (as *BaseATNState) GetStateType() int {
return as.stateType
}
func (as *BaseATNState) GetStateNumber() int {
return as.stateNumber
}
func (as *BaseATNState) SetStateNumber(stateNumber int) {
as.stateNumber = stateNumber
}
func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
return as.NextTokenWithinRule
}
func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
as.NextTokenWithinRule = v
}
func (as *BaseATNState) String() string {
return strconv.Itoa(as.stateNumber)
}
func (as *BaseATNState) equals(other interface{}) bool {
if ot, ok := other.(ATNState); ok {
return as.stateNumber == ot.GetStateNumber()
}
return false
}
func (as *BaseATNState) isNonGreedyExitState() bool {
return false
}
func (as *BaseATNState) AddTransition(trans Transition, index int) {
if len(as.transitions) == 0 {
as.epsilonOnlyTransitions = trans.getIsEpsilon()
} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
as.epsilonOnlyTransitions = false
}
if index == -1 {
as.transitions = append(as.transitions, trans)
} else {
as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
// TODO: as.transitions.splice(index, 1, trans)
}
}
type BasicState struct {
*BaseATNState
}
func NewBasicState() *BasicState {
b := NewBaseATNState()
b.stateType = ATNStateBasic
return &BasicState{BaseATNState: b}
}
type DecisionState interface {
ATNState
getDecision() int
setDecision(int)
getNonGreedy() bool
setNonGreedy(bool)
}
type BaseDecisionState struct {
*BaseATNState
decision int
nonGreedy bool
}
func NewBaseDecisionState() *BaseDecisionState {
return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}
func (s *BaseDecisionState) getDecision() int {
return s.decision
}
func (s *BaseDecisionState) setDecision(b int) {
s.decision = b
}
func (s *BaseDecisionState) getNonGreedy() bool {
return s.nonGreedy
}
func (s *BaseDecisionState) setNonGreedy(b bool) {
s.nonGreedy = b
}
type BlockStartState interface {
DecisionState
getEndState() *BlockEndState
setEndState(*BlockEndState)
}
// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
*BaseDecisionState
endState *BlockEndState
}
func NewBlockStartState() *BaseBlockStartState {
return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}
func (s *BaseBlockStartState) getEndState() *BlockEndState {
return s.endState
}
func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
s.endState = b
}
type BasicBlockStartState struct {
*BaseBlockStartState
}
func NewBasicBlockStartState() *BasicBlockStartState {
b := NewBlockStartState()
b.stateType = ATNStateBlockStart
return &BasicBlockStartState{BaseBlockStartState: b}
}
// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
*BaseATNState
startState ATNState
}
func NewBlockEndState() *BlockEndState {
b := NewBaseATNState()
b.stateType = ATNStateBlockEnd
return &BlockEndState{BaseATNState: b}
}
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
// start symbol. In that case, there is one transition to EOF. Later, we might
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
*BaseATNState
}
func NewRuleStopState() *RuleStopState {
b := NewBaseATNState()
b.stateType = ATNStateRuleStop
return &RuleStopState{BaseATNState: b}
}
type RuleStartState struct {
*BaseATNState
stopState ATNState
isPrecedenceRule bool
}
func NewRuleStartState() *RuleStartState {
b := NewBaseATNState()
b.stateType = ATNStateRuleStart
return &RuleStartState{BaseATNState: b}
}
// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one that loops back to the start of the block, and one that exits.
type PlusLoopbackState struct {
*BaseDecisionState
}
func NewPlusLoopbackState() *PlusLoopbackState {
b := NewBaseDecisionState()
b.stateType = ATNStatePlusLoopBack
return &PlusLoopbackState{BaseDecisionState: b}
}
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
// decision state; we don't use it for code generation. Somebody might need it,
// so it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
*BaseBlockStartState
loopBackState ATNState
}
func NewPlusBlockStartState() *PlusBlockStartState {
b := NewBlockStartState()
b.stateType = ATNStatePlusBlockStart
return &PlusBlockStartState{BaseBlockStartState: b}
}
// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
*BaseBlockStartState
}
func NewStarBlockStartState() *StarBlockStartState {
b := NewBlockStartState()
b.stateType = ATNStateStarBlockStart
return &StarBlockStartState{BaseBlockStartState: b}
}
type StarLoopbackState struct {
*BaseATNState
}
func NewStarLoopbackState() *StarLoopbackState {
b := NewBaseATNState()
b.stateType = ATNStateStarLoopBack
return &StarLoopbackState{BaseATNState: b}
}
type StarLoopEntryState struct {
*BaseDecisionState
loopBackState ATNState
precedenceRuleDecision bool
}
func NewStarLoopEntryState() *StarLoopEntryState {
b := NewBaseDecisionState()
b.stateType = ATNStateStarLoopEntry
// precedenceRuleDecision (false by default) indicates whether this state can benefit from a precedence DFA during SLL decision making.
return &StarLoopEntryState{BaseDecisionState: b}
}
// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
*BaseATNState
loopBackState ATNState
}
func NewLoopEndState() *LoopEndState {
b := NewBaseATNState()
b.stateType = ATNStateLoopEnd
return &LoopEndState{BaseATNState: b}
}
// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
*BaseDecisionState
}
func NewTokensStartState() *TokensStartState {
b := NewBaseDecisionState()
b.stateType = ATNStateTokenStart
return &TokensStartState{BaseDecisionState: b}
}

View File

@ -0,0 +1,7 @@
package antlr
// Represent the type of recognizer an ATN applies to.
const (
ATNTypeLexer = 0
ATNTypeParser = 1
)

View File

@ -0,0 +1,8 @@
package antlr
type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
GetTextFromInterval(*Interval) string
}
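// CharStream is satisfied by the runtime's InputStream (added in input_stream.go
// as part of this same change); the compile-time assertion below is only a sketch
// of that relationship and assumes that file is present in the package.
var _ CharStream = (*InputStream)(nil)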

View File

@ -0,0 +1,52 @@
package antlr
// TokenFactory creates CommonToken objects.
type TokenFactory interface {
Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}
// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
// copyText indicates whether CommonToken.setText should be called after
// constructing tokens to explicitly set the text. This is useful for cases
// where the input stream might not be able to provide arbitrary substrings of
// text from the input after the lexer creates a token (e.g. the
// implementation of CharStream.GetText in UnbufferedCharStream panics with an
// UnsupportedOperationException). Explicitly setting the token text allows
// Token.GetText to be called at any time regardless of the input stream
// implementation.
//
// The default value is false to avoid the performance and memory overhead of
// copying text for every token unless explicitly requested.
copyText bool
}
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
return &CommonTokenFactory{copyText: copyText}
}
// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
t := NewCommonToken(source, ttype, channel, start, stop)
t.line = line
t.column = column
if text != "" {
t.SetText(text)
} else if c.copyText && source.charStream != nil {
t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
}
return t
}
func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
t.SetText(text)
return t
}
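// A minimal sketch, assuming the lexer type exposes a token-factory setter (named
// hypothetically below): building a factory that copies token text out of the
// input stream as each token is created.
func exampleCopyingFactory() TokenFactory {
factory := NewCommonTokenFactory(true) // copyText = true
// lexer.SetTokenFactory(factory)      // hypothetical setter on the lexer
return factory
}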

View File

@ -0,0 +1,443 @@
package antlr
import (
"strconv"
)
// CommonTokenStream is an implementation of TokenStream that loads tokens from
// a TokenSource on demand and places them in a buffer to provide access to any
// previous token by index. Every fetched token is kept in the buffer, but the
// stream presents a channel-filtered view of it: lookahead methods such as LT
// and LA only consider tokens whose channel matches the one passed to the
// constructor (typically Token.DEFAULT_CHANNEL), which is how hidden-channel
// tokens such as whitespace and comments are skipped.
type CommonTokenStream struct {
channel int
// fetchedEOF indicates whether the Token.EOF token has been fetched from
// tokenSource and added to tokens. This field improves performance for the
// following cases:
//
// consume: The lookahead check in consume to prevent consuming the EOF symbol is
// optimized by checking the values of fetchedEOF and p instead of calling LA.
//
// fetch: The check to prevent adding multiple EOF symbols into tokens is
// trivial with this field.
fetchedEOF bool
// index is the index into tokens of the current token (the next token to consume).
// tokens[p] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
// see the documentation of IntStream for a description of initializing methods.
index int
// tokenSource is the TokenSource from which tokens for this stream are
// fetched.
tokenSource TokenSource
// tokens is all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
index: -1,
tokenSource: lexer,
tokens: make([]Token, 0),
}
}
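// An illustrative sketch of typical client code. NewMyLexer is a hypothetical
// generated lexer constructor and NewInputStream comes from input_stream.go in
// this same change; everything else is the API defined in this file.
func exampleTokenStream() []Token {
input := NewInputStream("a + b")
lexer := NewMyLexer(input) // hypothetical generated lexer
tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
tokens.Fill() // pull every token, up to and including EOF, into the buffer
return tokens.GetAllTokens()
}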
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
func (c *CommonTokenStream) Mark() int {
return 0
}
func (c *CommonTokenStream) Release(marker int) {}
func (c *CommonTokenStream) reset() {
c.Seek(0)
}
func (c *CommonTokenStream) Seek(index int) {
c.lazyInit()
c.index = c.adjustSeekIndex(index)
}
func (c *CommonTokenStream) Get(index int) Token {
c.lazyInit()
return c.tokens[index]
}
func (c *CommonTokenStream) Consume() {
SkipEOFCheck := false
if c.index >= 0 {
if c.fetchedEOF {
// The last token in tokens is EOF. Skip the check if p indexes any fetched
// token except the last.
SkipEOFCheck = c.index < len(c.tokens)-1
} else {
// No EOF token in tokens. Skip the check if p indexes a fetched token.
SkipEOFCheck = c.index < len(c.tokens)
}
} else {
// Not yet initialized
SkipEOFCheck = false
}
if !SkipEOFCheck && c.LA(1) == TokenEOF {
panic("cannot consume EOF")
}
if c.Sync(c.index + 1) {
c.index = c.adjustSeekIndex(c.index + 1)
}
}
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
return fetched >= n
}
return true
}
// fetch adds n elements to buffer and returns the actual number of elements
// added to the buffer.
func (c *CommonTokenStream) fetch(n int) int {
if c.fetchedEOF {
return 0
}
for i := 0; i < n; i++ {
t := c.tokenSource.NextToken()
t.SetTokenIndex(len(c.tokens))
c.tokens = append(c.tokens, t)
if t.GetTokenType() == TokenEOF {
c.fetchedEOF = true
return i + 1
}
}
return n
}
// GetTokens gets all tokens from start to stop inclusive.
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
if start < 0 || stop < 0 {
return nil
}
c.lazyInit()
subset := make([]Token, 0)
if stop >= len(c.tokens) {
stop = len(c.tokens) - 1
}
for i := start; i < stop; i++ {
t := c.tokens[i]
if t.GetTokenType() == TokenEOF {
break
}
if types == nil || types.contains(t.GetTokenType()) {
subset = append(subset, t)
}
}
return subset
}
func (c *CommonTokenStream) LA(i int) int {
return c.LT(i).GetTokenType()
}
func (c *CommonTokenStream) lazyInit() {
if c.index == -1 {
c.setup()
}
}
func (c *CommonTokenStream) setup() {
c.Sync(0)
c.index = c.adjustSeekIndex(0)
}
func (c *CommonTokenStream) GetTokenSource() TokenSource {
return c.tokenSource
}
// SetTokenSource resets the c token stream by setting its token source.
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
// no tokens on channel between i and EOF.
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
c.Sync(i)
if i >= len(c.tokens) {
return -1
}
token := c.tokens[i]
for token.GetChannel() != c.channel {
if token.GetTokenType() == TokenEOF {
return -1
}
i++
c.Sync(i)
token = c.tokens[i]
}
return i
}
// previousTokenOnChannel returns the index of the previous token on channel
// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
// there are no tokens on channel between i and 0.
func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
for i >= 0 && c.tokens[i].GetChannel() != channel {
i--
}
return i
}
// getHiddenTokensToRight collects all tokens on a specified channel to the
// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
// or EOF. If channel is -1, it finds any non-default channel token.
func (c *CommonTokenStream) getHiddenTokensToRight(tokenIndex, channel int) []Token {
c.lazyInit()
if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
}
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
// If there is no on-channel token to the right, nextOnChannel == -1, so to is set to the last token index
var to int
if nextOnChannel == -1 {
to = len(c.tokens) - 1
} else {
to = nextOnChannel
}
return c.filterForChannel(from, to, channel)
}
// getHiddenTokensToLeft collects all tokens on channel to the left of the
// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
// -1, it finds any non-default channel token.
func (c *CommonTokenStream) getHiddenTokensToLeft(tokenIndex, channel int) []Token {
c.lazyInit()
if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
}
prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
if prevOnChannel == tokenIndex-1 {
return nil
}
// If there are none on channel to the left and prevOnChannel == -1 then from = 0
from := prevOnChannel + 1
to := tokenIndex - 1
return c.filterForChannel(from, to, channel)
}
func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
hidden := make([]Token, 0)
for i := left; i < right+1; i++ {
t := c.tokens[i]
if channel == -1 {
if t.GetChannel() != LexerDefaultTokenChannel {
hidden = append(hidden, t)
}
} else if t.GetChannel() == channel {
hidden = append(hidden, t)
}
}
if len(hidden) == 0 {
return nil
}
return hidden
}
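// A small in-package sketch (the two methods above are unexported): collecting
// hidden-channel tokens, e.g. comments or whitespace routed off the default
// channel by the lexer, immediately to the left of a given token index. Passing
// -1 as the channel collects tokens from any non-default channel.
func exampleHiddenToLeft(tokens *CommonTokenStream, tokenIndex int) []Token {
return tokens.getHiddenTokensToLeft(tokenIndex, -1)
}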
func (c *CommonTokenStream) GetSourceName() string {
return c.tokenSource.GetSourceName()
}
func (c *CommonTokenStream) Size() int {
return len(c.tokens)
}
func (c *CommonTokenStream) Index() int {
return c.index
}
func (c *CommonTokenStream) GetAllText() string {
return c.GetTextFromInterval(nil)
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
if start == nil || end == nil {
return ""
}
return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
}
func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
return c.GetTextFromInterval(interval.GetSourceInterval())
}
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
c.Fill()
if interval == nil {
interval = NewInterval(0, len(c.tokens)-1)
}
start := interval.start
stop := interval.stop
if start < 0 || stop < 0 {
return ""
}
if stop >= len(c.tokens) {
stop = len(c.tokens) - 1
}
s := ""
for i := start; i < stop+1; i++ {
t := c.tokens[i]
if t.GetTokenType() == TokenEOF {
break
}
s += t.GetText()
}
return s
}
// Fill gets all tokens from the lexer until EOF.
func (c *CommonTokenStream) Fill() {
c.lazyInit()
for c.fetch(1000) == 1000 {
continue
}
}
func (c *CommonTokenStream) adjustSeekIndex(i int) int {
return c.NextTokenOnChannel(i, c.channel)
}
func (c *CommonTokenStream) LB(k int) Token {
if k == 0 || c.index-k < 0 {
return nil
}
i := c.index
n := 1
// Find k good tokens looking backward
for n <= k {
// Skip off-channel tokens
i = c.previousTokenOnChannel(i-1, c.channel)
n++
}
if i < 0 {
return nil
}
return c.tokens[i]
}
func (c *CommonTokenStream) LT(k int) Token {
c.lazyInit()
if k == 0 {
return nil
}
if k < 0 {
return c.LB(-k)
}
i := c.index
n := 1 // We know tokens[n] is valid
// Find k good tokens
for n < k {
// Skip off-channel tokens, but make sure to not look past EOF
if c.Sync(i + 1) {
i = c.NextTokenOnChannel(i+1, c.channel)
}
n++
}
return c.tokens[i]
}
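// A small sketch of lookahead: LT(1) is the current on-channel token (off-channel
// tokens are skipped transparently), LT(-1) is the previous one, and LA(1) is the
// token type of LT(1).
func examplePeek(tokens *CommonTokenStream) (Token, Token, int) {
return tokens.LT(1), tokens.LT(-1), tokens.LA(1)
}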
// getNumberOfOnChannelTokens counts EOF once.
func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
var n int
c.Fill()
for i := 0; i < len(c.tokens); i++ {
t := c.tokens[i]
if t.GetChannel() == c.channel {
n++
}
if t.GetTokenType() == TokenEOF {
break
}
}
return n
}

129
runtime/Go/antlr/dfa.go Normal file
View File

@ -0,0 +1,129 @@
package antlr
import "sort"
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
decision int
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there.
states map[string]*DFAState
s0 *DFAState
// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
// True if the DFA is for a precedence decision and false otherwise.
precedenceDfa bool
}
func NewDFA(atnStartState DecisionState, decision int) *DFA {
return &DFA{
atnStartState: atnStartState,
decision: decision,
states: make(map[string]*DFAState),
}
}
// getPrecedenceStartState gets the start state for the current precedence and
// returns the start state corresponding to the specified precedence if a start
// state exists for the specified precedence and nil otherwise. d must be a
// precedence DFA. See also isPrecedenceDfa.
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
if !d.precedenceDfa {
panic("only precedence DFAs may contain a precedence start state")
}
// s0.edges is never nil for a precedence DFA
if precedence < 0 || precedence >= len(d.s0.edges) {
return nil
}
return d.s0.edges[precedence]
}
// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
if !d.precedenceDfa {
panic("only precedence DFAs may contain a precedence start state")
}
if precedence < 0 {
return
}
// Synchronization on s0 here is ok. When the DFA is turned into a
// precedence DFA, s0 will be initialized once and not updated again. s0.edges
// is never nil for a precedence DFA.
if precedence >= len(d.s0.edges) {
d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...)
}
d.s0.edges[precedence] = startState
}
// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.precedenceDfa != precedenceDfa {
d.states = make(map[string]*DFAState)
if precedenceDfa {
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
precedenceState.edges = make([]*DFAState, 0)
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
d.s0 = precedenceState
} else {
d.s0 = nil
}
d.precedenceDfa = precedenceDfa
}
}
func (d *DFA) GetStates() map[string]*DFAState {
return d.states
}
type DFAStateList []*DFAState
func (d DFAStateList) Len() int { return len(d) }
func (d DFAStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d DFAStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
vs := make([]*DFAState, 0, len(d.states))
for _, v := range d.states {
vs = append(vs, v)
}
sort.Sort(DFAStateList(vs))
return vs
}
func (d *DFA) String(literalNames []string, symbolicNames []string) string {
if d.s0 == nil {
return ""
}
return NewDFASerializer(d, literalNames, symbolicNames).String()
}
func (d *DFA) ToLexerString() string {
if d.s0 == nil {
return ""
}
return NewLexerDFASerializer(d).String()
}
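// A minimal sketch of dumping every non-empty decision DFA after a parse. The
// decisionToDFA slice and the name slices are assumptions standing in for whatever
// the simulator and generated recognizer expose; String is defined above.
func exampleDumpDFAs(decisionToDFA []*DFA, literalNames, symbolicNames []string) string {
s := ""
for _, d := range decisionToDFA {
if len(d.GetStates()) > 0 {
s += d.String(literalNames, symbolicNames)
}
}
return s
}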

View File

@ -0,0 +1,148 @@
package antlr
import (
"fmt"
"strconv"
)
// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
dfa *DFA
literalNames []string
symbolicNames []string
}
func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
if literalNames == nil {
literalNames = make([]string, 0)
}
if symbolicNames == nil {
symbolicNames = make([]string, 0)
}
return &DFASerializer{
dfa: dfa,
literalNames: literalNames,
symbolicNames: symbolicNames,
}
}
func (d *DFASerializer) String() string {
if d.dfa.s0 == nil {
return ""
}
buf := ""
states := d.dfa.sortedStates()
for _, s := range states {
if s.edges != nil {
n := len(s.edges)
for j := 0; j < n; j++ {
t := s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += d.GetStateString(s)
buf += "-"
buf += d.getEdgeLabel(j)
buf += "->"
buf += d.GetStateString(t)
buf += "\n"
}
}
}
}
if len(buf) == 0 {
return ""
}
return buf
}
func (d *DFASerializer) getEdgeLabel(i int) string {
if i == 0 {
return "EOF"
} else if d.literalNames != nil && i-1 < len(d.literalNames) {
return d.literalNames[i-1]
} else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
return d.symbolicNames[i-1]
}
return strconv.Itoa(i - 1)
}
func (d *DFASerializer) GetStateString(s *DFAState) string {
var a, b string
if s.isAcceptState {
a = ":"
}
if s.requiresFullContext {
b = "^"
}
baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
if s.isAcceptState {
if s.predicates != nil {
return baseStateStr + "=>" + fmt.Sprint(s.predicates)
}
return baseStateStr + "=>" + fmt.Sprint(s.prediction)
}
return baseStateStr
}
type LexerDFASerializer struct {
*DFASerializer
}
func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
}
func (l *LexerDFASerializer) getEdgeLabel(i int) string {
return "'" + string(i) + "'"
}
func (l *LexerDFASerializer) String() string {
if l.dfa.s0 == nil {
return ""
}
buf := ""
states := l.dfa.sortedStates()
for i := 0; i < len(states); i++ {
s := states[i]
if s.edges != nil {
n := len(s.edges)
for j := 0; j < n; j++ {
t := s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += l.GetStateString(s)
buf += "-"
buf += l.getEdgeLabel(j)
buf += "->"
buf += l.GetStateString(t)
buf += "\n"
}
}
}
}
if len(buf) == 0 {
return ""
}
return buf
}

View File

@ -0,0 +1,147 @@
package antlr
import (
"fmt"
"strconv"
)
// PredPrediction maps a predicate to a predicted alternative.
type PredPrediction struct {
alt int
pred SemanticContext
}
func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
return &PredPrediction{alt: alt, pred: pred}
}
func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
// conversion, therefore, the subset T would be a bitset representing the set of
// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
// A DFAState may have multiple references to a particular state, but with
// different ATN contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
configs ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
edges []*DFAState
isAcceptState bool
// prediction is the ttype we match or alt we predict if the state is accept.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
lexerActionExecutor *LexerActionExecutor
// requiresFullContext indicates that this state was created during an SLL
// prediction that discovered a conflict between the configurations in the
// state. Future ParserATNSimulator.execATN invocations immediately jump to
// full-context prediction if this field is true.
requiresFullContext bool
// predicates is the predicates associated with the ATN configurations of the
// DFA state during SLL parsing. When we have predicates, requiresFullContext
// is false, since full-context prediction evaluates predicates on the fly. If
// predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
//
// We only use these for non-requiresFullContext but conflicting states. That
// means we know from the context (it's $ or we don't dip into outer context)
// that it's an ambiguity, not a conflict.
//
// This list is computed by
// ParserATNSimulator.predicateDFAState.
predicates []*PredPrediction
}
func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
if configs == nil {
configs = NewBaseATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
}
// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() *Set {
alts := NewSet(nil, nil)
if d.configs != nil {
for _, c := range d.configs.GetItems() {
alts.add(c.GetAlt())
}
}
if alts.length() == 0 {
return nil
}
return alts
}
func (d *DFAState) setPrediction(v int) {
d.prediction = v
}
// equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has this exact set of ATN configurations. The stateNumber is irrelevant.
func (d *DFAState) equals(other interface{}) bool {
if d == other {
return true
} else if _, ok := other.(*DFAState); !ok {
return false
}
return d.configs.Equals(other.(*DFAState).configs)
}
func (d *DFAState) String() string {
return strconv.Itoa(d.stateNumber) + ":" + d.Hash()
}
func (d *DFAState) Hash() string {
var s string
if d.isAcceptState {
if d.predicates != nil {
s = "=>" + fmt.Sprint(d.predicates)
} else {
s = "=>" + fmt.Sprint(d.prediction)
}
}
return fmt.Sprint(d.configs) + s
}

View File

@ -0,0 +1,107 @@
package antlr
import (
"strconv"
)
//
// This implementation of {@link ANTLRErrorListener} can be used to identify
// certain potential correctness and performance problems in grammars. "reports"
// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
// message.
//
// <ul>
// <li><b>Ambiguities</b>: These are cases where more than one path through the
// grammar can Match the input.</li>
// <li><b>Weak context sensitivity</b>: These are cases where full-context
// prediction resolved an SLL conflict to a unique alternative which equaled the
// minimum alternative of the SLL conflict.</li>
// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
// full-context prediction resolved an SLL conflict to a unique alternative,
// <em>and</em> the minimum alternative of the SLL conflict was found to not be
// a truly viable alternative. Two-stage parsing cannot be used for inputs where
// this situation occurs.</li>
// </ul>
type DiagnosticErrorListener struct {
*DefaultErrorListener
exactOnly bool
}
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
// whether all ambiguities or only exact ambiguities are Reported.
n.exactOnly = exactOnly
return n
}
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
if d.exactOnly && !exact {
return
}
msg := "reportAmbiguity d=" +
d.getDecisionDescription(recognizer, dfa) +
": ambigAlts=" +
d.getConflictingAlts(ambigAlts, configs).String() +
", input='" +
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.NotifyErrorListeners(msg, nil, nil)
}
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.NotifyErrorListeners(msg, nil, nil)
}
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.NotifyErrorListeners(msg, nil, nil)
}
func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
decision := dfa.decision
ruleIndex := dfa.atnStartState.GetRuleIndex()
ruleNames := recognizer.GetRuleNames()
if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
return strconv.Itoa(decision)
}
ruleName := ruleNames[ruleIndex]
if ruleName == "" {
return strconv.Itoa(decision)
}
return strconv.Itoa(decision) + " (" + ruleName + ")"
}
//
// Computes the set of conflicting or ambiguous alternatives from a
// configuration set, if that information was not already provided by the
// parser.
//
// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
// Reported by the parser.
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
//
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
for _, c := range set.GetItems() {
result.add(c.GetAlt())
}
return result
}
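// Hypothetical usage sketch (not part of the upstream file): wire the listener
// into a parser. "p" stands for any generated parser; AddErrorListener is
// assumed to be available on the recognizer, as in the other ANTLR targets.
func enableDiagnostics(p Parser) {
	// Passing false reports every ambiguity; true restricts output to exact ambiguities.
	p.AddErrorListener(NewDiagnosticErrorListener(false))
}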

View File

@ -0,0 +1,104 @@
package antlr
import (
"fmt"
"os"
"strconv"
)
// Provides an empty default implementation of {@link ANTLRErrorListener}. The
// default implementation of each method does nothing, but can be overridden as
// necessary.
type ErrorListener interface {
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}
type DefaultErrorListener struct {
}
func NewDefaultErrorListener() *DefaultErrorListener {
return new(DefaultErrorListener)
}
func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}
func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}
func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}
func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}
type ConsoleErrorListener struct {
*DefaultErrorListener
}
func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}
//
// Provides a default instance of {@link ConsoleErrorListener}.
//
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
//
// {@inheritDoc}
//
// <p>
// This implementation prints messages to {@link System//err} containing the
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
// the following format.</p>
//
// <pre>
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
// </pre>
//
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
type ProxyErrorListener struct {
*DefaultErrorListener
delegates []ErrorListener
}
func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
if delegates == nil {
panic("delegates is not provided")
}
l := new(ProxyErrorListener)
l.delegates = delegates
return l
}
func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
for _, d := range p.delegates {
d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
}
}
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
}
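// Illustrative sketch (not part of the upstream file): a listener that records
// syntax errors instead of printing them. Embedding DefaultErrorListener keeps
// the remaining ErrorListener methods as no-ops.
type collectingErrorListener struct {
	*DefaultErrorListener
	messages []string
}

func (c *collectingErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
	c.messages = append(c.messages, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}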

View File

@ -0,0 +1,757 @@
package antlr
import (
"fmt"
"reflect"
"strconv"
"strings"
)
type ErrorStrategy interface {
reset(Parser)
RecoverInline(Parser) Token
Recover(Parser, RecognitionException)
Sync(Parser)
inErrorRecoveryMode(Parser) bool
ReportError(Parser, RecognitionException)
ReportMatch(Parser)
}
// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
//
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
lastErrorStates *IntervalSet
}
var _ ErrorStrategy = &DefaultErrorStrategy{}
func NewDefaultErrorStrategy() *DefaultErrorStrategy {
d := new(DefaultErrorStrategy)
// Indicates whether the error strategy is currently "recovering from an
// error". This is used to suppress Reporting multiple error messages while
// attempting to recover from a detected syntax error.
//
// @see //inErrorRecoveryMode
//
d.errorRecoveryMode = false
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
// ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
d.lastErrorStates = nil
return d
}
// <p>The default implementation simply calls {@link //endErrorCondition} to
// ensure that the handler is not in error recovery mode.</p>
func (d *DefaultErrorStrategy) reset(recognizer Parser) {
d.endErrorCondition(recognizer)
}
//
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}
//
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
//
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}
//
// {@inheritDoc}
//
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
//
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
d.endErrorCondition(recognizer)
}
//
// {@inheritDoc}
//
// <p>The default implementation returns immediately if the handler is already
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
// and dispatches the Reporting task based on the runtime type of {@code e}
// according to the following table.</p>
//
// <ul>
// <li>{@link NoViableAltException}: Dispatches the call to
// {@link //ReportNoViableAlternative}</li>
// <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //ReportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //ReportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception</li>
// </ul>
//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
if d.inErrorRecoveryMode(recognizer) {
return // don't Report spurious errors
}
d.beginErrorCondition(recognizer)
switch t := e.(type) {
default:
fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
// fmt.Println(e.stack)
recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
case *NoViableAltException:
d.ReportNoViableAlternative(recognizer, t)
case *InputMisMatchException:
d.ReportInputMisMatch(recognizer, t)
case *FailedPredicateException:
d.ReportFailedPredicate(recognizer, t)
}
}
// {@inheritDoc}
//
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
//
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
// uh oh, another error at same token index and previously-Visited
// state in ATN must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop; this is a failsafe.
recognizer.Consume()
}
d.lastErrorIndex = recognizer.GetInputStream().Index()
if d.lastErrorStates == nil {
d.lastErrorStates = NewIntervalSet()
}
d.lastErrorStates.addOne(recognizer.GetState())
followSet := d.getErrorRecoverySet(recognizer)
d.consumeUntil(recognizer, followSet)
}
// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what we were expecting
// at this point in the ATN. You can call this at any time, but ANTLR only
// generates code to check before subrules/loops and each iteration.
//
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p>
//
// <pre>
// a : Sync ( stuff Sync )*
// Sync : {consume to what can follow Sync}
// </pre>
//
// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
// with an empty alternative), then the expected set includes what follows
// the subrule.</p>
//
// <p>During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
// stay in the loop as long as possible.</p>
//
// <p><strong>ORIGINS</strong></p>
//
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatched token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p>
//
// <pre>
// classfunc : 'class' ID '{' member* '}'
// </pre>
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
// <p>This functionality cost a little bit of effort because the parser has to
// compare token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off this
// functionality by simply overriding this method with a blank { }.</p>
//
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.inErrorRecoveryMode(recognizer) {
return
}
s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
la := recognizer.GetTokenStream().LA(1)
// try cheaper subset first might get lucky. seems to shave a wee bit off
if la == TokenEOF || recognizer.GetATN().NextTokens(s, nil).contains(la) {
return
}
// Return but don't end recovery. only do that upon valid token Match
if recognizer.IsExpectedToken(la) {
return
}
switch s.GetStateType() {
case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
// Report error and recover if possible
if d.SingleTokenDeletion(recognizer) != nil {
return
}
panic(NewInputMisMatchException(recognizer))
case ATNStatePlusLoopBack, ATNStateStarLoopBack:
d.ReportUnwantedToken(recognizer)
expecting := NewIntervalSet()
expecting.addSet(recognizer.GetExpectedTokens())
whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
default:
// do nothing if we can't identify the exact kind of ATN state
}
}
// This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
if tokens != nil {
if e.startToken.GetTokenType() == TokenEOF {
input = "<EOF>"
} else {
input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
}
} else {
input = "<unknown input>"
}
msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
//
// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
//
// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
// This method is called to Report a syntax error which requires the removal
// of a token from the input stream. At the time this method is called, the
// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
// removed from the input stream. When this method returns,
// {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.inErrorRecoveryMode(recognizer) {
return
}
d.beginErrorCondition(recognizer)
t := recognizer.GetCurrentToken()
tokenName := d.GetTokenErrorDisplay(t)
expecting := d.GetExpectedTokens(recognizer)
msg := "extraneous input " + tokenName + " expecting " +
expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, t, nil)
}
// This method is called to Report a syntax error which requires the
// insertion of a missing token into the input stream. At the time this
// method is called, the missing token has not yet been inserted. When this
// method returns, {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.inErrorRecoveryMode(recognizer) {
return
}
d.beginErrorCondition(recognizer)
t := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
" at " + d.GetTokenErrorDisplay(t)
recognizer.NotifyErrorListeners(msg, t, nil)
}
// <p>The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, this method panics with an
// {@link InputMisMatchException}.</p>
//
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
//
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenDeletion}.</p>
//
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
//
// <p>If current token (at {@code LA(1)}) is consistent with what could come
// after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
// result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenInsertion}.</p>
//
// <p><strong>EXAMPLE</strong></p>
//
// <p>For example, input {@code i=(3;} is clearly missing the {@code ')'}. When
// the parser returns from the nested call to {@code expr}, it will have
// call chain:</p>
//
// <pre>
// stat &rarr; expr &rarr; atom
// </pre>
//
// and it will be trying to Match the {@code ')'} at this point in the
// derivation:
//
// <pre>
// =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
// ^
// </pre>
//
// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
//
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
if MatchedSymbol != nil {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.Consume()
return MatchedSymbol
}
// SINGLE TOKEN INSERTION
if d.SingleTokenInsertion(recognizer) {
return d.GetMissingSymbol(recognizer)
}
// even that didn't work must panic the exception
panic(NewInputMisMatchException(recognizer))
}
//
// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
// <p>This method determines whether or not single-token insertion is viable by
// checking if the {@code LA(1)} input symbol could be successfully Matched
// if it were instead the {@code LA(2)} symbol. If this method returns
// {@code true}, the caller is responsible for creating and inserting a
// token with the correct type to produce this behavior.</p>
//
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
//
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token error recovery
// is free to conjure up and insert the missing token
atn := recognizer.GetInterpreter().atn
currentState := atn.states[recognizer.GetState()]
next := currentState.GetTransitions()[0].getTarget()
expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
if expectingAtLL2.contains(currentSymbolType) {
d.ReportMissingToken(recognizer)
return true
}
return false
}
// This method implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the
// returned token was a successful Match.
//
// <p>If the single-token deletion is successful, this method calls
// {@link //ReportUnwantedToken} to Report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning {@link //ReportMatch} is called to signal a successful
// Match.</p>
//
// @param recognizer the parser instance
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
//
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
if expecting.contains(NextTokenType) {
d.ReportUnwantedToken(recognizer)
// print("recoverFromMisMatchedToken deleting " \
// + str(recognizer.GetTokenStream().LT(1)) \
// + " since " + str(recognizer.GetTokenStream().LT(2)) \
// + " is what we want", file=sys.stderr)
recognizer.Consume() // simply delete extra token
// we want to return the token we're actually Matching
MatchedSymbol := recognizer.GetCurrentToken()
d.ReportMatch(recognizer) // we know current token is correct
return MatchedSymbol
}
return nil
}
// Conjure up a missing token during error recovery.
//
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want, we assume that
// this token is missing and we keep going. Because we
// have to return some token to replace the missing token,
// we have to conjure one up. This method gives the user control
// over the tokens returned for missing tokens. Mostly,
// you will want to create something special for identifier
// tokens. For literals such as '{' and ',', the default
// action in the parser or tree parser works. It simply creates
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override this method to create the appropriate tokens.
//
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
expectedTokenType := expecting.first()
var tokenText string
if expectedTokenType == TokenEOF {
tokenText = "<missing EOF>"
} else {
ln := recognizer.GetLiteralNames()
if expectedTokenType > 0 && expectedTokenType < len(ln) {
tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
} else {
tokenText = "<missing undefined>" // TODO matches the JS impl
}
}
current := currentSymbol
lookback := recognizer.GetTokenStream().LT(-1)
if current.GetTokenType() == TokenEOF && lookback != nil {
current = lookback
}
tf := recognizer.GetTokenFactory()
return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
}
func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
return recognizer.GetExpectedTokens()
}
// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new token type.
//
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return "<no token>"
}
s := t.GetText()
if s == "" {
if t.GetTokenType() == TokenEOF {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
}
}
return d.escapeWSAndQuote(s)
}
func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
return "'" + s + "'"
}
// Compute the error recovery set for the current rule. During
// rule invocation, the parser pushes the set of tokens that can
// follow that rule reference on the stack; this amounts to
// computing FIRST of what follows the rule reference in the
// enclosing rule. See LinearApproximator.FIRST().
// This local follow set only includes tokens
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
// EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
// thing to do is to consume tokens until you see something that
// can legally follow a call to r or any rule that called r.
// You don't want the exact set of viable next tokens because the
// input might just be missing a token--you might consume the
// rest of the input looking for one of the missing tokens.
//
// Consider grammar:
//
// a : '[' b ']'
// | '(' b ')'
//
// b : c '^' INT
// c : ID
// | INT
//
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
//
// FOLLOW(b1_in_a) = FIRST(']') = ']'
// FOLLOW(b2_in_a) = FIRST(')') = ')'
// FOLLOW(c_in_b) = FIRST('^') = '^'
//
// Upon erroneous input "[]", the call chain is
//
// a -> b -> c
//
// and, hence, the follow context stack is:
//
// depth follow set start of rule execution
// 0 <EOF> a (from main())
// 1 ']' b
// 2 '^' c
//
// Notice that ')' is not included, because b would have to have
// been called from a different context in rule a for ')' to be
// included.
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
// At this point, it gets a mismatched token error and panics an
// exception (since LA(1) is not in the viable following token
// set). The rule exception handler tries to recover, but finds
// the same recovery set and doesn't consume anything. Rule b
// exits normally returning to rule a. Now it finds the ']' (and
// with the successful Match exits errorRecovery mode).
//
// So, you can see that the parser walks up the call chain looking
// for the token that was a member of the recovery set.
//
// Errors are not generated in errorRecovery mode.
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
//
// and
//
// "A note on error recovery in recursive descent parsers":
// http://portal.acm.org/citation.cfm?id=947902.947905
//
// Later, Josef Grosch had some good ideas:
//
// "Efficient and Comfortable Error Recovery in Recursive Descent
// Parsers":
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
//
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
recoverSet := NewIntervalSet()
for ctx != nil && ctx.GetInvokingState() >= 0 {
// compute what follows who invoked us
invokingState := atn.states[ctx.GetInvokingState()]
rt := invokingState.GetTransitions()[0]
follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
recoverSet.addSet(follow)
ctx = ctx.GetParent().(ParserRuleContext)
}
recoverSet.removeOne(TokenEpsilon)
return recoverSet
}
// Consume tokens until one Matches the given token set.//
func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
ttype := recognizer.GetTokenStream().LA(1)
for ttype != TokenEOF && !set.contains(ttype) {
recognizer.Consume()
ttype = recognizer.GetTokenStream().LA(1)
}
}
//
// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// by immediately canceling the parse operation with a
// {@link ParseCancellationException}. The implementation ensures that the
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
// <p>
// This error strategy is useful in the following scenarios.</p>
//
// <ul>
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being
// Reported or logged, and the parse result is simply ignored if errors occur,
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
// when the result will be ignored either way.</li>
// </ul>
//
// <p>
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
//
// @see Parser//setErrorHandler(ANTLRErrorStrategy)
type BailErrorStrategy struct {
*DefaultErrorStrategy
}
var _ ErrorStrategy = &BailErrorStrategy{}
func NewBailErrorStrategy() *BailErrorStrategy {
b := new(BailErrorStrategy)
b.DefaultErrorStrategy = NewDefaultErrorStrategy()
return b
}
// Instead of recovering from exception {@code e}, re-panic it wrapped
// in a {@link ParseCancellationException} so it is not caught by the
// rule function catch blocks. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
//
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
context.SetException(e)
context = context.GetParent().(ParserRuleContext)
}
panic(NewParseCancellationException()) // TODO we don't emit e properly
}
// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
//
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
return nil
}
// Make sure we don't attempt to recover from problems in subrules.//
func (b *BailErrorStrategy) Sync(recognizer Parser) {
// pass
}
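// Hypothetical two-stage parsing sketch (not part of the upstream file): install
// BailErrorStrategy for a fast first pass and catch the ParseCancellationException
// panic it raises on the first syntax error. SetErrorHandler is assumed to be part
// of the Parser API, mirroring myparser.setErrorHandler in the other targets; the
// parse callback stands in for invoking a generated start rule.
func parseWithBail(p Parser, parse func()) (bailed bool) {
	p.SetErrorHandler(NewBailErrorStrategy())
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(*ParseCancellationException); ok {
				bailed = true // caller can re-parse with NewDefaultErrorStrategy()
				return
			}
			panic(r) // not a bail; re-panic
		}
	}()
	parse()
	return false
}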

runtime/Go/antlr/errors.go
View File

@ -0,0 +1,237 @@
package antlr
// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and
// mismatched input errors. In each case, the parser knows where it is
// in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred.
type RecognitionException interface {
GetOffendingToken() Token
GetMessage() string
GetInputStream() IntStream
}
type BaseRecognitionException struct {
message string
recognizer Recognizer
offendingToken Token
offendingState int
ctx RuleContext
input IntStream
}
func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
// todo
// Error.call(this)
//
// if (!!Error.captureStackTrace) {
// Error.captureStackTrace(this, RecognitionException)
// } else {
// stack := NewError().stack
// }
// TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
t := new(BaseRecognitionException)
t.message = message
t.recognizer = recognizer
t.input = input
t.ctx = ctx
// The current {@link Token} when an error occurred. Since not all streams
// support accessing symbols by index, we have to track the {@link Token}
// instance itself.
t.offendingToken = nil
// Get the ATN state number the parser was in at the time the error
// occurred. For {@link NoViableAltException} and
// {@link LexerNoViableAltException} exceptions, this is the
// {@link DecisionState} number. For others, it is the state whose outgoing
// edge we couldn't Match.
t.offendingState = -1
if t.recognizer != nil {
t.offendingState = t.recognizer.GetState()
}
return t
}
func (b *BaseRecognitionException) GetMessage() string {
return b.message
}
func (b *BaseRecognitionException) GetOffendingToken() Token {
return b.offendingToken
}
func (b *BaseRecognitionException) GetInputStream() IntStream {
return b.input
}
// <p>If the state number is not known, this method returns -1.</p>
//
// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time this exception was raised.
//
// <p>If the set of expected tokens is not known and could not be computed,
// this method returns {@code nil}.</p>
//
// @return The set of token types that could potentially follow the current
// state in the ATN, or {@code nil} if the information is not available.
// /
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
if b.recognizer != nil {
return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
}
return nil
}
func (b *BaseRecognitionException) String() string {
return b.message
}
type LexerNoViableAltException struct {
*BaseRecognitionException
startIndex int
deadEndConfigs ATNConfigSet
}
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
l := new(LexerNoViableAltException)
l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
l.startIndex = startIndex
l.deadEndConfigs = deadEndConfigs
return l
}
func (l *LexerNoViableAltException) String() string {
symbol := ""
if l.startIndex >= 0 && l.startIndex < l.input.Size() {
symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
}
return "LexerNoViableAltException" + symbol
}
type NoViableAltException struct {
*BaseRecognitionException
startToken Token
offendingToken Token
ctx ParserRuleContext
deadEndConfigs ATNConfigSet
}
// Indicates that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error occurred. Reported by ReportNoViableAlternative().
//
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
if ctx == nil {
ctx = recognizer.GetParserRuleContext()
}
if offendingToken == nil {
offendingToken = recognizer.GetCurrentToken()
}
if startToken == nil {
startToken = recognizer.GetCurrentToken()
}
if input == nil {
input = recognizer.GetInputStream().(TokenStream)
}
n := new(NoViableAltException)
n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.Index() that couldn't Match
// input.LT(1)?//
n.deadEndConfigs = deadEndConfigs
// The token object at the start index; the input stream might
// not be buffering tokens so get a reference to it. (At the
// time the error occurred, of course the stream needs to keep a
// buffer of all of the tokens but later we might not have access to those.)
n.startToken = startToken
n.offendingToken = offendingToken
return n
}
type InputMisMatchException struct {
*BaseRecognitionException
}
// This signifies any kind of mismatched input exception, such as
// when the current input does not Match the expected token.
//
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
i := new(InputMisMatchException)
i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
i.offendingToken = recognizer.GetCurrentToken()
return i
}
// A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.
type FailedPredicateException struct {
*BaseRecognitionException
ruleIndex int
predicateIndex int
predicate string
}
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
f := new(FailedPredicateException)
f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
trans := s.GetTransitions()[0]
if trans2, ok := trans.(*PredicateTransition); ok {
f.ruleIndex = trans2.ruleIndex
f.predicateIndex = trans2.predIndex
} else {
f.ruleIndex = 0
f.predicateIndex = 0
}
f.predicate = predicate
f.offendingToken = recognizer.GetCurrentToken()
return f
}
func (f *FailedPredicateException) formatMessage(predicate, message string) string {
if message != "" {
return message
}
return "failed predicate: {" + predicate + "}?"
}
type ParseCancellationException struct {
}
func NewParseCancellationException() *ParseCancellationException {
// Error.call(this)
// Error.captureStackTrace(this, ParseCancellationException)
return new(ParseCancellationException)
}

View File

@ -0,0 +1,39 @@
package antlr
import (
"bytes"
"io"
"os"
)
// This is an InputStream that is loaded from a file all at once
// when you construct the object.
type FileStream struct {
*InputStream
filename string
}
func NewFileStream(fileName string) *FileStream {
buf := bytes.NewBuffer(nil)
f, _ := os.Open(fileName) // Error handling elided for brevity.
io.Copy(buf, f) // Error handling elided for brevity.
f.Close()
fs := new(FileStream)
fs.filename = fileName
s := string(buf.Bytes())
fs.InputStream = NewInputStream(s)
return fs
}
func (f *FileStream) GetSourceName() string {
return f.filename
}
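// Hypothetical usage sketch (not part of the upstream file); the path is a
// placeholder. Note that NewFileStream above silently drops I/O errors, so a
// missing file surfaces only as an empty stream.
func demoFileStream() {
	fs := NewFileStream("input.txt") // hypothetical file
	_ = fs.GetSourceName()           // "input.txt"
	_ = fs.Size()                    // number of runes loaded from the file
}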

View File

@ -0,0 +1,109 @@
package antlr
type InputStream struct {
name string
index int
data []rune
size int
}
func NewInputStream(data string) *InputStream {
is := new(InputStream)
is.name = "<empty>"
is.index = 0
is.data = []rune(data)
is.size = len(is.data) // number of runes
return is
}
func (is *InputStream) reset() {
is.index = 0
}
func (is *InputStream) Consume() {
if is.index >= is.size {
// assert is.LA(1) == TokenEOF
panic("cannot consume EOF")
}
is.index++
}
func (is *InputStream) LA(offset int) int {
if offset == 0 {
return 0 // nil
}
if offset < 0 {
offset++ // e.g., translate LA(-1) to use offset=0
}
pos := is.index + offset - 1
if pos < 0 || pos >= is.size { // invalid
return TokenEOF
}
return int(is.data[pos])
}
func (is *InputStream) LT(offset int) int {
return is.LA(offset)
}
func (is *InputStream) Index() int {
return is.index
}
func (is *InputStream) Size() int {
return is.size
}
// mark/release do nothing; we have the entire buffer
func (is *InputStream) Mark() int {
return -1
}
func (is *InputStream) Release(marker int) {
}
func (is *InputStream) Seek(index int) {
if index <= is.index {
is.index = index // just jump don't update stream state (line,...)
return
}
// seek forward
is.index = intMin(index, is.size)
}
func (is *InputStream) GetText(start int, stop int) string {
if stop >= is.size {
stop = is.size - 1
}
if start >= is.size {
return ""
}
return string(is.data[start : stop+1])
}
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
if start != nil && stop != nil {
return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
}
return ""
}
func (is *InputStream) GetTextFromInterval(i *Interval) string {
return is.GetText(i.start, i.stop)
}
func (*InputStream) GetSourceName() string {
return "Obtained from string"
}
func (is *InputStream) String() string {
return string(is.data)
}
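// Illustrative sketch (not part of the upstream file) of the lookahead
// conventions above: LA(1) is the symbol under the cursor, LA(-1) the symbol
// just consumed, and out-of-range positions return TokenEOF.
func demoInputStream() {
	is := NewInputStream("ab")
	_ = is.LA(1) // 'a'
	is.Consume()
	_ = is.LA(-1)        // 'a', now behind the cursor
	_ = is.LA(2)         // TokenEOF: only 'b' remains
	_ = is.GetText(0, 1) // "ab" (stop index is inclusive here)
}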

View File

@ -0,0 +1,12 @@
package antlr
type IntStream interface {
Consume()
LA(int) int
Mark() int
Release(marker int)
Index() int
Seek(index int)
Size() int
GetSourceName() string
}

View File

@ -0,0 +1,298 @@
package antlr
import (
"strconv"
"strings"
)
type Interval struct {
start int
stop int
}
/* stop is not included! */
func NewInterval(start, stop int) *Interval {
i := new(Interval)
i.start = start
i.stop = stop
return i
}
func (i *Interval) contains(item int) bool {
return item >= i.start && item < i.stop
}
func (i *Interval) String() string {
if i.start == i.stop-1 {
return strconv.Itoa(i.start)
}
return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1)
}
func (i *Interval) length() int {
return i.stop - i.start
}
type IntervalSet struct {
intervals []*Interval
readOnly bool
}
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
i.intervals = nil
i.readOnly = false
return i
}
func (i *IntervalSet) first() int {
if len(i.intervals) == 0 {
return TokenInvalidType
}
return i.intervals[0].start
}
func (i *IntervalSet) addOne(v int) {
i.addInterval(NewInterval(v, v+1))
}
func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h+1))
}
func (i *IntervalSet) addInterval(v *Interval) {
if i.intervals == nil {
i.intervals = make([]*Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for k := 0; k < len(i.intervals); k++ {
interval := i.intervals[k]
// distinct range -> insert
if v.stop < interval.start {
// i.intervals = splice(k, 0, v)
i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
return
} else if v.stop == interval.start {
i.intervals[k].start = v.start
return
} else if v.start <= interval.stop {
i.intervals[k] = NewInterval(intMin(interval.start, v.start), intMax(interval.stop, v.stop))
i.reduce(k)
return
}
}
// greater than any existing
i.intervals = append(i.intervals, v)
}
}
func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
if other.intervals != nil {
for k := 0; k < len(other.intervals); k++ {
i2 := other.intervals[k]
i.addInterval(NewInterval(i2.start, i2.stop))
}
}
return i
}
func (i *IntervalSet) reduce(k int) {
// only need to reduce if k is not the last
if k < len(i.intervals)-1 {
l := i.intervals[k]
r := i.intervals[k+1]
// if r contained in l
if l.stop >= r.stop {
i.intervals = i.intervals[0 : len(i.intervals)-1] // pop(k + 1)
i.reduce(k)
} else if l.stop >= r.start {
i.intervals[k] = NewInterval(l.start, r.stop)
i.intervals = i.intervals[0 : len(i.intervals)-1] // i.intervals.pop(k + 1)
}
}
}
func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
result := NewIntervalSet()
result.addInterval(NewInterval(start, stop+1))
for j := 0; j < len(i.intervals); j++ {
result.removeRange(i.intervals[j])
}
return result
}
func (i *IntervalSet) contains(item int) bool {
if i.intervals == nil {
return false
}
for k := 0; k < len(i.intervals); k++ {
if i.intervals[k].contains(item) {
return true
}
}
return false
}
func (i *IntervalSet) length() int {
len := 0
for _, v := range i.intervals {
len += v.length()
}
return len
}
func (i *IntervalSet) removeRange(v *Interval) {
if v.start == v.stop-1 {
i.removeOne(v.start)
} else if i.intervals != nil {
k := 0
for n := 0; n < len(i.intervals); n++ {
ni := i.intervals[k]
// intervals are ordered
if v.stop <= ni.start {
return
} else if v.start > ni.start && v.stop < ni.stop {
i.intervals[k] = NewInterval(ni.start, v.start)
x := NewInterval(v.stop, ni.stop)
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
} else if v.start <= ni.start && v.stop >= ni.stop {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
k = k - 1 // need another pass
} else if v.start < ni.stop {
i.intervals[k] = NewInterval(ni.start, v.start)
} else if v.stop < ni.stop {
i.intervals[k] = NewInterval(v.stop, ni.stop)
}
k++
}
}
}
func (i *IntervalSet) removeOne(v int) {
if i.intervals != nil {
for k := 0; k < len(i.intervals); k++ {
ki := i.intervals[k]
// intervals are ordered
if v < ki.start {
return
} else if v == ki.start && v == ki.stop-1 {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
return
} else if v == ki.start {
i.intervals[k] = NewInterval(ki.start+1, ki.stop)
return
} else if v == ki.stop-1 {
i.intervals[k] = NewInterval(ki.start, ki.stop-1)
return
} else if v < ki.stop-1 {
x := NewInterval(ki.start, v)
ki.start = v + 1
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
}
}
}
}
func (i *IntervalSet) String() string {
return i.StringVerbose(nil, nil, false)
}
func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
if i.intervals == nil {
return "{}"
} else if literalNames != nil || symbolicNames != nil {
return i.toTokenString(literalNames, symbolicNames)
} else if elemsAreChar {
return i.toCharString()
}
return i.toIndexString()
}
func (i *IntervalSet) toCharString() string {
names := make([]string, 0, len(i.intervals)) // start empty so appended names are not preceded by blanks
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
if v.stop == v.start+1 {
if v.start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append(names, ("'" + string(v.start) + "'"))
}
} else {
names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'")
}
}
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
}
return names[0]
}
func (i *IntervalSet) toIndexString() string {
names := make([]string, 0)
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
if v.stop == v.start+1 {
if v.start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append(names, strconv.Itoa(v.start))
}
} else {
names = append(names, strconv.Itoa(v.start)+".."+strconv.Itoa(v.stop-1))
}
}
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
}
return names[0]
}
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
names := make([]string, 0)
for _, v := range i.intervals {
for j := v.start; j < v.stop; j++ {
names = append(names, i.elementName(literalNames, symbolicNames, j))
}
}
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
}
return names[0]
}
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
if a == TokenEOF {
return "<EOF>"
} else if a == TokenEpsilon {
return "<EPSILON>"
} else {
if a < len(literalNames) && literalNames[a] != "" {
return literalNames[a]
}
return symbolicNames[a]
}
}
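// Illustrative sketch (not part of the upstream file): adjacent additions are
// merged by addInterval/reduce, so the set below collapses to a single range
// and prints as "5..9".
func demoIntervalSet() {
	s := NewIntervalSet()
	s.addOne(5)       // {5}
	s.addRange(7, 9)  // {5, 7..9}
	s.addOne(6)       // fills the gap; everything merges into 5..9
	_ = s.contains(8) // true
	_ = s.String()    // "5..9"
}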

runtime/Go/antlr/lexer.go
View File

@ -0,0 +1,409 @@
package antlr
import (
"fmt"
"strconv"
)
// A lexer is a recognizer that draws input symbols from a character stream.
// lexer grammars result in a subclass of this object. A Lexer object
// uses simplified Match() and error recovery mechanisms in the interest
// of speed.
///
type Lexer interface {
TokenSource
Recognizer
Emit() Token
setChannel(int)
pushMode(int)
popMode() int
setType(int)
setMode(int)
}
type BaseLexer struct {
*BaseRecognizer
Interpreter ILexerATNSimulator
TokenStartCharIndex int
TokenStartLine int
TokenStartColumn int
ActionType int
Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
input CharStream
factory TokenFactory
tokenFactorySourcePair *TokenSourceCharStreamPair
token Token
hitEOF bool
channel int
thetype int
modeStack IntStack
mode int
text string
}
func NewBaseLexer(input CharStream) *BaseLexer {
lexer := new(BaseLexer)
lexer.BaseRecognizer = NewBaseRecognizer()
lexer.input = input
lexer.factory = CommonTokenFactoryDEFAULT
lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
lexer.Virt = lexer
lexer.Interpreter = nil // child classes must populate it
// The goal of all lexer rules/methods is to create a token object.
// The token field is an instance variable, as multiple rules may collaborate to
// create a single token. NextToken will return this object after
// Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set this to the last token to be Matched or
// something non-nil so that the auto token emit mechanism will not
// emit another token.
lexer.token = nil
// What character index in the stream did the current token start at?
// Needed, for example, to get the text for current token. Set at
// the start of NextToken.
lexer.TokenStartCharIndex = -1
// The line on which the first character of the token resides///
lexer.TokenStartLine = -1
// The character position of first character within the line///
lexer.TokenStartColumn = -1
// Once we see EOF on char stream, next token will be EOF.
// If you have DONE : EOF then you see DONE EOF.
lexer.hitEOF = false
// The channel number for the current token///
lexer.channel = TokenDefaultChannel
// The token type for the current token///
lexer.thetype = TokenInvalidType
lexer.modeStack = make([]int, 0)
lexer.mode = LexerDefaultMode
// You can set the text for the current token to override what is in
// the input char buffer. Use setText() or set this instance variable directly.
// /
lexer.text = ""
return lexer
}
const (
LexerDefaultMode = 0
LexerMore = -2
LexerSkip = -3
)
const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
LexerMinCharValue = '\u0000'
LexerMaxCharValue = '\uFFFE'
)
func (b *BaseLexer) reset() {
// wack Lexer state variables
if b.input != nil {
b.input.Seek(0) // rewind the input
}
b.token = nil
b.thetype = TokenInvalidType
b.channel = TokenDefaultChannel
b.TokenStartCharIndex = -1
b.TokenStartColumn = -1
b.TokenStartLine = -1
b.text = ""
b.hitEOF = false
b.mode = LexerDefaultMode
b.modeStack = make([]int, 0)
b.Interpreter.reset()
}
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
return b.Interpreter
}
func (b *BaseLexer) GetInputStream() CharStream {
return b.input
}
func (b *BaseLexer) GetSourceName() string {
return b.GrammarFileName
}
func (b *BaseLexer) setChannel(v int) {
b.channel = v
}
func (b *BaseLexer) GetTokenFactory() TokenFactory {
return b.factory
}
func (b *BaseLexer) setTokenFactory(f TokenFactory) {
b.factory = f
}
func (b *BaseLexer) safeMatch() (ret int) {
defer func() {
if e := recover(); e != nil {
if re, ok := e.(RecognitionException); ok {
b.notifyListeners(re) // Report error
b.Recover(re)
ret = LexerSkip // default
}
}
}()
return b.Interpreter.Match(b.input, b.mode)
}
// Return a token from this source; i.e., Match a token on the char stream.
func (b *BaseLexer) NextToken() Token {
if b.input == nil {
panic("NextToken requires a non-nil input stream.")
}
tokenStartMarker := b.input.Mark()
// previously in finally block
defer func() {
// make sure we release marker after Match or
// unbuffered char stream will keep buffering
b.input.Release(tokenStartMarker)
}()
for {
if b.hitEOF {
b.EmitEOF()
return b.token
}
b.token = nil
b.channel = TokenDefaultChannel
b.TokenStartCharIndex = b.input.Index()
b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
b.TokenStartLine = b.Interpreter.GetLine()
b.text = ""
continueOuter := false
for {
b.thetype = TokenInvalidType
ttype := LexerSkip
ttype = b.safeMatch()
if b.input.LA(1) == TokenEOF {
b.hitEOF = true
}
if b.thetype == TokenInvalidType {
b.thetype = ttype
}
if b.thetype == LexerSkip {
continueOuter = true
break
}
if b.thetype != LexerMore {
break
}
}
if continueOuter {
continue
}
if b.token == nil {
b.Virt.Emit()
}
return b.token
}
return nil
}
// Instruct the lexer to Skip creating a token for the current lexer rule
// and look for another token. NextToken() knows to keep looking when
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
// /
func (b *BaseLexer) Skip() {
b.thetype = LexerSkip
}
func (b *BaseLexer) More() {
b.thetype = LexerMore
}
func (b *BaseLexer) setMode(m int) {
b.mode = m
}
func (b *BaseLexer) pushMode(m int) {
if LexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
b.modeStack.Push(b.mode)
b.mode = m
}
func (b *BaseLexer) popMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
if LexerATNSimulatorDebug {
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
}
i, _ := b.modeStack.Pop()
b.mode = i
return b.mode
}
func (b *BaseLexer) inputStream() CharStream {
return b.input
}
func (b *BaseLexer) setInputStream(input CharStream) {
b.input = nil
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
b.reset()
b.input = input
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}
// By default does not support multiple emits per NextToken invocation
// for efficiency reasons. Subclass and override this method, NextToken,
// and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as this implementation does).
// /
func (b *BaseLexer) EmitToken(token Token) {
b.token = token
}
// The standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override this method to emit
// custom Token objects or provide a Newfactory.
// /
func (b *BaseLexer) Emit() Token {
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
b.EmitToken(t)
return t
}
func (b *BaseLexer) EmitEOF() Token {
cpos := b.GetCharPositionInLine()
lpos := b.GetLine()
eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
b.EmitToken(eof)
return eof
}
func (b *BaseLexer) GetCharPositionInLine() int {
return b.Interpreter.GetCharPositionInLine()
}
func (b *BaseLexer) GetLine() int {
return b.Interpreter.GetLine()
}
func (b *BaseLexer) GetType() int {
return b.thetype
}
func (b *BaseLexer) setType(t int) {
b.thetype = t
}
// What is the index of the current character of lookahead?///
func (b *BaseLexer) GetCharIndex() int {
return b.input.Index()
}
// Return the text Matched so far for the current token, or any text override
// previously set via SetText.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
}
return b.Interpreter.GetText(b.input)
}
func (b *BaseLexer) SetText(text string) {
b.text = text
}
func (b *BaseLexer) GetATN() *ATN {
return b.Interpreter.ATN()
}
// Return a list of all Token objects in input char stream.
// Forces load of all tokens. Does not include EOF token.
// /
func (b *BaseLexer) getAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
t := vl.NextToken()
for t.GetTokenType() != TokenEOF {
tokens = append(tokens, t)
t = vl.NextToken()
}
return tokens
}
func (b *BaseLexer) notifyListeners(e RecognitionException) {
start := b.TokenStartCharIndex
stop := b.input.Index()
text := b.input.GetTextFromInterval(NewInterval(start, stop))
msg := "token recognition error at: '" + text + "'"
listener := b.GetErrorListenerDispatch()
listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
}
func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
if c == TokenEOF {
return "<EOF>"
} else if c == '\n' {
return "\\n"
} else if c == '\t' {
return "\\t"
} else if c == '\r' {
return "\\r"
} else {
return string(c)
}
}
func (b *BaseLexer) getCharErrorDisplay(c rune) string {
return "'" + b.getErrorDisplayForChar(c) + "'"
}
// Lexers can normally Match any char in its vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
// /
func (b *BaseLexer) Recover(re RecognitionException) {
if b.input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
// Skip a char and try again
b.Interpreter.Consume(b.input)
} else {
// TODO: Do we lose character or line position information?
b.input.Consume()
}
}
}


@@ -0,0 +1,407 @@
package antlr
import "strconv"
const (
LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
)
type LexerAction interface {
getActionType() int
getIsPositionDependent() bool
execute(lexer Lexer)
Hash() string
equals(other LexerAction) bool
}
type BaseLexerAction struct {
actionType int
isPositionDependent bool
}
func NewBaseLexerAction(action int) *BaseLexerAction {
la := new(BaseLexerAction)
la.actionType = action
la.isPositionDependent = false
return la
}
func (b *BaseLexerAction) execute(lexer Lexer) {
panic("Not implemented")
}
func (b *BaseLexerAction) getActionType() int {
return b.actionType
}
func (b *BaseLexerAction) getIsPositionDependent() bool {
return b.isPositionDependent
}
func (b *BaseLexerAction) Hash() string {
return strconv.Itoa(b.actionType)
}
func (b *BaseLexerAction) equals(other LexerAction) bool {
return b == other
}
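// Illustrative sketch, not part of the original file: a hypothetical custom action
// showing how the concrete actions below satisfy the LexerAction interface by
// embedding *BaseLexerAction and overriding execute.
type hiddenChannelActionSketch struct {
	*BaseLexerAction
}

func newHiddenChannelActionSketch() *hiddenChannelActionSketch {
	return &hiddenChannelActionSketch{NewBaseLexerAction(LexerActionTypeChannel)}
}

func (a *hiddenChannelActionSketch) execute(lexer Lexer) {
	lexer.setChannel(TokenHiddenChannel) // route the matched token off the default channel
}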
//
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
// <p>The {@code Skip} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerSkipAction struct {
*BaseLexerAction
}
func NewLexerSkipAction() *LexerSkipAction {
la := new(LexerSkipAction)
la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
return la
}
// Provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}
func (l *LexerSkipAction) String() string {
return "skip"
}
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
// with the assigned type.
type LexerTypeAction struct {
*BaseLexerAction
thetype int
}
func NewLexerTypeAction(thetype int) *LexerTypeAction {
l := new(LexerTypeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
l.thetype = thetype
return l
}
func (l *LexerTypeAction) execute(lexer Lexer) {
lexer.setType(l.thetype)
}
func (l *LexerTypeAction) Hash() string {
return strconv.Itoa(l.actionType) + strconv.Itoa(l.thetype)
}
func (l *LexerTypeAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerTypeAction); !ok {
return false
} else {
return l.thetype == other.(*LexerTypeAction).thetype
}
}
func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}
// Implements the {@code pushMode} lexer action by calling
// {@link Lexer//pushMode} with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction
mode int
}
func NewLexerPushModeAction(mode int) *LexerPushModeAction {
l := new(LexerPushModeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
l.mode = mode
return l
}
// <p>This action is implemented by calling {@link Lexer//pushMode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerPushModeAction) execute(lexer Lexer) {
lexer.pushMode(l.mode)
}
func (l *LexerPushModeAction) Hash() string {
return strconv.Itoa(l.actionType) + strconv.Itoa(l.mode)
}
func (l *LexerPushModeAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerPushModeAction); !ok {
return false
} else {
return l.mode == other.(*LexerPushModeAction).mode
}
}
func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}
// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
//
// <p>The {@code popMode} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerPopModeAction struct {
*BaseLexerAction
}
func NewLexerPopModeAction() *LexerPopModeAction {
l := new(LexerPopModeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
return l
}
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (l *LexerPopModeAction) execute(lexer Lexer) {
lexer.popMode()
}
func (l *LexerPopModeAction) String() string {
return "popMode"
}
// Implements the {@code more} lexer action by calling {@link Lexer//more}.
//
// <p>The {@code more} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerMoreAction struct {
*BaseLexerAction
}
func NewLexerMoreAction() *LexerMoreAction {
l := new(LexerMoreAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
return l
}
var LexerMoreActionINSTANCE = NewLexerMoreAction()
// <p>This action is implemented by calling {@link Lexer//more}.</p>
func (l *LexerMoreAction) execute(lexer Lexer) {
lexer.More()
}
func (l *LexerMoreAction) String() string {
return "more"
}
// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction
mode int
}
func NewLexerModeAction(mode int) *LexerModeAction {
l := new(LexerModeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
l.mode = mode
return l
}
// <p>This action is implemented by calling {@link Lexer//mode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerModeAction) execute(lexer Lexer) {
lexer.setMode(l.mode)
}
func (l *LexerModeAction) Hash() string {
return strconv.Itoa(l.actionType) + strconv.Itoa(l.mode)
}
func (l *LexerModeAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerModeAction); !ok {
return false
} else {
return l.mode == other.(*LexerModeAction).mode
}
}
func (l *LexerModeAction) String() string {
return "mode(" + strconv.Itoa(l.mode) + ")"
}
// Executes a custom lexer action by calling {@link Recognizer//action} with the
// rule and action indexes assigned to the custom action. The implementation of
// a custom action is added to the generated code for the lexer in an override
// of {@link Recognizer//action} when the grammar is compiled.
//
// <p>This class may represent embedded actions created with the <code>{...}</code>
// syntax in ANTLR 4, as well as actions created for lexer commands where the
// command argument could not be evaluated when the grammar was compiled.</p>
// Constructs a custom lexer action with the specified rule and action
// indexes.
//
// @param ruleIndex The rule index to use for calls to
// {@link Recognizer//action}.
// @param actionIndex The action index to use for calls to
// {@link Recognizer//action}.
type LexerCustomAction struct {
*BaseLexerAction
ruleIndex, actionIndex int
}
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
l := new(LexerCustomAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
l.ruleIndex = ruleIndex
l.actionIndex = actionIndex
l.isPositionDependent = true
return l
}
// <p>Custom actions are implemented by calling {@link Lexer//action} with the
// appropriate rule and action indexes.</p>
func (l *LexerCustomAction) execute(lexer Lexer) {
lexer.Action(nil, l.ruleIndex, l.actionIndex)
}
func (l *LexerCustomAction) Hash() string {
return strconv.Itoa(l.actionType) + strconv.Itoa(l.ruleIndex) + strconv.Itoa(l.actionIndex)
}
func (l *LexerCustomAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerCustomAction); !ok {
return false
} else {
return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
}
}
// Implements the {@code channel} lexer action by calling
// {@link Lexer//setChannel} with the assigned channel.
// Constructs a New{@code channel} action with the specified channel value.
// @param channel The channel value to pass to {@link Lexer//setChannel}.
type LexerChannelAction struct {
*BaseLexerAction
channel int
}
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
l.channel = channel
return l
}
// <p>This action is implemented by calling {@link Lexer//setChannel} with the
// value provided by {@link //getChannel}.</p>
func (l *LexerChannelAction) execute(lexer Lexer) {
lexer.setChannel(l.channel)
}
func (l *LexerChannelAction) Hash() string {
return strconv.Itoa(l.actionType) + strconv.Itoa(l.channel)
}
func (l *LexerChannelAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerChannelAction); !ok {
return false
} else {
return l.channel == other.(*LexerChannelAction).channel
}
}
func (l *LexerChannelAction) String() string {
return "channel(" + strconv.Itoa(l.channel) + ")"
}
// This implementation of {@link LexerAction} is used for tracking input offsets
// for position-dependent actions within a {@link LexerActionExecutor}.
//
// <p>This action is not serialized as part of the ATN, and is only required for
// position-dependent lexer actions which appear at a location other than the
// end of a rule. For more information about DFA optimizations employed for
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>
// Constructs a Newindexed custom action by associating a character offset
// with a {@link LexerAction}.
//
// <p>Note: This class is only required for lexer actions for which
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
//
// @param offset The offset into the input {@link CharStream}, relative to
// the token start index, at which the specified lexer action should be
// executed.
// @param action The lexer action to execute at a particular offset in the
// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction
offset int
lexerAction LexerAction
isPositionDependent bool
}
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
l := new(LexerIndexedCustomAction)
l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
l.offset = offset
l.lexerAction = lexerAction
l.isPositionDependent = true
return l
}
// <p>This method calls {@link //execute} on the result of {@link //getAction}
// using the provided {@code lexer}.</p>
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
// assume the input stream position was properly set by the calling code
l.lexerAction.execute(lexer)
}
func (l *LexerIndexedCustomAction) Hash() string {
return strconv.Itoa(l.actionType) + strconv.Itoa(l.offset) + l.lexerAction.Hash()
}
func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
return false
} else {
return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
}
}


@@ -0,0 +1,169 @@
package antlr
// Represents an executor for a sequence of lexer actions which traversed during
// the Matching operation of a lexer rule (token).
//
// <p>The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do
// not cause bloating of the {@link DFA} created for the lexer.</p>
type LexerActionExecutor struct {
lexerActions []LexerAction
cachedHashString string
}
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
if lexerActions == nil {
lexerActions = make([]LexerAction, 0)
}
l := new(LexerActionExecutor)
l.lexerActions = lexerActions
// Caches the result of {@link //hashCode} since the hash code is an element
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
var s string
for _, a := range lexerActions {
s += a.Hash()
}
l.cachedHashString = s // "".join([str(la) for la in
return l
}
// Creates a {@link LexerActionExecutor} which executes the actions for
// the input {@code lexerActionExecutor} followed by a specified
// {@code lexerAction}.
//
// @param lexerActionExecutor The executor for actions already traversed by
// the lexer while Matching a token within a particular
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
// though it were an empty executor.
// @param lexerAction The lexer action to execute after the actions
// specified in {@code lexerActionExecutor}.
//
// @return A {@link LexerActionExecutor} for executing the combine actions
// of {@code lexerActionExecutor} and {@code lexerAction}.
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
if lexerActionExecutor == nil {
return NewLexerActionExecutor([]LexerAction{lexerAction})
}
lexerActions := append(lexerActionExecutor.lexerActions, lexerAction)
// lexerActions := lexerActionExecutor.lexerActions.concat([ lexerAction ])
return NewLexerActionExecutor(lexerActions)
}
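// Illustrative sketch, not part of the original file: composing an executor from
// several lexer commands, in the same way getEpsilonTarget in the lexer ATN
// simulator appends one action at a time while traversing action transitions.
func buildExecutorSketch() *LexerActionExecutor {
	var ex *LexerActionExecutor                                  // nil behaves like an empty executor
	ex = LexerActionExecutorappend(ex, NewLexerChannelAction(1)) // e.g. channel(1)
	ex = LexerActionExecutorappend(ex, NewLexerModeAction(2))    // e.g. mode(2)
	return ex                                                    // execute() runs the actions in this order
}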
// Creates a {@link LexerActionExecutor} which encodes the current offset
// for position-dependent lexer actions.
//
// <p>Normally, when the executor encounters lexer actions where
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
// {@link IntStream//seek} on the input {@link CharStream} to set the input
// position to the <em>end</em> of the current token. This behavior provides
// for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
// characters.</p>
//
// <p>Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the DFA representation of
// lexer actions which appear in the middle of tokens remains efficient due
// to sharing among tokens of the same length, regardless of their absolute
// position in the input stream.</p>
//
// <p>If the current executor already has offsets assigned to all
// position-dependent lexer actions, the method returns {@code this}.</p>
//
// @param offset The current offset to assign to all position-dependent
// lexer actions which do not already have offsets assigned.
//
// @return A {@link LexerActionExecutor} which stores input stream offsets
// for all position-dependent lexer actions.
// /
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
var updatedLexerActions []LexerAction
for i := 0; i < len(l.lexerActions); i++ {
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
if l.lexerActions[i].getIsPositionDependent() && !ok {
if updatedLexerActions == nil {
updatedLexerActions = make([]LexerAction, 0)
for _, a := range l.lexerActions {
updatedLexerActions = append(updatedLexerActions, a)
}
}
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
}
}
if updatedLexerActions == nil {
return l
}
return NewLexerActionExecutor(updatedLexerActions)
}
// Execute the actions encapsulated by this executor within the context of a
// particular {@link Lexer}.
//
// <p>This method calls {@link IntStream//seek} to set the position of the
// {@code input} {@link CharStream} prior to calling
// {@link LexerAction//execute} on a position-dependent action. Before the
// method returns, the input position will be restored to the same position
// it was in when the method was invoked.</p>
//
// @param lexer The lexer instance.
// @param input The input stream which is the source for the current token.
// When this method is called, the current {@link IntStream//index} for
// {@code input} should be the start of the following token, i.e. 1
// character past the end of the current token.
// @param startIndex The token start index. This value may be passed to
// {@link IntStream//seek} to set the {@code input} position to the beginning
// of the token.
// /
func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
requiresSeek := false
stopIndex := input.Index()
defer func() {
if requiresSeek {
input.Seek(stopIndex)
}
}()
for i := 0; i < len(l.lexerActions); i++ {
lexerAction := l.lexerActions[i]
if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
offset := la.offset
input.Seek(startIndex + offset)
lexerAction = la.lexerAction
requiresSeek = (startIndex + offset) != stopIndex
} else if lexerAction.getIsPositionDependent() {
input.Seek(stopIndex)
requiresSeek = false
}
lexerAction.execute(lexer)
}
}
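// Illustrative sketch, not part of the original file: running an executor once a
// token has been matched, mirroring what accept() in the lexer ATN simulator does.
func runExecutorSketch(lexer Lexer, input CharStream, ex *LexerActionExecutor, startIndex int) {
	if ex != nil && lexer != nil {
		// input is expected to sit just past the matched token; position-dependent
		// actions will Seek relative to startIndex, and the position is restored afterwards.
		ex.execute(lexer, input, startIndex)
	}
}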
func (l *LexerActionExecutor) Hash() string {
return l.cachedHashString
}
func (l *LexerActionExecutor) equals(other interface{}) bool {
if l == other {
return true
} else if _, ok := other.(*LexerActionExecutor); !ok {
return false
} else {
return l.cachedHashString == other.(*LexerActionExecutor).cachedHashString &&
&l.lexerActions == &other.(*LexerActionExecutor).lexerActions
}
}


@@ -0,0 +1,654 @@
package antlr
import (
"fmt"
"strconv"
)
var (
LexerATNSimulatorDebug = false
LexerATNSimulatorDFADebug = false
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
LexerATNSimulatorMatchCalls = 0
)
type ILexerATNSimulator interface {
IATNSimulator
reset()
Match(input CharStream, mode int) int
GetCharPositionInLine() int
GetLine() int
GetText(input CharStream) string
Consume(input CharStream)
}
type LexerATNSimulator struct {
*BaseATNSimulator
recog Lexer
predictionMode int
mergeCache DoubleDict
startIndex int
Line int
CharPositionInLine int
mode int
prevAccept *SimState
MatchCalls int
}
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
l := new(LexerATNSimulator)
l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
l.decisionToDFA = decisionToDFA
l.recog = recog
// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In this case, we use the
// ATN-generated exception object.
l.startIndex = -1
// line number 1..n within the input///
l.Line = 1
// The index of the character relative to the beginning of the line
// 0..n-1///
l.CharPositionInLine = 0
l.mode = LexerDefaultMode
// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
// done
return l
}
func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
l.CharPositionInLine = simulator.CharPositionInLine
l.Line = simulator.Line
l.mode = simulator.mode
l.startIndex = simulator.startIndex
}
func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
l.MatchCalls++
l.mode = mode
mark := input.Mark()
defer func() {
input.Release(mark)
}()
l.startIndex = input.Index()
l.prevAccept.reset()
dfa := l.decisionToDFA[mode]
if dfa.s0 == nil {
return l.MatchATN(input)
}
return l.execATN(input, dfa.s0)
}
func (l *LexerATNSimulator) reset() {
l.prevAccept.reset()
l.startIndex = -1
l.Line = 1
l.CharPositionInLine = 0
l.mode = LexerDefaultMode
}
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]
if LexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
s0Closure := l.computeStartState(input, startState)
suppressEdge := s0Closure.hasSemanticContext
s0Closure.hasSemanticContext = false
next := l.addDFAState(s0Closure)
if !suppressEdge {
l.decisionToDFA[l.mode].s0 = next
}
predict := l.execATN(input, next)
if LexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
}
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
if LexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
// allow zero-length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state
for { // while more work
if LexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}
// As we move src->trg, src->trg, we keep track of the previous trg to
// avoid looking up the DFA state again, which is expensive.
// If the previous target was already part of the DFA, we might
// be able to avoid doing a reach operation upon t. If s!=nil,
// it means that semantic predicates didn't prevent us from
// creating a DFA state. Once we know s!=nil, we check to see if
// the DFA state has an edge already for t. If so, we can just reuse
// it's configuration set there's no point in re-computing it.
// This is kind of like doing DFA simulation within the ATN
// simulation because DFA simulation is really just a way to avoid
// computing reach/closure sets. Technically, once we know that
// we have a previously added DFA state, we could jump over to
// the DFA simulator. But, that would mean popping back and forth
// a lot and making things more complicated algorithmically.
// This optimization makes a lot of sense for loops within DFA.
// A character will take us back to an existing DFA state
// that already has lots of edges out of it. e.g., .* in comments.
target := l.getExistingTargetState(s, t)
if target == nil {
target = l.computeTargetState(input, s, t)
// print("Computed:" + str(target))
}
if target == ATNSimulatorError {
break
}
// If this is a consumable input element, make sure to consume before
// capturing the accept state so the input index, line, and char
// position accurately reflect the state of the interpreter at the
// end of the token.
if t != TokenEOF {
l.Consume(input)
}
if target.isAcceptState {
l.captureSimState(l.prevAccept, input, target)
if t == TokenEOF {
break
}
}
t = input.LA(1)
s = target // flip current DFA target becomes Newsrc/from state
}
return l.failOrAccept(l.prevAccept, input, s.configs, t)
}
// Get an existing target state for an edge in the DFA. If the target state
// for the edge has not yet been computed or is otherwise not available,
// this method returns {@code nil}.
//
// @param s The current DFA state
// @param t The next input symbol
// @return The existing target DFA state for the given input symbol
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
return nil
}
target := s.edges[t-LexerATNSimulatorMinDFAEdge]
if LexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}
// Compute a target state for an edge in the DFA, and attempt to add the
// computed state and corresponding edge to the DFA.
//
// @param input The input stream
// @param s The current DFA state
// @param t The next input symbol
//
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't throw out this knowledge; it'd
// cause a failover from DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}
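// Illustrative sketch, not part of the original file: the cached-edge fast path used
// inside execATN, which consults the DFA edge table first and falls back to the ATN
// computation only on a miss. ATNSimulatorError is the sentinel for "no viable edge".
func nextDFAStateSketch(l *LexerATNSimulator, input CharStream, s *DFAState, t int) *DFAState {
	if target := l.getExistingTargetState(s, t); target != nil {
		return target // cache hit: reuse the edge computed on an earlier token
	}
	return l.computeTargetState(input, s, t) // cache miss: compute, record, and return the edge
}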
func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
return prevAccept.dfaState.prediction
}
// if no accept and EOF is first char, return EOF
if t == TokenEOF && input.Index() == l.startIndex {
return TokenEOF
}
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}
// Given a starting configuration set, figure out all ATN configurations
// we can reach upon input {@code t}. Parameter {@code reach} is a return
// parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
// this is used to Skip processing for configs which have a lower priority
// than a config that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber
for _, cfg := range closure.GetItems() {
currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
continue
}
if LexerATNSimulatorDebug {
fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
}
for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
treatEOFAsEpsilon := (t == TokenEOF)
config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
// any remaining configs for this alt have a lower priority
// than the one that just reached an accept state.
SkipAlt = cfg.GetAlt()
}
}
}
}
}
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
if LexerATNSimulatorDebug {
fmt.Printf("ACTION %s\n", lexerActionExecutor)
}
// seek to after last char in token
input.Seek(index)
l.Line = line
l.CharPositionInLine = charPos
if lexerActionExecutor != nil && l.recog != nil {
lexerActionExecutor.execute(l.recog, input, startIndex)
}
}
func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
if trans.Matches(t, 0, 0xFFFE) {
return trans.getTarget()
}
return nil
}
func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
l.closure(input, cfg, configs, false, false, false)
}
return configs
}
// Since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from {@code config}, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// @return {@code true} if an accept state is reached, otherwise
// {@code false}.
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
if LexerATNSimulatorDebug {
fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
}
_, ok := config.state.(*RuleStopState)
if ok {
if LexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
fmt.Printf("closure at rule stop %s\n", config)
}
}
if config.context == nil || config.context.hasEmptyPath() {
if config.context == nil || config.context.isEmpty() {
configs.Add(config, nil)
return true
}
configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
currentAltReachedAcceptState = true
}
if config.context != nil && !config.context.isEmpty() {
for i := 0; i < config.context.length(); i++ {
if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
newContext := config.context.GetParent(i) // "pop" return state
returnState := l.atn.states[config.context.getReturnState(i)]
cfg := NewLexerATNConfig2(config, returnState, newContext)
currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
}
}
}
return currentAltReachedAcceptState
}
// optimization
if !config.state.GetEpsilonOnlyTransitions() {
if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
configs.Add(config, nil)
}
}
for j := 0; j < len(config.state.GetTransitions()); j++ {
trans := config.state.GetTransitions()[j]
cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
if cfg != nil {
currentAltReachedAcceptState = l.closure(input, cfg, configs,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
}
}
return currentAltReachedAcceptState
}
// side-effect: can alter configs.hasSemanticContext
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
var cfg *LexerATNConfig
if trans.getSerializationType() == TransitionRULE {
rt := trans.(*RuleTransition)
newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
} else if trans.getSerializationType() == TransitionPRECEDENCE {
panic("Precedence predicates are not supported in lexers.")
} else if trans.getSerializationType() == TransitionPREDICATE {
// Track traversing semantic predicates. If we traverse,
// we cannot add a DFA state for this "reach" computation
// because the DFA would not test the predicate again in the
// future. Rather than creating collections of semantic predicates
// like v3 and testing them on prediction, v4 will test them on the
// fly all the time using the ATN not the DFA. This is slower but
// semantically it's not used that often. One of the key elements to
// this predicate mechanism is not adding DFA states that see
// predicates immediately afterwards in the ATN. For example,
// a : ID {p1}? | ID {p2}?
// should create the start state for rule 'a' (to save start state
// competition), but should not create target of ID state. The
// collection of ATN states the following ID references includes
// states reached by traversing predicates. Since this is when we
// test them, we cannot cache the DFA state target of ID.
pt := trans.(*PredicateTransition)
if LexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
configs.SetHasSemanticContext(true)
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
} else if trans.getSerializationType() == TransitionACTION {
if config.context == nil || config.context.hasEmptyPath() {
// execute actions anywhere in the start rule for a token.
//
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
// isEmpty() is false. In this case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
// getEpsilonTarget to return two configurations, so
// additional modifications are needed before we can support
// the split operation.
lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
} else {
// ignore actions in referenced rules
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
} else if trans.getSerializationType() == TransitionEPSILON {
cfg = NewLexerATNConfig4(config, trans.getTarget())
} else if trans.getSerializationType() == TransitionATOM ||
trans.getSerializationType() == TransitionRANGE ||
trans.getSerializationType() == TransitionSET {
if treatEOFAsEpsilon {
if trans.Matches(TokenEOF, 0, 0xFFFF) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
}
}
return cfg
}
// Evaluate a predicate specified in the lexer.
//
// <p>If {@code speculative} is {@code true}, this method was called before
// {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
// and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the
// call to {@link //consume}.</p>
//
// @param input The input stream.
// @param ruleIndex The rule containing the predicate.
// @param predIndex The index of the predicate within the rule.
// @param speculative {@code true} if the current index in {@code input} is
// one character before the predicate's location.
//
// @return {@code true} if the specified predicate evaluates to
// {@code true}.
// /
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
return true
}
if !speculative {
return l.recog.Sempred(nil, ruleIndex, predIndex)
}
savedcolumn := l.CharPositionInLine
savedLine := l.Line
index := input.Index()
marker := input.Mark()
defer func() {
l.CharPositionInLine = savedcolumn
l.Line = savedLine
input.Seek(index)
input.Release(marker)
}()
l.Consume(input)
return l.recog.Sempred(nil, ruleIndex, predIndex)
}
func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
settings.index = input.Index()
settings.line = l.Line
settings.column = l.CharPositionInLine
settings.dfaState = dfaState
}
func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// leading to this call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes this edge
// dependent on the specific input sequence, so the static edge in the
// DFA should be omitted. The target DFAState is still created since
// execATN has the ability to reSynchronize with the DFA state cache
// following the predicate evaluation step.
//
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
// /
suppressEdge := cfgs.HasSemanticContext()
cfgs.SetHasSemanticContext(false)
to = l.addDFAState(cfgs)
if suppressEdge {
return to
}
}
// add the edge
if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
// Only track edges within the DFA bounds
return to
}
if LexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
if from.edges == nil {
// make room for tokens 1..n and -1 masquerading as index 0
from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)
}
from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect
return to
}
// Add a new DFA state if there isn't one with this set of
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
proposed := NewDFAState(-1, configs)
var firstConfigWithRuleStopState ATNConfig
for _, cfg := range configs.GetItems() {
_, ok := cfg.GetState().(*RuleStopState)
if ok {
firstConfigWithRuleStopState = cfg
break
}
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
hash := proposed.Hash()
dfa := l.decisionToDFA[l.mode]
existing := dfa.GetStates()[hash]
if existing != nil {
return existing
}
newState := proposed
newState.stateNumber = len(dfa.GetStates())
configs.SetReadOnly(true)
newState.configs = configs
dfa.GetStates()[hash] = newState
return newState
}
func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}
// Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
}
func (l *LexerATNSimulator) Consume(input CharStream) {
curChar := input.LA(1)
if curChar == int('\n') {
l.Line++
l.CharPositionInLine = 0
} else {
l.CharPositionInLine++
}
input.Consume()
}
func (l *LexerATNSimulator) GetCharPositionInLine() int {
return l.CharPositionInLine
}
func (l *LexerATNSimulator) GetLine() int {
return l.Line
}
func (l *LexerATNSimulator) GetTokenName(tt int) string {
if tt == -1 {
return "EOF"
}
return "'" + string(tt) + "'"
}
func resetSimState(sim *SimState) {
sim.index = -1
sim.line = 0
sim.column = -1
sim.dfaState = nil
}
type SimState struct {
index int
line int
column int
dfaState *DFAState
}
func NewSimState() *SimState {
s := new(SimState)
resetSimState(s)
return s
}
func (s *SimState) reset() {
resetSimState(s)
}


@@ -0,0 +1,211 @@
package antlr
type LL1Analyzer struct {
atn *ATN
}
func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
la := new(LL1Analyzer)
la.atn = atn
return la
}
//* Special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if {@code seeThruPreds==false}.
///
const (
LL1AnalyzerHitPred = TokenInvalidType
)
//*
// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}.
//
// @param s the ATN state
// @return the expected symbols for each outgoing transition of {@code s}.
func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
if s == nil {
return nil
}
count := len(s.GetTransitions())
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
lookBusy := NewSet(nil, nil)
seeThruPreds := false // fail to get lookahead upon pred
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for this alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
look[alt] = nil
}
}
return look
}
//*
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx the complete parser context, or {@code nil} if the context
// should be ignored
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
///
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
seeThruPreds := true // ignore preds get all lookahead
var lookContext PredictionContext
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
return r
}
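// Illustrative sketch, not part of the original file: computing the set of tokens
// that can follow a given ATN state, the same call that error reporting code relies
// on when it needs the "expected tokens" at a decision point.
func expectedTokensSketch(atn *ATN, s ATNState, ctx RuleContext) *IntervalSet {
	analyzer := NewLL1Analyzer(atn)
	return analyzer.Look(s, nil, ctx) // nil stopState: follow paths to the end of the rule
}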
//*
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and {@code stopState} or the end of the
// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
// {@code true} and {@code stopState} or the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state.
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx The outer context, or {@code nil} if the outer context should
// not be used.
// @param look The result lookahead set.
// @param lookBusy A set used for preventing epsilon closures in the ATN
// from causing a stack overflow. Outside code should pass
// {@code NewSet<ATNConfig>} for this argument.
// @param calledRuleStack A set used for preventing left recursion in the
// ATN from causing a stack overflow. Outside code should pass
// {@code NewBitSet()} for this argument.
// @param seeThruPreds {@code true} to treat semantic predicates as
// implicitly {@code true} and "see through them", otherwise {@code false}
// to treat semantic predicates as opaque and add {@link //HitPred} to the
// result if one is encountered.
// @param addEOF Add {@link Token//EOF} to the result if the end of the
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
returnState := la.atn.states[ctx.getReturnState(i)]
removed := calledRuleStack.contains(returnState.GetRuleIndex())
defer func() {
if removed {
calledRuleStack.add(returnState.GetRuleIndex())
}
}()
calledRuleStack.remove(returnState.GetRuleIndex())
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
c := NewBaseATNConfig6(s, 0, ctx)
if lookBusy.contains(c) {
return
}
lookBusy.add(c)
if s == stopState {
if ctx == nil {
look.addOne(TokenEpsilon)
return
} else if ctx.isEmpty() && addEOF {
look.addOne(TokenEOF)
return
}
}
_, ok := s.(*RuleStopState)
if ok {
if ctx == nil {
look.addOne(TokenEpsilon)
return
} else if ctx.isEmpty() && addEOF {
look.addOne(TokenEOF)
return
}
if ctx != BasePredictionContextEMPTY {
// run thru all possible stack tops in ctx
for i := 0; i < ctx.length(); i++ {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
}
return
}
}
n := len(s.GetTransitions())
for i := 0; i < n; i++ {
t := s.GetTransitions()[i]
if t1, ok := t.(*RuleTransition); ok {
if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
continue
}
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
} else if t2, ok := t.(AbstractPredicateTransition); ok {
if seeThruPreds {
la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else {
look.addOne(LL1AnalyzerHitPred)
}
} else if t.getIsEpsilon() {
la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else if _, ok := t.(*WildcardTransition); ok {
look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
} else {
set := t.getLabel()
if set != nil {
if _, ok := t.(*NotSetTransition); ok {
set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
}
look.addSet(set)
}
}
}
}
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
defer func() {
calledRuleStack.remove(t1.getTarget().GetRuleIndex())
}()
calledRuleStack.add(t1.getTarget().GetRuleIndex())
la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}

runtime/Go/antlr/parser.go (new file)

@@ -0,0 +1,714 @@
package antlr
import (
"fmt"
"strconv"
)
type Parser interface {
Recognizer
GetInterpreter() *ParserATNSimulator
GetTokenStream() TokenStream
GetTokenFactory() TokenFactory
GetParserRuleContext() ParserRuleContext
SetParserRuleContext(ParserRuleContext)
Consume() Token
GetParseListeners() []ParseTreeListener
GetErrorHandler() ErrorStrategy
SetErrorHandler(ErrorStrategy)
GetInputStream() IntStream
GetCurrentToken() Token
GetExpectedTokens() *IntervalSet
NotifyErrorListeners(string, Token, RecognitionException)
IsExpectedToken(int) bool
GetPrecedence() int
GetRuleInvocationStack(ParserRuleContext) []string
}
type BaseParser struct {
*BaseRecognizer
Interpreter *ParserATNSimulator
BuildParseTrees bool
input TokenStream
errHandler ErrorStrategy
precedenceStack IntStack
ctx ParserRuleContext
tracer *TraceListener
parseListeners []ParseTreeListener
_SyntaxErrors int
}
// This is all the parsing support code; essentially most of it is error
// recovery stuff.
func NewBaseParser(input TokenStream) *BaseParser {
p := new(BaseParser)
p.BaseRecognizer = NewBaseRecognizer()
// The input stream.
p.input = nil
// The error handling strategy for the parser. The default value is a new
// instance of {@link DefaultErrorStrategy}.
p.errHandler = NewDefaultErrorStrategy()
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
// The {@link ParserRuleContext} object for the currently executing rule.
// This is always non-nil during the parsing process.
p.ctx = nil
// Specifies whether or not the parser should construct a parse tree during
// the parsing process. The default value is {@code true}.
p.BuildParseTrees = true
// When {@link //setTrace}{@code (true)} is called, a reference to the
// {@link TraceListener} is stored here so it can be easily removed in a
// later call to {@link //setTrace}{@code (false)}. The listener itself is
// implemented as a parser listener so this field is not directly used by
// other parser methods.
p.tracer = nil
// The list of {@link ParseTreeListener} listeners registered to receive
// events during the parse.
p.parseListeners = nil
// The number of syntax errors Reported during parsing. This value is
// incremented each time {@link //NotifyErrorListeners} is called.
p._SyntaxErrors = 0
p.SetInputStream(input)
return p
}
// This field maps from the serialized ATN string to the deserialized {@link ATN}
// with bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
//
var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
func (p *BaseParser) reset() {
if p.input != nil {
p.input.Seek(0)
}
p.errHandler.reset(p)
p.ctx = nil
p._SyntaxErrors = 0
p.SetTrace(nil)
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
if p.Interpreter != nil {
p.Interpreter.reset()
}
}
func (p *BaseParser) GetErrorHandler() ErrorStrategy {
return p.errHandler
}
func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
p.errHandler = e
}
// Match current input symbol against {@code ttype}. If the symbol type
// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
// called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @param ttype the token type to Match
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the
// mismatched symbol
func (p *BaseParser) Match(ttype int) Token {
t := p.GetCurrentToken()
if t.GetTokenType() == ttype {
p.errHandler.ReportMatch(p)
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
// we must have conjured up a Newtoken during single token
// insertion
// if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
return t
}
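// Illustrative sketch, not part of the original file: the shape of a generated rule
// method, which calls Match with a token-type constant. The constant used here is a
// hypothetical token type, not one defined by this runtime.
func generatedRuleSketch(p *BaseParser) {
	const hypotheticalIDType = 5       // a generated parser would use e.g. MyParserID
	tok := p.Match(hypotheticalIDType) // consume the token or trigger inline error recovery
	_ = tok                            // generated code stores this in a rule-context label
}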
// Match current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
// and {@link //consume} are called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the mismatched
// symbol
func (p *BaseParser) MatchWildcard() Token {
t := p.GetCurrentToken()
if t.GetTokenType() > 0 {
p.errHandler.ReportMatch(p)
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
// we must have conjured up a Newtoken during single token
// insertion
// if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
return t
}
func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
return p.ctx
}
func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
p.ctx = v
}
func (p *BaseParser) GetParseListeners() []ParseTreeListener {
if p.parseListeners == nil {
return make([]ParseTreeListener, 0)
}
return p.parseListeners
}
// Registers {@code listener} to receive events during the parsing process.
//
// <p>To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
// rule entry methods may be omitted.</p>
//
// <p>With the following specific exceptions, calls to listener events are
// <em>deterministic</em>, i.e. for identical input the calls to listener
// methods will be the same.</p>
//
// <ul>
// <li>Alterations to the grammar used to generate code may change the
// behavior of the listener calls.</li>
// <li>Alterations to the command line options passed to ANTLR 4 when
// generating the parser may change the behavior of the listener calls.</li>
// <li>Changing the version of the ANTLR Tool used to generate the parser
// may change the behavior of the listener calls.</li>
// </ul>
//
// @param listener the listener to add
//
// @panics nilPointerException if {@code} listener is {@code nil}
//
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
}
if p.parseListeners == nil {
p.parseListeners = make([]ParseTreeListener, 0)
}
p.parseListeners = append(p.parseListeners, listener)
}
//
// Remove {@code listener} from the list of parse listeners.
//
// <p>If {@code listener} is {@code nil} or has not been added as a parse
// listener, this method does nothing.</p>
// @param listener the listener to remove
//
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
if p.parseListeners != nil {
idx := -1
for i, v := range p.parseListeners {
if v == listener {
idx = i
break
}
}
if idx == -1 {
return
}
// remove the listener from the slice
p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
if len(p.parseListeners) == 0 {
p.parseListeners = nil
}
}
}
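// Illustrative sketch, not part of the original file: attaching a parse listener so
// rule enter/exit events fire while the parse is running, then detaching it. The
// listener argument is assumed to implement ParseTreeListener.
func parseListenerSketch(p *BaseParser, listener ParseTreeListener) {
	p.AddParseListener(listener) // events now fire as rules are entered and exited
	// ... invoke the generated start rule here ...
	p.RemoveParseListener(listener) // detach once this parse is done
}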
// Remove all parse listeners.
func (p *BaseParser) removeParseListeners() {
p.parseListeners = nil
}
// Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
if p.parseListeners != nil {
ctx := p.ctx
for _, listener := range p.parseListeners {
listener.EnterEveryRule(ctx)
ctx.EnterRule(listener)
}
}
}
//
// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
//
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
ctx := p.ctx
l := len(p.parseListeners) - 1
for i := range p.parseListeners {
listener := p.parseListeners[l-i]
ctx.ExitRule(listener)
listener.ExitEveryRule(ctx)
}
}
}
func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
return p.Interpreter
}
func (p *BaseParser) GetATN() *ATN {
return p.Interpreter.atn
}
func (p *BaseParser) GetTokenFactory() TokenFactory {
return p.input.GetTokenSource().GetTokenFactory()
}
// Tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
p.input.GetTokenSource().setTokenFactory(factory)
}
// The ATN with bypass alternatives is expensive to create so we create it
// lazily.
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
//
func (p *BaseParser) GetATNWithBypassAlts() {
// TODO
panic("Not implemented!")
// serializedAtn := p.getSerializedATN()
// if (serializedAtn == nil) {
// panic("The current parser does not support an ATN with bypass alternatives.")
// }
// result := p.bypassAltsAtnCache[serializedAtn]
// if (result == nil) {
// deserializationOptions := NewATNDeserializationOptions(nil)
// deserializationOptions.generateRuleBypassTransitions = true
// result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
// p.bypassAltsAtnCache[serializedAtn] = result
// }
// return result
}
// The preferred method of getting a tree pattern. For example, here's a
// sample use:
//
// <pre>
// ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
// MyParser.RULE_expr)
// ParseTreeMatch m = p.Match(t)
// String id = m.Get("ID")
// </pre>
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
panic("NewParseTreePatternMatcher not implemented!")
//
// if (lexer == nil) {
// if (p.GetTokenStream() != nil) {
// tokenSource := p.GetTokenStream().GetTokenSource()
// if _, ok := tokenSource.(ILexer); ok {
// lexer = tokenSource
// }
// }
// }
// if (lexer == nil) {
// panic("Parser can't discover a lexer to use")
// }
// m := NewParseTreePatternMatcher(lexer, p)
// return m.compile(pattern, patternRuleIndex)
}
func (p *BaseParser) GetInputStream() IntStream {
return p.GetTokenStream()
}
func (p *BaseParser) SetInputStream(input TokenStream) {
p.SetTokenStream(input)
}
func (p *BaseParser) GetTokenStream() TokenStream {
return p.input
}
// Set the token stream and reset the parser.
func (p *BaseParser) SetTokenStream(input TokenStream) {
p.input = nil
p.reset()
p.input = input
}
// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
//
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
}
func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
if offendingToken == nil {
offendingToken = p.GetCurrentToken()
}
p._SyntaxErrors++
line := offendingToken.GetLine()
column := offendingToken.GetColumn()
listener := p.GetErrorListenerDispatch()
listener.SyntaxError(p, offendingToken, line, column, msg, err)
}
func (p *BaseParser) Consume() Token {
o := p.GetCurrentToken()
if o.GetTokenType() != TokenEOF {
p.GetInputStream().Consume()
}
hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
if p.BuildParseTrees || hasListener {
if p.errHandler.inErrorRecoveryMode(p) {
node := p.ctx.AddErrorNode(o)
if p.parseListeners != nil {
for _, l := range p.parseListeners {
l.VisitErrorNode(node)
}
}
} else {
node := p.ctx.AddTokenNode(o)
if p.parseListeners != nil {
for _, l := range p.parseListeners {
l.VisitTerminal(node)
}
}
}
// node.invokingState = p.state
}
return o
}
func (p *BaseParser) addContextToParseTree() {
// add current context to parent if we have a parent
if p.ctx.GetParent() != nil {
p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
}
}
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
p.SetState(state)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
if p.BuildParseTrees {
p.addContextToParseTree()
}
if p.parseListeners != nil {
p.TriggerEnterRuleEvent()
}
}
func (p *BaseParser) ExitRule() {
p.ctx.SetStop(p.input.LT(-1))
// trigger event on ctx, before it reverts to parent
if p.parseListeners != nil {
p.TriggerExitRuleEvent()
}
p.SetState(p.ctx.GetInvokingState())
if p.ctx.GetParent() != nil {
p.ctx = p.ctx.GetParent().(ParserRuleContext)
} else {
p.ctx = nil
}
}
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
localctx.SetAltNumber(altNum)
// if we have a new localctx, make sure we replace existing ctx
// that is the previous child of the parse tree
if p.BuildParseTrees && p.ctx != localctx {
if p.ctx.GetParent() != nil {
p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
}
}
p.ctx = localctx
}
// Get the precedence level for the top-most precedence rule.
//
// @return The precedence level for the top-most precedence rule, or -1 if
// the parser context is not nested within a precedence rule.
func (p *BaseParser) GetPrecedence() int {
if len(p.precedenceStack) == 0 {
return -1
}
return p.precedenceStack[len(p.precedenceStack)-1]
}
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
p.SetState(state)
p.precedenceStack.Push(precedence)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
if p.parseListeners != nil {
p.TriggerEnterRuleEvent() // simulates rule entry for
// left-recursive rules
}
}
//
// Like {@link //EnterRule} but for recursive rules.
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
previous := p.ctx
previous.SetParent(localctx)
previous.SetInvokingState(state)
previous.SetStop(p.input.LT(-1))
p.ctx = localctx
p.ctx.SetStart(previous.GetStart())
if p.BuildParseTrees {
p.ctx.AddChild(previous)
}
if p.parseListeners != nil {
p.TriggerEnterRuleEvent() // simulates rule entry for
// left-recursive rules
}
}
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
p.precedenceStack.Pop()
p.ctx.SetStop(p.input.LT(-1))
retCtx := p.ctx // save current ctx (return value)
// unroll so ctx is as it was before call to recursive method
if p.parseListeners != nil {
for p.ctx != parentCtx {
p.TriggerExitRuleEvent()
p.ctx = p.ctx.GetParent().(ParserRuleContext)
}
} else {
p.ctx = parentCtx
}
// hook into tree
retCtx.SetParent(parentCtx)
if p.BuildParseTrees && parentCtx != nil {
// add return ctx into invoking rule's tree
parentCtx.AddChild(retCtx)
}
}
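// A hedged sketch (illustrative only) of how generated code for a left-recursive
// rule such as "expr : expr '*' expr | INT ;" drives EnterRecursionRule,
// PushNewRecursionContext and UnrollRecursionContexts. The context type
// ExprContext, RULE_expr and the state numbers are hypothetical generated names.
//
//	// p.EnterRecursionRule(localctx, 2, RULE_expr, 0) // push precedence 0
//	// ... match the primary alternative (INT) ...
//	// for each trailing "'*' expr" iteration:
//	//     p.PushNewRecursionContext(NewExprContext(p, parent), 4, RULE_expr) // re-parents the previous ctx
//	//     ... match '*' and the right-hand operand ...
//	// p.UnrollRecursionContexts(parentCtx) // pop precedence, hook the result into the caller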
func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
ctx := p.ctx
for ctx != nil {
if ctx.GetRuleIndex() == ruleIndex {
return ctx
}
ctx = ctx.GetParent().(ParserRuleContext)
}
return nil
}
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
func (p *BaseParser) inContext(context ParserRuleContext) bool {
// TODO: useful in parser?
return false
}
//
// Checks whether or not {@code symbol} can follow the current state in the
// ATN. The behavior of this method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
// <pre>
// return getExpectedTokens().contains(symbol)
// </pre>
//
// @param symbol the symbol type to check
// @return {@code true} if {@code symbol} can follow the current state in
// the ATN, otherwise {@code false}.
func (p *BaseParser) IsExpectedToken(symbol int) bool {
atn := p.Interpreter.atn
ctx := p.ctx
s := atn.states[p.state]
following := atn.NextTokens(s, nil)
if following.contains(symbol) {
return true
}
if !following.contains(TokenEpsilon) {
return false
}
for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
invokingState := atn.states[ctx.GetInvokingState()]
rt := invokingState.GetTransitions()[0]
following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
if following.contains(symbol) {
return true
}
ctx = ctx.GetParent().(ParserRuleContext)
}
if following.contains(TokenEpsilon) && symbol == TokenEOF {
return true
}
return false
}
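// A hedged in-package sketch of the equivalence stated in the comment above;
// MyParserID is a hypothetical generated token-type constant.
//
//	// fast := p.IsExpectedToken(MyParserID)
//	// slow := p.GetExpectedTokens().contains(MyParserID)
//	// fast == slow, but IsExpectedToken avoids building the complete follow set.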
// Computes the set of input symbols which could follow the current parser
// state and context, as given by {@link //GetState} and {@link //GetContext},
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
//
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
atn := p.Interpreter.atn
s := atn.states[p.state]
return atn.NextTokens(s, nil)
}
// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.
func (p *BaseParser) GetRuleIndex(ruleName string) int {
var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
if ok {
return ruleIndex
}
return -1
}
// Return a list of the rule names in your parser instance
// leading up to a call to the current rule. You could override it if
// you want more details, such as the file/line info of where
// in the ATN a rule is invoked.
//
// This is very useful for error messages.
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
if c == nil {
c = p.ctx
}
stack := make([]string, 0)
for c != nil {
// compute what follows who invoked us
ruleIndex := c.GetRuleIndex()
if ruleIndex < 0 {
stack = append(stack, "n/a")
} else {
stack = append(stack, p.GetRuleNames()[ruleIndex])
}
vp := c.GetParent()
if vp == nil {
break
}
c = vp.(ParserRuleContext)
}
return stack
}
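// A hedged usage sketch, typically from an error listener or error strategy;
// passing nil starts from the parser's current context, and the rule names
// shown in the output comment are hypothetical.
//
//	// stack := p.GetRuleInvocationStack(nil)
//	// fmt.Println("rule invocation stack:", stack) // innermost rule first, e.g. [atom expr stat]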
// For debugging and other purposes.
func (p *BaseParser) GetDFAStrings() string {
return fmt.Sprint(p.Interpreter.decisionToDFA)
}
// For debugging and other purposes.
func (p *BaseParser) DumpDFA() {
seenOne := false
for _, dfa := range p.Interpreter.decisionToDFA {
if len(dfa.GetStates()) > 0 {
if seenOne {
fmt.Println()
}
fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
seenOne = true
}
}
}
func (p *BaseParser) GetSourceName() string {
return p.GrammarFileName
}
// During a parse it is sometimes useful to listen in on the rule entry and exit
// events as well as token matches. This is for quick-and-dirty debugging.
//
func (p *BaseParser) SetTrace(trace *TraceListener) {
if trace == nil {
p.RemoveParseListener(p.tracer)
p.tracer = nil
} else {
if p.tracer != nil {
p.RemoveParseListener(p.tracer)
}
p.tracer = NewTraceListener(p)
p.AddParseListener(p.tracer)
}
}
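// A hedged usage sketch: tracing is switched on by passing any non-nil
// *TraceListener (SetTrace constructs its own listener internally) and
// switched off again by passing nil.
//
//	// parser.SetTrace(&TraceListener{}) // print rule enter/exit and token consumption
//	// ... parse ...
//	// parser.SetTrace(nil)              // stop tracing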

File diff suppressed because it is too large

View File

@ -0,0 +1,358 @@
package antlr
import (
"reflect"
"strconv"
)
type ParserRuleContext interface {
RuleContext
SetException(RecognitionException)
AddTokenNode(token Token) *TerminalNodeImpl
AddErrorNode(badToken Token) *ErrorNodeImpl
EnterRule(listener ParseTreeListener)
ExitRule(listener ParseTreeListener)
SetStart(Token)
GetStart() Token
SetStop(Token)
GetStop() Token
AddChild(child RuleContext) RuleContext
RemoveLastChild()
}
type BaseParserRuleContext struct {
*BaseRuleContext
start, stop Token
exception RecognitionException
children []Tree
}
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
prc := new(BaseParserRuleContext)
prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
prc.RuleIndex = -1
// If we are debugging or building a parse tree for a visitor,
// we need to track all of the tokens and rule invocations associated
// with this rule's context. This is empty when parsing without tree
// construction, because we don't need to track the details of
// how this rule is parsed.
prc.children = nil
prc.start = nil
prc.stop = nil
// The exception that forced this rule to return. If the rule completed
// successfully, this is {@code nil}.
prc.exception = nil
return prc
}
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
prc.exception = e
}
func (prc *BaseParserRuleContext) GetChildren() []Tree {
return prc.children
}
func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
// from RuleContext
prc.parentCtx = ctx.parentCtx
prc.invokingState = ctx.invokingState
prc.children = nil
prc.start = ctx.start
prc.stop = ctx.stop
}
func (prc *BaseParserRuleContext) GetText() string {
if prc.GetChildCount() == 0 {
return ""
}
var s string
for _, child := range prc.children {
s += child.(ParseTree).GetText()
}
return s
}
// Double dispatch methods for listeners
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}
// Does not set the parent link; other add methods must do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if prc.children == nil {
prc.children = make([]Tree, 0)
}
if child == nil {
panic("Child may not be null")
}
prc.children = append(prc.children, child)
return child
}
func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
if prc.children == nil {
prc.children = make([]Tree, 0)
}
if child == nil {
panic("Child may not be null")
}
prc.children = append(prc.children, child)
return child
}
// Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have a label, we will need to remove the
// generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
}
}
func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
node := NewTerminalNodeImpl(token)
prc.addTerminalNodeChild(node)
node.parentCtx = prc
return node
}
func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
node := NewErrorNodeImpl(badToken)
prc.addTerminalNodeChild(node)
node.parentCtx = prc
return node
}
func (prc *BaseParserRuleContext) GetChild(i int) Tree {
if prc.children != nil && len(prc.children) > i {
return prc.children[i]
}
return nil
}
func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
if childType == nil {
return prc.GetChild(i).(RuleContext)
}
for j := 0; j < len(prc.children); j++ {
child := prc.children[j]
if reflect.TypeOf(child) == childType {
if i == 0 {
return child.(RuleContext)
}
i--
}
}
return nil
}
func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
return TreesStringTree(prc, ruleNames, recog)
}
func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
return prc
}
func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
return visitor.VisitChildren(prc)
}
func (prc *BaseParserRuleContext) SetStart(t Token) {
prc.start = t
}
func (prc *BaseParserRuleContext) GetStart() Token {
return prc.start
}
func (prc *BaseParserRuleContext) SetStop(t Token) {
prc.stop = t
}
func (prc *BaseParserRuleContext) GetStop() Token {
return prc.stop
}
func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
for j := 0; j < len(prc.children); j++ {
child := prc.children[j]
if c2, ok := child.(TerminalNode); ok {
if c2.GetSymbol().GetTokenType() == ttype {
if i == 0 {
return c2
}
i--
}
}
}
return nil
}
func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
if prc.children == nil {
return make([]TerminalNode, 0)
}
tokens := make([]TerminalNode, 0)
for j := 0; j < len(prc.children); j++ {
child := prc.children[j]
if tchild, ok := child.(TerminalNode); ok {
if tchild.GetSymbol().GetTokenType() == ttype {
tokens = append(tokens, tchild)
}
}
}
return tokens
}
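// A hedged sketch of fetching terminal children by token type; the constant
// MyParserIdentifier is a hypothetical generated token type.
//
//	// ids := ctx.GetTokens(MyParserIdentifier)     // every Identifier terminal in ctx
//	// first := ctx.GetToken(MyParserIdentifier, 0) // the first one, or nil if absent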
func (prc *BaseParserRuleContext) GetPayload() interface{} {
return prc
}
func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
if prc.children == nil || i < 0 || i >= len(prc.children) {
return nil
}
j := -1 // what element have we found with ctxType?
for _, o := range prc.children {
childType := reflect.TypeOf(o)
if childType.Implements(ctxType) {
j++
if j == i {
return o.(RuleContext)
}
}
}
return nil
}
// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
// check for convertibility
func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
return prc.getChild(ctxType, i)
}
func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
if prc.children == nil {
return make([]RuleContext, 0)
}
contexts := make([]RuleContext, 0)
for _, child := range prc.children {
childType := reflect.TypeOf(child)
if childType.ConvertibleTo(ctxType) {
contexts = append(contexts, child.(RuleContext))
}
}
return contexts
}
func (prc *BaseParserRuleContext) GetChildCount() int {
if prc.children == nil {
return 0
}
return len(prc.children)
}
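// A hedged sketch of walking a context's children with the accessors above;
// ctx stands for any ParserRuleContext produced by a parse.
//
//	// for i := 0; i < ctx.GetChildCount(); i++ {
//	//     child := ctx.GetChild(i)
//	//     fmt.Println(child.(ParseTree).GetText()) // same cast GetText uses above
//	// }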
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
if prc.start == nil || prc.stop == nil {
return TreeInvalidInterval
}
return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
}
// We need to manage circular dependencies, so export this now.
// Print out a whole tree, not just a node, in LISP format
// (root child1 .. childN). Print just a node if this context is a leaf.
//
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
var p ParserRuleContext = prc
s := "["
for p != nil && p != stop {
if ruleNames == nil {
if !p.IsEmpty() {
s += strconv.Itoa(p.GetInvokingState())
}
} else {
ri := p.GetRuleIndex()
var ruleName string
if ri >= 0 && ri < len(ruleNames) {
ruleName = ruleNames[ri]
} else {
ruleName = strconv.Itoa(ri)
}
s += ruleName
}
if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
s += " "
}
pi := p.GetParent()
if pi != nil {
p = pi.(ParserRuleContext)
} else {
p = nil
}
}
s += "]"
return s
}
var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
ParserRuleContext
}
type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
prc := new(BaseInterpreterRuleContext)
prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
prc.RuleIndex = ruleIndex
return prc
}

View File

@ -0,0 +1,770 @@
package antlr
import (
"fmt"
"strconv"
)
// Represents {@code $} in local context prediction, which means wildcard.
// {@code *+x = *}.
const (
BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)
// Represents {@code $} in an array in full context mode, when {@code $}
// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
// {@code $} = {@link //EmptyReturnState}.
// /
var (
BasePredictionContextglobalNodeCount = 1
BasePredictionContextid = BasePredictionContextglobalNodeCount
)
type PredictionContext interface {
Hash() string
GetParent(int) PredictionContext
getReturnState(int) int
equals(PredictionContext) bool
length() int
isEmpty() bool
hasEmptyPath() bool
String() string
}
type BasePredictionContext struct {
cachedHashString string
}
func NewBasePredictionContext(cachedHashString string) *BasePredictionContext {
pc := new(BasePredictionContext)
pc.cachedHashString = cachedHashString
return pc
}
// Stores the computed hash code of this {@link BasePredictionContext}. The hash
// code is computed in parts to Match the following reference algorithm.
//
// <pre>
// private int referenceHashCode() {
// int hash = {@link MurmurHash//initialize MurmurHash.initialize}({@link
// //INITIAL_HASH})
//
// for (int i = 0; i < {@link //Size()}; i++) {
// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //GetParent
// GetParent}(i))
// }
//
// for (int i = 0; i < {@link //Size()}; i++) {
// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link
// //getReturnState getReturnState}(i))
// }
//
// hash = {@link MurmurHash//finish MurmurHash.finish}(hash, 2 * {@link
// //Size()})
// return hash
// }
// </pre>
//
func (b *BasePredictionContext) isEmpty() bool {
return false
}
func (b *BasePredictionContext) Hash() string {
return b.cachedHashString
}
func calculateHashString(parent PredictionContext, returnState int) string {
return parent.String() + strconv.Itoa(returnState)
}
func calculateEmptyHashString() string {
return ""
}
// Used to cache {@link BasePredictionContext} objects. It is used for the shared
// context cache associated with contexts in DFA states. This cache
// can be used for both lexers and parsers.
type PredictionContextCache struct {
cache map[PredictionContext]PredictionContext
}
func NewPredictionContextCache() *PredictionContextCache {
t := new(PredictionContextCache)
t.cache = make(map[PredictionContext]PredictionContext)
return t
}
// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a new context to the cache.
// Protect shared cache from unsafe thread access.
//
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
if ctx == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY
}
existing := p.cache[ctx]
if existing != nil {
return existing
}
p.cache[ctx] = ctx
return ctx
}
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
return p.cache[ctx]
}
func (p *PredictionContextCache) length() int {
return len(p.cache)
}
type SingletonPredictionContext interface {
PredictionContext
}
type BaseSingletonPredictionContext struct {
*BasePredictionContext
parentCtx PredictionContext
returnState int
}
func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
s := new(BaseSingletonPredictionContext)
s.BasePredictionContext = NewBasePredictionContext("")
if parent != nil {
s.cachedHashString = calculateHashString(parent, returnState)
} else {
s.cachedHashString = calculateEmptyHashString()
}
s.parentCtx = parent
s.returnState = returnState
return s
}
func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
if returnState == BasePredictionContextEmptyReturnState && parent == nil {
// someone can pass in the bits of an array ctx that mean $
return BasePredictionContextEMPTY
}
return NewBaseSingletonPredictionContext(parent, returnState)
}
func (b *BaseSingletonPredictionContext) length() int {
return 1
}
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
return b.parentCtx
}
func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
return b.returnState
}
func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
return b.returnState == BasePredictionContextEmptyReturnState
}
func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
if b == other {
return true
} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
return false
} else if b.Hash() != other.Hash() {
return false // can't be same if hash is different
}
otherP := other.(*BaseSingletonPredictionContext)
if b.returnState != other.getReturnState(0) {
return false
} else if b.parentCtx == nil {
return otherP.parentCtx == nil
}
return b.parentCtx.equals(otherP.parentCtx)
}
func (b *BaseSingletonPredictionContext) Hash() string {
return b.cachedHashString
}
func (b *BaseSingletonPredictionContext) String() string {
var up string
if b.parentCtx == nil {
up = ""
} else {
up = b.parentCtx.String()
}
if len(up) == 0 {
if b.returnState == BasePredictionContextEmptyReturnState {
return "$"
}
return strconv.Itoa(b.returnState)
}
return strconv.Itoa(b.returnState) + " " + up
}
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
type EmptyPredictionContext struct {
*BaseSingletonPredictionContext
}
func NewEmptyPredictionContext() *EmptyPredictionContext {
p := new(EmptyPredictionContext)
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
return p
}
func (e *EmptyPredictionContext) isEmpty() bool {
return true
}
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
return nil
}
func (e *EmptyPredictionContext) getReturnState(index int) int {
return e.returnState
}
func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
return e == other
}
func (e *EmptyPredictionContext) String() string {
return "$"
}
type ArrayPredictionContext struct {
*BasePredictionContext
parents []PredictionContext
returnStates []int
}
func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
// Parent can be nil only if full ctx mode and we make an array
// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
// nil parent and
// returnState == {@link //EmptyReturnState}.
c := new(ArrayPredictionContext)
c.BasePredictionContext = NewBasePredictionContext("")
for i := range parents {
c.cachedHashString += calculateHashString(parents[i], returnStates[i])
}
c.parents = parents
c.returnStates = returnStates
return c
}
func (a *ArrayPredictionContext) GetReturnStates() []int {
return a.returnStates
}
func (a *ArrayPredictionContext) hasEmptyPath() bool {
return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
}
func (a *ArrayPredictionContext) isEmpty() bool {
// since EmptyReturnState can only appear in the last position, we
// don't need to verify that size==1
return a.returnStates[0] == BasePredictionContextEmptyReturnState
}
func (a *ArrayPredictionContext) length() int {
return len(a.returnStates)
}
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
return a.parents[index]
}
func (a *ArrayPredictionContext) getReturnState(index int) int {
return a.returnStates[index]
}
func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
if _, ok := other.(*ArrayPredictionContext); !ok {
return false
} else if a.cachedHashString != other.Hash() {
return false // can't be same if hash is different
} else {
otherP := other.(*ArrayPredictionContext)
return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
}
}
func (a *ArrayPredictionContext) String() string {
if a.isEmpty() {
return "[]"
}
s := "["
for i := 0; i < len(a.returnStates); i++ {
if i > 0 {
s = s + ", "
}
if a.returnStates[i] == BasePredictionContextEmptyReturnState {
s = s + "$"
continue
}
s = s + strconv.Itoa(a.returnStates[i])
if a.parents[i] != nil {
s = s + " " + a.parents[i].String()
} else {
s = s + "nil"
}
}
return s + "]"
}
// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
// /
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
if outerContext == nil {
outerContext = RuleContextEmpty
}
// if we are in RuleContext of start rule, s, then BasePredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty)
if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
return BasePredictionContextEMPTY
}
// If we have a parent, convert it to a BasePredictionContext graph
parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
state := a.states[outerContext.GetInvokingState()]
transition := state.GetTransitions()[0]
return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}
func calculateListsHashString(parents []BasePredictionContext, returnStates []int) string {
s := ""
for _, p := range parents {
s += fmt.Sprint(p)
}
for _, r := range returnStates {
s += fmt.Sprint(r)
}
return s
}
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
// share same graph if both same
if a == b {
return a
}
ac, ok1 := a.(*BaseSingletonPredictionContext)
bc, ok2 := b.(*BaseSingletonPredictionContext)
if ok1 && ok2 {
return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
}
// At least one of a or b is array
// If one is $ and rootIsWildcard, return $ as the * wildcard
if rootIsWildcard {
if _, ok := a.(*EmptyPredictionContext); ok {
return a
}
if _, ok := b.(*EmptyPredictionContext); ok {
return b
}
}
// convert singleton so both are arrays to normalize
if _, ok := a.(*BaseSingletonPredictionContext); ok {
a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
}
if _, ok := b.(*BaseSingletonPredictionContext); ok {
b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
}
return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}
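// A hedged worked example of the local-context (rootIsWildcard) path above:
// two singletons with the same parent but different return states merge into
// a single array node.
//
//	// root := BasePredictionContextEMPTY
//	// x := SingletonBasePredictionContextCreate(root, 7)
//	// y := SingletonBasePredictionContextCreate(root, 9)
//	// m := merge(x, y, true, nil)
//	// m is now an *ArrayPredictionContext with returnStates [7 9] and root as both parents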
//
// Merge two {@link SingletonBasePredictionContext} instances.
//
// <p>Stack tops equal, parents merge is same return left graph.<br>
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Same stack top, parents differ merge parents giving array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents.<br>
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to same parent. Make array node for the
// root where both element in the root point to the same (original)
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to different parents. Make array node for
// the root where each element points to the corresponding original
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// @param mergeCache
// /
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
return previous.(PredictionContext)
}
previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
return previous.(PredictionContext)
}
}
rootMerge := mergeRoot(a, b, rootIsWildcard)
if rootMerge != nil {
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), rootMerge)
}
return rootMerge
}
if a.returnState == b.returnState {
parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
// if parent is same as existing a or b parent or reduced to a parent,
// return it
if parent == a.parentCtx {
return a // ax + bx = ax, if a=b
}
if parent == b.parentCtx {
return b // ax + bx = bx, if a=b
}
// else: ax + ay = a'[x,y]
// merge parents x and y, giving an array node with x,y, then remainders
// of those graphs. dup a, a' points at the merged array.
// New joined parent, so create a new singleton pointing to it, a'
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), spc)
}
return spc
}
// a != b payloads differ
// see if we can collapse parents due to $+x parents if local ctx
var singleParent PredictionContext
if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
// bx =
// [a,b]x
singleParent = a.parentCtx
}
if singleParent != nil { // parents are same
// sort payloads and use same parent
payloads := []int{a.returnState, b.returnState}
if a.returnState > b.returnState {
payloads[0] = b.returnState
payloads[1] = a.returnState
}
parents := []PredictionContext{singleParent, singleParent}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
// parents differ and can't merge them. Just pack together
// into array can't merge.
// ax + by = [ax,by]
payloads := []int{a.returnState, b.returnState}
parents := []PredictionContext{a.parentCtx, b.parentCtx}
if a.returnState > b.returnState { // sort by payload
payloads[0] = b.returnState
payloads[1] = a.returnState
parents = []PredictionContext{b.parentCtx, a.parentCtx}
}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
//
// Handle case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
//
// <h2>Local-Context Merges</h2>
//
// <p>These local-context merge operations are used when {@code rootIsWildcard}
// is true.</p>
//
// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.<br>
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
// {@code //EMPTY} return left graph.<br>
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
//
// <p>Special case of last merge if local context.<br>
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
//
// <h2>Full-Context Merges</h2>
//
// <p>These full-context merge operations are used when {@code rootIsWildcard}
// is false.</p>
//
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
//
// <p>Must keep all contexts {@link //EMPTY} in array is a special value (and
// nil parent).<br>
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// /
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
if rootIsWildcard {
if a == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY // * + b = *
}
if b == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY // a + * = *
}
} else {
if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY // $ + $ = $
} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
parents := []PredictionContext{b.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
parents := []PredictionContext{a.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
}
}
return nil
}
//
// Merge two {@link ArrayBasePredictionContext} instances.
//
// <p>Different tops, different parents.<br>
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, same parents.<br>
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, different parents.<br>
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, all shared parents.<br>
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Equal tops, merge parents and reduce top to
// {@link SingletonBasePredictionContext}.<br>
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
return previous.(PredictionContext)
}
previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
return previous.(PredictionContext)
}
}
// merge sorted payloads a + b => M
i := 0 // walks a
j := 0 // walks b
k := 0 // walks target M array
mergedReturnStates := make([]int, len(a.returnStates) + len(b.returnStates))
mergedParents := make([]PredictionContext, len(a.returnStates) + len(b.returnStates))
// walk and merge to yield mergedParents, mergedReturnStates
for i < len(a.returnStates) && j < len(b.returnStates) {
aParent := a.parents[i]
bParent := b.parents[j]
if a.returnStates[i] == b.returnStates[j] {
// same payload (stack tops are equal), must yield merged singleton
payload := a.returnStates[i]
// $+$ = $
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
// ->
// ax
if bothDollars || axAX {
mergedParents[k] = aParent // choose left
mergedReturnStates[k] = payload
} else { // ax+ay -> a'[x,y]
mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
mergedParents[k] = mergedParent
mergedReturnStates[k] = payload
}
i++ // hop over left one as usual
j++ // but also skip one on the right side since we merge
} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
mergedParents[k] = aParent
mergedReturnStates[k] = a.returnStates[i]
i++
} else { // b > a, copy b[j] to M
mergedParents[k] = bParent
mergedReturnStates[k] = b.returnStates[j]
j++
}
k++
}
// copy over any payloads remaining in either array
if i < len(a.returnStates) {
for p := i; p < len(a.returnStates); p++ {
mergedParents[k] = a.parents[p]
mergedReturnStates[k] = a.returnStates[p]
k++
}
} else {
for p := j; p < len(b.returnStates); p++ {
mergedParents[k] = b.parents[p]
mergedReturnStates[k] = b.returnStates[p]
k++
}
}
// trim merged if we combined a few that had same stack tops
if k < len(mergedParents) { // write index < last position trim
if k == 1 { // for just one merged element, return singleton top
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), pc)
}
return pc
}
mergedParents = mergedParents[0:k]
mergedReturnStates = mergedReturnStates[0:k]
}
M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
if M == a {
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), a)
}
return a
}
if M == b {
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), b)
}
return b
}
combineCommonParents(mergedParents)
if mergeCache != nil {
mergeCache.set(a.Hash(), b.Hash(), M)
}
return M
}
//
// Make a pass over all <em>M</em> {@code parents} and merge any that are
// {@code equals()}.
func combineCommonParents(parents []PredictionContext) {
uniqueParents := make(map[PredictionContext]PredictionContext)
for p := 0; p < len(parents); p++ {
parent := parents[p]
if uniqueParents[parent] == nil {
uniqueParents[parent] = parent
}
}
for q := 0; q < len(parents); q++ {
parents[q] = uniqueParents[parents[q]]
}
}
func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
if context.isEmpty() {
return context
}
existing := visited[context]
if existing != nil {
return existing
}
existing = contextCache.Get(context)
if existing != nil {
visited[context] = existing
return existing
}
changed := false
parents := make([]PredictionContext, context.length())
for i := 0; i < len(parents); i++ {
parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
if changed || parent != context.GetParent(i) {
if !changed {
parents = make([]PredictionContext, context.length())
for j := 0; j < context.length(); j++ {
parents[j] = context.GetParent(j)
}
changed = true
}
parents[i] = parent
}
}
if !changed {
contextCache.add(context)
visited[context] = context
return context
}
var updated PredictionContext
if len(parents) == 0 {
updated = BasePredictionContextEMPTY
} else if len(parents) == 1 {
updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
} else {
updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
}
contextCache.add(updated)
visited[updated] = updated
visited[context] = updated
return updated
}

View File

@ -0,0 +1,558 @@
package antlr
import (
"strconv"
"strings"
)
//
// This enumeration defines the prediction modes available in ANTLR 4 along with
// utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.
const (
//
// The SLL(*) prediction mode. This prediction mode ignores the current
// parser context when making predictions. This is the fastest prediction
// mode, and provides correct results for many grammars. This prediction
// mode is more powerful than the prediction mode provided by ANTLR 3, but
// may result in syntax errors for grammar and input combinations which are
// not SLL.
//
// <p>
// When using this prediction mode, the parser will either return a correct
// parse tree (i.e. the same parse tree that would be returned with the
// {@link //LL} prediction mode), or it will Report a syntax error. If a
// syntax error is encountered when using the {@link //SLL} prediction mode,
// it may be due to either an actual syntax error in the input or indicate
// that the particular combination of grammar and input requires the more
// powerful {@link //LL} prediction abilities to complete successfully.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeSLL = 0
//
// The LL(*) prediction mode. This prediction mode allows the current parser
// context to be used for resolving SLL conflicts that occur during
// prediction. This is the fastest prediction mode that guarantees correct
// parse results for all combinations of grammars with syntactically correct
// inputs.
//
// <p>
// When using this prediction mode, the parser will make correct decisions
// for all syntactically-correct grammar and input combinations. However, in
// cases where the grammar is truly ambiguous this prediction mode might not
// Report a precise answer for <em>exactly which</em> alternatives are
// ambiguous.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLL = 1
//
// The LL(*) prediction mode with exact ambiguity detection. In addition to
// the correctness guarantees provided by the {@link //LL} prediction mode,
// this prediction mode instructs the prediction algorithm to determine the
// complete and exact set of ambiguous alternatives for every ambiguous
// decision encountered while parsing.
//
// <p>
// This prediction mode may be used for diagnosing ambiguities during
// grammar development. Due to the performance overhead of calculating sets
// of ambiguous alternatives, this prediction mode should be avoided when
// the exact results are not necessary.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLLExactAmbigDetection = 2
)
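// A hedged usage sketch: selecting a prediction mode on a generated parser.
// NewMyParser is hypothetical, and SetPredictionMode on the parser's
// ParserATNSimulator is assumed to be available as in the other ANTLR runtimes.
//
//	// p := NewMyParser(tokens)
//	// p.GetInterpreter().SetPredictionMode(PredictionModeSLL) // fast SLL(*) pass first
//	// // on a syntax error, a client may re-parse the same input with PredictionModeLL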
//
// Computes the SLL prediction termination condition.
//
// <p>
// This method computes the SLL prediction termination condition for both of
// the following cases.</p>
//
// <ul>
// <li>The usual SLL+LL fallback upon SLL conflict</li>
// <li>Pure SLL without LL fallback</li>
// </ul>
//
// <p><strong>COMBINED SLL+LL PARSING</strong></p>
//
// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
// ensured regardless of how the termination condition is computed by this
// method. Due to the substantially higher cost of LL prediction, the
// prediction should only fall back to LL when the additional lookahead
// cannot lead to a unique SLL prediction.</p>
//
// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
// conflicting subsets should fall back to full LL, even if the
// configuration sets don't resolve to the same alternative (e.g.
// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
// configuration, SLL could continue with the hopes that more lookahead will
// resolve via one of those non-conflicting configurations.</p>
//
// <p>Here's the prediction termination rule then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
// full LL keeps going when there is uncertainty.</p>
//
// <p><strong>HEURISTIC</strong></p>
//
// <p>As a heuristic, we stop prediction when we see any conflicting subset
// unless we see a state that only has one alternative associated with it.
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):</p>
//
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
//
// <p>When the ATN simulation reaches the state before {@code ';'}, it has a
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
// processing this node because alternative two has another way to continue,
// via {@code [6|2|[]]}.</p>
//
// <p>It also lets us continue for this rule:</p>
//
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
//
// <p>After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not stop
// working on this state. In the previous example, we're concerned with
// states associated with the conflicting alternatives. Here alt 3 is not
// associated with the conflicting configs, but since we can continue
// looking for input reasonably, don't declare the state done.</p>
//
// <p><strong>PURE SLL PARSING</strong></p>
//
// <p>To handle pure SLL parsing, all we have to do is make sure that we
// combine stack contexts for configurations that differ only by semantic
// predicate. From there, we can do the usual SLL termination heuristic.</p>
//
// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
//
// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
// states because they need to create the DFA cache that works in all
// semantic situations. In contrast, full LL evaluates predicates collected
// during start state computation so it can ignore predicates thereafter.
// This means that SLL termination detection can totally ignore semantic
// predicates.</p>
//
// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
// semantic predicate contexts so we might see two configurations like the
// following.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
//
// <p>Before testing these configurations against others, we have to merge
// {@code x} and {@code x'} (without modifying the existing configurations).
// For example, we test {@code (x+x')==x''} when looking for conflicts in
// the following configurations.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
//
// <p>If the configuration set has predicates (as indicated by
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
//
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
// configs meet this condition, then none of the configurations is able
// to Match additional input so we terminate prediction.
//
if PredictionModeallConfigsInRuleStopStates(configs) {
return true
}
// pure SLL mode parsing
if mode == PredictionModeSLL {
// Don't bother with combining configs from different semantic
// contexts if we can fail over to full LL; it costs more time
// since we'll often fail over anyway.
if configs.HasSemanticContext() {
// dup configs, tossing out semantic predicates
dup := NewBaseATNConfigSet(false)
for _, c := range configs.GetItems() {
// NewBaseATNConfig({semanticContext:}, c)
c = NewBaseATNConfig2(c, SemanticContextNone)
dup.Add(c, nil)
}
configs = dup
}
// now we have combined contexts for configs with dissimilar preds
}
// pure SLL or combined SLL+LL mode parsing
altsets := PredictionModegetConflictingAltSubsets(configs)
return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}
// Checks if any configuration in {@code configs} is in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if any configuration in {@code configs} is in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
for _, c := range configs.GetItems() {
if _, ok := c.GetState().(*RuleStopState); ok {
return true
}
}
return false
}
// Checks if all configurations in {@code configs} are in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if all configurations in {@code configs} are in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
for _, c := range configs.GetItems() {
if _, ok := c.GetState().(*RuleStopState); !ok {
return false
}
}
return true
}
//
// Full LL prediction termination.
//
// <p>Can we stop looking ahead during ATN simulation or is there some
// uncertainty as to which alternative we will ultimately pick, after
// consuming more input? Even if there are partial conflicts, we might know
// that everything is going to resolve to the same minimum alternative. That
// means we can stop since no more lookahead will change that fact. On the
// other hand, there might be multiple conflicts that resolve to different
// minimums. That means we need more look ahead to decide which of those
// alternatives we should predict.</p>
//
// <p>The basic idea is to split the set of configurations {@code C}, into
// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
// non-conflicting configurations. Two configurations conflict if they have
// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
//
// <p>Reduce these configuration subsets to the set of possible alternatives.
// You can compute the alternative subsets in one pass as follows:</p>
//
// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
// {@code C} holding {@code s} and {@code ctx} fixed.</p>
//
// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
//
// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
// {@code s} and {@code ctx}.</p>
//
// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
// the union of these alternative subsets is a singleton, then no amount of
// more lookahead will help us. We will always pick that alternative. If,
// however, there is more than one alternative, then we are uncertain which
// alternative to predict and must continue looking for resolution. We may
// or may not discover an ambiguity in the future, even if there are no
// conflicting subsets this round.</p>
//
// <p>The biggest sin is to terminate early because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
// might even look until the end of file.</p>
//
// <p>No special consideration for semantic predicates is required because
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
// no configuration contains a semantic context during the termination
// check.</p>
//
// <p><strong>CONFLICTING CONFIGS</strong></p>
//
// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
// when {@code i!=j} but {@code x=x'}. Because we merge all
// {@code (s, i, _)} configurations together, that means that there are at
// most {@code n} configurations associated with state {@code s} for
// {@code n} possible alternatives in the decision. The merged stacks
// complicate the comparison of configuration contexts {@code x} and
// {@code x'}. Sam checks to see if one is a subset of the other by calling
// merge and checking to see if the merged result is either {@code x} or
// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
// is the superset, then {@code i} is the only possible prediction since the
// others resolve to {@code min(i)} as well. However, if {@code x} is
// associated with {@code j>i} then at least one stack configuration for
// {@code j} is not in conflict with alternative {@code i}. The algorithm
// should keep going, looking for more lookahead due to the uncertainty.</p>
//
// <p>For simplicity, I'm doing an equality check between {@code x} and
// {@code x'} that lets the algorithm continue to consume lookahead longer
// than necessary. The reason I like the equality is of course the
// simplicity but also because that is the test you need to detect the
// alternatives that are actually in conflict.</p>
//
// <p><strong>CONTINUE/STOP RULE</strong></p>
//
// <p>Continue if union of resolved alternative sets from non-conflicting and
// conflicting alternative subsets has more than one alternative. We are
// uncertain about which alternative to predict.</p>
//
// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
// alternatives are still in the running for the amount of input we've
// consumed at this point. The conflicting sets let us strip away
// configurations that won't lead to more states because we resolve
// conflicts to the configuration with a minimum alternate for the
// conflicting set.</p>
//
// <p><strong>CASES</strong></p>
//
// <ul>
//
// <li>no conflicts and more than 1 alternative in set => continue</li>
//
// <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1,3}} => continue
// </li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1}} => stop and predict 1</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {1}} = {@code {1}} => stop and predict 1, can announce
// ambiguity {@code {1,2}}</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {2}} = {@code {1,2}} => continue</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {3}} = {@code {1,3}} => continue</li>
//
// </ul>
//
// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
//
// <p>If all states Report the same conflicting set of alternatives, then we
// know we have the exact ambiguity set.</p>
//
// <p><code>|A_<em>i</em>| > 1</code> and
// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
//
// <p>In other words, we continue examining lookahead until all {@code A_i}
// have more than one alternative and all {@code A_i} are the same. If
// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
// because the resolved set is {@code {1}}. To determine what the real
// ambiguity is, we have to know whether the ambiguity is between one and
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
//
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
return PredictionModegetSingleViableAlt(altsets)
}
//
// Determines if every alternative subset in {@code altsets} contains more
// than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every {@link BitSet} in {@code altsets} has
// {@link BitSet//cardinality cardinality} &gt; 1, otherwise {@code false}
//
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
return !PredictionModehasNonConflictingAltSet(altsets)
}
//
// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
//
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
if alts.length() == 1 {
return true
}
}
return false
}
//
// Determines if any single alternative subset in {@code altsets} contains
// more than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} &gt; 1, otherwise {@code false}
//
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
if alts.length() > 1 {
return true
}
}
return false
}
//
// Determines if every alternative subset in {@code altsets} is equivalent.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every member of {@code altsets} is equal to the
// others, otherwise {@code false}
//
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
var first *BitSet
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
if first == nil {
first = alts
} else if alts != first {
return false
}
}
return true
}
//
// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
all := PredictionModeGetAlts(altsets)
if all.length() == 1 {
return all.minValue()
}
return ATNInvalidAltNumber
}
// Gets the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each {@link BitSet}
// in {@code altsets}.
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
//
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
all := NewBitSet()
for _, alts := range altsets {
all.or(alts)
}
return all
}
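// getAltsExample is an illustrative sketch, not part of the original runtime:
// for the subsets {1,2} and {1,3} the union is {1,2,3}, while
// PredictionModegetUniqueAlt would return ATNInvalidAltNumber because more
// than one alternative is represented.
func getAltsExample() *BitSet {
	a := NewBitSet()
	a.add(1)
	a.add(2)
	b := NewBitSet()
	b.add(1)
	b.add(3)
	return PredictionModeGetAlts([]*BitSet{a, b}) // {1, 2, 3}
}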
//
// This func gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
configToAlts := make(map[string]*BitSet)
for _, c := range configs.GetItems() {
key := "key_" + strconv.Itoa(c.GetState().GetStateNumber()) + "/" + c.GetContext().String()
alts := configToAlts[key]
if alts == nil {
alts = NewBitSet()
configToAlts[key] = alts
}
alts.add(c.GetAlt())
}
values := make([]*BitSet, 0)
for k := range configToAlts {
if strings.Index(k, "key_") != 0 {
continue
}
values = append(values, configToAlts[k])
}
return values
}
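// Illustrative example drawn from the cases documented earlier in this file,
// not part of the original runtime: for the configurations (s,1,x), (s,2,x),
// (s',1,y), (s',2,y), (s'',1,z), grouping by (state, context) while ignoring
// the alternative yields the subsets {1,2}, {1,2} and {1}.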
//
// Get a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
// <pre>
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre>
//
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
m := NewAltDict()
for _, c := range configs.GetItems() {
alts := m.Get(c.GetState().String())
if alts == nil {
alts = NewBitSet()
m.put(c.GetState().String(), alts)
}
alts.(*BitSet).add(c.GetAlt())
}
return m
}
func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
values := PredictionModeGetStateToAltMap(configs).values()
for i := 0; i < len(values); i++ {
if values[i].(*BitSet).length() == 1 {
return true
}
}
return false
}
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
result := ATNInvalidAltNumber
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
minAlt := alts.minValue()
if result == ATNInvalidAltNumber {
result = minAlt
} else if result != minAlt { // more than 1 viable alt
return ATNInvalidAltNumber
}
}
return result
}

View File

@ -0,0 +1,213 @@
package antlr
import (
"fmt"
"strings"
"strconv"
)
type Recognizer interface {
GetLiteralNames() []string
GetSymbolicNames() []string
GetRuleNames() []string
Sempred(RuleContext, int, int) bool
Precpred(RuleContext, int) bool
GetState() int
SetState(int)
Action(RuleContext, int, int)
AddErrorListener(ErrorListener)
RemoveErrorListeners()
GetATN() *ATN
GetErrorListenerDispatch() ErrorListener
}
type BaseRecognizer struct {
listeners []ErrorListener
state int
RuleNames []string
LiteralNames []string
SymbolicNames []string
GrammarFileName string
}
func NewBaseRecognizer() *BaseRecognizer {
rec := new(BaseRecognizer)
rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
rec.state = -1
return rec
}
var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
runtimeVersion := "4.5.2"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
}
func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
}
func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
b.listeners = append(b.listeners, listener)
}
func (b *BaseRecognizer) RemoveErrorListeners() {
b.listeners = make([]ErrorListener, 0)
}
func (b *BaseRecognizer) GetRuleNames() []string {
return b.RuleNames
}
func (b *BaseRecognizer) GetTokenNames() []string {
return b.LiteralNames
}
func (b *BaseRecognizer) GetSymbolicNames() []string {
return b.SymbolicNames
}
func (b *BaseRecognizer) GetLiteralNames() []string {
return b.LiteralNames
}
func (b *BaseRecognizer) GetState() int {
return b.state
}
func (b *BaseRecognizer) SetState(v int) {
b.state = v
}
//func (b *Recognizer) GetTokenTypeMap() {
// var tokenNames = b.GetTokenNames()
// if (tokenNames==nil) {
// panic("The current recognizer does not provide a list of token names.")
// }
// var result = tokenTypeMapCache[tokenNames]
// if(result==nil) {
// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
// result.EOF = TokenEOF
// tokenTypeMapCache[tokenNames] = result
// }
// return result
//}
// Get a map from rule names to rule indexes.
//
// <p>Used for XPath and tree pattern compilation.</p>
//
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
panic("Method not defined!")
// var ruleNames = b.GetRuleNames()
// if (ruleNames==nil) {
// panic("The current recognizer does not provide a list of rule names.")
// }
//
// var result = ruleIndexMapCache[ruleNames]
// if(result==nil) {
// result = ruleNames.reduce(function(o, k, i) { o[k] = i })
// ruleIndexMapCache[ruleNames] = result
// }
// return result
}
func (b *BaseRecognizer) GetTokenType(tokenName string) int {
panic("Method not defined!")
// var ttype = b.GetTokenTypeMap()[tokenName]
// if (ttype !=nil) {
// return ttype
// } else {
// return TokenInvalidType
// }
}
//func (b *Recognizer) GetTokenTypeMap() map[string]int {
// Vocabulary vocabulary = getVocabulary()
//
// Synchronized (tokenTypeMapCache) {
// Map<String, Integer> result = tokenTypeMapCache.Get(vocabulary)
// if (result == null) {
// result = new HashMap<String, Integer>()
// for (int i = 0; i < GetATN().maxTokenType; i++) {
// String literalName = vocabulary.getLiteralName(i)
// if (literalName != null) {
// result.put(literalName, i)
// }
//
// String symbolicName = vocabulary.GetSymbolicName(i)
// if (symbolicName != null) {
// result.put(symbolicName, i)
// }
// }
//
// result.put("EOF", Token.EOF)
// result = Collections.unmodifiableMap(result)
// tokenTypeMapCache.put(vocabulary, result)
// }
//
// return result
// }
//}
// What is the error header, normally line/character position information?
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
line := e.GetOffendingToken().GetLine()
column := e.GetOffendingToken().GetColumn()
return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}
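// Illustrative example, not part of the original runtime: for an offending
// token on line 3 at character position 7, GetErrorHeader returns "line 3:7".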
// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
//
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
if t == nil {
return "<no token>"
}
s := t.GetText()
if s == "" {
if t.GetTokenType() == TokenEOF {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
}
}
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
return "'" + s + "'"
}
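// Illustrative examples, not part of the original runtime: a token whose text
// is a single newline is displayed as '\n', and an EOF token (empty text,
// type TokenEOF) is displayed as '<EOF>'.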
func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
return NewProxyErrorListener(b.listeners)
}
// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
return true
}
func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
return true
}

View File

@ -0,0 +1,110 @@
package antlr
// A rule context is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack. We actually carry no
// information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the ATN submachine that invoked this context. Contrast this with the s
// pointer inside ParserRuleContext that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the subclass
// ParserRuleContext.
//
// @see ParserRuleContext
//
type RuleContext interface {
RuleNode
GetInvokingState() int
SetInvokingState(int)
GetRuleIndex() int
IsEmpty() bool
GetAltNumber() int
SetAltNumber(altNumber int)
String([]string, RuleContext) string
}
type BaseRuleContext struct {
parentCtx RuleContext
invokingState int
RuleIndex int
}
func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
rn := new(BaseRuleContext)
// What context invoked this rule?
rn.parentCtx = parent
// What state invoked the rule associated with this context?
// The "return address" is the followState of invokingState.
// If parent is nil, this should be -1.
if parent == nil {
rn.invokingState = -1
} else {
rn.invokingState = invokingState
}
return rn
}
func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
return b
}
func (b *BaseRuleContext) SetParent(v Tree) {
if v == nil {
b.parentCtx = nil
} else {
b.parentCtx = v.(RuleContext)
}
}
func (b *BaseRuleContext) GetInvokingState() int {
return b.invokingState
}
func (b *BaseRuleContext) SetInvokingState(t int) {
b.invokingState = t
}
func (b *BaseRuleContext) GetRuleIndex() int {
return b.RuleIndex
}
func (b *BaseRuleContext) GetAltNumber() int {
return ATNInvalidAltNumber
}
func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
// A context is empty if there is no invoking state, meaning nobody has
// invoked the current context.
func (b *BaseRuleContext) IsEmpty() bool {
return b.invokingState == -1
}
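// Illustrative example, not part of the original runtime: the root context
// created for the start rule has no parent, so its invoking state is -1 and
// IsEmpty() reports true; every nested rule invocation reports false.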
// Return the combined text of all child nodes. This method only considers
// tokens which have been added to the parse tree.
// <p>
// Since tokens on hidden channels (e.g. whitespace or comments) are not
// added to the parse trees, they will not appear in the output of this
// method.
//
func (b *BaseRuleContext) GetParent() Tree {
return b.parentCtx
}

View File

@ -0,0 +1,441 @@
package antlr
import (
"fmt"
"strconv"
)
// A tree structure used to record the semantic context in which
// an ATN configuration is valid. It's either a single predicate,
// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
//
// <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
// {@link SemanticContext} within the scope of this outer class.</p>
//
type SemanticContext interface {
Comparable
evaluate(parser Recognizer, outerContext RuleContext) bool
evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
String() string
}
func SemanticContextandContext(a, b SemanticContext) SemanticContext {
if a == nil || a == SemanticContextNone {
return b
}
if b == nil || b == SemanticContextNone {
return a
}
result := NewAND(a, b)
if len(result.opnds) == 1 {
return result.opnds[0]
}
return result
}
func SemanticContextorContext(a, b SemanticContext) SemanticContext {
if a == nil {
return b
}
if b == nil {
return a
}
if a == SemanticContextNone || b == SemanticContextNone {
return SemanticContextNone
}
result := NewOR(a, b)
if len(result.opnds) == 1 {
return result.opnds[0]
}
return result
}
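// semanticContextExample is an illustrative sketch, not part of the original
// runtime: combining two predicates builds the conjunction described above,
// while SemanticContextNone acts as the identity for "and" and as the
// absorbing element for "or".
func semanticContextExample() SemanticContext {
	p1 := NewPredicate(0, 0, false)
	p2 := NewPredicate(1, 0, false)
	and := SemanticContextandContext(p1, p2)                  // conjunction of {0:0}? and {1:0}?
	return SemanticContextorContext(and, SemanticContextNone) // always-true predicate
}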
type Predicate struct {
ruleIndex int
predIndex int
isCtxDependent bool
}
func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
p := new(Predicate)
p.ruleIndex = ruleIndex
p.predIndex = predIndex
p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
return p
}
// The default {@link SemanticContext}, which is semantically equivalent to
// a predicate of the form {@code {true}?}.
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
return p
}
func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
var localctx RuleContext
if p.isCtxDependent {
localctx = outerContext
}
return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}
func (p *Predicate) Hash() string {
return strconv.Itoa(p.ruleIndex) + "/" + strconv.Itoa(p.predIndex) + "/" + fmt.Sprint(p.isCtxDependent)
}
func (p *Predicate) equals(other interface{}) bool {
if p == other {
return true
} else if _, ok := other.(*Predicate); !ok {
return false
} else {
return p.ruleIndex == other.(*Predicate).ruleIndex &&
p.predIndex == other.(*Predicate).predIndex &&
p.isCtxDependent == other.(*Predicate).isCtxDependent
}
}
func (p *Predicate) String() string {
return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
}
type PrecedencePredicate struct {
precedence int
}
func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
p := new(PrecedencePredicate)
p.precedence = precedence
return p
}
func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
return parser.Precpred(outerContext, p.precedence)
}
func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
if parser.Precpred(outerContext, p.precedence) {
return SemanticContextNone
}
return nil
}
func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
return p.precedence - other.precedence
}
func (p *PrecedencePredicate) Hash() string {
return "31"
}
func (p *PrecedencePredicate) equals(other interface{}) bool {
if p == other {
return true
} else if _, ok := other.(*PrecedencePredicate); !ok {
return false
} else {
return p.precedence == other.(*PrecedencePredicate).precedence
}
}
func (p *PrecedencePredicate) String() string {
return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}
func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate {
result := make([]*PrecedencePredicate, 0)
for _, v := range set.values() {
if c2, ok := v.(*PrecedencePredicate); ok {
result = append(result, c2)
}
}
return result
}
// A semantic context which is true whenever none of the contained contexts
// is false.
type AND struct {
opnds []SemanticContext
}
func NewAND(a, b SemanticContext) *AND {
operands := NewSet(nil, nil)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.add(o)
}
} else {
operands.add(a)
}
if ba, ok := b.(*AND); ok {
for _, o := range ba.opnds {
operands.add(o)
}
} else {
operands.add(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
// interested in the transition with the lowest precedence
var reduced *PrecedencePredicate
for _, p := range precedencePredicates {
if reduced == nil || p.precedence < reduced.precedence {
reduced = p
}
}
operands.add(reduced)
}
vs := operands.values()
opnds := make([]SemanticContext, len(vs))
for i, v := range vs {
opnds[i] = v.(SemanticContext)
}
and := new(AND)
and.opnds = opnds
return and
}
func (a *AND) equals(other interface{}) bool {
	if a == other {
		return true
	} else if _, ok := other.(*AND); !ok {
		return false
	} else {
		// Operand lists of different lengths can never be equal; check before indexing.
		if len(a.opnds) != len(other.(*AND).opnds) {
			return false
		}
		for i, v := range other.(*AND).opnds {
			if !a.opnds[i].equals(v) {
				return false
			}
		}
		return true
	}
}
func (a *AND) Hash() string {
return fmt.Sprint(a.opnds) + "/AND"
}
//
// {@inheritDoc}
//
// <p>
// The evaluation of predicates by a context is short-circuiting, but
// unordered.</p>
//
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(a.opnds); i++ {
if !a.opnds[i].evaluate(parser, outerContext) {
return false
}
}
return true
}
func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
differs := false
operands := make([]SemanticContext, 0)
for i := 0; i < len(a.opnds); i++ {
context := a.opnds[i]
evaluated := context.evalPrecedence(parser, outerContext)
differs = differs || (evaluated != context)
if evaluated == nil {
// The AND context is false if any element is false
return nil
} else if evaluated != SemanticContextNone {
// Reduce the result by Skipping true elements
operands = append(operands, evaluated)
}
}
if !differs {
return a
}
if len(operands) == 0 {
// all elements were true, so the AND context is true
return SemanticContextNone
}
var result SemanticContext
for _, o := range operands {
if result == nil {
result = o
} else {
result = SemanticContextandContext(result, o)
}
}
return result
}
func (a *AND) String() string {
	s := ""
	for _, o := range a.opnds {
		s += "&& " + fmt.Sprint(o)
	}
	// Strip the leading "&& " separator added by the first loop iteration.
	if len(s) > 3 {
		return s[3:]
	}
	return s
}
//
// A semantic context which is true whenever at least one of the contained
// contexts is true.
//
type OR struct {
opnds []SemanticContext
}
func NewOR(a, b SemanticContext) *OR {
operands := NewSet(nil, nil)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.add(o)
}
} else {
operands.add(a)
}
if ba, ok := b.(*OR); ok {
for _, o := range ba.opnds {
operands.add(o)
}
} else {
operands.add(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
// interested in the transition with the highest precedence
var reduced *PrecedencePredicate
for _, p := range precedencePredicates {
if reduced == nil || p.precedence > reduced.precedence {
reduced = p
}
}
operands.add(reduced)
}
vs := operands.values()
opnds := make([]SemanticContext, len(vs))
for i, v := range vs {
opnds[i] = v.(SemanticContext)
}
o := new(OR)
o.opnds = opnds
return o
}
func (o *OR) equals(other interface{}) bool {
	if o == other {
		return true
	} else if _, ok := other.(*OR); !ok {
		return false
	} else {
		// Operand lists of different lengths can never be equal; check before indexing.
		if len(o.opnds) != len(other.(*OR).opnds) {
			return false
		}
		for i, v := range other.(*OR).opnds {
			if !o.opnds[i].equals(v) {
				return false
			}
		}
		return true
	}
}
func (o *OR) Hash() string {
return fmt.Sprint(o.opnds) + "/OR"
}
// <p>
// The evaluation of predicates by o context is short-circuiting, but
// unordered.</p>
//
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(o.opnds); i++ {
if o.opnds[i].evaluate(parser, outerContext) {
return true
}
}
return false
}
func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
differs := false
operands := make([]SemanticContext, 0)
for i := 0; i < len(o.opnds); i++ {
context := o.opnds[i]
evaluated := context.evalPrecedence(parser, outerContext)
differs = differs || (evaluated != context)
if evaluated == SemanticContextNone {
// The OR context is true if any element is true
return SemanticContextNone
} else if evaluated != nil {
// Reduce the result by Skipping false elements
operands = append(operands, evaluated)
}
}
if !differs {
return o
}
if len(operands) == 0 {
// all elements were false, so the OR context is false
return nil
}
var result SemanticContext
for _, o := range operands {
if result == nil {
result = o
} else {
result = SemanticContextorContext(result, o)
}
}
return result
}
func (o *OR) String() string {
	s := ""
	for _, opnd := range o.opnds {
		s += "|| " + fmt.Sprint(opnd)
	}
	// Strip the leading "|| " separator added by the first loop iteration.
	if len(s) > 3 {
		return s[3:]
	}
	return s
}

206
runtime/Go/antlr/token.go Normal file
View File

@ -0,0 +1,206 @@
package antlr
import (
"strconv"
"strings"
)
type TokenSourceCharStreamPair struct {
tokenSource TokenSource
charStream CharStream
}
// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which
// we obtained this token.
type Token interface {
GetSource() *TokenSourceCharStreamPair
GetTokenType() int
GetChannel() int
GetStart() int
GetStop() int
GetLine() int
GetColumn() int
GetText() string
SetText(s string)
GetTokenIndex() int
SetTokenIndex(v int)
GetTokenSource() TokenSource
GetInputStream() CharStream
}
type BaseToken struct {
source *TokenSourceCharStreamPair
tokenType int // token type of the token
channel int // The parser ignores everything not on DEFAULT_CHANNEL
start int // optional return -1 if not implemented.
stop int // optional return -1 if not implemented.
tokenIndex int // from 0..n-1 of the token object in the input stream
line int // line=1..n of the 1st character
column int // beginning of the line at which it occurs, 0..n-1
text string // text of the token.
readOnly bool
}
const (
TokenInvalidType = 0
// During lookahead operations, this "token" signifies we hit rule end ATN state
// and did not follow it despite needing to.
TokenEpsilon = -2
TokenMinUserTokenType = 1
TokenEOF = -1
// All tokens go to the parser (unless Skip() is called in that rule)
// on a particular "channel". The parser tunes to a particular channel
// so that whitespace etc... can go to the parser on a "hidden" channel.
TokenDefaultChannel = 0
// Anything on a channel other than DEFAULT_CHANNEL is not parsed
// by the parser.
TokenHiddenChannel = 1
)
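// Illustrative note, not part of the original runtime: a lexer rule such as
// WS : [ \t\r\n]+ -> channel(HIDDEN) ; routes whitespace to TokenHiddenChannel,
// so a parser tuned to TokenDefaultChannel never sees those tokens.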
func (b *BaseToken) GetChannel() int {
return b.channel
}
func (b *BaseToken) GetStart() int {
return b.start
}
func (b *BaseToken) GetStop() int {
return b.stop
}
func (b *BaseToken) GetLine() int {
return b.line
}
func (b *BaseToken) GetColumn() int {
return b.column
}
func (b *BaseToken) GetTokenType() int {
return b.tokenType
}
func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
return b.source
}
func (b *BaseToken) GetTokenIndex() int {
return b.tokenIndex
}
func (b *BaseToken) SetTokenIndex(v int) {
b.tokenIndex = v
}
func (b *BaseToken) GetTokenSource() TokenSource {
return b.source.tokenSource
}
func (b *BaseToken) GetInputStream() CharStream {
return b.source.charStream
}
type CommonToken struct {
*BaseToken
}
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
t := new(CommonToken)
t.BaseToken = new(BaseToken)
t.source = source
t.tokenType = tokenType
t.channel = channel
t.start = start
t.stop = stop
t.tokenIndex = -1
if t.source.tokenSource != nil {
t.line = source.tokenSource.GetLine()
t.column = source.tokenSource.GetCharPositionInLine()
} else {
t.column = -1
}
return t
}
// An empty {@link Pair} which is used as the default value of
// {@link //source} for tokens that do not have a source.
//CommonToken.EMPTY_SOURCE = [ nil, nil ]
// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
//
// <p>
// If {@code oldToken} is also a {@link CommonToken} instance, the newly
// constructed token will share a reference to the {@link //text} field and
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
// be assigned the result of calling {@link //GetText}, and {@link //source}
// will be constructed from the result of {@link Token//GetTokenSource} and
// {@link Token//GetInputStream}.</p>
//
// @param oldToken The token to copy.
//
func (c *CommonToken) clone() *CommonToken {
t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
t.tokenIndex = c.GetTokenIndex()
t.line = c.GetLine()
t.column = c.GetColumn()
t.text = c.GetText()
return t
}
func (c *CommonToken) GetText() string {
if c.text != "" {
return c.text
}
input := c.GetInputStream()
if input == nil {
return ""
}
n := input.Size()
if c.start < n && c.stop < n {
return input.GetTextFromInterval(NewInterval(c.start, c.stop))
}
return "<EOF>"
}
func (c *CommonToken) SetText(text string) {
c.text = text
}
func (c *CommonToken) String() string {
txt := c.GetText()
if txt != "" {
txt = strings.Replace(txt, "\n", "\\n", -1)
txt = strings.Replace(txt, "\r", "\\r", -1)
txt = strings.Replace(txt, "\t", "\\t", -1)
} else {
txt = "<no text>"
}
var ch string
if c.channel > 0 {
ch = ",channel=" + strconv.Itoa(c.channel)
} else {
ch = ""
}
return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
}
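// commonTokenStringExample is an illustrative sketch with assumed values, not
// part of the original runtime: it shows the debug format produced by String()
// for a token with no token source attached.
func commonTokenStringExample() string {
	pair := &TokenSourceCharStreamPair{}
	t := NewCommonToken(pair, 5, TokenDefaultChannel, 10, 11)
	t.SetText("if")
	return t.String() // [@-1,10:11='if',<5>,0:-1]
}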

View File

@ -0,0 +1,13 @@
package antlr
type TokenSource interface {
NextToken() Token
Skip()
More()
GetLine() int
GetCharPositionInLine() int
GetInputStream() CharStream
GetSourceName() string
setTokenFactory(factory TokenFactory)
GetTokenFactory() TokenFactory
}

View File

@ -0,0 +1,16 @@
package antlr
type TokenStream interface {
IntStream
LT(k int) Token
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
GetTextFromInterval(*Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}

View File

@ -0,0 +1,28 @@
package antlr
import "fmt"
type TraceListener struct {
parser *BaseParser
}
func NewTraceListener(parser *BaseParser) *TraceListener {
tl := new(TraceListener)
tl.parser = parser
return tl
}
func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
}
func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
func (t *TraceListener) VisitTerminal(node TerminalNode) {
fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
}
func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}

Some files were not shown because too many files have changed in this diff.