Merge branch 'master' into master

commit e967463e91

@@ -0,0 +1,10 @@
+# Contributing to ANTLR 4
+
+1. [Fork](https://help.github.com/articles/fork-a-repo) the [antlr/antlr4 repo](https://github.com/antlr/antlr4)
+2. Install and configure [EditorConfig](http://editorconfig.org/) so your text editor or IDE uses the ANTLR 4 coding style
+3. [Build ANTLR 4](doc/building-antlr.md)
+4. [Run the ANTLR project unit tests](doc/antlr-project-testing.md)
+5. Create a [pull request](https://help.github.com/articles/using-pull-requests/) including your change
+
+
+**Note:** You must sign the `contributors.txt` certificate of origin with your pull request if you've not done so before.
LICENSE.txt
@@ -24,3 +24,29 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=====
+
+MIT License for codepointat.js from https://git.io/codepointat
+MIT License for fromcodepoint.js from https://git.io/vDW1m
+
+Copyright Mathias Bynens <https://mathiasbynens.be/>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -129,6 +129,10 @@ YYYY/MM/DD, github id, Full name, email
 2016/12/11, Gaulouis, Gaulouis, gaulouis.com@gmail.com
 2016/12/22, akosthekiss, Akos Kiss, akiss@inf.u-szeged.hu
 2016/12/24, adrpo, Adrian Pop, adrian.pop@liu.se
+2017/01/11, robertbrignull, Robert Brignull, robertbrignull@gmail.com
 2017/01/13, marcelo-rocha, Marcelo Rocha, mcrocha@gmail.com
+2017/01/23, bhamiltoncx, Ben Hamilton, bhamiltoncx+antlr@gmail.com
+2017/01/18, mshockwave, Bekket McClane, yihshyng223@gmail.com
+2017/02/10, lionelplessis, Lionel Plessis, lionelplessis@users.noreply.github.com
 2017/02/14, lecode-official, David Neumann, david.neumann@lecode.de
 2017/02/14, xied75, Dong Xie, xied75@gmail.com
@@ -61,6 +61,8 @@ This documentation is a reference and summarizes grammar syntax and the key sema
 * [Building ANTLR itself](building-antlr.md)
 
+* [Contributing to ANTLR](/CONTRIBUTING.md)
+
 * [Cutting an ANTLR Release](releasing-antlr.md)
 
 * [ANTLR project unit tests](antlr-project-testing.md)
 
@@ -95,6 +95,8 @@
 <artifactId>maven-surefire-plugin</artifactId>
 <version>2.19.1</version>
 <configuration>
+<!-- SUREFIRE-951: file.encoding cannot be set via systemPropertyVariables -->
+<argLine>-Dfile.encoding=UTF-8</argLine>
 <includes>
 <include>**/csharp/Test*.java</include>
 <include>**/java/Test*.java</include>
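Aside (not part of the commit): the `<argLine>-Dfile.encoding=UTF-8</argLine>` added above forces the forked Surefire test JVM to run with UTF-8 as its default charset, since SUREFIRE-951 means `file.encoding` cannot be passed through `systemPropertyVariables`. A minimal, hypothetical JUnit check of that assumption might look like this (class and test names are illustrative):

```java
import static org.junit.Assert.assertEquals;

import java.nio.charset.Charset;
import org.junit.Test;

// Hypothetical sanity check, not part of this commit: file.encoding is read
// once at JVM startup, so this only passes when the surefire <argLine> above
// is in effect for the forked test JVM.
public class DefaultCharsetCheck {
    @Test
    public void forkedJvmUsesUtf8() {
        assertEquals("UTF-8", Charset.defaultCharset().name());
    }
}
```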
@@ -1,6 +1,6 @@
-writeln(s) ::= <<Console.WriteLine(<s>);>>
+writeln(s) ::= <<Output.WriteLine(<s>);>>
-write(s) ::= <<Console.Write(<s>);>>
+write(s) ::= <<Output.Write(<s>);>>
-writeList(s) ::= <<Console.WriteLine(<s; separator="+">);>>
+writeList(s) ::= <<Output.WriteLine(<s; separator="+">);>>
 
 False() ::= "false"
 
@@ -176,8 +176,14 @@ public class PositionAdjustingLexerATNSimulator : LexerATNSimulator {
 BasicListener(X) ::= <<
 @parser::members {
 public class LeafListener : TBaseListener {
+private readonly TextWriter Output;
+
+public LeafListener(TextWriter output) {
+Output = output;
+}
+
 public override void VisitTerminal(ITerminalNode node) {
-Console.WriteLine(node.Symbol.Text);
+Output.WriteLine(node.Symbol.Text);
 }
 }
 }
@@ -185,7 +191,7 @@ public class LeafListener : TBaseListener {
 
 WalkListener(s) ::= <<
 ParseTreeWalker walker = new ParseTreeWalker();
-walker.Walk(new LeafListener(), <s>);
+walker.Walk(new LeafListener(Output), <s>);
 >>
 
 TreeNodeWithAltNumField(X) ::= <<
@@ -204,6 +210,12 @@ public class MyRuleNode : ParserRuleContext {
 TokenGetterListener(X) ::= <<
 @parser::members {
 public class LeafListener : TBaseListener {
+private readonly TextWriter Output;
+
+public LeafListener(TextWriter output) {
+Output = output;
+}
+
 public override void ExitA(TParser.AContext ctx) {
 if (ctx.ChildCount==2)
 {
@@ -214,11 +226,11 @@ public class LeafListener : TBaseListener {
 }
 sb.Length = sb.Length - 2;
 sb.Append ("]");
-Console.Write ("{0} {1} {2}", ctx.INT (0).Symbol.Text,
+Output.Write ("{0} {1} {2}", ctx.INT (0).Symbol.Text,
 ctx.INT (1).Symbol.Text, sb.ToString());
 }
 else
-Console.WriteLine(ctx.ID().Symbol);
+Output.WriteLine(ctx.ID().Symbol);
 }
 }
 }
@@ -227,12 +239,18 @@ public class LeafListener : TBaseListener {
 RuleGetterListener(X) ::= <<
 @parser::members {
 public class LeafListener : TBaseListener {
+private readonly TextWriter Output;
+
+public LeafListener(TextWriter output) {
+Output = output;
+}
+
 public override void ExitA(TParser.AContext ctx) {
 if (ctx.ChildCount==2) {
-Console.Write("{0} {1} {2}",ctx.b(0).Start.Text,
+Output.Write("{0} {1} {2}",ctx.b(0).Start.Text,
 ctx.b(1).Start.Text,ctx.b()[0].Start.Text);
 } else
-Console.WriteLine(ctx.b(0).Start.Text);
+Output.WriteLine(ctx.b(0).Start.Text);
 }
 }
 }
@@ -242,12 +260,18 @@ public class LeafListener : TBaseListener {
 LRListener(X) ::= <<
 @parser::members {
 public class LeafListener : TBaseListener {
+private readonly TextWriter Output;
+
+public LeafListener(TextWriter output) {
+Output = output;
+}
+
 public override void ExitE(TParser.EContext ctx) {
 if (ctx.ChildCount==3) {
-Console.Write("{0} {1} {2}\n",ctx.e(0).Start.Text,
+Output.Write("{0} {1} {2}\n",ctx.e(0).Start.Text,
 ctx.e(1).Start.Text, ctx.e()[0].Start.Text);
 } else
-Console.WriteLine(ctx.INT().Symbol.Text);
+Output.WriteLine(ctx.INT().Symbol.Text);
 }
 }
 }
@@ -256,11 +280,17 @@ public class LeafListener : TBaseListener {
 LRWithLabelsListener(X) ::= <<
 @parser::members {
 public class LeafListener : TBaseListener {
+private readonly TextWriter Output;
+
+public LeafListener(TextWriter output) {
+Output = output;
+}
+
 public override void ExitCall(TParser.CallContext ctx) {
-Console.Write("{0} {1}",ctx.e().Start.Text,ctx.eList());
+Output.Write("{0} {1}",ctx.e().Start.Text,ctx.eList());
 }
 public override void ExitInt(TParser.IntContext ctx) {
-Console.WriteLine(ctx.INT().Symbol.Text);
+Output.WriteLine(ctx.INT().Symbol.Text);
 }
 }
 }
@@ -274,12 +304,12 @@ void foo() {
 }
 >>
 
-Declare_foo() ::= <<public void foo() {Console.WriteLine("foo");}>>
+Declare_foo() ::= <<public void foo() {Output.WriteLine("foo");}>>
 
 Invoke_foo() ::= "this.foo();"
 
 Declare_pred() ::= <<bool pred(bool v) {
-Console.WriteLine("eval="+v.ToString().ToLower());
+Output.WriteLine("eval="+v.ToString().ToLower());
 return v;
 }
 >>
@@ -325,5 +325,5 @@ func pred(v bool) bool {
 
 Invoke_pred(v) ::= <<pred(<v>)>>
 ContextRuleFunction(ctx, rule) ::= "<ctx>.<rule>"
-StringType() ::= "String"
+StringType() ::= "string"
 ContextMember(ctx, subctx, member) ::= "<ctx>.<subctx>.<member; format={cap}>"
@@ -1,6 +1,6 @@
-writeln(s) ::= <<print(<s>)>>
+writeln(s) ::= <<print(<s>, file=self._output)>>
-write(s) ::= <<print(<s>,end='')>>
+write(s) ::= <<print(<s>,end='', file=self._output)>>
-writeList(s) ::= <<print(<s: {v | str(<v>)}; separator="+">)>>
+writeList(s) ::= <<print(<s: {v | str(<v>)}; separator="+">, file=self._output)>>
 
 False() ::= "False"
 
@@ -152,14 +152,16 @@ else:
 from <X>Listener import <X>Listener
 
 class LeafListener(TListener):
+def __init__(self, output):
+self._output = output
 def visitTerminal(self, node):
-print(node.symbol.text)
+print(node.symbol.text, file=self._output)
 }
 >>
 
 WalkListener(s) ::= <<
 walker = ParseTreeWalker()
-walker.walk(TParser.LeafListener(), <s>)
+walker.walk(TParser.LeafListener(self._output), <s>)
 >>
 
 TreeNodeWithAltNumField(X) ::= <<
@@ -183,11 +185,13 @@ else:
 from <X>Listener import <X>Listener
 
 class LeafListener(TListener):
+def __init__(self, output):
+self._output = output
 def exitA(self, ctx):
 if ctx.getChildCount()==2:
-print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()))
+print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()), file=self._output)
 else:
-print(str(ctx.ID().symbol))
+print(str(ctx.ID().symbol), file=self._output)
 }
 >>
 
@@ -199,11 +203,13 @@ else:
 from <X>Listener import <X>Listener
 
 class LeafListener(TListener):
+def __init__(self, output):
+self._output = output
 def exitA(self, ctx):
 if ctx.getChildCount()==2:
-print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text)
+print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text, file=self._output)
 else:
-print(ctx.b(0).start.text)
+print(ctx.b(0).start.text, file=self._output)
 }
 >>
 
@@ -216,11 +222,13 @@ else:
 from <X>Listener import <X>Listener
 
 class LeafListener(TListener):
+def __init__(self, output):
+self._output = output
 def exitE(self, ctx):
 if ctx.getChildCount()==3:
-print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text)
+print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text, file=self._output)
 else:
-print(ctx.INT().symbol.text)
+print(ctx.INT().symbol.text, file=self._output)
 }
 >>
 
@@ -232,10 +240,12 @@ else:
 from <X>Listener import <X>Listener
 
 class LeafListener(TListener):
+def __init__(self, output):
+self._output = output
 def exitCall(self, ctx):
-print(ctx.e().start.text + ' ' + str(ctx.eList()))
+print(ctx.e().start.text + ' ' + str(ctx.eList()), file=self._output)
 def exitInt(self, ctx):
-print(ctx.INT().symbol.text)
+print(ctx.INT().symbol.text, file=self._output)
 }
 >>
 
@@ -247,13 +257,13 @@ def foo():
 >>
 
 Declare_foo() ::= <<def foo(self):
-print('foo')
+print('foo', file=self._output)
 >>
 
 Invoke_foo() ::= "self.foo()"
 
 Declare_pred() ::= <<def pred(self, v):
-print('eval=' + str(v).lower())
+print('eval=' + str(v).lower(), file=self._output)
 return v
 
 >>
@@ -1,6 +1,6 @@
-writeln(s) ::= <<print(<s>)>>
+writeln(s) ::= <<print(<s>, file=self._output)>>
-write(s) ::= <<print(<s>,end='')>>
+write(s) ::= <<print(<s>,end='',file=self._output)>>
-writeList(s) ::= <<print(<s: {v | str(<v>)}; separator="+">)>>
+writeList(s) ::= <<print(<s: {v | str(<v>)}; separator="+">, file=self._output)>>
 
 False() ::= "False"
 
@@ -152,8 +152,10 @@ def isIdentifierChar(c):
 BasicListener(X) ::= <<
 @parser::members {
 class LeafListener(MockListener):
+def __init__(self, output):
+self._output = output
 def visitTerminal(self, node):
-print(node.symbol.text)
+print(node.symbol.text, file=self._output)
 }
 >>
 
@@ -164,7 +166,7 @@ else:
 from TListener import TListener
 TParser.LeafListener.__bases__ = (TListener,)
 walker = ParseTreeWalker()
-walker.walk(TParser.LeafListener(), <s>)
+walker.walk(TParser.LeafListener(self._output), <s>)
 >>
 
 TreeNodeWithAltNumField(X) ::= <<
@@ -183,22 +185,26 @@ class MyRuleNode(ParserRuleContext):
 TokenGetterListener(X) ::= <<
 @parser::members {
 class LeafListener(MockListener):
+def __init__(self, output):
+self._output = output
 def exitA(self, ctx):
 if ctx.getChildCount()==2:
-print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()))
+print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()), file=self._output)
 else:
-print(str(ctx.ID().symbol))
+print(str(ctx.ID().symbol), file=self._output)
 }
 >>
 
 RuleGetterListener(X) ::= <<
 @parser::members {
 class LeafListener(MockListener):
+def __init__(self, output):
+self._output = output
 def exitA(self, ctx):
 if ctx.getChildCount()==2:
-print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text)
+print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text, file=self._output)
 else:
-print(ctx.b(0).start.text)
+print(ctx.b(0).start.text, file=self._output)
 }
 >>
 
@@ -206,21 +212,25 @@ class LeafListener(MockListener):
 LRListener(X) ::= <<
 @parser::members {
 class LeafListener(MockListener):
+def __init__(self, output):
+self._output = output
 def exitE(self, ctx):
 if ctx.getChildCount()==3:
-print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text)
+print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text, file=self._output)
 else:
-print(ctx.INT().symbol.text)
+print(ctx.INT().symbol.text, file=self._output)
 }
 >>
 
 LRWithLabelsListener(X) ::= <<
 @parser::members {
 class LeafListener(MockListener):
+def __init__(self, output):
+self._output = output
 def exitCall(self, ctx):
-print(ctx.e().start.text + ' ' + str(ctx.eList()))
+print(ctx.e().start.text + ' ' + str(ctx.eList()), file=self._output)
 def exitInt(self, ctx):
-print(ctx.INT().symbol.text)
+print(ctx.INT().symbol.text, file=self._output)
 }
 >>
 
@@ -232,13 +242,13 @@ def foo():
 >>
 
 Declare_foo() ::= <<def foo(self):
-print('foo')
+print('foo', file=self._output)
 >>
 
 Invoke_foo() ::= "self.foo()"
 
 Declare_pred() ::= <<def pred(self, v):
-print('eval=' + str(v).lower())
+print('eval=' + str(v).lower(), file=self._output)
 return v
 
 >>
@@ -224,7 +224,7 @@ public abstract class BaseRuntimeTest {
 String... extraOptions)
 {
 mkdir(workdir);
-BaseJavaTest.writeFile(workdir, grammarFileName, grammarStr);
+writeFile(workdir, grammarFileName, grammarStr);
 return antlrOnString(workdir, targetName, grammarFileName, defaultListener, extraOptions);
 }
 
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+package org.antlr.v4.test.runtime;
+
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+public final class StreamVacuum implements Runnable {
+private StringBuilder buf = new StringBuilder();
+private BufferedReader in;
+private Thread sucker;
+public StreamVacuum(InputStream in) {
+this.in = new BufferedReader( new InputStreamReader(in, StandardCharsets.UTF_8) );
+}
+public void start() {
+sucker = new Thread(this);
+sucker.start();
+}
+@Override
+public void run() {
+try {
+TestOutputReading.append(in, buf);
+}
+catch (IOException ioe) {
+System.err.println("can't read output from process");
+}
+}
+/** wait for the thread to finish */
+public void join() throws InterruptedException {
+sucker.join();
+}
+@Override
+public String toString() {
+return buf.toString();
+}
+}
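The new `StreamVacuum` helper above drains a process stream on a background thread and exposes the captured text via `toString()`. A minimal usage sketch, assuming a process launched with `ProcessBuilder` (the command shown is only an example, not taken from this commit):

```java
import org.antlr.v4.test.runtime.StreamVacuum;

public class StreamVacuumSketch {
    public static void main(String[] args) throws Exception {
        // Any external command works here; "java -version" is just an example.
        Process p = new ProcessBuilder("java", "-version").start();
        StreamVacuum stdoutVacuum = new StreamVacuum(p.getInputStream());
        StreamVacuum stderrVacuum = new StreamVacuum(p.getErrorStream());
        stdoutVacuum.start();   // begin draining stdout on its own thread
        stderrVacuum.start();   // begin draining stderr on its own thread
        p.waitFor();
        stdoutVacuum.join();    // wait until both streams reach EOF
        stderrVacuum.join();
        System.out.print(stdoutVacuum); // toString() returns the captured text
        System.err.print(stderrVacuum);
    }
}
```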
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+package org.antlr.v4.test.runtime;
+
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+public abstract class TestOutputReading {
+public static void append(BufferedReader in, StringBuilder buf) throws IOException {
+String line = in.readLine();
+while (line!=null) {
+buf.append(line);
+// NOTE: This appends a newline at EOF
+// regardless of whether or not the
+// input actually ended with a
+// newline.
+//
+// We should revisit this and read a
+// block at a time rather than a line
+// at a time, and change all tests
+// which rely on this behavior to
+// remove the trailing newline at EOF.
+//
+// When we fix this, we can remove the
+// TestOutputReading class entirely.
+buf.append('\n');
+line = in.readLine();
+}
+}
+
+/**
+ * Read in the UTF-8 bytes at {@code path}, convert all
+ * platform-specific line terminators to NL, and append NL
+ * if the file was non-empty and didn't already end with one.
+ *
+ * {@see StreamVacuum#run()} for why this method exists.
+ *
+ * Returns {@code null} if the file does not exist or the output
+ * was empty.
+ */
+public static String read(Path path) throws IOException {
+// Mimic StreamVacuum.run()'s behavior of replacing all platform-specific
+// EOL sequences with NL.
+StringBuilder buf = new StringBuilder();
+try (BufferedReader in = Files.newBufferedReader(path, StandardCharsets.UTF_8)) {
+append(in, buf);
+} catch (FileNotFoundException | NoSuchFileException e) {
+return null;
+}
+if (buf.length() > 0) {
+return buf.toString();
+} else {
+return null;
+}
+}
+}
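`TestOutputReading.read(Path)` above normalizes platform line endings to `\n` and returns `null` for a missing or empty file. A sketch of how a runtime test might use it to compare captured output against an expectation (the path and expected string are hypothetical):

```java
import static org.junit.Assert.assertEquals;

import java.nio.file.Path;
import java.nio.file.Paths;

import org.antlr.v4.test.runtime.TestOutputReading;

public class OutputComparisonSketch {
    public void checkOutput() throws Exception {
        // Hypothetical location where a generated test program wrote its output.
        Path actualPath = Paths.get("/tmp/antlr-test-run/output");
        // read() returns the file contents with EOLs normalized to \n,
        // or null if the file is absent or empty.
        String actual = TestOutputReading.read(actualPath);
        assertEquals("3\n", actual); // expected value is illustrative only
    }
}
```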
@@ -36,6 +36,7 @@ import org.antlr.v4.runtime.tree.ParseTree;
 import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.antlr.v4.tool.ANTLRMessage;
 import org.antlr.v4.tool.DOTGenerator;
 import org.antlr.v4.tool.Grammar;
@@ -46,13 +47,7 @@ import org.stringtemplate.v4.ST;
 import org.stringtemplate.v4.STGroup;
 import org.stringtemplate.v4.STGroupString;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URISyntaxException;
@@ -70,6 +65,7 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -751,41 +747,6 @@ public class BaseCppTest implements RuntimeTestSupport {
 }
 }
 
-public static class StreamVacuum implements Runnable {
-StringBuilder buf = new StringBuilder();
-BufferedReader in;
-Thread sucker;
-public StreamVacuum(InputStream in) {
-this.in = new BufferedReader( new InputStreamReader(in) );
-}
-public void start() {
-sucker = new Thread(this);
-sucker.start();
-}
-@Override
-public void run() {
-try {
-String line = in.readLine();
-while (line!=null) {
-buf.append(line);
-buf.append('\n');
-line = in.readLine();
-}
-}
-catch (IOException ioe) {
-System.err.println("can't read output from process");
-}
-}
-/** wait for the thread to finish */
-public void join() throws InterruptedException {
-sucker.join();
-}
-@Override
-public String toString() {
-return buf.toString();
-}
-}
-
 protected void checkGrammarSemanticsError(ErrorQueue equeue,
 GrammarSemanticsMessage expectedMessage)
 throws Exception
@@ -869,21 +830,6 @@ public class BaseCppTest implements RuntimeTestSupport {
 }
 }
 
-public static void writeFile(String dir, String fileName, String content) {
-try {
-File f = new File(dir, fileName);
-FileWriter w = new FileWriter(f);
-BufferedWriter bw = new BufferedWriter(w);
-bw.write(content);
-bw.close();
-w.close();
-}
-catch (IOException ioe) {
-System.err.println("can't write file");
-ioe.printStackTrace(System.err);
-}
-}
-
 protected void mkdir(String dir) {
 File f = new File(dir);
 f.mkdirs();
@@ -13,6 +13,7 @@ import org.antlr.v4.runtime.WritableToken;
 import org.antlr.v4.runtime.misc.Utils;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.antlr.v4.tool.ANTLRMessage;
 import org.antlr.v4.tool.GrammarSemanticsMessage;
 import org.junit.rules.TestRule;
@@ -31,7 +32,6 @@ import javax.xml.transform.stream.StreamResult;
 import javax.xml.xpath.XPathConstants;
 import javax.xml.xpath.XPathExpression;
 import javax.xml.xpath.XPathFactory;
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -49,6 +49,7 @@ import java.util.Map;
 import java.util.Set;
 
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -428,7 +429,7 @@ public class BaseCSharpTest implements RuntimeTestSupport /*, SpecialRuntimeTest
 }
 
 private String locateTool(String tool) {
-String[] roots = { "/opt/local/bin/", "/usr/bin/", "/usr/local/bin/" };
+String[] roots = { "/opt/local/bin/", "/usr/local/bin/", "/usr/bin/" };
 for(String root : roots) {
 if(new File(root + tool).exists())
 return root + tool;
@@ -657,41 +658,6 @@ public class BaseCSharpTest implements RuntimeTestSupport /*, SpecialRuntimeTest
 }
 
 
-public static class StreamVacuum implements Runnable {
-StringBuilder buf = new StringBuilder();
-BufferedReader in;
-Thread sucker;
-public StreamVacuum(InputStream in) {
-this.in = new BufferedReader( new InputStreamReader(in) );
-}
-public void start() {
-sucker = new Thread(this);
-sucker.start();
-}
-@Override
-public void run() {
-try {
-String line = in.readLine();
-while (line!=null) {
-buf.append(line);
-buf.append('\n');
-line = in.readLine();
-}
-}
-catch (IOException ioe) {
-System.err.println("can't read output from process");
-}
-}
-/** wait for the thread to finish */
-public void join() throws InterruptedException {
-sucker.join();
-}
-@Override
-public String toString() {
-return buf.toString();
-}
-}
-
 protected void checkGrammarSemanticsError(ErrorQueue equeue,
 GrammarSemanticsMessage expectedMessage)
 throws Exception
@@ -734,16 +700,6 @@ public class BaseCSharpTest implements RuntimeTestSupport /*, SpecialRuntimeTest
 }
 }
 
-public static void writeFile(String dir, String fileName, String content) {
-try {
-Utils.writeFile(dir+"/"+fileName, content, "UTF-8");
-}
-catch (IOException ioe) {
-System.err.println("can't write file");
-ioe.printStackTrace(System.err);
-}
-}
-
 protected void mkdir(String dir) {
 File f = new File(dir);
 f.mkdirs();
@@ -112,7 +112,7 @@ public class ParseTreesDescriptors {
 
 @Override
 public boolean ignore(String targetName) {
-return !targetName.matches("Java|Python2|Python3|Node|Swift");
+return !targetName.matches("Java|Python2|Python3|Node|Swift|CSharp");
 }
 }
 
@@ -33,6 +33,7 @@ import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.antlr.v4.tool.ANTLRMessage;
 import org.antlr.v4.tool.DOTGenerator;
 import org.antlr.v4.tool.Grammar;
@@ -43,8 +44,6 @@ import org.stringtemplate.v4.ST;
 import org.stringtemplate.v4.STGroup;
 import org.stringtemplate.v4.STGroupString;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileFilter;
 import java.io.FileInputStream;
@@ -52,7 +51,6 @@ import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.net.URL;
 import java.util.ArrayList;
@@ -70,6 +68,7 @@ import static junit.framework.TestCase.assertFalse;
 import static junit.framework.TestCase.assertNotNull;
 import static junit.framework.TestCase.assertTrue;
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertArrayEquals;
 
 public class BaseGoTest implements RuntimeTestSupport {
@@ -309,7 +308,7 @@ public class BaseGoTest implements RuntimeTestSupport {
 boolean success = rawGenerateAndBuildRecognizer(grammarFileName,
 grammarStr, null, lexerName, "-no-listener");
 assertTrue(success);
-writeFile(overall_tmpdir, "input", input);
+writeFile(overall_tmpdir.toString(), "input", input);
 writeLexerTestFile(lexerName, showDFA);
 String output = execModule("Test.go");
 return output;
@@ -338,7 +337,7 @@ public class BaseGoTest implements RuntimeTestSupport {
 boolean success = rawGenerateAndBuildRecognizer(grammarFileName,
 grammarStr, parserName, lexerName, "-visitor");
 assertTrue(success);
-writeFile(overall_tmpdir, "input", input);
+writeFile(overall_tmpdir.toString(), "input", input);
 rawBuildRecognizerTestFile(parserName, lexerName, listenerName,
 visitorName, startRuleName, showDiagnosticErrors);
 return execRecognizer();
@@ -571,45 +570,6 @@ public class BaseGoTest implements RuntimeTestSupport {
 }
 }
 
-public static class StreamVacuum implements Runnable {
-StringBuilder buf = new StringBuilder();
-BufferedReader in;
-Thread sucker;
-
-public StreamVacuum(InputStream in) {
-this.in = new BufferedReader(new InputStreamReader(in));
-}
-
-public void start() {
-sucker = new Thread(this);
-sucker.start();
-}
-
-@Override
-public void run() {
-try {
-String line = in.readLine();
-while (line != null) {
-buf.append(line);
-buf.append('\n');
-line = in.readLine();
-}
-} catch (IOException ioe) {
-System.err.println("can't read output from process");
-}
-}
-
-/** wait for the thread to finish */
-public void join() throws InterruptedException {
-sucker.join();
-}
-
-@Override
-public String toString() {
-return buf.toString();
-}
-}
-
 protected void checkGrammarSemanticsError(ErrorQueue equeue,
 GrammarSemanticsMessage expectedMessage) throws Exception {
 ANTLRMessage foundMsg = null;
@@ -700,35 +660,6 @@ public class BaseGoTest implements RuntimeTestSupport {
 }
 }
 
-public static void writeFile(File dir, String fileName, String content) {
-try {
-File f = new File(dir, fileName);
-FileWriter w = new FileWriter(f);
-BufferedWriter bw = new BufferedWriter(w);
-bw.write(content);
-bw.close();
-w.close();
-} catch (IOException ioe) {
-System.err.println("can't write file");
-ioe.printStackTrace(System.err);
-}
-}
-
-public static void writeFile(String dir, String fileName, InputStream content) {
-try {
-File f = new File(dir, fileName);
-OutputStream output = new FileOutputStream(f);
-while(content.available()>0) {
-int b = content.read();
-output.write(b);
-}
-output.close();
-} catch (IOException ioe) {
-System.err.println("can't write file");
-ioe.printStackTrace(System.err);
-}
-}
-
 protected void mkdir(File dir) {
 dir.mkdirs();
 }
@@ -785,7 +716,7 @@ public class BaseGoTest implements RuntimeTestSupport {
 outputFileST.add("listenerName", listenerName);
 outputFileST.add("visitorName", visitorName);
 outputFileST.add("parserStartRuleName", parserStartRuleName.substring(0, 1).toUpperCase() + parserStartRuleName.substring(1) );
-writeFile(overall_tmpdir, "Test.go", outputFileST.render());
+writeFile(overall_tmpdir.toString(), "Test.go", outputFileST.render());
 }
 
 
@@ -813,7 +744,7 @@ public class BaseGoTest implements RuntimeTestSupport {
 + "}\n"
 + "\n");
 outputFileST.add("lexerName", lexerName);
-writeFile(overall_tmpdir, "Test.go", outputFileST.render());
+writeFile(overall_tmpdir.toString(), "Test.go", outputFileST.render());
 }
 
 public void writeRecognizer(String parserName, String lexerName,
@@ -40,6 +40,7 @@ import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.test.runtime.BaseRuntimeTest;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.antlr.v4.tool.ANTLRMessage;
 import org.antlr.v4.tool.Grammar;
 import org.antlr.v4.tool.GrammarSemanticsMessage;
@@ -53,7 +54,6 @@ import javax.tools.JavaCompiler;
 import javax.tools.JavaFileObject;
 import javax.tools.StandardJavaFileManager;
 import javax.tools.ToolProvider;
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -81,6 +81,7 @@ import static junit.framework.TestCase.assertEquals;
 import static junit.framework.TestCase.assertFalse;
 import static junit.framework.TestCase.assertNotNull;
 import static junit.framework.TestCase.assertTrue;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertArrayEquals;
 
 public class BaseJavaTest implements RuntimeTestSupport {
@@ -702,6 +703,7 @@ public class BaseJavaTest implements RuntimeTestSupport {
 try {
 String[] args = new String[] {
 "java", "-classpath", tmpdir+pathSep+CLASSPATH,
+"-Dfile.encoding=UTF-8",
 className, new File(tmpdir, "input").getAbsolutePath()
 };
 // String cmdLine = Utils.join(args, " ");
@@ -823,41 +825,6 @@ public class BaseJavaTest implements RuntimeTestSupport {
 }
 }
 
-public static class StreamVacuum implements Runnable {
-StringBuilder buf = new StringBuilder();
-BufferedReader in;
-Thread sucker;
-public StreamVacuum(InputStream in) {
-this.in = new BufferedReader( new InputStreamReader(in) );
-}
-public void start() {
-sucker = new Thread(this);
-sucker.start();
-}
-@Override
-public void run() {
-try {
-String line = in.readLine();
-while (line!=null) {
-buf.append(line);
-buf.append('\n');
-line = in.readLine();
-}
-}
-catch (IOException ioe) {
-System.err.println("can't read output from process");
-}
-}
-/** wait for the thread to finish */
-public void join() throws InterruptedException {
-sucker.join();
-}
-@Override
-public String toString() {
-return buf.toString();
-}
-}
-
 protected void checkGrammarSemanticsError(ErrorQueue equeue,
 GrammarSemanticsMessage expectedMessage)
 throws Exception
@@ -941,16 +908,6 @@ public class BaseJavaTest implements RuntimeTestSupport {
 }
 }
 
-public static void writeFile(String dir, String fileName, String content) {
-try {
-Utils.writeFile(dir+"/"+fileName, content, "UTF-8");
-}
-catch (IOException ioe) {
-System.err.println("can't write file");
-ioe.printStackTrace(System.err);
-}
-}
-
 protected void writeTestFile(String parserName,
 String lexerName,
 String parserStartRuleName,
@@ -961,11 +918,12 @@ public class BaseJavaTest implements RuntimeTestSupport {
 "import org.antlr.v4.runtime.*;\n" +
 "import org.antlr.v4.runtime.tree.*;\n" +
 "import org.antlr.v4.runtime.atn.*;\n" +
+"import java.nio.file.Paths;\n"+
 "import java.util.Arrays;\n"+
 "\n" +
 "public class Test {\n" +
 " public static void main(String[] args) throws Exception {\n" +
-" CharStream input = new ANTLRFileStream(args[0]);\n" +
+" CharStream input = CharStreams.createWithUTF8(Paths.get(args[0]));\n" +
 " <lexerName> lex = new <lexerName>(input);\n" +
 " CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
 " <createParser>\n"+
@@ -1017,11 +975,12 @@
 
 protected void writeLexerTestFile(String lexerName, boolean showDFA) {
 ST outputFileST = new ST(
+"import java.nio.file.Paths;\n" +
 "import org.antlr.v4.runtime.*;\n" +
 "\n" +
 "public class Test {\n" +
 " public static void main(String[] args) throws Exception {\n" +
-" CharStream input = new ANTLRFileStream(args[0]);\n" +
+" CharStream input = CharStreams.createWithUTF8(Paths.get(args[0]));\n" +
 " <lexerName> lex = new <lexerName>(input);\n" +
 " CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
 " tokens.fill();\n" +
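The generated `Test.java` mains above switch from `new ANTLRFileStream(args[0])` to `CharStreams.createWithUTF8(Paths.get(...))`, so test input is always decoded as UTF-8 and handled as Unicode code points. A hedged before/after sketch of the same migration outside the templates (the file name parameter is illustrative):

```java
import java.nio.file.Paths;

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;

public class CharStreamMigrationSketch {
    // Opens a test input file the way the regenerated tests in this commit do.
    public static CharStream open(String fileName) throws Exception {
        // Old style (decodes with the platform default encoding):
        //   CharStream input = new ANTLRFileStream(fileName);
        // New style used by the generated tests here (always UTF-8):
        return CharStreams.createWithUTF8(Paths.get(fileName));
    }
}
```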
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+package org.antlr.v4.test.runtime.java;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+
+import java.nio.channels.SeekableByteChannel;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.CodingErrorAction;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import java.util.Arrays;
+
+import org.antlr.v4.runtime.CharStreams;
+import org.antlr.v4.runtime.CodePointCharStream;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TemporaryFolder;
+
+public class TestCharStreams {
+@Rule
+public TemporaryFolder folder = new TemporaryFolder();
+
+@Rule
+public ExpectedException thrown = ExpectedException.none();
+
+@Test
+public void createWithBMPStringHasExpectedSize() {
+CodePointCharStream s = CharStreams.createWithString("hello");
+assertEquals(5, s.size());
+assertEquals(0, s.index());
+assertEquals("hello", s.toString());
+}
+
+@Test
+public void createWithSMPStringHasExpectedSize() {
+CodePointCharStream s = CharStreams.createWithString(
+"hello \uD83C\uDF0E");
+assertEquals(7, s.size());
+assertEquals(0, s.index());
+assertEquals("hello \uD83C\uDF0E", s.toString());
+}
+
+@Test
+public void createWithBMPUTF8PathHasExpectedSize() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello".getBytes(StandardCharsets.UTF_8));
+CodePointCharStream s = CharStreams.createWithUTF8(p);
+assertEquals(5, s.size());
+assertEquals(0, s.index());
+assertEquals("hello", s.toString());
+assertEquals(p.toString(), s.getSourceName());
+}
+
+@Test
+public void createWithSMPUTF8PathHasExpectedSize() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8));
+CodePointCharStream s = CharStreams.createWithUTF8(p);
+assertEquals(7, s.size());
+assertEquals(0, s.index());
+assertEquals("hello \uD83C\uDF0E", s.toString());
+assertEquals(p.toString(), s.getSourceName());
+}
+
+@Test
+public void createWithBMPUTF8InputStreamHasExpectedSize() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello".getBytes(StandardCharsets.UTF_8));
+try (InputStream is = Files.newInputStream(p)) {
+CodePointCharStream s = CharStreams.createWithUTF8Stream(is);
+assertEquals(5, s.size());
+assertEquals(0, s.index());
+assertEquals("hello", s.toString());
+}
+}
+
+@Test
+public void createWithSMPUTF8InputStreamHasExpectedSize() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8));
+try (InputStream is = Files.newInputStream(p)) {
+CodePointCharStream s = CharStreams.createWithUTF8Stream(is);
+assertEquals(7, s.size());
+assertEquals(0, s.index());
+assertEquals("hello \uD83C\uDF0E", s.toString());
+}
+}
+
+@Test
+public void createWithBMPUTF8ChannelHasExpectedSize() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello".getBytes(StandardCharsets.UTF_8));
+try (SeekableByteChannel c = Files.newByteChannel(p)) {
+CodePointCharStream s = CharStreams.createWithUTF8Channel(
+c, 4096, CodingErrorAction.REPLACE, "foo");
+assertEquals(5, s.size());
+assertEquals(0, s.index());
+assertEquals("hello", s.toString());
+assertEquals("foo", s.getSourceName());
+}
+}
+
+@Test
+public void createWithSMPUTF8ChannelHasExpectedSize() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8));
+try (SeekableByteChannel c = Files.newByteChannel(p)) {
+CodePointCharStream s = CharStreams.createWithUTF8Channel(
+c, 4096, CodingErrorAction.REPLACE, "foo");
+assertEquals(7, s.size());
+assertEquals(0, s.index());
+assertEquals("hello \uD83C\uDF0E", s.toString());
+assertEquals("foo", s.getSourceName());
+}
+}
+
+@Test
+public void createWithInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode()
+throws Exception {
+Path p = folder.newFile().toPath();
+byte[] toWrite = new byte[] { (byte)0xCA, (byte)0xFE, (byte)0xFE, (byte)0xED };
+Files.write(p, toWrite);
+try (SeekableByteChannel c = Files.newByteChannel(p)) {
+CodePointCharStream s = CharStreams.createWithUTF8Channel(
+c, 4096, CodingErrorAction.REPLACE, "foo");
+assertEquals(3, s.size());
+assertEquals(0, s.index());
+assertEquals("\uFFFD\uFFFD\uFFFD", s.toString());
+}
+}
+
+@Test
+public void createWithInvalidUTF8BytesThrowsInReportMode() throws Exception {
+Path p = folder.newFile().toPath();
+byte[] toWrite = new byte[] { (byte)0xCA, (byte)0xFE };
+Files.write(p, toWrite);
+try (SeekableByteChannel c = Files.newByteChannel(p)) {
+thrown.expect(CharacterCodingException.class);
+CharStreams.createWithUTF8Channel(c, 4096, CodingErrorAction.REPORT, "foo");
+}
+}
+
+@Test
+public void createWithSMPUTF8SequenceStraddlingBufferBoundary() throws Exception {
+Path p = folder.newFile().toPath();
+Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8));
+try (SeekableByteChannel c = Files.newByteChannel(p)) {
+CodePointCharStream s = CharStreams.createWithUTF8Channel(
+c,
+// Note this buffer size ensures the SMP code point
+// straddles the boundary of two buffers
+8,
+CodingErrorAction.REPLACE,
+"foo");
+assertEquals(7, s.size());
+assertEquals(0, s.index());
+assertEquals("hello \uD83C\uDF0E", s.toString());
+}
+}
+}
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+package org.antlr.v4.test.runtime.java;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.nio.IntBuffer;
+
+import org.antlr.v4.runtime.CharStreams;
+import org.antlr.v4.runtime.CodePointCharStream;
+import org.antlr.v4.runtime.IntStream;
+
+import org.antlr.v4.runtime.misc.Interval;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class TestCodePointCharStream {
+	@Rule
+	public ExpectedException thrown = ExpectedException.none();
+
+	@Test
+	public void emptyBytesHasSize0() {
+		CodePointCharStream s = CharStreams.createWithString("");
+		assertEquals(0, s.size());
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void emptyBytesLookAheadReturnsEOF() {
+		CodePointCharStream s = CharStreams.createWithString("");
+		assertEquals(IntStream.EOF, s.LA(1));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void consumingEmptyStreamShouldThrow() {
+		CodePointCharStream s = CharStreams.createWithString("");
+		thrown.expect(IllegalStateException.class);
+		thrown.expectMessage("cannot consume EOF");
+		s.consume();
+	}
+
+	@Test
+	public void singleLatinCodePointHasSize1() {
+		CodePointCharStream s = CharStreams.createWithString("X");
+		assertEquals(1, s.size());
+	}
+
+	@Test
+	public void consumingSingleLatinCodePointShouldMoveIndex() {
+		CodePointCharStream s = CharStreams.createWithString("X");
+		assertEquals(0, s.index());
+		s.consume();
+		assertEquals(1, s.index());
+	}
+
+	@Test
+	public void consumingPastSingleLatinCodePointShouldThrow() {
+		CodePointCharStream s = CharStreams.createWithString("X");
+		s.consume();
+		thrown.expect(IllegalStateException.class);
+		thrown.expectMessage("cannot consume EOF");
+		s.consume();
+	}
+
+	@Test
+	public void singleLatinCodePointLookAheadShouldReturnCodePoint() {
+		CodePointCharStream s = CharStreams.createWithString("X");
+		assertEquals('X', s.LA(1));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void multipleLatinCodePointsLookAheadShouldReturnCodePoints() {
+		CodePointCharStream s = CharStreams.createWithString("XYZ");
+		assertEquals('X', s.LA(1));
+		assertEquals(0, s.index());
+		assertEquals('Y', s.LA(2));
+		assertEquals(0, s.index());
+		assertEquals('Z', s.LA(3));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void singleLatinCodePointLookAheadPastEndShouldReturnEOF() {
+		CodePointCharStream s = CharStreams.createWithString("X");
+		assertEquals(IntStream.EOF, s.LA(2));
+	}
+
+	@Test
+	public void singleCJKCodePointHasSize1() {
+		CodePointCharStream s = CharStreams.createWithString("\u611B");
+		assertEquals(1, s.size());
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void consumingSingleCJKCodePointShouldMoveIndex() {
+		CodePointCharStream s = CharStreams.createWithString("\u611B");
+		assertEquals(0, s.index());
+		s.consume();
+		assertEquals(1, s.index());
+	}
+
+	@Test
+	public void consumingPastSingleCJKCodePointShouldThrow() {
+		CodePointCharStream s = CharStreams.createWithString("\u611B");
+		s.consume();
+		thrown.expect(IllegalStateException.class);
+		thrown.expectMessage("cannot consume EOF");
+		s.consume();
+	}
+
+	@Test
+	public void singleCJKCodePointLookAheadShouldReturnCodePoint() {
+		CodePointCharStream s = CharStreams.createWithString("\u611B");
+		assertEquals(0x611B, s.LA(1));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void singleCJKCodePointLookAheadPastEndShouldReturnEOF() {
+		CodePointCharStream s = CharStreams.createWithString("\u611B");
+		assertEquals(IntStream.EOF, s.LA(2));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void singleEmojiCodePointHasSize1() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder().appendCodePoint(0x1F4A9).toString());
+		assertEquals(1, s.size());
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void consumingSingleEmojiCodePointShouldMoveIndex() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder().appendCodePoint(0x1F4A9).toString());
+		assertEquals(0, s.index());
+		s.consume();
+		assertEquals(1, s.index());
+	}
+
+	@Test
+	public void consumingPastEndOfEmojiCodePointWithShouldThrow() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder().appendCodePoint(0x1F4A9).toString());
+		assertEquals(0, s.index());
+		s.consume();
+		assertEquals(1, s.index());
+		thrown.expect(IllegalStateException.class);
+		thrown.expectMessage("cannot consume EOF");
+		s.consume();
+	}
+
+	@Test
+	public void singleEmojiCodePointLookAheadShouldReturnCodePoint() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder().appendCodePoint(0x1F4A9).toString());
+		assertEquals(0x1F4A9, s.LA(1));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void singleEmojiCodePointLookAheadPastEndShouldReturnEOF() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder().appendCodePoint(0x1F4A9).toString());
+		assertEquals(IntStream.EOF, s.LA(2));
+		assertEquals(0, s.index());
+	}
+
+	@Test
+	public void getTextWithLatin() {
+		CodePointCharStream s = CharStreams.createWithString("0123456789");
+		assertEquals("34567", s.getText(Interval.of(3, 7)));
+	}
+
+	@Test
+	public void getTextWithCJK() {
+		CodePointCharStream s = CharStreams.createWithString("01234\u40946789");
+		assertEquals("34\u409467", s.getText(Interval.of(3, 7)));
+	}
+
+	@Test
+	public void getTextWithEmoji() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder("01234")
+					.appendCodePoint(0x1F522)
+					.append("6789")
+					.toString());
+		assertEquals("34\uD83D\uDD2267", s.getText(Interval.of(3, 7)));
+	}
+
+	@Test
+	public void toStringWithLatin() {
+		CodePointCharStream s = CharStreams.createWithString("0123456789");
+		assertEquals("0123456789", s.toString());
+	}
+
+	@Test
+	public void toStringWithCJK() {
+		CodePointCharStream s = CharStreams.createWithString("01234\u40946789");
+		assertEquals("01234\u40946789", s.toString());
+	}
+
+	@Test
+	public void toStringWithEmoji() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder("01234")
+					.appendCodePoint(0x1F522)
+					.append("6789")
+					.toString());
+		assertEquals("01234\uD83D\uDD226789", s.toString());
+	}
+
+	@Test
+	public void lookAheadWithLatin() {
+		CodePointCharStream s = CharStreams.createWithString("0123456789");
+		assertEquals('5', s.LA(6));
+	}
+
+	@Test
+	public void lookAheadWithCJK() {
+		CodePointCharStream s = CharStreams.createWithString("01234\u40946789");
+		assertEquals(0x4094, s.LA(6));
+	}
+
+	@Test
+	public void lookAheadWithEmoji() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder("01234")
+					.appendCodePoint(0x1F522)
+					.append("6789")
+					.toString());
+		assertEquals(0x1F522, s.LA(6));
+	}
+
+	@Test
+	public void seekWithLatin() {
+		CodePointCharStream s = CharStreams.createWithString("0123456789");
+		s.seek(5);
+		assertEquals('5', s.LA(1));
+	}
+
+	@Test
+	public void seekWithCJK() {
+		CodePointCharStream s = CharStreams.createWithString("01234\u40946789");
+		s.seek(5);
+		assertEquals(0x4094, s.LA(1));
+	}
+
+	@Test
+	public void seekWithEmoji() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder("01234")
+					.appendCodePoint(0x1F522)
+					.append("6789")
+					.toString());
+		s.seek(5);
+		assertEquals(0x1F522, s.LA(1));
+	}
+
+	@Test
+	public void lookBehindWithLatin() {
+		CodePointCharStream s = CharStreams.createWithString("0123456789");
+		s.seek(6);
+		assertEquals('5', s.LA(-1));
+	}
+
+	@Test
+	public void lookBehindWithCJK() {
+		CodePointCharStream s = CharStreams.createWithString("01234\u40946789");
+		s.seek(6);
+		assertEquals(0x4094, s.LA(-1));
+	}
+
+	@Test
+	public void lookBehindWithEmoji() {
+		CodePointCharStream s = CharStreams.createWithString(
+				new StringBuilder("01234")
+					.appendCodePoint(0x1F522)
+					.append("6789")
+					.toString());
+		s.seek(6);
+		assertEquals(0x1F522, s.LA(-1));
+	}
+}
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+package org.antlr.v4.test.runtime.java;
+
+import static org.junit.Assert.assertArrayEquals;
+
+import org.antlr.v4.runtime.misc.IntegerList;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class TestIntegerList {
+	@Rule
+	public ExpectedException thrown = ExpectedException.none();
+
+	@Test
+	public void emptyListToEmptyCharArray() {
+		IntegerList l = new IntegerList();
+		assertArrayEquals(new char[0], l.toCharArray());
+	}
+
+	@Test
+	public void negativeIntegerToCharArrayThrows() {
+		IntegerList l = new IntegerList();
+		l.add(-42);
+		thrown.expect(IllegalArgumentException.class);
+		l.toCharArray();
+	}
+
+	@Test
+	public void surrogateRangeIntegerToCharArray() {
+		IntegerList l = new IntegerList();
+		// Java allows dangling surrogates, so (currently) we do
+		// as well. We could change this if desired.
+		l.add(0xDC00);
+		char expected[] = new char[] { 0xDC00 };
+		assertArrayEquals(expected, l.toCharArray());
+	}
+
+	@Test
+	public void tooLargeIntegerToCharArrayThrows() {
+		IntegerList l = new IntegerList();
+		l.add(0x110000);
+		thrown.expect(IllegalArgumentException.class);
+		l.toCharArray();
+	}
+
+	@Test
+	public void unicodeBMPIntegerListToCharArray() {
+		IntegerList l = new IntegerList();
+		l.add(0x35);
+		l.add(0x4E94);
+		l.add(0xFF15);
+		char expected[] = new char[] { 0x35, 0x4E94, 0xFF15 };
+		assertArrayEquals(expected, l.toCharArray());
+	}
+
+	@Test
+	public void unicodeSMPIntegerListToCharArray() {
+		IntegerList l = new IntegerList();
+		l.add(0x104A5);
+		l.add(0x116C5);
+		l.add(0x1D7FB);
+		char expected[] = new char[] { 0xD801, 0xDCA5, 0xD805, 0xDEC5, 0xD835, 0xDFFB };
+		assertArrayEquals(expected, l.toCharArray());
+	}
+}
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+package org.antlr.v4.test.runtime.java;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.IntBuffer;
+
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.CodingErrorAction;
+import java.nio.charset.StandardCharsets;
+
+import org.antlr.v4.runtime.UTF8CodePointDecoder;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class TestUTF8CodePointDecoder {
+	@Rule
+	public ExpectedException thrown = ExpectedException.none();
+
+	@Test
+	public void decodeEmptyByteBufferWritesNothing() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = ByteBuffer.allocate(0);
+		IntBuffer codePointsOut = IntBuffer.allocate(0);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(
+				utf8BytesIn,
+				codePointsOut,
+				true);
+		result.flip();
+		assertEquals(0, result.remaining());
+	}
+
+	@Test
+	public void decodeLatinByteBufferWritesCodePoint() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = StandardCharsets.UTF_8.encode("X");
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(
+				utf8BytesIn,
+				codePointsOut,
+				true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals('X', result.get(0));
+	}
+
+	@Test
+	public void decodeCyrillicByteBufferWritesCodePoint() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = StandardCharsets.UTF_8.encode("\u042F");
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(
+				utf8BytesIn,
+				codePointsOut,
+				true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals(0x042F, result.get(0));
+	}
+
+	@Test
+	public void decodeCJKByteBufferWritesCodePoint() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = StandardCharsets.UTF_8.encode("\u611B");
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(
+				utf8BytesIn,
+				codePointsOut,
+				true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals(0x611B, result.get(0));
+	}
+
+	@Test
+	public void decodeEmojiByteBufferWritesCodePoint() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = StandardCharsets.UTF_8.encode(
+				new StringBuilder().appendCodePoint(0x1F4A9).toString()
+		);
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(
+				utf8BytesIn,
+				codePointsOut,
+				true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals(0x1F4A9, result.get(0));
+	}
+
+	@Test
+	public void decodingInvalidLeadInReplaceModeWritesSubstitutionCharacter() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = ByteBuffer.wrap(new byte[] { (byte)0xF8 });
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(utf8BytesIn, codePointsOut, true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals(0xFFFD, result.get(0));
+	}
+
+	@Test
+	public void decodingInvalidLeadInReportModeThrows() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPORT);
+		ByteBuffer utf8BytesIn = ByteBuffer.wrap(new byte[] { (byte)0xF8 });
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		thrown.expect(CharacterCodingException.class);
+		thrown.expectMessage("Invalid UTF-8 leading byte 0xF8");
+		decoder.decodeCodePointsFromBuffer(utf8BytesIn, codePointsOut, true);
+	}
+
+	@Test
+	public void decodingInvalidTrailInReplaceModeWritesSubstitutionCharacter() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		ByteBuffer utf8BytesIn = ByteBuffer.wrap(new byte[] { (byte)0xC0, (byte)0xC0 });
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(utf8BytesIn, codePointsOut, true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals(0xFFFD, result.get(0));
+	}
+
+	@Test
+	public void decodingInvalidTrailInReportModeThrows() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPORT);
+		ByteBuffer utf8BytesIn = ByteBuffer.wrap(new byte[] { (byte)0xC0, (byte)0xC0 });
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		thrown.expect(CharacterCodingException.class);
+		thrown.expectMessage("Invalid UTF-8 trailing byte 0xC0");
+		decoder.decodeCodePointsFromBuffer(utf8BytesIn, codePointsOut, true);
+	}
+
+	@Test
+	public void decodingNonShortestFormInReplaceModeWritesSubstitutionCharacter() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
+		// 0xC1 0x9C would decode to \ (U+005C) if we didn't have this check
+		ByteBuffer utf8BytesIn = ByteBuffer.wrap(new byte[] { (byte)0xC1, (byte)0x9C });
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		IntBuffer result = decoder.decodeCodePointsFromBuffer(utf8BytesIn, codePointsOut, true);
+		result.flip();
+		assertEquals(1, result.remaining());
+		assertEquals(0xFFFD, result.get(0));
+	}
+
+	@Test
+	public void decodingNonShortestFormInReportModeThrows() throws Exception {
+		UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPORT);
+		// 0xC1 0x9C would decode to \ (U+005C) if we didn't have this check
+		ByteBuffer utf8BytesIn = ByteBuffer.wrap(new byte[] { (byte)0xC1, (byte)0x9C });
+		IntBuffer codePointsOut = IntBuffer.allocate(1);
+		thrown.expect(CharacterCodingException.class);
+		thrown.expectMessage("Code point 92 is out of expected range 128..2047");
+		decoder.decodeCodePointsFromBuffer(utf8BytesIn, codePointsOut, true);
+	}
+}
@@ -53,13 +53,8 @@ import org.stringtemplate.v4.ST;
 import org.stringtemplate.v4.STGroup;
 import org.stringtemplate.v4.STGroupString;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.net.BindException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -72,6 +67,7 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -528,41 +524,6 @@ public abstract class BaseBrowserTest implements RuntimeTestSupport {
 		}
 	}
 
-	public static class StreamVacuum implements Runnable {
-		StringBuilder buf = new StringBuilder();
-		BufferedReader in;
-		Thread sucker;
-		public StreamVacuum(InputStream in) {
-			this.in = new BufferedReader( new InputStreamReader(in) );
-		}
-		public void start() {
-			sucker = new Thread(this);
-			sucker.start();
-		}
-		@Override
-		public void run() {
-			try {
-				String line = in.readLine();
-				while (line!=null) {
-					buf.append(line);
-					buf.append('\n');
-					line = in.readLine();
-				}
-			}
-			catch (IOException ioe) {
-				System.err.println("can't read output from process");
-			}
-		}
-		/** wait for the thread to finish */
-		public void join() throws InterruptedException {
-			sucker.join();
-		}
-		@Override
-		public String toString() {
-			return buf.toString();
-		}
-	}
-
 	protected void checkGrammarSemanticsError(ErrorQueue equeue,
 											  GrammarSemanticsMessage expectedMessage)
 		throws Exception
@@ -646,21 +607,6 @@ public abstract class BaseBrowserTest implements RuntimeTestSupport {
 		}
 	}
 
-	public static void writeFile(String dir, String fileName, String content) {
-		try {
-			File f = new File(dir, fileName);
-			FileWriter w = new FileWriter(f);
-			BufferedWriter bw = new BufferedWriter(w);
-			bw.write(content);
-			bw.close();
-			w.close();
-		}
-		catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
 	protected void mkdir(String dir) {
 		File f = new File(dir);
 		f.mkdirs();
@@ -33,6 +33,7 @@ import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.antlr.v4.tool.ANTLRMessage;
 import org.antlr.v4.tool.DOTGenerator;
 import org.antlr.v4.tool.Grammar;
@@ -43,15 +44,8 @@ import org.stringtemplate.v4.ST;
 import org.stringtemplate.v4.STGroup;
 import org.stringtemplate.v4.STGroupString;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -64,6 +58,7 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -524,45 +519,6 @@ public class BaseNodeTest implements RuntimeTestSupport {
 		}
 	}
 
-	public static class StreamVacuum implements Runnable {
-		StringBuilder buf = new StringBuilder();
-		BufferedReader in;
-		Thread sucker;
-
-		public StreamVacuum(InputStream in) {
-			this.in = new BufferedReader(new InputStreamReader(in));
-		}
-
-		public void start() {
-			sucker = new Thread(this);
-			sucker.start();
-		}
-
-		@Override
-		public void run() {
-			try {
-				String line = in.readLine();
-				while (line != null) {
-					buf.append(line);
-					buf.append('\n');
-					line = in.readLine();
-				}
-			} catch (IOException ioe) {
-				System.err.println("can't read output from process");
-			}
-		}
-
-		/** wait for the thread to finish */
-		public void join() throws InterruptedException {
-			sucker.join();
-		}
-
-		@Override
-		public String toString() {
-			return buf.toString();
-		}
-	}
-
 	protected void checkGrammarSemanticsError(ErrorQueue equeue,
 			GrammarSemanticsMessage expectedMessage) throws Exception {
 		ANTLRMessage foundMsg = null;
@@ -653,35 +609,6 @@ public class BaseNodeTest implements RuntimeTestSupport {
 		}
 	}
 
-	public static void writeFile(String dir, String fileName, String content) {
-		try {
-			File f = new File(dir, fileName);
-			FileWriter w = new FileWriter(f);
-			BufferedWriter bw = new BufferedWriter(w);
-			bw.write(content);
-			bw.close();
-			w.close();
-		} catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
-	public static void writeFile(String dir, String fileName, InputStream content) {
-		try {
-			File f = new File(dir, fileName);
-			OutputStream output = new FileOutputStream(f);
-			while(content.available()>0) {
-				int b = content.read();
-				output.write(b);
-			}
-			output.close();
-		} catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
 	protected void mkdir(String dir) {
 		File f = new File(dir);
 		f.mkdirs();
@@ -36,6 +36,7 @@ import org.antlr.v4.runtime.tree.ParseTree;
 import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.antlr.v4.tool.ANTLRMessage;
 import org.antlr.v4.tool.DOTGenerator;
 import org.antlr.v4.tool.Grammar;
@@ -49,13 +50,7 @@ import org.stringtemplate.v4.ST;
 import org.stringtemplate.v4.STGroup;
 import org.stringtemplate.v4.STGroupString;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URL;
@@ -70,6 +65,7 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -493,6 +489,7 @@ public abstract class BasePythonTest implements RuntimeTestSupport {
 		try {
 			ProcessBuilder builder = new ProcessBuilder( pythonPath, modulePath, inputPath );
 			builder.environment().put("PYTHONPATH",runtimePath);
+			builder.environment().put("PYTHONIOENCODING", "utf-8");
 			builder.directory(new File(tmpdir));
 			Process process = builder.start();
 			StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream());
@@ -651,41 +648,6 @@ public abstract class BasePythonTest implements RuntimeTestSupport {
 		}
 	}
 
-	public static class StreamVacuum implements Runnable {
-		StringBuilder buf = new StringBuilder();
-		BufferedReader in;
-		Thread sucker;
-		public StreamVacuum(InputStream in) {
-			this.in = new BufferedReader( new InputStreamReader(in) );
-		}
-		public void start() {
-			sucker = new Thread(this);
-			sucker.start();
-		}
-		@Override
-		public void run() {
-			try {
-				String line = in.readLine();
-				while (line!=null) {
-					buf.append(line);
-					buf.append('\n');
-					line = in.readLine();
-				}
-			}
-			catch (IOException ioe) {
-				System.err.println("can't read output from process");
-			}
-		}
-		/** wait for the thread to finish */
-		public void join() throws InterruptedException {
-			sucker.join();
-		}
-		@Override
-		public String toString() {
-			return buf.toString();
-		}
-	}
-
 	protected void checkGrammarSemanticsError(ErrorQueue equeue,
 											  GrammarSemanticsMessage expectedMessage)
 		throws Exception
@@ -769,21 +731,6 @@ public abstract class BasePythonTest implements RuntimeTestSupport {
 		}
 	}
 
-	public static void writeFile(String dir, String fileName, String content) {
-		try {
-			File f = new File(dir, fileName);
-			FileWriter w = new FileWriter(f);
-			BufferedWriter bw = new BufferedWriter(w);
-			bw.write(content);
-			bw.close();
-			w.close();
-		}
-		catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
 	protected void mkdir(String dir) {
 		File f = new File(dir);
 		f.mkdirs();
@@ -9,6 +9,8 @@ package org.antlr.v4.test.runtime.python2;
 import org.antlr.v4.test.runtime.python.BasePythonTest;
 import org.stringtemplate.v4.ST;
 
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
+
 public class BasePython2Test extends BasePythonTest {
 
 	@Override
@@ -30,7 +32,7 @@ public class BasePython2Test extends BasePythonTest {
 				+ "from <lexerName> import <lexerName>\n"
 				+ "\n"
 				+ "def main(argv):\n"
-				+ " input = FileStream(argv[1])\n"
+				+ " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n"
 				+ " lexer = <lexerName>(input)\n"
 				+ " stream = CommonTokenStream(lexer)\n"
 				+ " stream.fill()\n"
@@ -74,7 +76,7 @@ public class BasePython2Test extends BasePythonTest {
 				+ " raise IllegalStateException(\"Invalid parse tree shape detected.\")\n"
 				+ "\n"
 				+ "def main(argv):\n"
-				+ " input = FileStream(argv[1])\n"
+				+ " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n"
 				+ " lexer = <lexerName>(input)\n"
 				+ " stream = CommonTokenStream(lexer)\n"
 				+ "<createParser>"
@@ -8,6 +8,8 @@ package org.antlr.v4.test.runtime.python3;
 import org.antlr.v4.test.runtime.python.BasePythonTest;
 import org.stringtemplate.v4.ST;
 
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
+
 public class BasePython3Test extends BasePythonTest {
 
 	@Override
@@ -28,7 +30,7 @@ public class BasePython3Test extends BasePythonTest {
 				+ "from <lexerName> import <lexerName>\n"
 				+ "\n"
 				+ "def main(argv):\n"
-				+ " input = FileStream(argv[1])\n"
+				+ " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n"
 				+ " lexer = <lexerName>(input)\n"
 				+ " stream = CommonTokenStream(lexer)\n"
 				+ " stream.fill()\n"
@@ -72,7 +74,7 @@ public class BasePython3Test extends BasePythonTest {
 				+ " raise IllegalStateException(\"Invalid parse tree shape detected.\")\n"
 				+ "\n"
 				+ "def main(argv):\n"
-				+ " input = FileStream(argv[1])\n"
+				+ " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n"
 				+ " lexer = <lexerName>(input)\n"
 				+ " stream = CommonTokenStream(lexer)\n"
 				+ "<createParser>"
@@ -9,15 +9,11 @@ package org.antlr.v4.test.runtime.swift;
 import org.antlr.v4.Tool;
 import org.antlr.v4.test.runtime.ErrorQueue;
 import org.antlr.v4.test.runtime.RuntimeTestSupport;
+import org.antlr.v4.test.runtime.StreamVacuum;
 import org.stringtemplate.v4.ST;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -27,6 +23,7 @@ import java.util.List;
 import java.util.Set;
 
 import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString;
+import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile;
 import static org.junit.Assert.assertTrue;
 
 public class BaseSwiftTest implements RuntimeTestSupport {
@@ -57,7 +54,7 @@ public class BaseSwiftTest implements RuntimeTestSupport {
 		//add antlr.swift
 		final ClassLoader loader = Thread.currentThread().getContextClassLoader();
 
-		final URL swiftRuntime = loader.getResource("Swift/Antlr4");
+		final URL swiftRuntime = loader.getResource("Swift/Sources/Antlr4");
 		if (swiftRuntime == null) {
 			throw new RuntimeException("Swift runtime file not found at:" + swiftRuntime.getPath());
 		}
@@ -118,49 +115,6 @@ public class BaseSwiftTest implements RuntimeTestSupport {
 		return runProcess(argsString, ANTLR_FRAMEWORK_DIR);
 	}
 
-	public static class StreamVacuum implements Runnable {
-		StringBuilder buf = new StringBuilder();
-		BufferedReader in;
-		Thread sucker;
-
-		public StreamVacuum(InputStream in) {
-			this.in = new BufferedReader(new InputStreamReader(in));
-		}
-
-		public void start() {
-			sucker = new Thread(this);
-			sucker.start();
-		}
-
-		@Override
-		public void run() {
-			try {
-				String line = in.readLine();
-				while (line != null) {
-					buf.append(line);
-					buf.append('\n');
-					line = in.readLine();
-				}
-			}
-			catch (IOException ioe) {
-				System.err.println("can't read output from process");
-				ioe.printStackTrace(System.err);
-			}
-		}
-
-		/**
-		 * wait for the thread to finish
-		 */
-		public void join() throws InterruptedException {
-			sucker.join();
-		}
-
-		@Override
-		public String toString() {
-			return buf.toString();
-		}
-	}
-
 	public String tmpdir = null;
 
 	/**
@@ -364,20 +318,6 @@ public class BaseSwiftTest implements RuntimeTestSupport {
 		return execTest();
 	}
 
-	public static void writeFile(String dir, String fileName, String content) {
-		try {
-			File f = new File(dir, fileName);
-			FileWriter w = new FileWriter(f);
-			BufferedWriter bw = new BufferedWriter(w);
-			bw.write(content);
-			bw.close();
-			w.close();
-		} catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
 	protected void writeParserTestFile(String parserName,
 									   String lexerName,
 									   String parserStartRuleName,
@@ -4,36 +4,19 @@
  */
 using System;
 using System.IO;
+using System.Text;
 using Antlr4.Runtime;
 using Antlr4.Runtime.Misc;
 using Antlr4.Runtime.Sharpen;
 
 namespace Antlr4.Runtime
 {
-    /// <summary>
-    /// Vacuum all input from a
-    /// <see cref="System.IO.TextReader"/>
-    /// /
-    /// <see cref="System.IO.Stream"/>
-    /// and then treat it
-    /// like a
-    /// <c>char[]</c>
-    /// buffer. Can also pass in a
-    /// <see cref="string"/>
-    /// or
-    /// <c>char[]</c>
-    /// to use.
-    /// <p>If you need encoding, pass in stream/reader with correct encoding.</p>
-    /// </summary>
-    public class AntlrInputStream : ICharStream
+    public abstract class BaseInputCharStream : ICharStream
     {
         public const int ReadBufferSize = 1024;
 
         public const int InitialBufferSize = 1024;
 
-        /// <summary>The data being scanned</summary>
-        protected internal char[] data;
-
         /// <summary>How many characters are actually in the buffer</summary>
         protected internal int n;
 
@@ -43,72 +26,6 @@ namespace Antlr4.Runtime
         /// <summary>What is name or source of this char stream?</summary>
         public string name;
 
-        public AntlrInputStream()
-        {
-        }
-
-        /// <summary>Copy data in string to a local char array</summary>
-        public AntlrInputStream(string input)
-        {
-            this.data = input.ToCharArray();
-            this.n = input.Length;
-        }
-
-        /// <summary>This is the preferred constructor for strings as no data is copied</summary>
-        public AntlrInputStream(char[] data, int numberOfActualCharsInArray)
-        {
-            this.data = data;
-            this.n = numberOfActualCharsInArray;
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public AntlrInputStream(TextReader r)
-            : this(r, InitialBufferSize, ReadBufferSize)
-        {
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public AntlrInputStream(TextReader r, int initialSize)
-            : this(r, initialSize, ReadBufferSize)
-        {
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public AntlrInputStream(TextReader r, int initialSize, int readChunkSize)
-        {
-            Load(r, initialSize, readChunkSize);
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public AntlrInputStream(Stream input)
-            : this(new StreamReader(input), InitialBufferSize)
-        {
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public AntlrInputStream(Stream input, int initialSize)
-            : this(new StreamReader(input), initialSize)
-        {
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public AntlrInputStream(Stream input, int initialSize, int readChunkSize)
-            : this(new StreamReader(input), initialSize, readChunkSize)
-        {
-        }
-
-        /// <exception cref="System.IO.IOException"/>
-        public virtual void Load(TextReader r, int size, int readChunkSize)
-        {
-            if (r == null)
-            {
-                return;
-            }
-
-            data = r.ReadToEnd().ToCharArray();
-            n = data.Length;
-        }
-
         /// <summary>
         /// Reset the stream so that it's in the same state it was
        /// when the object was created *except* the data array is not
@@ -163,7 +80,7 @@ namespace Antlr4.Runtime
             }
             //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
             //System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
-            return data[p + i - 1];
+            return ValueAt(p + i - 1);
         }
 
         public virtual int Lt(int i)
@@ -243,10 +160,16 @@ namespace Antlr4.Runtime
             {
                 return string.Empty;
             }
-            // System.err.println("data: "+Arrays.toString(data)+", n="+n+
-            // ", start="+start+
-            // ", stop="+stop);
-            return new string(data, start, count);
+            return ConvertDataToString(start, count);
+        }
+
+        protected abstract int ValueAt(int i);
+
+        protected abstract string ConvertDataToString(int start, int count);
+
+        public override sealed string ToString()
+        {
+            return ConvertDataToString(0, n);
         }
 
         public virtual string SourceName
@@ -260,10 +183,148 @@ namespace Antlr4.Runtime
                 return name;
             }
         }
-
-        public override string ToString()
-        {
-            return new string(data);
+    }
+
+    /// <summary>
+    /// Vacuum all input from a
+    /// <see cref="System.IO.TextReader"/>
+    /// /
+    /// <see cref="System.IO.Stream"/>
+    /// and then treat it
+    /// like a
+    /// <c>char[]</c>
+    /// buffer. Can also pass in a
+    /// <see cref="string"/>
+    /// or
+    /// <c>char[]</c>
+    /// to use.
+    /// <p>If you need encoding, pass in stream/reader with correct encoding.</p>
+    /// </summary>
+    public class AntlrInputStream : BaseInputCharStream
+    {
+        /// <summary>The data being scanned</summary>
+        protected internal char[] data;
+
+        public AntlrInputStream()
+        {
+        }
+
+        /// <summary>Copy data in string to a local char array</summary>
+        public AntlrInputStream(string input)
+        {
+            this.data = input.ToCharArray();
+            this.n = input.Length;
+        }
+
+        /// <summary>This is the preferred constructor for strings as no data is copied</summary>
+        public AntlrInputStream(char[] data, int numberOfActualCharsInArray)
+        {
+            this.data = data;
+            this.n = numberOfActualCharsInArray;
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public AntlrInputStream(TextReader r)
+            : this(r, InitialBufferSize, ReadBufferSize)
+        {
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public AntlrInputStream(TextReader r, int initialSize)
+            : this(r, initialSize, ReadBufferSize)
+        {
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public AntlrInputStream(TextReader r, int initialSize, int readChunkSize)
+        {
+            Load(r, initialSize, readChunkSize);
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public AntlrInputStream(Stream input)
+            : this(new StreamReader(input), InitialBufferSize)
+        {
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public AntlrInputStream(Stream input, int initialSize)
+            : this(new StreamReader(input), initialSize)
+        {
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public AntlrInputStream(Stream input, int initialSize, int readChunkSize)
+            : this(new StreamReader(input), initialSize, readChunkSize)
+        {
+        }
+
+        /// <exception cref="System.IO.IOException"/>
+        public virtual void Load(TextReader r, int size, int readChunkSize)
+        {
+            if (r == null)
+            {
+                return;
+            }
+
+            data = r.ReadToEnd().ToCharArray();
+            n = data.Length;
+        }
+
+        protected override int ValueAt(int i)
+        {
+            return data[i];
+        }
+
+        protected override string ConvertDataToString(int start, int count)
+        {
+            // System.err.println("data: "+Arrays.toString(data)+", n="+n+
+            // ", start="+start+
+            // ", stop="+stop);
+            return new string(data, start, count);
+        }
+    }
+
+    /// <summary>
+    /// Alternative to
+    /// <see cref="ANTLRInputStream"/>
+    /// which treats the input as a series of Unicode code points,
+    /// instead of a series of UTF-16 code units.
+    ///
+    /// Use this if you need to parse input which potentially contains
+    /// Unicode values > U+FFFF.
+    /// </summary>
+    public class CodePointCharStream : BaseInputCharStream
+    {
+        private int[] data;
+
+        public CodePointCharStream(string input)
+        {
+            this.data = new int[input.Length];
+            int dataIdx = 0;
+            for (int i = 0; i < input.Length; ) {
+                var codePoint = Char.ConvertToUtf32(input, i);
+                data[dataIdx++] = codePoint;
+                if (dataIdx > data.Length) {
+                    Array.Resize(ref data, data.Length * 2);
+                }
+                i += codePoint <= 0xFFFF ? 1 : 2;
+            }
+            this.n = dataIdx;
+        }
+
+        protected override int ValueAt(int i)
+        {
+            return data[i];
+        }
+
+        protected override string ConvertDataToString(int start, int count)
+        {
+            var sb = new StringBuilder(count);
+            for (int i = start; i < start + count; i++) {
+                sb.Append(Char.ConvertFromUtf32(data[i]));
+            }
+            return sb.ToString();
         }
     }
 }
@@ -3,6 +3,7 @@
  * can be found in the LICENSE.txt file in the project root.
  */
 using System;
+using System.IO;
 using System.Collections.Generic;
 using System.Text;
 using Antlr4.Runtime;
@@ -33,6 +34,8 @@ namespace Antlr4.Runtime
 
         private ICharStream _input;
 
+        protected readonly TextWriter Output;
+
         private Tuple<ITokenSource, ICharStream> _tokenFactorySourcePair;
 
         /// <summary>How to create token objects</summary>
@@ -94,9 +97,12 @@ namespace Antlr4.Runtime
         /// </remarks>
         private string _text;
 
-        public Lexer(ICharStream input)
+        public Lexer(ICharStream input) : this(input, Console.Out) { }
+
+        public Lexer(ICharStream input, TextWriter output)
         {
             this._input = input;
+            this.Output = output;
             this._tokenFactorySourcePair = Tuple.Create((ITokenSource)this, input);
         }
 
@@ -502,7 +508,7 @@ outer_continue: ;
             }
         }
 
-        public virtual string[] ModeNames
+        public virtual string[] ChannelNames
         {
             get
             {
@@ -510,6 +516,13 @@ outer_continue: ;
             }
         }
 
+        public virtual string[] ModeNames
+        {
+            get
+            {
+                return null;
+            }
+        }
+
         /// <summary>Return a list of all Token objects in input char stream.</summary>
         /// <remarks>
@@ -13,23 +13,31 @@ using Antlr4.Runtime.Sharpen;
 
 namespace Antlr4.Runtime
 {
-    public class LexerInterpreter : Lexer
+    public class LexerInterpreter: Lexer
     {
         private readonly string grammarFileName;
 
         private readonly ATN atn;
 
         private readonly string[] ruleNames;
 
-        private readonly string[] modeNames;
+        private readonly string[] channelNames;
+
+        private readonly string[] modeNames;
 
         [NotNull]
         private readonly IVocabulary vocabulary;
 
         protected DFA[] decisionToDFA;
         protected PredictionContextCache sharedContextCache = new PredictionContextCache();
 
+        [Obsolete("Use constructor with channelNames argument")]
         public LexerInterpreter(string grammarFileName, IVocabulary vocabulary, IEnumerable<string> ruleNames, IEnumerable<string> modeNames, ATN atn, ICharStream input)
+            : this(grammarFileName, vocabulary, ruleNames, new string[0], modeNames, atn, input)
+        {
+        }
+
+        public LexerInterpreter(string grammarFileName, IVocabulary vocabulary, IEnumerable<string> ruleNames, IEnumerable<string> channelNames, IEnumerable<string> modeNames, ATN atn, ICharStream input)
             : base(input)
         {
             if (atn.grammarType != ATNType.Lexer)
@@ -39,14 +47,15 @@ namespace Antlr4.Runtime
             this.grammarFileName = grammarFileName;
             this.atn = atn;
             this.ruleNames = ruleNames.ToArray();
+            this.channelNames = channelNames.ToArray();
             this.modeNames = modeNames.ToArray();
             this.vocabulary = vocabulary;
             this.decisionToDFA = new DFA[atn.NumberOfDecisions];
             for (int i = 0; i < decisionToDFA.Length; i++)
             {
                 decisionToDFA[i] = new DFA(atn.GetDecisionState(i), i);
             }
             this.Interpreter = new LexerATNSimulator(this, atn, decisionToDFA, sharedContextCache);
         }
 
         public override ATN Atn
@@ -73,6 +82,14 @@ namespace Antlr4.Runtime
             }
         }
 
+        public override string[] ChannelNames
+        {
+            get
+            {
+                return channelNames;
+            }
+        }
+
         public override string[] ModeNames
         {
             get

@@ -131,19 +131,5 @@ namespace Antlr4.Runtime.Misc
             }
             return m;
         }
-
-        public static char[] ToCharArray(ArrayList<int> data)
-        {
-            if (data == null)
-            {
-                return null;
-            }
-            char[] cdata = new char[data.Count];
-            for (int i = 0; i < data.Count; i++)
-            {
-                cdata[i] = (char)data[i];
-            }
-            return cdata;
-        }
     }
 }

@@ -3,6 +3,7 @@
  * can be found in the LICENSE.txt file in the project root.
  */
 using System;
+using System.IO;
 using System.Text;
 using System.Collections.Generic;
 using Antlr4.Runtime.Atn;
@@ -21,14 +22,20 @@ namespace Antlr4.Runtime
 #if !PORTABLE
         public class TraceListener : IParseTreeListener
         {
+            private readonly TextWriter Output;
+
+            public TraceListener(TextWriter output) {
+                Output = output;
+            }
+
             public virtual void EnterEveryRule(ParserRuleContext ctx)
             {
-                System.Console.Out.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
+                Output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
             }
 
             public virtual void ExitEveryRule(ParserRuleContext ctx)
             {
-                System.Console.Out.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
+                Output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
             }
 
             public virtual void VisitErrorNode(IErrorNode node)
@@ -39,7 +46,7 @@ namespace Antlr4.Runtime
             {
                 ParserRuleContext parent = (ParserRuleContext)((IRuleNode)node.Parent).RuleContext;
                 IToken token = node.Symbol;
-                System.Console.Out.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]);
+                Output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]);
             }
 
             internal TraceListener(Parser _enclosing)
@@ -161,9 +168,14 @@ namespace Antlr4.Runtime
         /// </remarks>
         private int _syntaxErrors;
 
-        public Parser(ITokenStream input)
+        protected readonly TextWriter Output;
+
+        public Parser(ITokenStream input) : this(input, Console.Out) { }
+
+        public Parser(ITokenStream input, TextWriter output)
         {
             TokenStream = input;
+            Output = output;
         }
 
         /// <summary>reset the parser's state</summary>
@@ -1143,10 +1155,10 @@ namespace Antlr4.Runtime
             {
                 if (seenOne)
                 {
-                    System.Console.Out.WriteLine();
+                    Output.WriteLine();
                 }
-                System.Console.Out.WriteLine("Decision " + dfa.decision + ":");
-                System.Console.Out.Write(dfa.ToString(Vocabulary));
+                Output.WriteLine("Decision " + dfa.decision + ":");
+                Output.Write(dfa.ToString(Vocabulary));
                 seenOne = true;
             }
         }

@@ -116,7 +116,18 @@ namespace Antlr4.Runtime
             }
         }
 
-        /// <summary>COPY a ctx (I'm deliberately not using copy constructor)</summary>
+        /// <summary>
+        /// COPY a ctx (I'm deliberately not using copy constructor) to avoid
+        /// confusion with creating node with parent. Does not copy children.
+        ///
+        /// This is used in the generated parser code to flip a generic XContext
+        /// node for rule X to a YContext for alt label Y. In that sense, it is
+        /// not really a generic copy function.
+        ///
+        /// If we do an error sync() at start of a rule, we might add error nodes
+        /// to the generic XContext so this function must copy those nodes to
+        /// the YContext as well else they are lost!
+        /// </summary>
         public virtual void CopyFrom(Antlr4.Runtime.ParserRuleContext ctx)
         {
             // from RuleContext
@@ -124,6 +135,22 @@ namespace Antlr4.Runtime
             this.invokingState = ctx.invokingState;
             this._start = ctx._start;
             this._stop = ctx._stop;
+
+            // copy any error nodes to alt label node
+            if (ctx.children != null)
+            {
+                children = new List<IParseTree>();
+                // reset parent pointer for any error nodes
+                foreach (var child in ctx.children)
+                {
+                    var errorChildNode = child as ErrorNodeImpl;
+                    if (errorChildNode != null)
+                    {
+                        children.Add(errorChildNode);
+                        errorChildNode.Parent = this;
+                    }
+                }
+            }
         }
 
         public ParserRuleContext(Antlr4.Runtime.ParserRuleContext parent, int invokingStateNumber)

@ -1,115 +1,151 @@
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
// <auto-generated>
|
// <auto-generated>
|
||||||
// This code was generated by a tool.
|
// This code was generated by a tool.
|
||||||
// ANTLR Version: 4.4.1-dev
|
// ANTLR Version: 4.6.1
|
||||||
//
|
//
|
||||||
// Changes to this file may cause incorrect behavior and will be lost if
|
// Changes to this file may cause incorrect behavior and will be lost if
|
||||||
// the code is regenerated.
|
// the code is regenerated.
|
||||||
// </auto-generated>
|
// </auto-generated>
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
|
|
||||||
// Generated from /Users/ericvergnaud/Development/antlr4/antlr/antlr4-csharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.g4 by ANTLR 4.4.1-dev
|
|
||||||
|
|
||||||
// Unreachable code detected
|
// Unreachable code detected
|
||||||
#pragma warning disable 0162
|
#pragma warning disable 0162
|
||||||
// The variable '...' is assigned but its value is never used
|
// The variable '...' is assigned but its value is never used
|
||||||
#pragma warning disable 0219
|
#pragma warning disable 0219
|
||||||
// Missing XML comment for publicly visible type or member '...'
|
// Missing XML comment for publicly visible type or member '...'
|
||||||
#pragma warning disable 1591
|
#pragma warning disable 1591
|
||||||
|
// Ambiguous reference in cref attribute
|
||||||
|
#pragma warning disable 419
|
||||||
|
|
||||||
|
|
||||||
using System;
|
using System;
|
||||||
|
|
||||||
|
using System;
|
||||||
|
using System.Text;
|
||||||
using Antlr4.Runtime;
|
using Antlr4.Runtime;
|
||||||
using Antlr4.Runtime.Atn;
|
using Antlr4.Runtime.Atn;
|
||||||
using Antlr4.Runtime.Misc;
|
using Antlr4.Runtime.Misc;
|
||||||
using DFA = Antlr4.Runtime.Dfa.DFA;
|
using DFA = Antlr4.Runtime.Dfa.DFA;
|
||||||
|
|
||||||
|
[System.CodeDom.Compiler.GeneratedCode("ANTLR", "4.6.1")]
|
||||||
[System.CLSCompliant(false)]
|
[System.CLSCompliant(false)]
|
||||||
public partial class XPathLexer : Lexer {
|
public partial class XPathLexer : Lexer
|
||||||
public const int
|
{
|
||||||
TokenRef=1, RuleRef=2, Anywhere=3, Root=4, Wildcard=5, Bang=6, ID=7, String=8;
|
protected static DFA[] decisionToDFA;
|
||||||
public static string[] modeNames = {
|
protected static PredictionContextCache sharedContextCache = new PredictionContextCache();
|
||||||
|
public const int
|
||||||
|
TokenRef = 1, RuleRef = 2, Anywhere = 3, Root = 4, Wildcard = 5, Bang = 6, ID = 7, String = 8;
|
||||||
|
public static string[] channelNames = {
|
||||||
|
"DEFAULT_TOKEN_CHANNEL", "HIDDEN"
|
||||||
|
};
|
||||||
|
|
||||||
|
public static string[] modeNames = {
|
||||||
"DEFAULT_MODE"
|
"DEFAULT_MODE"
|
||||||
};
|
};
|
||||||
|
|
||||||
public static readonly string[] ruleNames = {
|
public static readonly string[] ruleNames = {
|
||||||
"Anywhere", "Root", "Wildcard", "Bang", "ID", "NameChar", "NameStartChar",
|
"Anywhere", "Root", "Wildcard", "Bang", "ID", "NameChar", "NameStartChar",
|
||||||
"String"
|
"String"
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
public XPathLexer(ICharStream input)
|
public XPathLexer(ICharStream input)
|
||||||
: base(input)
|
: base(input)
|
||||||
{
|
{
|
||||||
Interpreter = new LexerATNSimulator(this, _ATN, null, null);
|
Interpreter = new LexerATNSimulator(this, _ATN, decisionToDFA, sharedContextCache);
|
||||||
}
|
}
|
||||||
|
|
||||||
private static readonly string[] _LiteralNames = {
|
private static readonly string[] _LiteralNames = {
|
||||||
null, null, null, "'//'", "'/'", "'*'", "'!'"
|
null, null, null, "'//'", "'/'", "'*'", "'!'"
|
||||||
};
|
};
|
||||||
private static readonly string[] _SymbolicNames = {
|
private static readonly string[] _SymbolicNames = {
|
||||||
null, "TokenRef", "RuleRef", "Anywhere", "Root", "Wildcard", "Bang", "ID",
|
null, "TokenRef", "RuleRef", "Anywhere", "Root", "Wildcard", "Bang", "ID",
|
||||||
"String"
|
"String"
|
||||||
};
|
};
|
||||||
public static readonly IVocabulary DefaultVocabulary = new Vocabulary(_LiteralNames, _SymbolicNames);
|
public static readonly IVocabulary DefaultVocabulary = new Vocabulary(_LiteralNames, _SymbolicNames);
|
||||||
|
|
||||||
[NotNull]
|
[NotNull]
|
||||||
public override IVocabulary Vocabulary
|
public override IVocabulary Vocabulary
|
||||||
{
|
{
|
||||||
get
|
get
|
||||||
{
|
{
|
||||||
return DefaultVocabulary;
|
return DefaultVocabulary;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public override string GrammarFileName { get { return "XPathLexer.g4"; } }
|
public override string GrammarFileName { get { return "XPathLexer.g4"; } }
|
||||||
|
|
||||||
public override string[] RuleNames { get { return ruleNames; } }
|
public override string[] RuleNames { get { return ruleNames; } }
|
||||||
|
|
||||||
public override string[] ModeNames { get { return modeNames; } }
|
public override string[] ChannelNames { get { return channelNames; } }
|
||||||
|
|
||||||
public override string SerializedAtn { get { return _serializedATN; } }
|
public override string[] ModeNames { get { return modeNames; } }
|
||||||
|
|
||||||
|
public override string SerializedAtn { get { return _serializedATN; } }
|
||||||
|
|
||||||
|
static XPathLexer()
|
||||||
|
{
|
||||||
|
decisionToDFA = new DFA[_ATN.NumberOfDecisions];
|
||||||
|
for (int i = 0; i < _ATN.NumberOfDecisions; i++)
|
||||||
|
{
|
||||||
|
decisionToDFA[i] = new DFA(_ATN.GetDecisionState(i), i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
public override void Action(RuleContext _localctx, int ruleIndex, int actionIndex)
|
||||||
|
{
|
||||||
|
switch (ruleIndex)
|
||||||
|
{
|
||||||
|
case 4: ID_action(_localctx, actionIndex); break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
private void ID_action(RuleContext _localctx, int actionIndex)
|
||||||
|
{
|
||||||
|
switch (actionIndex)
|
||||||
|
{
|
||||||
|
case 0:
|
||||||
|
String text = Text;
|
||||||
|
if (Char.IsUpper(text[0]))
|
||||||
|
Type = TokenRef;
|
||||||
|
else
|
||||||
|
Type = RuleRef;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static string _serializedATN = _serializeATN();
|
||||||
|
private static string _serializeATN()
|
||||||
|
{
|
||||||
|
StringBuilder sb = new StringBuilder();
|
||||||
|
sb.Append("\x3\x430\xD6D1\x8206\xAD2D\x4417\xAEF1\x8D80\xAADD\x2\n\x34");
|
||||||
|
sb.Append("\b\x1\x4\x2\t\x2\x4\x3\t\x3\x4\x4\t\x4\x4\x5\t\x5\x4\x6\t\x6");
|
||||||
|
sb.Append("\x4\a\t\a\x4\b\t\b\x4\t\t\t\x3\x2\x3\x2\x3\x2\x3\x3\x3\x3\x3");
|
||||||
|
sb.Append("\x4\x3\x4\x3\x5\x3\x5\x3\x6\x3\x6\a\x6\x1F\n\x6\f\x6\xE\x6\"");
|
||||||
|
sb.Append("\v\x6\x3\x6\x3\x6\x3\a\x3\a\x5\a(\n\a\x3\b\x3\b\x3\t\x3\t\a");
|
||||||
|
sb.Append("\t.\n\t\f\t\xE\t\x31\v\t\x3\t\x3\t\x3/\x2\n\x3\x5\x5\x6\a\a");
|
||||||
|
sb.Append("\t\b\v\t\r\x2\xF\x2\x11\n\x3\x2\x4\a\x2\x32;\x61\x61\xB9\xB9");
|
||||||
|
sb.Append("\x302\x371\x2041\x2042\xF\x2\x43\\\x63|\xC2\xD8\xDA\xF8\xFA");
|
||||||
|
sb.Append("\x301\x372\x37F\x381\x2001\x200E\x200F\x2072\x2191\x2C02\x2FF1");
|
||||||
|
sb.Append("\x3003\xD801\xF902\xFDD1\xFDF2\xFFFF\x34\x2\x3\x3\x2\x2\x2\x2");
|
||||||
|
sb.Append("\x5\x3\x2\x2\x2\x2\a\x3\x2\x2\x2\x2\t\x3\x2\x2\x2\x2\v\x3\x2");
|
||||||
|
sb.Append("\x2\x2\x2\x11\x3\x2\x2\x2\x3\x13\x3\x2\x2\x2\x5\x16\x3\x2\x2");
|
||||||
|
sb.Append("\x2\a\x18\x3\x2\x2\x2\t\x1A\x3\x2\x2\x2\v\x1C\x3\x2\x2\x2\r");
|
||||||
|
sb.Append("\'\x3\x2\x2\x2\xF)\x3\x2\x2\x2\x11+\x3\x2\x2\x2\x13\x14\a\x31");
|
||||||
|
sb.Append("\x2\x2\x14\x15\a\x31\x2\x2\x15\x4\x3\x2\x2\x2\x16\x17\a\x31");
|
||||||
|
sb.Append("\x2\x2\x17\x6\x3\x2\x2\x2\x18\x19\a,\x2\x2\x19\b\x3\x2\x2\x2");
|
||||||
|
sb.Append("\x1A\x1B\a#\x2\x2\x1B\n\x3\x2\x2\x2\x1C \x5\xF\b\x2\x1D\x1F");
|
||||||
|
sb.Append("\x5\r\a\x2\x1E\x1D\x3\x2\x2\x2\x1F\"\x3\x2\x2\x2 \x1E\x3\x2");
|
||||||
|
sb.Append("\x2\x2 !\x3\x2\x2\x2!#\x3\x2\x2\x2\" \x3\x2\x2\x2#$\b\x6\x2");
|
||||||
|
sb.Append("\x2$\f\x3\x2\x2\x2%(\x5\xF\b\x2&(\t\x2\x2\x2\'%\x3\x2\x2\x2");
|
||||||
|
sb.Append("\'&\x3\x2\x2\x2(\xE\x3\x2\x2\x2)*\t\x3\x2\x2*\x10\x3\x2\x2\x2");
|
||||||
|
sb.Append("+/\a)\x2\x2,.\v\x2\x2\x2-,\x3\x2\x2\x2.\x31\x3\x2\x2\x2/\x30");
|
||||||
|
sb.Append("\x3\x2\x2\x2/-\x3\x2\x2\x2\x30\x32\x3\x2\x2\x2\x31/\x3\x2\x2");
|
||||||
|
sb.Append("\x2\x32\x33\a)\x2\x2\x33\x12\x3\x2\x2\x2\x6\x2 \'/\x3\x3\x6");
|
||||||
|
sb.Append("\x2");
|
||||||
|
return sb.ToString();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static readonly ATN _ATN =
|
||||||
|
new ATNDeserializer().Deserialize(_serializedATN.ToCharArray());
|
||||||
|
|
||||||
public override void Action(RuleContext _localctx, int ruleIndex, int actionIndex) {
|
|
||||||
switch (ruleIndex) {
|
|
||||||
case 4 : ID_action(_localctx, actionIndex); break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
private void ID_action(RuleContext _localctx, int actionIndex) {
|
|
||||||
switch (actionIndex) {
|
|
||||||
case 0:
|
|
||||||
String text = Text;
|
|
||||||
if ( Char.IsUpper(text[0]) )
|
|
||||||
Type = TokenRef;
|
|
||||||
else
|
|
||||||
Type = RuleRef;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public static readonly string _serializedATN =
|
|
||||||
"\x3\x430\xD6D1\x8206\xAD2D\x4417\xAEF1\x8D80\xAADD\x2\n\x34\b\x1\x4\x2"+
|
|
||||||
"\t\x2\x4\x3\t\x3\x4\x4\t\x4\x4\x5\t\x5\x4\x6\t\x6\x4\a\t\a\x4\b\t\b\x4"+
|
|
||||||
"\t\t\t\x3\x2\x3\x2\x3\x2\x3\x3\x3\x3\x3\x4\x3\x4\x3\x5\x3\x5\x3\x6\x3"+
|
|
||||||
"\x6\a\x6\x1F\n\x6\f\x6\xE\x6\"\v\x6\x3\x6\x3\x6\x3\a\x3\a\x5\a(\n\a\x3"+
|
|
||||||
"\b\x3\b\x3\t\x3\t\a\t.\n\t\f\t\xE\t\x31\v\t\x3\t\x3\t\x3/\x2\n\x3\x5\x5"+
|
|
||||||
"\x6\a\a\t\b\v\t\r\x2\xF\x2\x11\n\x3\x2\x4\a\x2\x32;\x61\x61\xB9\xB9\x302"+
|
|
||||||
"\x371\x2041\x2042\xF\x2\x43\\\x63|\xC2\xD8\xDA\xF8\xFA\x301\x372\x37F"+
|
|
||||||
"\x381\x2001\x200E\x200F\x2072\x2191\x2C02\x2FF1\x3003\xD801\xF902\xFDD1"+
|
|
||||||
"\xFDF2\xFFFF\x34\x2\x3\x3\x2\x2\x2\x2\x5\x3\x2\x2\x2\x2\a\x3\x2\x2\x2"+
|
|
||||||
"\x2\t\x3\x2\x2\x2\x2\v\x3\x2\x2\x2\x2\x11\x3\x2\x2\x2\x3\x13\x3\x2\x2"+
|
|
||||||
"\x2\x5\x16\x3\x2\x2\x2\a\x18\x3\x2\x2\x2\t\x1A\x3\x2\x2\x2\v\x1C\x3\x2"+
|
|
||||||
"\x2\x2\r\'\x3\x2\x2\x2\xF)\x3\x2\x2\x2\x11+\x3\x2\x2\x2\x13\x14\a\x31"+
|
|
||||||
"\x2\x2\x14\x15\a\x31\x2\x2\x15\x4\x3\x2\x2\x2\x16\x17\a\x31\x2\x2\x17"+
|
|
||||||
"\x6\x3\x2\x2\x2\x18\x19\a,\x2\x2\x19\b\x3\x2\x2\x2\x1A\x1B\a#\x2\x2\x1B"+
|
|
||||||
"\n\x3\x2\x2\x2\x1C \x5\xF\b\x2\x1D\x1F\x5\r\a\x2\x1E\x1D\x3\x2\x2\x2\x1F"+
|
|
||||||
"\"\x3\x2\x2\x2 \x1E\x3\x2\x2\x2 !\x3\x2\x2\x2!#\x3\x2\x2\x2\" \x3\x2\x2"+
|
|
||||||
"\x2#$\b\x6\x2\x2$\f\x3\x2\x2\x2%(\x5\xF\b\x2&(\t\x2\x2\x2\'%\x3\x2\x2"+
|
|
||||||
"\x2\'&\x3\x2\x2\x2(\xE\x3\x2\x2\x2)*\t\x3\x2\x2*\x10\x3\x2\x2\x2+/\a)"+
|
|
||||||
"\x2\x2,.\v\x2\x2\x2-,\x3\x2\x2\x2.\x31\x3\x2\x2\x2/\x30\x3\x2\x2\x2/-"+
|
|
||||||
"\x3\x2\x2\x2\x30\x32\x3\x2\x2\x2\x31/\x3\x2\x2\x2\x32\x33\a)\x2\x2\x33"+
|
|
||||||
"\x12\x3\x2\x2\x2\x6\x2 \'/\x3\x3\x6\x2";
|
|
||||||
public static readonly ATN _ATN =
|
|
||||||
new ATNDeserializer().Deserialize(_serializedATN.ToCharArray());
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -155,9 +155,9 @@ ExternalProject_ADD(
 
 ExternalProject_Get_Property(antlr4cpp INSTALL_DIR)
 
-list(APPEND ANTLR4CPP_INCLUDE_DIRS ${INSTALL_DIR}/include)
+list(APPEND ANTLR4CPP_INCLUDE_DIRS ${INSTALL_DIR}/include/antlr4-runtime)
 foreach(src_path misc atn dfa tree support)
-  list(APPEND ANTLR4CPP_INCLUDE_DIRS ${INSTALL_DIR}/include/${src_path})
+  list(APPEND ANTLR4CPP_INCLUDE_DIRS ${INSTALL_DIR}/include/antlr4-runtime/${src_path})
 endforeach(src_path)
 
 set(ANTLR4CPP_LIBS "${INSTALL_DIR}/lib")

@@ -156,6 +156,8 @@ namespace antlr4 {
 
     virtual size_t getChannel();
 
+    virtual const std::vector<std::string>& getChannelNames() const = 0;
+
     virtual const std::vector<std::string>& getModeNames() const = 0;
 
     /// Return a list of all Token objects in input char stream.

@@ -15,15 +15,16 @@
 using namespace antlr4;
 
 LexerInterpreter::LexerInterpreter(const std::string &grammarFileName, const std::vector<std::string> &tokenNames,
-  const std::vector<std::string> &ruleNames, const std::vector<std::string> &modeNames, const atn::ATN &atn,
-  CharStream *input)
-  : LexerInterpreter(grammarFileName, dfa::Vocabulary::fromTokenNames(tokenNames), ruleNames, modeNames, atn, input) {
+  const std::vector<std::string> &ruleNames, const std::vector<std::string> &channelNames, const std::vector<std::string> &modeNames,
+  const atn::ATN &atn, CharStream *input)
+  : LexerInterpreter(grammarFileName, dfa::Vocabulary::fromTokenNames(tokenNames), ruleNames, channelNames, modeNames, atn, input) {
 }
 
 LexerInterpreter::LexerInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary,
-  const std::vector<std::string> &ruleNames, const std::vector<std::string> &modeNames, const atn::ATN &atn,
-  CharStream *input)
-  : Lexer(input), _grammarFileName(grammarFileName), _atn(atn), _ruleNames(ruleNames), _modeNames(modeNames),
+  const std::vector<std::string> &ruleNames, const std::vector<std::string> &channelNames, const std::vector<std::string> &modeNames,
+  const atn::ATN &atn, CharStream *input)
+  : Lexer(input), _grammarFileName(grammarFileName), _atn(atn), _ruleNames(ruleNames),
+    _channelNames(channelNames), _modeNames(modeNames),
     _vocabulary(vocabulary) {
 
   if (_atn.grammarType != atn::ATNType::LEXER) {
@@ -61,6 +62,10 @@ const std::vector<std::string>& LexerInterpreter::getRuleNames() const {
   return _ruleNames;
 }
 
+const std::vector<std::string>& LexerInterpreter::getChannelNames() const {
+  return _channelNames;
+}
+
 const std::vector<std::string>& LexerInterpreter::getModeNames() const {
   return _modeNames;
 }

@@ -15,11 +15,11 @@ namespace antlr4 {
   public:
     // @deprecated
     LexerInterpreter(const std::string &grammarFileName, const std::vector<std::string> &tokenNames,
-      const std::vector<std::string> &ruleNames, const std::vector<std::string> &modeNames,
-      const atn::ATN &atn, CharStream *input);
+      const std::vector<std::string> &ruleNames, const std::vector<std::string> &channelNames,
+      const std::vector<std::string> &modeNames, const atn::ATN &atn, CharStream *input);
     LexerInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary,
-      const std::vector<std::string> &ruleNames, const std::vector<std::string> &modeNames,
-      const atn::ATN &atn, CharStream *input);
+      const std::vector<std::string> &ruleNames, const std::vector<std::string> &channelNames,
+      const std::vector<std::string> &modeNames, const atn::ATN &atn, CharStream *input);
 
     ~LexerInterpreter();
 
@@ -27,6 +27,7 @@ namespace antlr4 {
     virtual std::string getGrammarFileName() const override;
     virtual const std::vector<std::string>& getTokenNames() const override;
     virtual const std::vector<std::string>& getRuleNames() const override;
+    virtual const std::vector<std::string>& getChannelNames() const override;
     virtual const std::vector<std::string>& getModeNames() const override;
 
     virtual const dfa::Vocabulary& getVocabulary() const override;
@@ -38,6 +39,7 @@ namespace antlr4 {
     // @deprecated
     std::vector<std::string> _tokenNames;
     const std::vector<std::string> &_ruleNames;
+    const std::vector<std::string> &_channelNames;
     const std::vector<std::string> &_modeNames;
     std::vector<dfa::DFA> _decisionToDFA;

@ -1,12 +1,9 @@
|
||||||
|
|
||||||
// Generated from XPathLexer.g4 by ANTLR 4.5.3
|
|
||||||
|
|
||||||
|
|
||||||
#include "XPathLexer.h"
|
#include "XPathLexer.h"
|
||||||
|
|
||||||
|
|
||||||
using namespace antlr4;
|
using namespace antlr4;
|
||||||
|
|
||||||
|
|
||||||
XPathLexer::XPathLexer(CharStream *input) : Lexer(input) {
|
XPathLexer::XPathLexer(CharStream *input) : Lexer(input) {
|
||||||
_interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache);
|
_interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache);
|
||||||
}
|
}
|
||||||
|
@ -23,6 +20,10 @@ const std::vector<std::string>& XPathLexer::getRuleNames() const {
|
||||||
return _ruleNames;
|
return _ruleNames;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const std::vector<std::string>& XPathLexer::getChannelNames() const {
|
||||||
|
return _channelNames;
|
||||||
|
}
|
||||||
|
|
||||||
const std::vector<std::string>& XPathLexer::getModeNames() const {
|
const std::vector<std::string>& XPathLexer::getModeNames() const {
|
||||||
return _modeNames;
|
return _modeNames;
|
||||||
}
|
}
|
||||||
|
@ -31,7 +32,7 @@ const std::vector<std::string>& XPathLexer::getTokenNames() const {
|
||||||
return _tokenNames;
|
return _tokenNames;
|
||||||
}
|
}
|
||||||
|
|
||||||
const dfa::Vocabulary& XPathLexer::getVocabulary() const {
|
dfa::Vocabulary& XPathLexer::getVocabulary() const {
|
||||||
return _vocabulary;
|
return _vocabulary;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,14 +47,14 @@ const atn::ATN& XPathLexer::getATN() const {
|
||||||
|
|
||||||
void XPathLexer::action(RuleContext *context, size_t ruleIndex, size_t actionIndex) {
|
void XPathLexer::action(RuleContext *context, size_t ruleIndex, size_t actionIndex) {
|
||||||
switch (ruleIndex) {
|
switch (ruleIndex) {
|
||||||
case 4: IDAction(dynamic_cast<RuleContext *>(context), actionIndex); break;
|
case 4: IDAction(dynamic_cast<antlr4::RuleContext *>(context), actionIndex); break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void XPathLexer::IDAction(RuleContext * /*context*/, size_t actionIndex) {
|
void XPathLexer::IDAction(antlr4::RuleContext *context, size_t actionIndex) {
|
||||||
switch (actionIndex) {
|
switch (actionIndex) {
|
||||||
case 0:
|
case 0:
|
||||||
if (isupper(getText()[0]))
|
if (isupper(getText()[0]))
|
||||||
|
@ -82,6 +83,10 @@ std::vector<std::string> XPathLexer::_ruleNames = {
|
||||||
"STRING"
|
"STRING"
|
||||||
};
|
};
|
||||||
|
|
||||||
|
std::vector<std::string> XPathLexer::_channelNames = {
|
||||||
|
"DEFAULT_TOKEN_CHANNEL", "HIDDEN"
|
||||||
|
};
|
||||||
|
|
||||||
std::vector<std::string> XPathLexer::_modeNames = {
|
std::vector<std::string> XPathLexer::_modeNames = {
|
||||||
"DEFAULT_MODE"
|
"DEFAULT_MODE"
|
||||||
};
|
};
|
||||||
|
@ -158,7 +163,9 @@ XPathLexer::Initializer::Initializer() {
|
||||||
atn::ATNDeserializer deserializer;
|
atn::ATNDeserializer deserializer;
|
||||||
_atn = deserializer.deserialize(_serializedATN);
|
_atn = deserializer.deserialize(_serializedATN);
|
||||||
|
|
||||||
for (size_t i = 0; i < _atn.getNumberOfDecisions(); i++) {
|
size_t count = _atn.getNumberOfDecisions();
|
||||||
|
_decisionToDFA.reserve(count);
|
||||||
|
for (size_t i = 0; i < count; i++) {
|
||||||
_decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
|
_decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,51 +1,50 @@
|
||||||
|
|
||||||
// Generated from XPathLexer.g4 by ANTLR 4.5.3
|
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
|
||||||
#include "antlr4-runtime.h"
|
#include "antlr4-runtime.h"
|
||||||
#include "Vocabulary.h"
|
|
||||||
|
|
||||||
namespace antlr4 {
|
|
||||||
|
|
||||||
class XPathLexer : public Lexer {
|
|
||||||
|
|
||||||
|
class XPathLexer : public antlr4::Lexer {
|
||||||
public:
|
public:
|
||||||
enum {
|
enum {
|
||||||
TOKEN_REF = 1, RULE_REF = 2, ANYWHERE = 3, ROOT = 4, WILDCARD = 5, BANG = 6,
|
TOKEN_REF = 1, RULE_REF = 2, ANYWHERE = 3, ROOT = 4, WILDCARD = 5, BANG = 6,
|
||||||
ID = 7, STRING = 8
|
ID = 7, STRING = 8
|
||||||
};
|
};
|
||||||
|
|
||||||
XPathLexer(CharStream *input);
|
XPathLexer(antlr4::CharStream *input);
|
||||||
~XPathLexer();
|
~XPathLexer();
|
||||||
|
|
||||||
virtual std::string getGrammarFileName() const override;
|
virtual std::string getGrammarFileName() const override;
|
||||||
virtual const std::vector<std::string>& getRuleNames() const override;
|
virtual const std::vector<std::string>& getRuleNames() const override;
|
||||||
|
|
||||||
|
virtual const std::vector<std::string>& getChannelNames() const override;
|
||||||
virtual const std::vector<std::string>& getModeNames() const override;
|
virtual const std::vector<std::string>& getModeNames() const override;
|
||||||
virtual const std::vector<std::string>& getTokenNames() const override; // deprecated, use vocabulary instead
|
virtual const std::vector<std::string>& getTokenNames() const override; // deprecated, use vocabulary instead
|
||||||
virtual const dfa::Vocabulary& getVocabulary() const override;
|
virtual antlr4::dfa::Vocabulary& getVocabulary() const override;
|
||||||
|
|
||||||
virtual const std::vector<uint16_t> getSerializedATN() const override;
|
virtual const std::vector<uint16_t> getSerializedATN() const override;
|
||||||
virtual const atn::ATN& getATN() const override;
|
virtual const antlr4::atn::ATN& getATN() const override;
|
||||||
|
|
||||||
virtual void action(RuleContext *context, size_t ruleIndex, size_t actionIndex) override;
|
|
||||||
|
|
||||||
|
virtual void action(antlr4::RuleContext *context, size_t ruleIndex, size_t actionIndex) override;
|
||||||
private:
|
private:
|
||||||
static std::vector<dfa::DFA> _decisionToDFA;
|
static std::vector<antlr4::dfa::DFA> _decisionToDFA;
|
||||||
static atn::PredictionContextCache _sharedContextCache;
|
static antlr4::atn::PredictionContextCache _sharedContextCache;
|
||||||
static std::vector<std::string> _ruleNames;
|
static std::vector<std::string> _ruleNames;
|
||||||
static std::vector<std::string> _tokenNames;
|
static std::vector<std::string> _tokenNames;
|
||||||
|
static std::vector<std::string> _channelNames;
|
||||||
static std::vector<std::string> _modeNames;
|
static std::vector<std::string> _modeNames;
|
||||||
|
|
||||||
static std::vector<std::string> _literalNames;
|
static std::vector<std::string> _literalNames;
|
||||||
static std::vector<std::string> _symbolicNames;
|
static std::vector<std::string> _symbolicNames;
|
||||||
static dfa::Vocabulary _vocabulary;
|
static antlr4::dfa::Vocabulary _vocabulary;
|
||||||
static atn::ATN _atn;
|
static antlr4::atn::ATN _atn;
|
||||||
static std::vector<uint16_t> _serializedATN;
|
static std::vector<uint16_t> _serializedATN;
|
||||||
|
|
||||||
|
|
||||||
// Individual action functions triggered by action() above.
|
// Individual action functions triggered by action() above.
|
||||||
void IDAction(RuleContext *context, size_t actionIndex);
|
void IDAction(antlr4::RuleContext *context, size_t actionIndex);
|
||||||
|
|
||||||
// Individual semantic predicate functions triggered by sempred() above.
|
// Individual semantic predicate functions triggered by sempred() above.
|
||||||
|
|
||||||
|
@ -55,4 +54,3 @@ private:
|
||||||
static Initializer _init;
|
static Initializer _init;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace antlr4
|
|
||||||
|
|
|
@@ -0,0 +1,99 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */
package org.antlr.v4.runtime;

import java.io.IOException;
import java.io.InputStream;

import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import java.nio.charset.CodingErrorAction;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * Utility class to create {@link CodePointCharStream}s from
 * various sources of Unicode data.
 */
public final class CharStreams {
    private static final int DEFAULT_BUFFER_SIZE = 4096;

    // Utility class; do not construct.
    private CharStreams() { }

    /**
     * Convenience method to create a {@link CodePointCharStream}
     * for the Unicode code points in a Java {@link String}.
     */
    public static CodePointCharStream createWithString(String s) {
        // Initial guess assumes no code points > U+FFFF: one code
        // point for each code unit in the string
        IntBuffer codePointBuffer = IntBuffer.allocate(s.length());
        int stringIdx = 0;
        while (stringIdx < s.length()) {
            if (!codePointBuffer.hasRemaining()) {
                // Grow the code point buffer size by 2.
                IntBuffer newBuffer = IntBuffer.allocate(codePointBuffer.capacity() * 2);
                codePointBuffer.flip();
                newBuffer.put(codePointBuffer);
                codePointBuffer = newBuffer;
            }
            int codePoint = Character.codePointAt(s, stringIdx);
            codePointBuffer.put(codePoint);
            stringIdx += Character.charCount(codePoint);
        }
        codePointBuffer.flip();
        return new CodePointCharStream(codePointBuffer, IntStream.UNKNOWN_SOURCE_NAME);
    }

    public static CodePointCharStream createWithUTF8(Path path) throws IOException {
        try (ReadableByteChannel channel = Files.newByteChannel(path)) {
            return createWithUTF8Channel(
                channel,
                DEFAULT_BUFFER_SIZE,
                CodingErrorAction.REPLACE,
                path.toString());
        }
    }

    public static CodePointCharStream createWithUTF8Stream(InputStream is) throws IOException {
        try (ReadableByteChannel channel = Channels.newChannel(is)) {
            return createWithUTF8Channel(
                channel,
                DEFAULT_BUFFER_SIZE,
                CodingErrorAction.REPLACE,
                IntStream.UNKNOWN_SOURCE_NAME);
        }
    }

    public static CodePointCharStream createWithUTF8Channel(
        ReadableByteChannel channel,
        int bufferSize,
        CodingErrorAction decodingErrorAction,
        String sourceName
    ) throws IOException {
        ByteBuffer utf8BytesIn = ByteBuffer.allocateDirect(bufferSize);
        IntBuffer codePointsOut = IntBuffer.allocate(bufferSize);
        boolean endOfInput = false;
        UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(decodingErrorAction);
        while (!endOfInput) {
            int bytesRead = channel.read(utf8BytesIn);
            endOfInput = (bytesRead == -1);
            utf8BytesIn.flip();
            codePointsOut = decoder.decodeCodePointsFromBuffer(
                utf8BytesIn,
                codePointsOut,
                endOfInput);
            utf8BytesIn.compact();
        }
        codePointsOut.limit(codePointsOut.position());
        codePointsOut.flip();
        return new CodePointCharStream(codePointsOut, sourceName);
    }
}

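A minimal usage sketch of the factory methods above (illustrative only, not part of the patch): the input file name and the generated MyGrammarLexer are placeholders, and only the CharStreams/CodePointCharStream API shown in this hunk is assumed.

    import java.nio.file.Paths;

    import org.antlr.v4.runtime.CharStreams;
    import org.antlr.v4.runtime.CodePointCharStream;

    public class CharStreamsSketch {
        public static void main(String[] args) throws Exception {
            // In-memory string: one stream element per Unicode code point.
            CodePointCharStream fromString = CharStreams.createWithString("salut \uD83D\uDE00");
            System.out.println(fromString.size());   // 7 code points, even though the String has 8 chars

            // UTF-8 file decoded straight to code points ("input.txt" is a placeholder path).
            CodePointCharStream fromFile = CharStreams.createWithUTF8(Paths.get("input.txt"));
            System.out.println(fromFile.getSourceName());
            // Any generated lexer accepts a CharStream, e.g. new MyGrammarLexer(fromFile)
            // (MyGrammarLexer is hypothetical; substitute a real grammar's lexer).
        }
    }
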
@ -0,0 +1,137 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||||
|
* Use of this file is governed by the BSD 3-clause license that
|
||||||
|
* can be found in the LICENSE.txt file in the project root.
|
||||||
|
*/
|
||||||
|
package org.antlr.v4.runtime;
|
||||||
|
|
||||||
|
import org.antlr.v4.runtime.misc.Interval;
|
||||||
|
|
||||||
|
import java.nio.IntBuffer;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative to {@link ANTLRInputStream} which treats the input
|
||||||
|
* as a series of Unicode code points, instead of a series of UTF-16
|
||||||
|
* code units.
|
||||||
|
*
|
||||||
|
* Use this if you need to parse input which potentially contains
|
||||||
|
* Unicode values > U+FFFF.
|
||||||
|
*/
|
||||||
|
public final class CodePointCharStream implements CharStream {
|
||||||
|
private final IntBuffer codePointBuffer;
|
||||||
|
private final int initialPosition;
|
||||||
|
private final int size;
|
||||||
|
private final String name;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructs a {@link CodePointCharStream} which provides access
|
||||||
|
* to the Unicode code points stored in {@code codePointBuffer}.
|
||||||
|
*
|
||||||
|
* {@code codePointBuffer}'s {@link IntBuffer#position position}
|
||||||
|
* reflects the first code point of the stream, and its
|
||||||
|
* {@link IntBuffer#limit limit} is just after the last code point
|
||||||
|
* of the stream.
|
||||||
|
*/
|
||||||
|
public CodePointCharStream(IntBuffer codePointBuffer) {
|
||||||
|
this(codePointBuffer, UNKNOWN_SOURCE_NAME);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructs a named {@link CodePointCharStream} which provides access
|
||||||
|
* to the Unicode code points stored in {@code codePointBuffer}.
|
||||||
|
*
|
||||||
|
* {@code codePointBuffer}'s {@link IntBuffer#position position}
|
||||||
|
* reflects the first code point of the stream, and its
|
||||||
|
* {@link IntBuffer#limit limit} is just after the last code point
|
||||||
|
* of the stream.
|
||||||
|
*/
|
||||||
|
public CodePointCharStream(IntBuffer codePointBuffer, String name) {
|
||||||
|
this.codePointBuffer = codePointBuffer;
|
||||||
|
this.initialPosition = codePointBuffer.position();
|
||||||
|
this.size = codePointBuffer.remaining();
|
||||||
|
this.name = name;
|
||||||
|
}
|
||||||
|
|
||||||
|
private int relativeBufferPosition(int i) {
|
||||||
|
return initialPosition + codePointBuffer.position() + i;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void consume() {
|
||||||
|
if (!codePointBuffer.hasRemaining()) {
|
||||||
|
assert LA(1) == IntStream.EOF;
|
||||||
|
throw new IllegalStateException("cannot consume EOF");
|
||||||
|
}
|
||||||
|
codePointBuffer.position(codePointBuffer.position() + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int LA(int i) {
|
||||||
|
if (i == 0) {
|
||||||
|
// Undefined
|
||||||
|
return 0;
|
||||||
|
} else if (i < 0) {
|
||||||
|
if (codePointBuffer.position() + i < initialPosition) {
|
||||||
|
return IntStream.EOF;
|
||||||
|
}
|
||||||
|
return codePointBuffer.get(relativeBufferPosition(i));
|
||||||
|
} else if (i > codePointBuffer.remaining()) {
|
||||||
|
return IntStream.EOF;
|
||||||
|
} else {
|
||||||
|
return codePointBuffer.get(relativeBufferPosition(i - 1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int index() {
|
||||||
|
return codePointBuffer.position() - initialPosition;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int size() {
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** mark/release do nothing; we have entire buffer */
|
||||||
|
@Override
|
||||||
|
public int mark() {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void release(int marker) {
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void seek(int index) {
|
||||||
|
codePointBuffer.position(initialPosition + index);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Return the UTF-16 encoded string for the given interval */
|
||||||
|
@Override
|
||||||
|
public String getText(Interval interval) {
|
||||||
|
final int startIdx = initialPosition + Math.min(interval.a, size - 1);
|
||||||
|
final int stopIdx = initialPosition + Math.min(interval.b, size - 1);
|
||||||
|
// interval.length() will be too small if we contain any code points > U+FFFF,
|
||||||
|
// but it's just a hint for initial capacity; StringBuilder will grow anyway.
|
||||||
|
StringBuilder sb = new StringBuilder(interval.length());
|
||||||
|
for (int codePointIdx = startIdx; codePointIdx <= stopIdx; codePointIdx++) {
|
||||||
|
sb.appendCodePoint(codePointBuffer.get(codePointIdx));
|
||||||
|
}
|
||||||
|
return sb.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getSourceName() {
|
||||||
|
if (name == null || name.isEmpty()) {
|
||||||
|
return UNKNOWN_SOURCE_NAME;
|
||||||
|
}
|
||||||
|
|
||||||
|
return name;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return getText(Interval.of(0, size - 1));
|
||||||
|
}
|
||||||
|
}
|
|
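A short walkthrough of the CodePointCharStream contract added above (illustrative only, not part of the patch); it uses only the constructor and methods shown in this hunk plus CharStreams.createWithString from the previous file.

    import org.antlr.v4.runtime.CharStreams;
    import org.antlr.v4.runtime.CodePointCharStream;
    import org.antlr.v4.runtime.IntStream;
    import org.antlr.v4.runtime.misc.Interval;

    public class CodePointCharStreamSketch {
        public static void main(String[] args) {
            // "a" + U+1F600 (outside the BMP) + "b": 3 code points, 4 UTF-16 code units.
            CodePointCharStream stream = CharStreams.createWithString("a\uD83D\uDE00b");

            System.out.println(stream.size());                     // 3: one element per code point
            System.out.println(stream.LA(2) == 0x1F600);           // true: a whole code point, not a surrogate
            stream.consume();                                       // step past 'a'
            System.out.println(stream.index());                    // 1
            System.out.println(stream.LA(99) == IntStream.EOF);    // true: past the end
            System.out.println(stream.getText(Interval.of(0, 2))); // full text, re-encoded as UTF-16
        }
    }
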
@@ -320,6 +320,8 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>
         return _channel;
     }
 
+    public String[] getChannelNames() { return null; }
+
     public String[] getModeNames() {
         return null;
     }

@@ -12,6 +12,7 @@ import org.antlr.v4.runtime.atn.LexerATNSimulator;
 import org.antlr.v4.runtime.atn.PredictionContextCache;
 import org.antlr.v4.runtime.dfa.DFA;
 
+import java.util.ArrayList;
 import java.util.Collection;
 
 public class LexerInterpreter extends Lexer {
@@ -21,6 +22,7 @@ public class LexerInterpreter extends Lexer {
     @Deprecated
     protected final String[] tokenNames;
     protected final String[] ruleNames;
+    protected final String[] channelNames;
     protected final String[] modeNames;
 
 
@@ -32,10 +34,15 @@ public class LexerInterpreter extends Lexer {
 
     @Deprecated
     public LexerInterpreter(String grammarFileName, Collection<String> tokenNames, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
-        this(grammarFileName, VocabularyImpl.fromTokenNames(tokenNames.toArray(new String[tokenNames.size()])), ruleNames, modeNames, atn, input);
+        this(grammarFileName, VocabularyImpl.fromTokenNames(tokenNames.toArray(new String[tokenNames.size()])), ruleNames, new ArrayList<String>(), modeNames, atn, input);
     }
 
+    @Deprecated
     public LexerInterpreter(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
+        this(grammarFileName, vocabulary, ruleNames, new ArrayList<String>(), modeNames, atn, input);
+    }
+
+    public LexerInterpreter(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> channelNames, Collection<String> modeNames, ATN atn, CharStream input) {
         super(input);
 
         if (atn.grammarType != ATNType.LEXER) {
@@ -50,6 +57,7 @@ public class LexerInterpreter extends Lexer {
         }
 
         this.ruleNames = ruleNames.toArray(new String[ruleNames.size()]);
+        this.channelNames = channelNames.toArray(new String[channelNames.size()]);
        this.modeNames = modeNames.toArray(new String[modeNames.size()]);
         this.vocabulary = vocabulary;
 
@@ -81,6 +89,11 @@ public class LexerInterpreter extends Lexer {
         return ruleNames;
     }
 
+    @Override
+    public String[] getChannelNames() {
+        return channelNames;
+    }
+
     @Override
     public String[] getModeNames() {
         return modeNames;
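A sketch of driving the channel-aware LexerInterpreter end to end (illustrative only, not part of the patch): it assumes the ANTLR tool jar is on the classpath for org.antlr.v4.tool.LexerGrammar, and the tiny grammar text is made up for illustration; what getChannelNames() reports depends on how the tool builds the interpreter.

    import java.util.Arrays;

    import org.antlr.v4.runtime.CharStream;
    import org.antlr.v4.runtime.CharStreams;
    import org.antlr.v4.runtime.LexerInterpreter;
    import org.antlr.v4.runtime.Token;
    import org.antlr.v4.tool.LexerGrammar;

    public class LexerInterpreterChannelsSketch {
        public static void main(String[] args) throws Exception {
            LexerGrammar g = new LexerGrammar(
                "lexer grammar T;\n" +
                "channels { WSCHANNEL }\n" +
                "ID : [a-z]+ ;\n" +
                "WS : [ \\t]+ -> channel(WSCHANNEL) ;\n");
            CharStream input = CharStreams.createWithString("ab cd");
            LexerInterpreter lexer = g.createLexerInterpreter(input);
            // Channel names reported by the interpreter (depends on how the tool wires them through).
            System.out.println(Arrays.toString(lexer.getChannelNames()));
            for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
                System.out.println(t);
            }
        }
    }
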
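The UTF8CodePointDecoder introduced in the next hunk can also be used directly; a sketch for a byte buffer that is already complete, mirroring the loop in CharStreams.createWithUTF8Channel (illustrative only, not part of the patch).

    import java.nio.ByteBuffer;
    import java.nio.IntBuffer;
    import java.nio.charset.CodingErrorAction;
    import java.nio.charset.StandardCharsets;

    import org.antlr.v4.runtime.UTF8CodePointDecoder;

    public class Utf8DecoderSketch {
        public static void main(String[] args) throws Exception {
            UTF8CodePointDecoder decoder = new UTF8CodePointDecoder(CodingErrorAction.REPLACE);
            ByteBuffer utf8 = ByteBuffer.wrap("h\u00E9llo \uD83D\uDE00".getBytes(StandardCharsets.UTF_8));
            IntBuffer codePoints = IntBuffer.allocate(16);

            // One call is enough here because the whole input is already buffered (endOfInput = true).
            codePoints = decoder.decodeCodePointsFromBuffer(utf8, codePoints, true);
            codePoints.limit(codePoints.position());
            codePoints.flip();
            while (codePoints.hasRemaining()) {
                System.out.printf("U+%04X%n", codePoints.get());
            }
        }
    }
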
@ -0,0 +1,275 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||||
|
* Use of this file is governed by the BSD 3-clause license that
|
||||||
|
* can be found in the LICENSE.txt file in the project root.
|
||||||
|
*/
|
||||||
|
package org.antlr.v4.runtime;
|
||||||
|
|
||||||
|
import org.antlr.v4.runtime.misc.Interval;
|
||||||
|
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.nio.IntBuffer;
|
||||||
|
import java.nio.charset.CharacterCodingException;
|
||||||
|
import java.nio.charset.CodingErrorAction;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes UTF-8 bytes directly to Unicode code points, stored in an
|
||||||
|
* {@link IntBuffer}.
|
||||||
|
*
|
||||||
|
* Unlike {@link CharsetDecoder}, this does not use UTF-16 as an
|
||||||
|
* intermediate representation, so this optimizes the common case of
|
||||||
|
* decoding a UTF-8 file for parsing as Unicode code points.
|
||||||
|
*/
|
||||||
|
public class UTF8CodePointDecoder {
|
||||||
|
private static final int SUBSTITUTION_CHARACTER = 0xFFFD;
|
||||||
|
private static final byte NVAL = (byte) 0xFF;
|
||||||
|
|
||||||
|
// Table mapping UTF-8 leading byte to the length of the trailing
|
||||||
|
// sequence.
|
||||||
|
protected static final byte[] UTF8_LEADING_BYTE_LENGTHS = new byte[] {
|
||||||
|
// [0x00, 0x7F] -> 0 trailing bytes
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
|
||||||
|
// [0x80, 0xBF] -> invalid leading byte
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL,
|
||||||
|
|
||||||
|
// [0xC0, 0xDF] -> one trailing byte
|
||||||
|
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||||
|
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||||
|
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||||
|
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||||
|
|
||||||
|
// [0xE0, 0xEF] -> two trailing bytes
|
||||||
|
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
|
||||||
|
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
|
||||||
|
|
||||||
|
// [0xF0, 0xF7] -> three trailing bytes
|
||||||
|
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
|
||||||
|
|
||||||
|
// [0xF8, 0xFF] -> invalid leading sequence
|
||||||
|
NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL, NVAL
|
||||||
|
};
|
||||||
|
|
||||||
|
// Table mapping UTF-8 sequence length to valid Unicode code point
|
||||||
|
// ranges for that sequence length.
|
||||||
|
protected static final Interval[] UTF8_VALID_INTERVALS = new Interval[] {
|
||||||
|
Interval.of(0x00, 0x7F),
|
||||||
|
Interval.of(0x80, 0x7FF),
|
||||||
|
Interval.of(0x800, 0xFFFF),
|
||||||
|
Interval.of(0x10000, 0x10FFFF)
|
||||||
|
};
|
||||||
|
|
||||||
|
protected final CodingErrorAction decodingErrorAction;
|
||||||
|
protected int decodingTrailBytesNeeded;
|
||||||
|
protected int decodingCurrentCodePoint;
|
||||||
|
protected Interval validDecodedCodePointRange;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructs a new {@link UTF8CodePointDecoder} with a specified
|
||||||
|
* {@link CodingErrorAction} to handle invalid UTF-8 sequences.
|
||||||
|
*/
|
||||||
|
public UTF8CodePointDecoder(CodingErrorAction decodingErrorAction) {
|
||||||
|
this.decodingErrorAction = decodingErrorAction;
|
||||||
|
reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Resets the state in this {@link UTF8CodePointDecoder}, preparing it
|
||||||
|
* for use with a new input buffer.
|
||||||
|
*/
|
||||||
|
public void reset() {
|
||||||
|
this.decodingTrailBytesNeeded = -1;
|
||||||
|
this.decodingCurrentCodePoint = -1;
|
||||||
|
this.validDecodedCodePointRange = Interval.INVALID;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes as many UTF-8 bytes as possible from {@code utf8BytesIn},
|
||||||
|
* writing the result to {@code codePointsOut}.
|
||||||
|
*
|
||||||
|
* If you have more bytes to decode, set {@code endOfInput} to
|
||||||
|
* {@code false} and call this method again once more bytes
|
||||||
|
* are available.
|
||||||
|
*
|
||||||
|
* If there are no more bytes available, make sure to call this
|
||||||
|
* setting {@code endOfInput} to {@code true} so that any invalid
|
||||||
|
* UTF-8 sequence at the end of the input is handled.
|
||||||
|
*
|
||||||
|
* If {@code codePointsOut} is not large enough to store the result,
|
||||||
|
* a new buffer is allocated and returned. Otherwise, returns
|
||||||
|
* {@code codePointsOut}.
|
||||||
|
*
|
||||||
|
* After returning, the {@link ByteBuffer#position position} of
|
||||||
|
* {@code utf8BytesIn} is moved forward to reflect the bytes consumed,
|
||||||
|
* and the {@link IntBuffer#position position} of the result
|
||||||
|
* is moved forward to reflect the code points written.
|
||||||
|
*
|
||||||
|
* The {@link IntBuffer#limit limit} of the result is not changed,
|
||||||
|
* so if this is the end of the input, you will want to set the
|
||||||
|
* limit to the {@link IntBuffer#position position}, then
|
||||||
|
* {@link IntBuffer#flip flip} the result to prepare for reading.
|
||||||
|
*/
|
||||||
|
public IntBuffer decodeCodePointsFromBuffer(
|
||||||
|
ByteBuffer utf8BytesIn,
|
||||||
|
IntBuffer codePointsOut,
|
||||||
|
boolean endOfInput
|
||||||
|
) throws CharacterCodingException {
|
||||||
|
while (utf8BytesIn.hasRemaining()) {
|
||||||
|
if (decodingTrailBytesNeeded == -1) {
|
||||||
|
// Start a new UTF-8 sequence by checking the leading byte.
|
||||||
|
byte leadingByte = utf8BytesIn.get();
|
||||||
|
if (!decodeLeadingByte(leadingByte)) {
|
||||||
|
codePointsOut = handleDecodeError(
|
||||||
|
String.format("Invalid UTF-8 leading byte 0x%02X", leadingByte),
|
||||||
|
codePointsOut);
|
||||||
|
reset();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert decodingTrailBytesNeeded != -1;
|
||||||
|
if (utf8BytesIn.remaining() < decodingTrailBytesNeeded) {
|
||||||
|
// The caller will have to call us back with more bytes.
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
// Now we know the input buffer has enough bytes to decode
|
||||||
|
// the entire sequence.
|
||||||
|
while (decodingTrailBytesNeeded > 0) {
|
||||||
|
// Continue a multi-byte UTF-8 sequence by checking the next trailing byte.
|
||||||
|
byte trailingByte = utf8BytesIn.get();
|
||||||
|
decodingTrailBytesNeeded--;
|
||||||
|
if (!decodeTrailingByte(trailingByte)) {
|
||||||
|
codePointsOut = handleDecodeError(
|
||||||
|
String.format("Invalid UTF-8 trailing byte 0x%02X", trailingByte),
|
||||||
|
codePointsOut);
|
||||||
|
// Skip past any remaining trailing bytes in the sequence.
|
||||||
|
utf8BytesIn.position(utf8BytesIn.position() + decodingTrailBytesNeeded);
|
||||||
|
reset();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (decodingTrailBytesNeeded == 0) {
|
||||||
|
codePointsOut = appendCodePointFromInterval(
|
||||||
|
decodingCurrentCodePoint,
|
||||||
|
validDecodedCodePointRange,
|
||||||
|
codePointsOut);
|
||||||
|
reset();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (endOfInput) {
|
||||||
|
if (decodingTrailBytesNeeded != -1) {
|
||||||
|
codePointsOut = handleDecodeError(
|
||||||
|
"Unterminated UTF-8 sequence at end of bytes",
|
||||||
|
codePointsOut);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return codePointsOut;
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean decodeLeadingByte(byte leadingByte) {
|
||||||
|
// Be careful about Java silently widening (unsigned)
|
||||||
|
// byte to (signed) int and sign-extending here.
|
||||||
|
//
|
||||||
|
// We use binary AND liberally below to prevent widening.
|
||||||
|
int leadingByteIdx = leadingByte & 0xFF;
|
||||||
|
decodingTrailBytesNeeded = UTF8_LEADING_BYTE_LENGTHS[leadingByteIdx];
|
||||||
|
switch (decodingTrailBytesNeeded) {
|
||||||
|
case 0:
|
||||||
|
decodingCurrentCodePoint = leadingByte;
|
||||||
|
break;
|
||||||
|
case 1:
|
||||||
|
case 2:
|
||||||
|
case 3:
|
||||||
|
int mask = (0b00111111 >> decodingTrailBytesNeeded);
|
||||||
|
decodingCurrentCodePoint = leadingByte & mask;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
validDecodedCodePointRange = UTF8_VALID_INTERVALS[decodingTrailBytesNeeded];
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean decodeTrailingByte(byte trailingByte) {
|
||||||
|
int trailingValue = (trailingByte & 0xFF) - 0x80;
|
||||||
|
if (trailingValue < 0x00 || trailingValue > 0x3F) {
|
||||||
|
return false;
|
||||||
|
} else {
|
||||||
|
decodingCurrentCodePoint = (decodingCurrentCodePoint << 6) | trailingValue;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private IntBuffer appendCodePointFromInterval(
|
||||||
|
int codePoint,
|
||||||
|
Interval validCodePointRange,
|
||||||
|
IntBuffer codePointsOut
|
||||||
|
) throws CharacterCodingException {
|
||||||
|
assert validCodePointRange != Interval.INVALID;
|
||||||
|
|
||||||
|
// Security check: UTF-8 must represent code points using their
|
||||||
|
// shortest encoded form.
|
||||||
|
if (codePoint < validCodePointRange.a ||
|
||||||
|
codePoint > validCodePointRange.b) {
|
||||||
|
return handleDecodeError(
|
||||||
|
String.format(
|
||||||
|
"Code point %d is out of expected range %s",
|
||||||
|
codePoint,
|
||||||
|
validCodePointRange),
|
||||||
|
codePointsOut);
|
||||||
|
} else {
|
||||||
|
return appendCodePoint(codePoint, codePointsOut);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private IntBuffer appendCodePoint(int codePoint, IntBuffer codePointsOut) {
|
||||||
|
if (!codePointsOut.hasRemaining()) {
|
||||||
|
// Grow the code point buffer size by 2.
|
||||||
|
IntBuffer newBuffer = IntBuffer.allocate(codePointsOut.capacity() * 2);
|
||||||
|
codePointsOut.flip();
|
||||||
|
newBuffer.put(codePointsOut);
|
||||||
|
codePointsOut = newBuffer;
|
||||||
|
}
|
||||||
|
codePointsOut.put(codePoint);
|
||||||
|
return codePointsOut;
|
||||||
|
}
|
||||||
|
|
||||||
|
private IntBuffer handleDecodeError(
|
||||||
|
final String error,
|
||||||
|
IntBuffer codePointsOut
|
||||||
|
) throws CharacterCodingException {
|
||||||
|
if (decodingErrorAction == CodingErrorAction.REPLACE) {
|
||||||
|
codePointsOut = appendCodePoint(SUBSTITUTION_CHARACTER, codePointsOut);
|
||||||
|
} else if (decodingErrorAction == CodingErrorAction.REPORT) {
|
||||||
|
throw new CharacterCodingException() {
|
||||||
|
@Override
|
||||||
|
public String getMessage() {
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
return codePointsOut;
|
||||||
|
}
|
||||||
|
}
|
|
@ -272,4 +272,39 @@ public class IntegerList {
|
||||||
_data = Arrays.copyOf(_data, newLength);
|
_data = Arrays.copyOf(_data, newLength);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Convert the list to a UTF-16 encoded char array. If all values are less
|
||||||
|
* than the 0xFFFF 16-bit code point limit then this is just a char array
|
||||||
|
* of 16-bit char as usual. For values in the supplementary range, encode
|
||||||
|
* them as two UTF-16 code units.
|
||||||
|
*/
|
||||||
|
public final char[] toCharArray() {
|
||||||
|
// Optimize for the common case (all data values are
|
||||||
|
// < 0xFFFF) to avoid an extra scan
|
||||||
|
char[] resultArray = new char[_size];
|
||||||
|
int resultIdx = 0;
|
||||||
|
boolean calculatedPreciseResultSize = false;
|
||||||
|
for (int i = 0; i < _size; i++) {
|
||||||
|
int codePoint = _data[i];
|
||||||
|
// Calculate the precise result size if we encounter
|
||||||
|
// a code point > 0xFFFF
|
||||||
|
if (!calculatedPreciseResultSize &&
|
||||||
|
Character.isSupplementaryCodePoint(codePoint)) {
|
||||||
|
resultArray = Arrays.copyOf(resultArray, charArraySize());
|
||||||
|
calculatedPreciseResultSize = true;
|
||||||
|
}
|
||||||
|
// This will throw IllegalArgumentException if
|
||||||
|
// the code point is not a valid Unicode code point
|
||||||
|
int charsWritten = Character.toChars(codePoint, resultArray, resultIdx);
|
||||||
|
resultIdx += charsWritten;
|
||||||
|
}
|
||||||
|
return resultArray;
|
||||||
|
}
|
||||||
|
|
||||||
|
private int charArraySize() {
|
||||||
|
int result = 0;
|
||||||
|
for (int i = 0; i < _size; i++) {
|
||||||
|
result += Character.charCount(_data[i]);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -137,11 +137,7 @@ public class Utils {
|
||||||
|
|
||||||
public static char[] toCharArray(IntegerList data) {
|
public static char[] toCharArray(IntegerList data) {
|
||||||
if ( data==null ) return null;
|
if ( data==null ) return null;
|
||||||
char[] cdata = new char[data.size()];
|
return data.toCharArray();
|
||||||
for (int i=0; i<data.size(); i++) {
|
|
||||||
cdata[i] = (char)data.get(i);
|
|
||||||
}
|
|
||||||
return cdata;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public static IntervalSet toSet(BitSet bits) {
|
public static IntervalSet toSet(BitSet bits) {
|
||||||
|
|
|
@ -13,9 +13,9 @@ var InputStream = require('./InputStream').InputStream;
|
||||||
var isNodeJs = typeof window === 'undefined' && typeof importScripts === 'undefined';
|
var isNodeJs = typeof window === 'undefined' && typeof importScripts === 'undefined';
|
||||||
var fs = isNodeJs ? require("fs") : null;
|
var fs = isNodeJs ? require("fs") : null;
|
||||||
|
|
||||||
function FileStream(fileName) {
|
function FileStream(fileName, decodeToUnicodeCodePoints) {
|
||||||
var data = fs.readFileSync(fileName, "utf8");
|
var data = fs.readFileSync(fileName, "utf8");
|
||||||
InputStream.call(this, data);
|
InputStream.call(this, data, decodeToUnicodeCodePoints);
|
||||||
this.fileName = fileName;
|
this.fileName = fileName;
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,21 +6,38 @@
|
||||||
//
|
//
|
||||||
|
|
||||||
var Token = require('./Token').Token;
|
var Token = require('./Token').Token;
|
||||||
|
require('./polyfills/codepointat');
|
||||||
|
require('./polyfills/fromcodepoint');
|
||||||
|
|
||||||
// Vacuum all input from a string and then treat it like a buffer.
|
// Vacuum all input from a string and then treat it like a buffer.
|
||||||
|
|
||||||
function _loadString(stream) {
|
function _loadString(stream, decodeToUnicodeCodePoints) {
|
||||||
stream._index = 0;
|
stream._index = 0;
|
||||||
stream.data = [];
|
stream.data = [];
|
||||||
for (var i = 0; i < stream.strdata.length; i++) {
|
if (stream.decodeToUnicodeCodePoints) {
|
||||||
stream.data.push(stream.strdata.charCodeAt(i));
|
for (var i = 0; i < stream.strdata.length; ) {
|
||||||
|
var codePoint = stream.strdata.codePointAt(i);
|
||||||
|
stream.data.push(codePoint);
|
||||||
|
i += codePoint <= 0xFFFF ? 1 : 2;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for (var i = 0; i < stream.strdata.length; i++) {
|
||||||
|
var codeUnit = stream.strdata.charCodeAt(i);
|
||||||
|
stream.data.push(codeUnit);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
stream._size = stream.data.length;
|
stream._size = stream.data.length;
|
||||||
}
|
}
|
||||||
|
|
||||||
function InputStream(data) {
|
// If decodeToUnicodeCodePoints is true, the input is treated
|
||||||
|
// as a series of Unicode code points.
|
||||||
|
//
|
||||||
|
// Otherwise, the input is treated as a series of 16-bit UTF-16 code
|
||||||
|
// units.
|
||||||
|
function InputStream(data, decodeToUnicodeCodePoints) {
|
||||||
this.name = "<empty>";
|
this.name = "<empty>";
|
||||||
this.strdata = data;
|
this.strdata = data;
|
||||||
|
this.decodeToUnicodeCodePoints = decodeToUnicodeCodePoints || false;
|
||||||
_loadString(this);
|
_loadString(this);
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
@ -99,7 +116,15 @@ InputStream.prototype.getText = function(start, stop) {
|
||||||
if (start >= this._size) {
|
if (start >= this._size) {
|
||||||
return "";
|
return "";
|
||||||
} else {
|
} else {
|
||||||
return this.strdata.slice(start, stop + 1);
|
if (this.decodeToUnicodeCodePoints) {
|
||||||
|
var result = "";
|
||||||
|
for (var i = start; i <= stop; i++) {
|
||||||
|
result += String.fromCodePoint(this.data[i]);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
} else {
|
||||||
|
return this.strdata.slice(start, stop + 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -361,7 +361,7 @@ Hash.prototype.update = function () {
|
||||||
k = (k << 15) | (k >>> (32 - 15));
|
k = (k << 15) | (k >>> (32 - 15));
|
||||||
k = k * 0x1B873593;
|
k = k * 0x1B873593;
|
||||||
this.count = this.count + 1;
|
this.count = this.count + 1;
|
||||||
hash = this.hash ^ k;
|
var hash = this.hash ^ k;
|
||||||
hash = (hash << 13) | (hash >>> (32 - 13));
|
hash = (hash << 13) | (hash >>> (32 - 13));
|
||||||
hash = hash * 5 + 0xE6546B64;
|
hash = hash * 5 + 0xE6546B64;
|
||||||
this.hash = hash;
|
this.hash = hash;
|
||||||
|
|
|
@ -1444,7 +1444,7 @@ ParserATNSimulator.prototype.precedenceTransition = function(config, pt, collec
|
||||||
c = new ATNConfig({state:pt.target}, config); // no pred context
|
c = new ATNConfig({state:pt.target}, config); // no pred context
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
newSemCtx = SemanticContext.andContext(config.semanticContext, pt.getPredicate());
|
var newSemCtx = SemanticContext.andContext(config.semanticContext, pt.getPredicate());
|
||||||
c = new ATNConfig({state:pt.target, semanticContext:newSemCtx}, config);
|
c = new ATNConfig({state:pt.target, semanticContext:newSemCtx}, config);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -32,7 +32,7 @@ function DFA(atnStartState, decision) {
|
||||||
{
|
{
|
||||||
if (atnStartState.isPrecedenceDecision) {
|
if (atnStartState.isPrecedenceDecision) {
|
||||||
this.precedenceDfa = true;
|
this.precedenceDfa = true;
|
||||||
precedenceState = new DFAState(null, new ATNConfigSet());
|
var precedenceState = new DFAState(null, new ATNConfigSet());
|
||||||
precedenceState.edges = [];
|
precedenceState.edges = [];
|
||||||
precedenceState.isAcceptState = false;
|
precedenceState.isAcceptState = false;
|
||||||
precedenceState.requiresFullContext = false;
|
precedenceState.requiresFullContext = false;
|
||||||
|
|
|
@ -166,3 +166,4 @@ exports.NoViableAltException = NoViableAltException;
|
||||||
exports.LexerNoViableAltException = LexerNoViableAltException;
|
exports.LexerNoViableAltException = LexerNoViableAltException;
|
||||||
exports.InputMismatchException = InputMismatchException;
|
exports.InputMismatchException = InputMismatchException;
|
||||||
exports.FailedPredicateException = FailedPredicateException;
|
exports.FailedPredicateException = FailedPredicateException;
|
||||||
|
exports.ParseCancellationException = ParseCancellationException;
|
||||||
|
|
|
@ -3,7 +3,9 @@
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
* can be found in the LICENSE.txt file in the project root.
|
||||||
*/
|
*/
|
||||||
exports.atn = require('./atn/index');
|
exports.atn = require('./atn/index');
|
||||||
|
exports.codepointat = require('./polyfills/codepointat');
|
||||||
exports.dfa = require('./dfa/index');
|
exports.dfa = require('./dfa/index');
|
||||||
|
exports.fromcodepoint = require('./polyfills/fromcodepoint');
|
||||||
exports.tree = require('./tree/index');
|
exports.tree = require('./tree/index');
|
||||||
exports.error = require('./error/index');
|
exports.error = require('./error/index');
|
||||||
exports.Token = require('./Token').Token;
|
exports.Token = require('./Token').Token;
|
||||||
|
|
|
@ -0,0 +1,54 @@
|
||||||
|
/*! https://mths.be/codepointat v0.2.0 by @mathias */
|
||||||
|
if (!String.prototype.codePointAt) {
|
||||||
|
(function() {
|
||||||
|
'use strict'; // needed to support `apply`/`call` with `undefined`/`null`
|
||||||
|
var defineProperty = (function() {
|
||||||
|
// IE 8 only supports `Object.defineProperty` on DOM elements
|
||||||
|
try {
|
||||||
|
var object = {};
|
||||||
|
var $defineProperty = Object.defineProperty;
|
||||||
|
var result = $defineProperty(object, object, object) && $defineProperty;
|
||||||
|
} catch(error) {}
|
||||||
|
return result;
|
||||||
|
}());
|
||||||
|
var codePointAt = function(position) {
|
||||||
|
if (this == null) {
|
||||||
|
throw TypeError();
|
||||||
|
}
|
||||||
|
var string = String(this);
|
||||||
|
var size = string.length;
|
||||||
|
// `ToInteger`
|
||||||
|
var index = position ? Number(position) : 0;
|
||||||
|
if (index != index) { // better `isNaN`
|
||||||
|
index = 0;
|
||||||
|
}
|
||||||
|
// Account for out-of-bounds indices:
|
||||||
|
if (index < 0 || index >= size) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
// Get the first code unit
|
||||||
|
var first = string.charCodeAt(index);
|
||||||
|
var second;
|
||||||
|
if ( // check if it’s the start of a surrogate pair
|
||||||
|
first >= 0xD800 && first <= 0xDBFF && // high surrogate
|
||||||
|
size > index + 1 // there is a next code unit
|
||||||
|
) {
|
||||||
|
second = string.charCodeAt(index + 1);
|
||||||
|
if (second >= 0xDC00 && second <= 0xDFFF) { // low surrogate
|
||||||
|
// https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
|
||||||
|
return (first - 0xD800) * 0x400 + second - 0xDC00 + 0x10000;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return first;
|
||||||
|
};
|
||||||
|
if (defineProperty) {
|
||||||
|
defineProperty(String.prototype, 'codePointAt', {
|
||||||
|
'value': codePointAt,
|
||||||
|
'configurable': true,
|
||||||
|
'writable': true
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
String.prototype.codePointAt = codePointAt;
|
||||||
|
}
|
||||||
|
}());
|
||||||
|
}
|
|
@ -0,0 +1,62 @@
|
||||||
|
/*! https://mths.be/fromcodepoint v0.2.1 by @mathias */
|
||||||
|
if (!String.fromCodePoint) {
|
||||||
|
(function() {
|
||||||
|
var defineProperty = (function() {
|
||||||
|
// IE 8 only supports `Object.defineProperty` on DOM elements
|
||||||
|
try {
|
||||||
|
var object = {};
|
||||||
|
var $defineProperty = Object.defineProperty;
|
||||||
|
var result = $defineProperty(object, object, object) && $defineProperty;
|
||||||
|
} catch(error) {}
|
||||||
|
return result;
|
||||||
|
}());
|
||||||
|
var stringFromCharCode = String.fromCharCode;
|
||||||
|
var floor = Math.floor;
|
||||||
|
var fromCodePoint = function(_) {
|
||||||
|
var MAX_SIZE = 0x4000;
|
||||||
|
var codeUnits = [];
|
||||||
|
var highSurrogate;
|
||||||
|
var lowSurrogate;
|
||||||
|
var index = -1;
|
||||||
|
var length = arguments.length;
|
||||||
|
if (!length) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
var result = '';
|
||||||
|
while (++index < length) {
|
||||||
|
var codePoint = Number(arguments[index]);
|
||||||
|
if (
|
||||||
|
!isFinite(codePoint) || // `NaN`, `+Infinity`, or `-Infinity`
|
||||||
|
codePoint < 0 || // not a valid Unicode code point
|
||||||
|
codePoint > 0x10FFFF || // not a valid Unicode code point
|
||||||
|
floor(codePoint) != codePoint // not an integer
|
||||||
|
) {
|
||||||
|
throw RangeError('Invalid code point: ' + codePoint);
|
||||||
|
}
|
||||||
|
if (codePoint <= 0xFFFF) { // BMP code point
|
||||||
|
codeUnits.push(codePoint);
|
||||||
|
} else { // Astral code point; split in surrogate halves
|
||||||
|
// https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
|
||||||
|
codePoint -= 0x10000;
|
||||||
|
highSurrogate = (codePoint >> 10) + 0xD800;
|
||||||
|
lowSurrogate = (codePoint % 0x400) + 0xDC00;
|
||||||
|
codeUnits.push(highSurrogate, lowSurrogate);
|
||||||
|
}
|
||||||
|
if (index + 1 == length || codeUnits.length > MAX_SIZE) {
|
||||||
|
result += stringFromCharCode.apply(null, codeUnits);
|
||||||
|
codeUnits.length = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
};
|
||||||
|
if (defineProperty) {
|
||||||
|
defineProperty(String, 'fromCodePoint', {
|
||||||
|
'value': fromCodePoint,
|
||||||
|
'configurable': true,
|
||||||
|
'writable': true
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
String.fromCodePoint = fromCodePoint;
|
||||||
|
}
|
||||||
|
}());
|
||||||
|
}
|
|
@ -16,12 +16,12 @@ from antlr4.InputStream import InputStream
|
||||||
|
|
||||||
class FileStream(InputStream):
|
class FileStream(InputStream):
|
||||||
|
|
||||||
def __init__(self, fileName, encoding='ascii'):
|
def __init__(self, fileName, encoding='ascii', errors='strict'):
|
||||||
self.fileName = fileName
|
self.fileName = fileName
|
||||||
# read binary to avoid line ending conversion
|
# read binary to avoid line ending conversion
|
||||||
with open(fileName, 'rb') as file:
|
with open(fileName, 'rb') as file:
|
||||||
bytes = file.read()
|
bytes = file.read()
|
||||||
data = codecs.decode(bytes, encoding)
|
data = codecs.decode(bytes, encoding, errors)
|
||||||
super(type(self), self).__init__(data)
|
super(type(self), self).__init__(data)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,9 @@
|
||||||
# uses simplified match() and error recovery mechanisms in the interest
|
# uses simplified match() and error recovery mechanisms in the interest
|
||||||
# of speed.
|
# of speed.
|
||||||
#/
|
#/
|
||||||
|
from __future__ import print_function
|
||||||
from io import StringIO
|
from io import StringIO
|
||||||
|
import sys
|
||||||
from antlr4.CommonTokenFactory import CommonTokenFactory
|
from antlr4.CommonTokenFactory import CommonTokenFactory
|
||||||
from antlr4.Recognizer import Recognizer
|
from antlr4.Recognizer import Recognizer
|
||||||
from antlr4.Token import Token
|
from antlr4.Token import Token
|
||||||
|
@ -30,9 +32,10 @@ class Lexer(Recognizer, TokenSource):
|
||||||
MIN_CHAR_VALUE = '\u0000'
|
MIN_CHAR_VALUE = '\u0000'
|
||||||
MAX_CHAR_VALUE = '\uFFFE'
|
MAX_CHAR_VALUE = '\uFFFE'
|
||||||
|
|
||||||
def __init__(self, input):
|
def __init__(self, input, output=sys.stdout):
|
||||||
super(Lexer, self).__init__()
|
super(Lexer, self).__init__()
|
||||||
self._input = input
|
self._input = input
|
||||||
|
self._output = output
|
||||||
self._factory = CommonTokenFactory.DEFAULT
|
self._factory = CommonTokenFactory.DEFAULT
|
||||||
self._tokenFactorySourcePair = (self, input)
|
self._tokenFactorySourcePair = (self, input)
|
||||||
|
|
||||||
|
@ -160,7 +163,7 @@ class Lexer(Recognizer, TokenSource):
|
||||||
|
|
||||||
def pushMode(self, m):
|
def pushMode(self, m):
|
||||||
if self._interp.debug:
|
if self._interp.debug:
|
||||||
print("pushMode " + str(m))
|
print("pushMode " + str(m), file=self._output)
|
||||||
self._modeStack.append(self._mode)
|
self._modeStack.append(self._mode)
|
||||||
self.mode(m)
|
self.mode(m)
|
||||||
|
|
||||||
|
@ -168,7 +171,7 @@ class Lexer(Recognizer, TokenSource):
|
||||||
if len(self._modeStack)==0:
|
if len(self._modeStack)==0:
|
||||||
raise Exception("Empty Stack")
|
raise Exception("Empty Stack")
|
||||||
if self._interp.debug:
|
if self._interp.debug:
|
||||||
print("popMode back to "+ self._modeStack[:-1])
|
print("popMode back to "+ self._modeStack[:-1], file=self._output)
|
||||||
self.mode( self._modeStack.pop() )
|
self.mode( self._modeStack.pop() )
|
||||||
return self._mode
|
return self._mode
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions
|
||||||
from antlr4.error.Errors import UnsupportedOperationException
|
from antlr4.error.Errors import UnsupportedOperationException
|
||||||
from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
|
from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
|
||||||
from antlr4.tree.Tree import ParseTreeListener, ErrorNode, TerminalNode
|
from antlr4.tree.Tree import ParseTreeListener, ErrorNode, TerminalNode
|
||||||
|
import sys
|
||||||
|
|
||||||
class TraceListener(ParseTreeListener):
|
class TraceListener(ParseTreeListener):
|
||||||
|
|
||||||
|
@ -19,16 +19,16 @@ class TraceListener(ParseTreeListener):
|
||||||
self._parser = parser
|
self._parser = parser
|
||||||
|
|
||||||
def enterEveryRule(self, ctx):
|
def enterEveryRule(self, ctx):
|
||||||
print("enter " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text)
|
print("enter " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text, file=self._parser._output)
|
||||||
|
|
||||||
def visitTerminal(self, node):
|
def visitTerminal(self, node):
|
||||||
print("consume " + str(node.symbol) + " rule " + self._parser.ruleNames[self._parser._ctx.getRuleIndex()])
|
print("consume " + str(node.symbol) + " rule " + self._parser.ruleNames[self._parser._ctx.getRuleIndex()], file=self._parser._output)
|
||||||
|
|
||||||
def visitErrorNode(self, node):
|
def visitErrorNode(self, node):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def exitEveryRule(self, ctx):
|
def exitEveryRule(self, ctx):
|
||||||
print("exit " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text)
|
print("exit " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text, file=self._parser._output)
|
||||||
|
|
||||||
|
|
||||||
# self is all the parsing support code essentially; most of it is error recovery stuff.#
|
# self is all the parsing support code essentially; most of it is error recovery stuff.#
|
||||||
|
@ -41,10 +41,11 @@ class Parser (Recognizer):
|
||||||
#
|
#
|
||||||
bypassAltsAtnCache = dict()
|
bypassAltsAtnCache = dict()
|
||||||
|
|
||||||
def __init__(self, input):
|
def __init__(self, input, output=sys.stdout):
|
||||||
super(Parser, self).__init__()
|
super(Parser, self).__init__()
|
||||||
# The input stream.
|
# The input stream.
|
||||||
self._input = None
|
self._input = None
|
||||||
|
self._output = output
|
||||||
# The error handling strategy for the parser. The default value is a new
|
# The error handling strategy for the parser. The default value is a new
|
||||||
# instance of {@link DefaultErrorStrategy}.
|
# instance of {@link DefaultErrorStrategy}.
|
||||||
self._errHandler = DefaultErrorStrategy()
|
self._errHandler = DefaultErrorStrategy()
|
||||||
|
@ -532,9 +533,9 @@ class Parser (Recognizer):
|
||||||
dfa = self._interp.decisionToDFA[i]
|
dfa = self._interp.decisionToDFA[i]
|
||||||
if len(dfa.states)>0:
|
if len(dfa.states)>0:
|
||||||
if seenOne:
|
if seenOne:
|
||||||
print()
|
print(file=self._output)
|
||||||
print("Decision " + str(dfa.decision) + ":")
|
print("Decision " + str(dfa.decision) + ":", file=self._output)
|
||||||
print(dfa.toString(self.literalNames, self.symbolicNames), end='')
|
print(dfa.toString(self.literalNames, self.symbolicNames), end='', file=self._output)
|
||||||
seenOne = True
|
seenOne = True
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -15,7 +15,7 @@ from antlr4.InputStream import InputStream
|
||||||
|
|
||||||
class StdinStream(InputStream):
|
class StdinStream(InputStream):
|
||||||
|
|
||||||
def __init__(self, encoding='ascii'):
|
def __init__(self, encoding='ascii', errors='strict'):
|
||||||
bytes = sys.stdin.read()
|
bytes = sys.stdin.read()
|
||||||
data = codecs.decode(bytes, encoding)
|
data = codecs.decode(bytes, encoding, errors)
|
||||||
super(type(self), self).__init__(data)
|
super(type(self), self).__init__(data)
|
|
@ -16,15 +16,15 @@ from antlr4.InputStream import InputStream
|
||||||
|
|
||||||
class FileStream(InputStream):
|
class FileStream(InputStream):
|
||||||
|
|
||||||
def __init__(self, fileName:str, encoding:str='ascii'):
|
def __init__(self, fileName:str, encoding:str='ascii', errors:str='strict'):
|
||||||
super().__init__(self.readDataFrom(fileName, encoding))
|
super().__init__(self.readDataFrom(fileName, encoding, errors))
|
||||||
self.fileName = fileName
|
self.fileName = fileName
|
||||||
|
|
||||||
def readDataFrom(self, fileName:str, encoding:str):
|
def readDataFrom(self, fileName:str, encoding:str, errors:str='strict'):
|
||||||
# read binary to avoid line ending conversion
|
# read binary to avoid line ending conversion
|
||||||
with open(fileName, 'rb') as file:
|
with open(fileName, 'rb') as file:
|
||||||
bytes = file.read()
|
bytes = file.read()
|
||||||
return codecs.decode(bytes, encoding)
|
return codecs.decode(bytes, encoding, errors)
|
||||||
|
|
||||||
|
|
||||||
class TestFileStream(unittest.TestCase):
|
class TestFileStream(unittest.TestCase):
|
||||||
|
|
|
@ -9,6 +9,8 @@
|
||||||
# of speed.
|
# of speed.
|
||||||
#/
|
#/
|
||||||
from io import StringIO
|
from io import StringIO
|
||||||
|
from typing.io import TextIO
|
||||||
|
import sys
|
||||||
from antlr4.CommonTokenFactory import CommonTokenFactory
|
from antlr4.CommonTokenFactory import CommonTokenFactory
|
||||||
from antlr4.atn.LexerATNSimulator import LexerATNSimulator
|
from antlr4.atn.LexerATNSimulator import LexerATNSimulator
|
||||||
from antlr4.InputStream import InputStream
|
from antlr4.InputStream import InputStream
|
||||||
|
@ -32,9 +34,10 @@ class Lexer(Recognizer, TokenSource):
|
||||||
MIN_CHAR_VALUE = '\u0000'
|
MIN_CHAR_VALUE = '\u0000'
|
||||||
MAX_CHAR_VALUE = '\uFFFE'
|
MAX_CHAR_VALUE = '\uFFFE'
|
||||||
|
|
||||||
def __init__(self, input:InputStream):
|
def __init__(self, input:InputStream, output:TextIO = sys.stdout):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self._input = input
|
self._input = input
|
||||||
|
self._output = output
|
||||||
self._factory = CommonTokenFactory.DEFAULT
|
self._factory = CommonTokenFactory.DEFAULT
|
||||||
self._tokenFactorySourcePair = (self, input)
|
self._tokenFactorySourcePair = (self, input)
|
||||||
|
|
||||||
|
@ -162,7 +165,7 @@ class Lexer(Recognizer, TokenSource):
|
||||||
|
|
||||||
def pushMode(self, m:int):
|
def pushMode(self, m:int):
|
||||||
if self._interp.debug:
|
if self._interp.debug:
|
||||||
print("pushMode " + str(m))
|
print("pushMode " + str(m), file=self._output)
|
||||||
self._modeStack.append(self._mode)
|
self._modeStack.append(self._mode)
|
||||||
self.mode(m)
|
self.mode(m)
|
||||||
|
|
||||||
|
@ -170,7 +173,7 @@ class Lexer(Recognizer, TokenSource):
|
||||||
if len(self._modeStack)==0:
|
if len(self._modeStack)==0:
|
||||||
raise Exception("Empty Stack")
|
raise Exception("Empty Stack")
|
||||||
if self._interp.debug:
|
if self._interp.debug:
|
||||||
print("popMode back to "+ self._modeStack[:-1])
|
print("popMode back to "+ self._modeStack[:-1], file=self._output)
|
||||||
self.mode( self._modeStack.pop() )
|
self.mode( self._modeStack.pop() )
|
||||||
return self._mode
|
return self._mode
|
||||||
|
|
||||||
|
|
|
@ -2,6 +2,8 @@
|
||||||
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
||||||
# Use of this file is governed by the BSD 3-clause license that
|
# Use of this file is governed by the BSD 3-clause license that
|
||||||
# can be found in the LICENSE.txt file in the project root.
|
# can be found in the LICENSE.txt file in the project root.
|
||||||
|
import sys
|
||||||
|
from typing.io import TextIO
|
||||||
from antlr4.BufferedTokenStream import TokenStream
|
from antlr4.BufferedTokenStream import TokenStream
|
||||||
from antlr4.CommonTokenFactory import TokenFactory
|
from antlr4.CommonTokenFactory import TokenFactory
|
||||||
from antlr4.error.ErrorStrategy import DefaultErrorStrategy
|
from antlr4.error.ErrorStrategy import DefaultErrorStrategy
|
||||||
|
@ -23,18 +25,18 @@ class TraceListener(ParseTreeListener):
|
||||||
self._parser = parser
|
self._parser = parser
|
||||||
|
|
||||||
def enterEveryRule(self, ctx):
|
def enterEveryRule(self, ctx):
|
||||||
print("enter " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text)
|
print("enter " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text, file=self._parser._output)
|
||||||
|
|
||||||
def visitTerminal(self, node):
|
def visitTerminal(self, node):
|
||||||
|
|
||||||
print("consume " + str(node.symbol) + " rule " + self._parser.ruleNames[self._parser._ctx.getRuleIndex()])
|
print("consume " + str(node.symbol) + " rule " + self._parser.ruleNames[self._parser._ctx.getRuleIndex()], file=self._parser._output)
|
||||||
|
|
||||||
def visitErrorNode(self, node):
|
def visitErrorNode(self, node):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def exitEveryRule(self, ctx):
|
def exitEveryRule(self, ctx):
|
||||||
print("exit " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text)
|
print("exit " + self._parser.ruleNames[ctx.getRuleIndex()] + ", LT(1)=" + self._parser._input.LT(1).text, file=self._parser._output)
|
||||||
|
|
||||||
|
|
||||||
# self is all the parsing support code essentially; most of it is error recovery stuff.#
|
# self is all the parsing support code essentially; most of it is error recovery stuff.#
|
||||||
|
@ -47,10 +49,11 @@ class Parser (Recognizer):
|
||||||
#
|
#
|
||||||
bypassAltsAtnCache = dict()
|
bypassAltsAtnCache = dict()
|
||||||
|
|
||||||
def __init__(self, input:TokenStream):
|
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
# The input stream.
|
# The input stream.
|
||||||
self._input = None
|
self._input = None
|
||||||
|
self._output = output
|
||||||
# The error handling strategy for the parser. The default value is a new
|
# The error handling strategy for the parser. The default value is a new
|
||||||
# instance of {@link DefaultErrorStrategy}.
|
# instance of {@link DefaultErrorStrategy}.
|
||||||
self._errHandler = DefaultErrorStrategy()
|
self._errHandler = DefaultErrorStrategy()
|
||||||
|
@ -538,9 +541,9 @@ class Parser (Recognizer):
|
||||||
dfa = self._interp.decisionToDFA[i]
|
dfa = self._interp.decisionToDFA[i]
|
||||||
if len(dfa.states)>0:
|
if len(dfa.states)>0:
|
||||||
if seenOne:
|
if seenOne:
|
||||||
print()
|
print(file=self._output)
|
||||||
print("Decision " + str(dfa.decision) + ":")
|
print("Decision " + str(dfa.decision) + ":", file=self._output)
|
||||||
print(dfa.toString(self.literalNames, self.symbolicNames), end='')
|
print(dfa.toString(self.literalNames, self.symbolicNames), end='', file=self._output)
|
||||||
seenOne = True
|
seenOne = True
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,23 +0,0 @@
|
||||||
import Antlr4
|
|
||||||
|
|
||||||
var input = "hello world"
|
|
||||||
|
|
||||||
let lexer = HelloLexer(ANTLRInputStream(input))
|
|
||||||
let tokens = CommonTokenStream(lexer)
|
|
||||||
|
|
||||||
do {
|
|
||||||
let parser = try HelloParser(tokens)
|
|
||||||
|
|
||||||
let tree = try parser.r()
|
|
||||||
let walker = ParseTreeWalker()
|
|
||||||
try walker.walk(HelloWalker(), tree)
|
|
||||||
}
|
|
||||||
catch ANTLRException.cannotInvokeStartRule {
|
|
||||||
print("Error: cannot invoke start rule.")
|
|
||||||
}
|
|
||||||
catch ANTLRException.recognition(let e) {
|
|
||||||
print("Unrecoverable recognition error: \(e)")
|
|
||||||
}
|
|
||||||
catch {
|
|
||||||
print("Unknown error: \(error)")
|
|
||||||
}
|
|
|
@ -1,5 +0,0 @@
|
||||||
// Define a grammar called Hello
|
|
||||||
grammar Hello;
|
|
||||||
r : 'hello' ID ; // match keyword hello followed by an identifier
|
|
||||||
ID : [a-z]+ ; // match lower-case identifiers
|
|
||||||
WS : [ \t\r\n]+ -> skip ; // skip spaces, tabs, newlines
|
|
|
@ -1 +0,0 @@
|
||||||
hello world
|
|
|
@ -1,4 +0,0 @@
|
||||||
T__0=1
|
|
||||||
ID=2
|
|
||||||
WS=3
|
|
||||||
'hello'=1
|
|
|
@ -1,20 +0,0 @@
|
||||||
// Generated from Hello.g4 by ANTLR 4.6
|
|
||||||
import Antlr4
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This class provides an empty implementation of {@link HelloVisitor},
|
|
||||||
* which can be extended to create a visitor which only needs to handle a subset
|
|
||||||
* of the available methods.
|
|
||||||
*
|
|
||||||
* @param <T> The return type of the visit operation. Use {@link Void} for
|
|
||||||
* operations with no return type.
|
|
||||||
*/
|
|
||||||
open class HelloBaseVisitor<T>: AbstractParseTreeVisitor<T> {
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>The default implementation returns the result of calling
|
|
||||||
* {@link #visitChildren} on {@code ctx}.</p>
|
|
||||||
*/
|
|
||||||
open func visitR(_ ctx: HelloParser.RContext) -> T? { return visitChildren(ctx) }
|
|
||||||
}
|
|
|
@ -1,4 +0,0 @@
|
||||||
T__0=1
|
|
||||||
ID=2
|
|
||||||
WS=3
|
|
||||||
'hello'=1
|
|
|
@ -1,3 +0,0 @@
|
||||||
class HelloLexerATN {
|
|
||||||
let jsonString: String = "{\"version\":3,\"uuid\":\"aadb8d7e-aeef-4415-ad2b-8204d6cf042e\",\"grammarType\":0,\"maxTokenType\":3,\"states\":[{\"stateType\":6,\"ruleIndex\":-1},{\"stateType\":2,\"ruleIndex\":0},{\"stateType\":7,\"ruleIndex\":0},{\"stateType\":2,\"ruleIndex\":1},{\"stateType\":7,\"ruleIndex\":1},{\"stateType\":2,\"ruleIndex\":2},{\"stateType\":7,\"ruleIndex\":2},{\"stateType\":1,\"ruleIndex\":0},{\"stateType\":1,\"ruleIndex\":0},{\"stateType\":1,\"ruleIndex\":0},{\"stateType\":1,\"ruleIndex\":0},{\"stateType\":1,\"ruleIndex\":0},{\"stateType\":1,\"ruleIndex\":0},{\"stateType\":1,\"ruleIndex\":1},{\"stateType\":4,\"ruleIndex\":1,\"detailStateNumber\":15},{\"stateType\":8,\"ruleIndex\":1},{\"stateType\":11,\"ruleIndex\":1},{\"stateType\":12,\"ruleIndex\":1,\"detailStateNumber\":16},{\"stateType\":1,\"ruleIndex\":2},{\"stateType\":4,\"ruleIndex\":2,\"detailStateNumber\":20},{\"stateType\":8,\"ruleIndex\":2},{\"stateType\":11,\"ruleIndex\":2},{\"stateType\":12,\"ruleIndex\":2,\"detailStateNumber\":21},{\"stateType\":1,\"ruleIndex\":2},{\"stateType\":1,\"ruleIndex\":2}],\"nonGreedyStates\":[],\"precedenceStates\":[],\"ruleToStartState\":[{\"stateNumber\":1,\"ruleToTokenType\":1},{\"stateNumber\":3,\"ruleToTokenType\":2},{\"stateNumber\":5,\"ruleToTokenType\":3}],\"modeToStartState\":[0],\"nsets\":2,\"IntervalSet\":[{\"size\":1,\"containsEof\":0,\"Intervals\":[{\"a\":97,\"b\":122}]},{\"size\":3,\"containsEof\":0,\"Intervals\":[{\"a\":9,\"b\":10},{\"a\":13,\"b\":13},{\"a\":32,\"b\":32}]}],\"allTransitionsBuilder\":[[{\"src\":0,\"trg\":1,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0},{\"src\":0,\"trg\":3,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0},{\"src\":0,\"trg\":5,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":1,\"trg\":7,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":3,\"trg\":14,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":5,\"trg\":19,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":7,\"trg\":8,\"edgeType\":5,\"arg1\":104,\"arg2\":0,\"arg3\":0}],[{\"src\":8,\"trg\":9,\"edgeType\":5,\"arg1\":101,\"arg2\":0,\"arg3\":0}],[{\"src\":9,\"trg\":10,\"edgeType\":5,\"arg1\":108,\"arg2\":0,\"arg3\":0}],[{\"src\":10,\"trg\":11,\"edgeType\":5,\"arg1\":108,\"arg2\":0,\"arg3\":0}],[{\"src\":11,\"trg\":12,\"edgeType\":5,\"arg1\":111,\"arg2\":0,\"arg3\":0}],[{\"src\":12,\"trg\":2,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":13,\"trg\":15,\"edgeType\":7,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":14,\"trg\":13,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":15,\"trg\":16,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":16,\"trg\":14,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0},{\"src\":16,\"trg\":17,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":17,\"trg\":4,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":18,\"trg\":20,\"edgeType\":7,\"arg1\":1,\"arg2\":0,\"arg3\":0}],[{\"src\":19,\"trg\":18,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":20,\"trg\":21,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":21,\"trg\":19,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0},{\"src\":21,\"trg\":22,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":22,\"trg\":23,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}],[{\"src\":23,\"trg\":24,\"edgeType\":6,\"arg1\":2,\"arg2\":0,\"arg3\":0}],[{\"src\":24,\"trg\":6,\"edgeType\":1,\"arg1\":0,\"arg2\":0,\"arg3\":0}]],\"decisionToState\":[0,16,21],\"lexerActions\":[{\"actionType\":6,\"a\":0,\"b\":0}]}"
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
// Generated from Hello.g4 by ANTLR 4.6
|
|
||||||
import Antlr4
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This interface defines a complete listener for a parse tree produced by
|
|
||||||
* {@link HelloParser}.
|
|
||||||
*/
|
|
||||||
public protocol HelloListener: ParseTreeListener {
|
|
||||||
/**
|
|
||||||
* Enter a parse tree produced by {@link HelloParser#r}.
|
|
||||||
- Parameters:
|
|
||||||
- ctx: the parse tree
|
|
||||||
*/
|
|
||||||
func enterR(_ ctx: HelloParser.RContext)
|
|
||||||
/**
|
|
||||||
* Exit a parse tree produced by {@link HelloParser#r}.
|
|
||||||
- Parameters:
|
|
||||||
- ctx: the parse tree
|
|
||||||
*/
|
|
||||||
func exitR(_ ctx: HelloParser.RContext)
|
|
||||||
}
|
|
|
@ -1,22 +0,0 @@
|
||||||
// Generated from Hello.g4 by ANTLR 4.6
|
|
||||||
import Antlr4
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This interface defines a complete generic visitor for a parse tree produced
|
|
||||||
* by {@link HelloParser}.
|
|
||||||
*
|
|
||||||
* @param <T> The return type of the visit operation. Use {@link Void} for
|
|
||||||
* operations with no return type.
|
|
||||||
*/
|
|
||||||
open class HelloVisitor<T>: ParseTreeVisitor<T> {
|
|
||||||
/**
|
|
||||||
* Visit a parse tree produced by {@link HelloParser#r}.
|
|
||||||
- Parameters:
|
|
||||||
- ctx: the parse tree
|
|
||||||
- returns: the visitor result
|
|
||||||
*/
|
|
||||||
open func visitR(_ ctx: HelloParser.RContext) -> T{
|
|
||||||
fatalError(#function + " must be overridden")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,18 +0,0 @@
|
||||||
import Antlr4
|
|
||||||
|
|
||||||
public class HelloWalker: HelloBaseListener {
|
|
||||||
public override func enterR(_ ctx: HelloParser.RContext) {
|
|
||||||
print("enterR: \(ctx.IDText())")
|
|
||||||
}
|
|
||||||
|
|
||||||
public override func exitR(_ ctx: HelloParser.RContext) {
|
|
||||||
print("exitR: \(ctx.IDText())")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
fileprivate extension HelloParser.RContext {
|
|
||||||
fileprivate func IDText() -> String {
|
|
||||||
return ID()?.getText() ?? ""
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,4 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
|
||||||
<playground version='5.0' target-platform='macos'>
|
|
||||||
<timeline fileName='timeline.xctimeline'/>
|
|
||||||
</playground>
|
|
|
@ -1,7 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<Workspace
|
|
||||||
version = "1.0">
|
|
||||||
<FileRef
|
|
||||||
location = "self:">
|
|
||||||
</FileRef>
|
|
||||||
</Workspace>
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,7 +1,7 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<Scheme
|
<Scheme
|
||||||
LastUpgradeVersion = "0810"
|
LastUpgradeVersion = "0810"
|
||||||
version = "1.3">
|
version = "1.7">
|
||||||
<BuildAction
|
<BuildAction
|
||||||
parallelizeBuildables = "YES"
|
parallelizeBuildables = "YES"
|
||||||
buildImplicitDependencies = "YES">
|
buildImplicitDependencies = "YES">
|
||||||
|
@ -37,6 +37,10 @@
|
||||||
BlueprintName = "Antlr4Tests"
|
BlueprintName = "Antlr4Tests"
|
||||||
ReferencedContainer = "container:Antlr4.xcodeproj">
|
ReferencedContainer = "container:Antlr4.xcodeproj">
|
||||||
</BuildableReference>
|
</BuildableReference>
|
||||||
|
<LocationScenarioReference
|
||||||
|
identifier = "com.apple.dt.IDEFoundation.CurrentLocationScenarioIdentifier"
|
||||||
|
referenceType = "1">
|
||||||
|
</LocationScenarioReference>
|
||||||
</TestableReference>
|
</TestableReference>
|
||||||
</Testables>
|
</Testables>
|
||||||
<MacroExpansion>
|
<MacroExpansion>
|
||||||
|
|
|
@ -4,7 +4,4 @@
|
||||||
<FileRef
|
<FileRef
|
||||||
location = "container:Antlr4.xcodeproj">
|
location = "container:Antlr4.xcodeproj">
|
||||||
</FileRef>
|
</FileRef>
|
||||||
<FileRef
|
|
||||||
location = "container:Antlr4 playground.playground">
|
|
||||||
</FileRef>
|
|
||||||
</Workspace>
|
</Workspace>
|
||||||
|
|
|
@ -1,171 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
/** How to emit recognition errors. */
|
|
||||||
|
|
||||||
public protocol ANTLRErrorListener: class {
|
|
||||||
/**
|
|
||||||
* Upon syntax error, notify any interested parties. This is not how to
|
|
||||||
* recover from errors or compute error messages. {@link org.antlr.v4.runtime.ANTLRErrorStrategy}
|
|
||||||
* specifies how to recover from syntax errors and how to compute error
|
|
||||||
* messages. This listener's job is simply to emit a computed message,
|
|
||||||
* though it has enough information to create its own message in many cases.
|
|
||||||
*
|
|
||||||
* <p>The {@link org.antlr.v4.runtime.RecognitionException} is non-null for all syntax errors except
|
|
||||||
* when we discover mismatched token errors that we can recover from
|
|
||||||
* in-line, without returning from the surrounding rule (via the single
|
|
||||||
* token insertion and deletion mechanism).</p>
|
|
||||||
*
|
|
||||||
* @param recognizer
|
|
||||||
* What parser got the error. From this
|
|
||||||
* object, you can access the context as well
|
|
||||||
* as the input stream.
|
|
||||||
* @param offendingSymbol
|
|
||||||
* The offending token in the input token
|
|
||||||
* stream, unless recognizer is a lexer (then it's null). If
|
|
||||||
* no viable alternative error, {@code e} has token at which we
|
|
||||||
* started production for the decision.
|
|
||||||
* @param line
|
|
||||||
* The line number in the input where the error occurred.
|
|
||||||
* @param charPositionInLine
|
|
||||||
* The character position within that line where the error occurred.
|
|
||||||
* @param msg
|
|
||||||
* The message to emit.
|
|
||||||
* @param e
|
|
||||||
* The exception generated by the parser that led to
|
|
||||||
* the reporting of an error. It is null in the case where
|
|
||||||
* the parser was able to recover in line without exiting the
|
|
||||||
* surrounding rule.
|
|
||||||
*/
|
|
||||||
func syntaxError<T:ATNSimulator>(_ recognizer: Recognizer<T>,
|
|
||||||
_ offendingSymbol: AnyObject?,
|
|
||||||
_ line: Int,
|
|
||||||
_ charPositionInLine: Int,
|
|
||||||
_ msg: String,
|
|
||||||
_ e: AnyObject?// RecognitionException?
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called by the parser when a full-context prediction
|
|
||||||
* results in an ambiguity.
|
|
||||||
*
|
|
||||||
* <p>Each full-context prediction which does not result in a syntax error
|
|
||||||
* will call either {@link #reportContextSensitivity} or
|
|
||||||
* {@link #reportAmbiguity}.</p>
|
|
||||||
*
|
|
||||||
* <p>When {@code ambigAlts} is not null, it contains the set of potentially
|
|
||||||
* viable alternatives identified by the prediction algorithm. When
|
|
||||||
* {@code ambigAlts} is null, use {@link org.antlr.v4.runtime.atn.ATNConfigSet#getAlts} to obtain the
|
|
||||||
* represented alternatives from the {@code configs} argument.</p>
|
|
||||||
*
|
|
||||||
* <p>When {@code exact} is {@code true}, <em>all</em> of the potentially
|
|
||||||
* viable alternatives are truly viable, i.e. this is reporting an exact
|
|
||||||
* ambiguity. When {@code exact} is {@code false}, <em>at least two</em> of
|
|
||||||
* the potentially viable alternatives are viable for the current input, but
|
|
||||||
* the prediction algorithm terminated as soon as it determined that at
|
|
||||||
* least the <em>minimum</em> potentially viable alternative is truly
|
|
||||||
* viable.</p>
|
|
||||||
*
|
|
||||||
* <p>When the {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
|
|
||||||
* mode is used, the parser is required to identify exact ambiguities so
|
|
||||||
* {@code exact} will always be {@code true}.</p>
|
|
||||||
*
|
|
||||||
* <p>This method is not used by lexers.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param dfa the DFA for the current decision
|
|
||||||
* @param startIndex the input index where the decision started
|
|
||||||
* @param stopIndex the input input where the ambiguity was identified
|
|
||||||
* @param exact {@code true} if the ambiguity is exactly known, otherwise
|
|
||||||
* {@code false}. This is always {@code true} when
|
|
||||||
* {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
|
|
||||||
* @param ambigAlts the potentially ambiguous alternatives, or {@code null}
|
|
||||||
* to indicate that the potentially ambiguous alternatives are the complete
|
|
||||||
* set of represented alternatives in {@code configs}
|
|
||||||
* @param configs the ATN configuration set where the ambiguity was
|
|
||||||
* identified
|
|
||||||
*/
|
|
||||||
func reportAmbiguity(_ recognizer: Parser,
|
|
||||||
_ dfa: DFA,
|
|
||||||
_ startIndex: Int,
|
|
||||||
_ stopIndex: Int,
|
|
||||||
_ exact: Bool,
|
|
||||||
_ ambigAlts: BitSet,
|
|
||||||
_ configs: ATNConfigSet) throws
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called when an SLL conflict occurs and the parser is about
|
|
||||||
* to use the full context information to make an LL decision.
|
|
||||||
*
|
|
||||||
* <p>If one or more configurations in {@code configs} contains a semantic
|
|
||||||
* predicate, the predicates are evaluated before this method is called. The
|
|
||||||
* subset of alternatives which are still viable after predicates are
|
|
||||||
* evaluated is reported in {@code conflictingAlts}.</p>
|
|
||||||
*
|
|
||||||
* <p>This method is not used by lexers.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param dfa the DFA for the current decision
|
|
||||||
* @param startIndex the input index where the decision started
|
|
||||||
* @param stopIndex the input index where the SLL conflict occurred
|
|
||||||
* @param conflictingAlts The specific conflicting alternatives. If this is
|
|
||||||
* {@code null}, the conflicting alternatives are all alternatives
|
|
||||||
* represented in {@code configs}. At the moment, conflictingAlts is non-null
|
|
||||||
* (for the reference implementation, but Sam's optimized version can see this
|
|
||||||
* as null).
|
|
||||||
* @param configs the ATN configuration set where the SLL conflict was
|
|
||||||
* detected
|
|
||||||
*/
|
|
||||||
func reportAttemptingFullContext(_ recognizer: Parser,
|
|
||||||
_ dfa: DFA,
|
|
||||||
_ startIndex: Int,
|
|
||||||
_ stopIndex: Int,
|
|
||||||
_ conflictingAlts: BitSet?,
|
|
||||||
_ configs: ATNConfigSet) throws
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called by the parser when a full-context prediction has a
|
|
||||||
* unique result.
|
|
||||||
*
|
|
||||||
* <p>Each full-context prediction which does not result in a syntax error
|
|
||||||
* will call either {@link #reportContextSensitivity} or
|
|
||||||
* {@link #reportAmbiguity}.</p>
|
|
||||||
*
|
|
||||||
* <p>For prediction implementations that only evaluate full-context
|
|
||||||
* predictions when an SLL conflict is found (including the default
|
|
||||||
* {@link org.antlr.v4.runtime.atn.ParserATNSimulator} implementation), this method reports cases
|
|
||||||
* where SLL conflicts were resolved to unique full-context predictions,
|
|
||||||
* i.e. the decision was context-sensitive. This report does not necessarily
|
|
||||||
* indicate a problem, and it may appear even in completely unambiguous
|
|
||||||
* grammars.</p>
|
|
||||||
*
|
|
||||||
* <p>{@code configs} may have more than one represented alternative if the
|
|
||||||
* full-context prediction algorithm does not evaluate predicates before
|
|
||||||
* beginning the full-context prediction. In all cases, the final prediction
|
|
||||||
* is passed as the {@code prediction} argument.</p>
|
|
||||||
*
|
|
||||||
* <p>Note that the definition of "context sensitivity" in this method
|
|
||||||
* differs from the concept in {@link org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities}.
|
|
||||||
* This method reports all instances where an SLL conflict occurred but LL
|
|
||||||
* parsing produced a unique result, whether or not that unique result
|
|
||||||
* matches the minimum alternative in the SLL conflicting set.</p>
|
|
||||||
*
|
|
||||||
* <p>This method is not used by lexers.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param dfa the DFA for the current decision
|
|
||||||
* @param startIndex the input index where the decision started
|
|
||||||
* @param stopIndex the input index where the context sensitivity was
|
|
||||||
* finally determined
|
|
||||||
* @param prediction the unambiguous result of the full-context prediction
|
|
||||||
* @param configs the ATN configuration set where the unambiguous prediction
|
|
||||||
* was determined
|
|
||||||
*/
|
|
||||||
func reportContextSensitivity(_ recognizer: Parser,
|
|
||||||
_ dfa: DFA,
|
|
||||||
_ startIndex: Int,
|
|
||||||
_ stopIndex: Int,
|
|
||||||
_ prediction: Int,
|
|
||||||
_ configs: ATNConfigSet) throws
|
|
||||||
}
|
|
|
@ -1,115 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
/**
|
|
||||||
* The interface for defining strategies to deal with syntax errors encountered
|
|
||||||
* during a parse by ANTLR-generated parsers. We distinguish between three
|
|
||||||
* different kinds of errors:
|
|
||||||
*
|
|
||||||
* <ul>
|
|
||||||
* <li>The parser could not figure out which path to take in the ATN (none of
|
|
||||||
* the available alternatives could possibly match)</li>
|
|
||||||
* <li>The current input does not match what we were looking for</li>
|
|
||||||
* <li>A predicate evaluated to false</li>
|
|
||||||
* </ul>
|
|
||||||
*
|
|
||||||
* Implementations of this interface report syntax errors by calling
|
|
||||||
* {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.
|
|
||||||
*
|
|
||||||
* <p>TODO: what to do about lexers</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public protocol ANTLRErrorStrategy {
|
|
||||||
/**
|
|
||||||
* Reset the error handler state for the specified {@code recognizer}.
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
*/
|
|
||||||
func reset(_ recognizer: Parser)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called when an unexpected symbol is encountered during an
|
|
||||||
* inline match operation, such as {@link org.antlr.v4.runtime.Parser#match}. If the error
|
|
||||||
* strategy successfully recovers from the match failure, this method
|
|
||||||
* returns the {@link org.antlr.v4.runtime.Token} instance which should be treated as the
|
|
||||||
* successful result of the match.
|
|
||||||
*
|
|
||||||
* <p>This method handles the consumption of any tokens - the caller should
|
|
||||||
* <b>not</b> call {@link org.antlr.v4.runtime.Parser#consume} after a successful recovery.</p>
|
|
||||||
*
|
|
||||||
* <p>Note that the calling code will not report an error if this method
|
|
||||||
* returns successfully. The error strategy implementation is responsible
|
|
||||||
* for calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} as appropriate.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @throws org.antlr.v4.runtime.RecognitionException if the error strategy was not able to
|
|
||||||
* recover from the unexpected input symbol
|
|
||||||
*/
|
|
||||||
@discardableResult
|
|
||||||
func recoverInline(_ recognizer: Parser) throws -> Token // RecognitionException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called to recover from exception {@code e}. This method is
|
|
||||||
* called after {@link #reportError} by the default exception handler
|
|
||||||
* generated for a rule method.
|
|
||||||
*
|
|
||||||
* @see #reportError
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param e the recognition exception to recover from
|
|
||||||
* @throws org.antlr.v4.runtime.RecognitionException if the error strategy could not recover from
|
|
||||||
* the recognition exception
|
|
||||||
*/
|
|
||||||
func recover(_ recognizer: Parser, _ e: AnyObject) throws // RecognitionException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method provides the error handler with an opportunity to handle
|
|
||||||
* syntactic or semantic errors in the input stream before they result in a
|
|
||||||
* {@link org.antlr.v4.runtime.RecognitionException}.
|
|
||||||
*
|
|
||||||
* <p>The generated code currently contains calls to {@link #sync} after
|
|
||||||
* entering the decision state of a closure block ({@code (...)*} or
|
|
||||||
* {@code (...)+}).</p>
|
|
||||||
*
|
|
||||||
* <p>For an implementation based on Jim Idle's "magic sync" mechanism, see
|
|
||||||
* {@link org.antlr.v4.runtime.DefaultErrorStrategy#sync}.</p>
|
|
||||||
*
|
|
||||||
* @see org.antlr.v4.runtime.DefaultErrorStrategy#sync
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @throws org.antlr.v4.runtime.RecognitionException if an error is detected by the error
|
|
||||||
* strategy but cannot be automatically recovered at the current state in
|
|
||||||
* the parsing process
|
|
||||||
*/
|
|
||||||
func sync(_ recognizer: Parser) throws // RecognitionException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Tests whether or not {@code recognizer} is in the process of recovering
|
|
||||||
* from an error. In error recovery mode, {@link org.antlr.v4.runtime.Parser#consume} adds
|
|
||||||
* symbols to the parse tree by calling
|
|
||||||
* {@link org.antlr.v4.runtime.ParserRuleContext#addErrorNode(org.antlr.v4.runtime.Token)} instead of
|
|
||||||
* {@link org.antlr.v4.runtime.ParserRuleContext#addChild(org.antlr.v4.runtime.Token)}.
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @return {@code true} if the parser is currently recovering from a parse
|
|
||||||
* error, otherwise {@code false}
|
|
||||||
*/
|
|
||||||
func inErrorRecoveryMode(_ recognizer: Parser) -> Bool
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called by when the parser successfully matches an input
|
|
||||||
* symbol.
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
*/
|
|
||||||
func reportMatch(_ recognizer: Parser)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Report any kind of {@link org.antlr.v4.runtime.RecognitionException}. This method is called by
|
|
||||||
* the default exception handler generated for a rule method.
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param e the recognition exception to report
|
|
||||||
*/
|
|
||||||
func reportError(_ recognizer: Parser, _ e: AnyObject)
|
|
||||||
}
|
|
|
@ -1,224 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
/**
|
|
||||||
* Vacuum all input from a {@link java.io.Reader}/{@link java.io.InputStream} and then treat it
|
|
||||||
* like a {@code char[]} buffer. Can also pass in a {@link String} or
|
|
||||||
* {@code char[]} to use.
|
|
||||||
*
|
|
||||||
* <p>If you need encoding, pass in stream/reader with correct encoding.</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public class ANTLRInputStream: CharStream {
|
|
||||||
public static let READ_BUFFER_SIZE: Int = 1024
|
|
||||||
public static let INITIAL_BUFFER_SIZE: Int = 1024
|
|
||||||
|
|
||||||
/** The data being scanned */
|
|
||||||
internal var data: [Character]
|
|
||||||
|
|
||||||
/** How many characters are actually in the buffer */
|
|
||||||
internal var n: Int
|
|
||||||
|
|
||||||
/** 0..n-1 index into string of next char */
|
|
||||||
internal var p: Int = 0
|
|
||||||
|
|
||||||
/** What is name or source of this char stream? */
|
|
||||||
public var name: String?
|
|
||||||
|
|
||||||
public init() {
|
|
||||||
n = 0
|
|
||||||
data = [Character]()
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Copy data in string to a local char array */
|
|
||||||
public init(_ input: String) {
|
|
||||||
self.data = Array(input.characters) // input.toCharArray();
|
|
||||||
self.n = input.length
|
|
||||||
}
|
|
||||||
|
|
||||||
/** This is the preferred constructor for strings as no data is copied */
|
|
||||||
public init(_ data: [Character], _ numberOfActualCharsInArray: Int) {
|
|
||||||
self.data = data
|
|
||||||
self.n = numberOfActualCharsInArray
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
public convenience init(_ r : Reader) throws; IOException {
|
|
||||||
self.init(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE);
|
|
||||||
}
|
|
||||||
|
|
||||||
public convenience init(_ r : Reader, _ initialSize : Int) throws; IOException {
|
|
||||||
self.init(r, initialSize, READ_BUFFER_SIZE);
|
|
||||||
}
|
|
||||||
|
|
||||||
public init(_ r : Reader, _ initialSize : Int, _ readChunkSize : Int) throws; IOException {
|
|
||||||
load(r, initialSize, readChunkSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
public convenience init(_ input : InputStream) throws; IOException {
|
|
||||||
self.init(InputStreamReader(input), INITIAL_BUFFER_SIZE);
|
|
||||||
}
|
|
||||||
|
|
||||||
public convenience init(_ input : InputStream, _ initialSize : Int) throws; IOException {
|
|
||||||
self.init(InputStreamReader(input), initialSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
public convenience init(_ input : InputStream, _ initialSize : Int, _ readChunkSize : Int) throws; IOException {
|
|
||||||
self.init(InputStreamReader(input), initialSize, readChunkSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
public func load(r : Reader, _ size : Int, _ readChunkSize : Int)
|
|
||||||
throws; IOException
|
|
||||||
{
|
|
||||||
if ( r==nil ) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if ( size<=0 ) {
|
|
||||||
size = INITIAL_BUFFER_SIZE;
|
|
||||||
}
|
|
||||||
if ( readChunkSize<=0 ) {
|
|
||||||
readChunkSize = READ_BUFFER_SIZE;
|
|
||||||
}
|
|
||||||
// print("load "+size+" in chunks of "+readChunkSize);
|
|
||||||
try {
|
|
||||||
// alloc initial buffer size.
|
|
||||||
data = new char[size];
|
|
||||||
// read all the data in chunks of readChunkSize
|
|
||||||
var numRead : Int=0;
|
|
||||||
var p : Int = 0;
|
|
||||||
do {
|
|
||||||
if ( p+readChunkSize > data.length ) { // overflow?
|
|
||||||
// print("### overflow p="+p+", data.length="+data.length);
|
|
||||||
data = Arrays.copyOf(data, data.length * 2);
|
|
||||||
}
|
|
||||||
numRead = r.read(data, p, readChunkSize);
|
|
||||||
// print("read "+numRead+" chars; p was "+p+" is now "+(p+numRead));
|
|
||||||
p += numRead;
|
|
||||||
} while (numRead!=-1); // while not EOF
|
|
||||||
// set the actual size of the data available;
|
|
||||||
// EOF subtracted one above in p+=numRead; add one back
|
|
||||||
n = p+1;
|
|
||||||
//print("n="+n);
|
|
||||||
}
|
|
||||||
finally {
|
|
||||||
r.close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
/** Reset the stream so that it's in the same state it was
|
|
||||||
* when the object was created *except* the data array is not
|
|
||||||
* touched.
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func reset() {
|
|
||||||
p = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func consume() throws {
|
|
||||||
if p >= n {
|
|
||||||
assert(LA(1) == ANTLRInputStream.EOF, "Expected: LA(1)==IntStream.EOF")
|
|
||||||
|
|
||||||
throw ANTLRError.illegalState(msg: "annot consume EOF")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// print("prev p="+p+", c="+(char)data[p]);
|
|
||||||
if p < n {
|
|
||||||
p += 1
|
|
||||||
//print("p moves to "+p+" (c='"+(char)data[p]+"')");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func LA(_ i: Int) -> Int {
|
|
||||||
var i = i
|
|
||||||
if i == 0 {
|
|
||||||
return 0 // undefined
|
|
||||||
}
|
|
||||||
if i < 0 {
|
|
||||||
i += 1 // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
|
|
||||||
if (p + i - 1) < 0 {
|
|
||||||
return ANTLRInputStream.EOF// invalid; no char before first char
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (p + i - 1) >= n {
|
|
||||||
//print("char LA("+i+")=EOF; p="+p);
|
|
||||||
return ANTLRInputStream.EOF
|
|
||||||
}
|
|
||||||
//print("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
|
|
||||||
//print("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
|
|
||||||
return data[p + i - 1].unicodeValue
|
|
||||||
}
|
|
||||||
|
|
||||||
public func LT(_ i: Int) -> Int {
|
|
||||||
return LA(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Return the current input symbol index 0..n where n indicates the
|
|
||||||
* last symbol has been read. The index is the index of char to
|
|
||||||
* be returned from LA(1).
|
|
||||||
*/
|
|
||||||
public func index() -> Int {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
public func size() -> Int {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
/** mark/release do nothing; we have entire buffer */
|
|
||||||
|
|
||||||
public func mark() -> Int {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
public func release(_ marker: Int) {
|
|
||||||
}
|
|
||||||
|
|
||||||
/** consume() ahead until p==index; can't just set p=index as we must
|
|
||||||
* update line and charPositionInLine. If we seek backwards, just set p
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func seek(_ index: Int) throws {
|
|
||||||
var index = index
|
|
||||||
if index <= p {
|
|
||||||
p = index // just jump; don't update stream state (line, ...)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// seek forward, consume until p hits index or n (whichever comes first)
|
|
||||||
index = min(index, n)
|
|
||||||
while p < index {
|
|
||||||
try consume()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func getText(_ interval: Interval) -> String {
|
|
||||||
let start: Int = interval.a
|
|
||||||
var stop: Int = interval.b
|
|
||||||
if stop >= n {
|
|
||||||
stop = n - 1
|
|
||||||
}
|
|
||||||
let count = stop - start + 1;
|
|
||||||
if start >= n {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return String(data[start ..< (start + count)])
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func getSourceName() -> String {
|
|
||||||
guard let name = name , !name.isEmpty else {
|
|
||||||
return ANTLRInputStream.UNKNOWN_SOURCE_NAME
|
|
||||||
}
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func toString() -> String {
|
|
||||||
return String(data)
|
|
||||||
}
|
|
||||||
}
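// Example (illustration only): exercising the stream above with the APIs
// declared in this file. LA(1) yields the Unicode scalar value of the next
// character, consume() advances, and getText takes an inclusive Interval.
func dumpStream() throws {
    let input = ANTLRInputStream("abc")
    while input.LA(1) != ANTLRInputStream.EOF {
        print(input.LA(1))            // scalar value of the next character
        try input.consume()
    }
    try input.seek(0)                 // rewind; mark/release are no-ops here
    print(input.getText(Interval.of(0, 2)))   // prints "abc"
}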
@ -1,76 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} responds to syntax errors
|
|
||||||
* by immediately canceling the parse operation with a
|
|
||||||
* {@link org.antlr.v4.runtime.misc.ParseCancellationException}. The implementation ensures that the
|
|
||||||
* {@link org.antlr.v4.runtime.ParserRuleContext#exception} field is set for all parse tree nodes
|
|
||||||
* that were not completed prior to encountering the error.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* This error strategy is useful in the following scenarios.</p>
|
|
||||||
*
|
|
||||||
* <ul>
|
|
||||||
* <li><strong>Two-stage parsing:</strong> This error strategy allows the first
|
|
||||||
* stage of two-stage parsing to immediately terminate if an error is
|
|
||||||
* encountered, and immediately fall back to the second stage. In addition to
|
|
||||||
* avoiding wasted work by attempting to recover from errors here, the empty
|
|
||||||
* implementation of {@link org.antlr.v4.runtime.BailErrorStrategy#sync} improves the performance of
|
|
||||||
* the first stage.</li>
|
|
||||||
* <li><strong>Silent validation:</strong> When syntax errors are not being
|
|
||||||
* reported or logged, and the parse result is simply ignored if errors occur,
|
|
||||||
* the {@link org.antlr.v4.runtime.BailErrorStrategy} avoids wasting work on recovering from errors
|
|
||||||
* when the result will be ignored either way.</li>
|
|
||||||
* </ul>
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* {@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
|
|
||||||
*
|
|
||||||
* @see org.antlr.v4.runtime.Parser#setErrorHandler(org.antlr.v4.runtime.ANTLRErrorStrategy)
|
|
||||||
*/
|
|
||||||
|
|
||||||
public class BailErrorStrategy: DefaultErrorStrategy {
|
|
||||||
public override init(){}
|
|
||||||
/** Instead of recovering from exception {@code e}, re-throw it wrapped
|
|
||||||
* in a {@link org.antlr.v4.runtime.misc.ParseCancellationException} so it is not caught by the
|
|
||||||
* rule function's catch clauses. Use {@link Exception#getCause()} to get the
|
|
||||||
* original {@link org.antlr.v4.runtime.RecognitionException}.
|
|
||||||
*/
|
|
||||||
override
|
|
||||||
public func recover(_ recognizer: Parser, _ e: AnyObject) throws {
|
|
||||||
var context: ParserRuleContext? = recognizer.getContext()
|
|
||||||
while let contextWrap = context{
|
|
||||||
contextWrap.exception = e
|
|
||||||
context = (contextWrap.getParent() as? ParserRuleContext)
|
|
||||||
}
|
|
||||||
|
|
||||||
throw ANTLRException.recognition(e: e)
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Make sure we don't attempt to recover inline; if the parser
|
|
||||||
* successfully recovers, it won't throw an exception.
|
|
||||||
*/
|
|
||||||
override
|
|
||||||
public func recoverInline(_ recognizer: Parser) throws -> Token {
|
|
||||||
let e: InputMismatchException = try InputMismatchException(recognizer)
|
|
||||||
var context: ParserRuleContext? = recognizer.getContext()
|
|
||||||
while let contextWrap = context {
|
|
||||||
contextWrap.exception = e
|
|
||||||
context = (contextWrap.getParent() as? ParserRuleContext)
|
|
||||||
}
|
|
||||||
|
|
||||||
throw ANTLRException.recognition(e: e)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Make sure we don't attempt to recover from problems in subrules. */
|
|
||||||
override
|
|
||||||
public func sync(_ recognizer: Parser) {
|
|
||||||
}
|
|
||||||
|
|
||||||
}
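// Example (illustration only): the two-stage pattern described in the class
// comment. MyParser, startRule(), setErrorHandler and seek are assumptions
// borrowed from the Java runtime API; only BailErrorStrategy itself is
// defined in this file.
func parseFastThenSlow(_ tokens: CommonTokenStream) throws -> ParserRuleContext {
    let firstTry = try MyParser(tokens)             // hypothetical generated parser
    firstTry.setErrorHandler(BailErrorStrategy())   // stage 1: bail on the first error
    do {
        return try firstTry.startRule()
    } catch ANTLRException.recognition {
        try tokens.seek(0)                          // rewind the token stream and
        let secondTry = try MyParser(tokens)        // reparse with the default strategy
        return try secondTry.startRule()
    }
}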
@ -1,27 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
/** A source of characters for an ANTLR lexer. */
|
|
||||||
|
|
||||||
public protocol CharStream: IntStream {
|
|
||||||
/**
|
|
||||||
* This method returns the text for a range of characters within this input
|
|
||||||
* stream. This method is guaranteed to not throw an exception if the
|
|
||||||
* specified {@code interval} lies entirely within a marked range. For more
|
|
||||||
* information about marked ranges, see {@link org.antlr.v4.runtime.IntStream#mark}.
|
|
||||||
*
|
|
||||||
* @param interval an interval within the stream
|
|
||||||
* @return the text of the specified interval
|
|
||||||
*
|
|
||||||
* @throws NullPointerException if {@code interval} is {@code null}
|
|
||||||
* @throws IllegalArgumentException if {@code interval.a < 0}, or if
|
|
||||||
* {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or
|
|
||||||
* past the end of the stream
|
|
||||||
* @throws UnsupportedOperationException if the stream does not support
|
|
||||||
* getting the text of the specified interval
|
|
||||||
*/
|
|
||||||
func getText(_ interval: Interval) -> String
|
|
||||||
}
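// Example (illustration only): the Interval passed to getText uses inclusive
// bounds, so Interval.of(0, 2) covers the first three characters of any
// CharStream implementation.
func firstThree(_ chars: CharStream) -> String {
    return chars.getText(Interval.of(0, 2))
}
// firstThree(ANTLRInputStream("grammar")) returns "gra"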
@ -1,88 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This default implementation of {@link org.antlr.v4.runtime.TokenFactory} creates
|
|
||||||
* {@link org.antlr.v4.runtime.CommonToken} objects.
|
|
||||||
*/
|
|
||||||
|
|
||||||
public class CommonTokenFactory: TokenFactory {
|
|
||||||
/**
|
|
||||||
* The default {@link org.antlr.v4.runtime.CommonTokenFactory} instance.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* This token factory does not explicitly copy token text when constructing
|
|
||||||
* tokens.</p>
|
|
||||||
*/
|
|
||||||
public static let DEFAULT: TokenFactory = CommonTokenFactory()
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Indicates whether {@link org.antlr.v4.runtime.CommonToken#setText} should be called after
|
|
||||||
* constructing tokens to explicitly set the text. This is useful for cases
|
|
||||||
* where the input stream might not be able to provide arbitrary substrings
|
|
||||||
* of text from the input after the lexer creates a token (e.g. the
|
|
||||||
* implementation of {@link org.antlr.v4.runtime.CharStream#getText} in
|
|
||||||
* {@link org.antlr.v4.runtime.UnbufferedCharStream} throws an
|
|
||||||
* {@link UnsupportedOperationException}). Explicitly setting the token text
|
|
||||||
* allows {@link org.antlr.v4.runtime.Token#getText} to be called at any time regardless of the
|
|
||||||
* input stream implementation.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* The default value is {@code false} to avoid the performance and memory
|
|
||||||
* overhead of copying text for every token unless explicitly requested.</p>
|
|
||||||
*/
|
|
||||||
internal final var copyText: Bool
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructs a {@link org.antlr.v4.runtime.CommonTokenFactory} with the specified value for
|
|
||||||
* {@link #copyText}.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* When {@code copyText} is {@code false}, the {@link #DEFAULT} instance
|
|
||||||
* should be used instead of constructing a new instance.</p>
|
|
||||||
*
|
|
||||||
* @param copyText The value for {@link #copyText}.
|
|
||||||
*/
|
|
||||||
public init(_ copyText: Bool) {
|
|
||||||
self.copyText = copyText
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructs a {@link org.antlr.v4.runtime.CommonTokenFactory} with {@link #copyText} set to
|
|
||||||
* {@code false}.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* The {@link #DEFAULT} instance should be used instead of calling this
|
|
||||||
* directly.</p>
|
|
||||||
*/
|
|
||||||
public convenience init() {
|
|
||||||
self.init(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func create(_ source: (TokenSource?, CharStream?), _ type: Int, _ text: String?,
|
|
||||||
_ channel: Int, _ start: Int, _ stop: Int,
|
|
||||||
_ line: Int, _ charPositionInLine: Int) -> Token {
|
|
||||||
let t: CommonToken = CommonToken(source, type, channel, start, stop)
|
|
||||||
t.setLine(line)
|
|
||||||
t.setCharPositionInLine(charPositionInLine)
|
|
||||||
if text != nil {
|
|
||||||
t.setText(text!)
|
|
||||||
} else {
|
|
||||||
if let cStream = source.1 , copyText {
|
|
||||||
t.setText(cStream.getText(Interval.of(start, stop)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public func create(_ type: Int, _ text: String) -> Token {
|
|
||||||
return CommonToken(type, text)
|
|
||||||
}
|
|
||||||
}
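// Example (illustration only): building a token by hand with the factory
// above. The token-type constant is purely hypothetical; the (type, text)
// create overload and the copyText initializer are the ones defined here.
func makePlusToken() -> Token {
    let PLUS = 1                               // hypothetical token-type constant
    let factory = CommonTokenFactory(true)     // copy text from the char stream
    return factory.create(PLUS, "+")           // simple (type, text) overload
}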
@ -1,133 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This class extends {@link org.antlr.v4.runtime.BufferedTokenStream} with functionality to filter
|
|
||||||
* token streams to tokens on a particular channel (tokens where
|
|
||||||
* {@link org.antlr.v4.runtime.Token#getChannel} returns a particular value).
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* This token stream provides access to all tokens by index or when calling
|
|
||||||
* methods like {@link #getText}. The channel filtering is only used for code
|
|
||||||
* accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and
|
|
||||||
* {@link #LB}.</p>
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* By default, tokens are placed on the default channel
|
|
||||||
* ({@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL}), but may be reassigned by using the
|
|
||||||
* {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
|
|
||||||
* call {@link org.antlr.v4.runtime.Lexer#setChannel}.
|
|
||||||
* </p>
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* Note: lexer rules which use the {@code ->skip} lexer command or call
|
|
||||||
* {@link org.antlr.v4.runtime.Lexer#skip} do not produce tokens at all, so input text matched by
|
|
||||||
* such a rule will not be available as part of the token stream, regardless of
|
|
||||||
* channel.</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public class CommonTokenStream: BufferedTokenStream {
|
|
||||||
/**
|
|
||||||
* Specifies the channel to use for filtering tokens.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* The default value is {@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL}, which matches the
|
|
||||||
* default channel assigned to tokens created by the lexer.</p>
|
|
||||||
*/
|
|
||||||
internal var channel: Int = CommonToken.DEFAULT_CHANNEL
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructs a new {@link org.antlr.v4.runtime.CommonTokenStream} using the specified token
|
|
||||||
* source and the default token channel ({@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL}).
|
|
||||||
*
|
|
||||||
* @param tokenSource The token source.
|
|
||||||
*/
|
|
||||||
public override init(_ tokenSource: TokenSource) {
|
|
||||||
super.init(tokenSource)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructs a new {@link org.antlr.v4.runtime.CommonTokenStream} using the specified token
|
|
||||||
* source and filtering tokens to the specified channel. Only tokens whose
|
|
||||||
* {@link org.antlr.v4.runtime.Token#getChannel} matches {@code channel} or have the
|
|
||||||
* {@link org.antlr.v4.runtime.Token#getType} equal to {@link org.antlr.v4.runtime.Token#EOF} will be returned by the
|
|
||||||
* token stream lookahead methods.
|
|
||||||
*
|
|
||||||
* @param tokenSource The token source.
|
|
||||||
* @param channel The channel to use for filtering tokens.
|
|
||||||
*/
|
|
||||||
public convenience init(_ tokenSource: TokenSource, _ channel: Int) {
|
|
||||||
self.init(tokenSource)
|
|
||||||
self.channel = channel
|
|
||||||
}
|
|
||||||
|
|
||||||
override
|
|
||||||
internal func adjustSeekIndex(_ i: Int) throws -> Int {
|
|
||||||
return try nextTokenOnChannel(i, channel)
|
|
||||||
}
|
|
||||||
|
|
||||||
override
|
|
||||||
internal func LB(_ k: Int) throws -> Token? {
|
|
||||||
if k == 0 || (p - k) < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var i: Int = p
|
|
||||||
var n: Int = 1
|
|
||||||
// find k good tokens looking backwards
|
|
||||||
while n <= k {
|
|
||||||
// skip off-channel tokens
|
|
||||||
try i = previousTokenOnChannel(i - 1, channel)
|
|
||||||
n += 1
|
|
||||||
}
|
|
||||||
if i < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return tokens[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
override
|
|
||||||
public func LT(_ k: Int) throws -> Token? {
|
|
||||||
//System.out.println("enter LT("+k+")");
|
|
||||||
try lazyInit()
|
|
||||||
if k == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if k < 0 {
|
|
||||||
return try LB(-k)
|
|
||||||
}
|
|
||||||
var i: Int = p
|
|
||||||
var n: Int = 1 // we know tokens[p] is a good one
|
|
||||||
// find k good tokens
|
|
||||||
while n < k {
|
|
||||||
// skip off-channel tokens, but make sure to not look past EOF
|
|
||||||
if try sync(i + 1) {
|
|
||||||
i = try nextTokenOnChannel(i + 1, channel)
|
|
||||||
}
|
|
||||||
n += 1
|
|
||||||
}
|
|
||||||
// if ( i>range ) range = i;
|
|
||||||
return tokens[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Count EOF just once. */
|
|
||||||
public func getNumberOfOnChannelTokens() throws -> Int {
|
|
||||||
var n: Int = 0
|
|
||||||
try fill()
|
|
||||||
let length = tokens.count
|
|
||||||
for i in 0..<length {
|
|
||||||
let t: Token = tokens[i]
|
|
||||||
if t.getChannel() == channel {
|
|
||||||
n += 1
|
|
||||||
}
|
|
||||||
if t.getType() == CommonToken.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
}
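// Example (illustration only): counting parser-visible tokens with the
// stream above. MyLexer is a hypothetical generated lexer; everything else
// (the channel-filtering convenience initializer and
// getNumberOfOnChannelTokens) is declared in this file.
func countDefaultChannelTokens(_ text: String) throws -> Int {
    let lexer = MyLexer(ANTLRInputStream(text))
    let tokens = CommonTokenStream(lexer, CommonToken.DEFAULT_CHANNEL)
    return try tokens.getNumberOfOnChannelTokens()
}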
@ -1,43 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
*
|
|
||||||
* @author Sam Harwell
|
|
||||||
*/
|
|
||||||
|
|
||||||
public class ConsoleErrorListener: BaseErrorListener {
|
|
||||||
/**
|
|
||||||
* Provides a default instance of {@link org.antlr.v4.runtime.ConsoleErrorListener}.
|
|
||||||
*/
|
|
||||||
public static let INSTANCE: ConsoleErrorListener = ConsoleErrorListener()
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* This implementation prints messages to {@link System#err} containing the
|
|
||||||
* values of {@code line}, {@code charPositionInLine}, and {@code msg} using
|
|
||||||
* the following format.</p>
|
|
||||||
*
|
|
||||||
* <pre>
|
|
||||||
* line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
|
|
||||||
* </pre>
|
|
||||||
*/
|
|
||||||
override
|
|
||||||
public func syntaxError<T:ATNSimulator>(_ recognizer: Recognizer<T>,
|
|
||||||
_ offendingSymbol: AnyObject?,
|
|
||||||
_ line: Int,
|
|
||||||
_ charPositionInLine: Int,
|
|
||||||
_ msg: String,
|
|
||||||
_ e: AnyObject?
|
|
||||||
) {
|
|
||||||
if Parser.ConsoleError {
|
|
||||||
errPrint("line \(line):\(charPositionInLine) \(msg)")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
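// Example (illustration only): a sibling listener that collects messages
// instead of printing them, using the same override signature as above.
// Registering it through addErrorListener mirrors the Java Recognizer API
// and is an assumption for the Swift runtime.
public class CollectingErrorListener: BaseErrorListener {
    public var messages = [String]()

    override
    public func syntaxError<T:ATNSimulator>(_ recognizer: Recognizer<T>,
                                            _ offendingSymbol: AnyObject?,
                                            _ line: Int,
                                            _ charPositionInLine: Int,
                                            _ msg: String,
                                            _ e: AnyObject?) {
        messages.append("line \(line):\(charPositionInLine) \(msg)")
    }
}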
@ -1,745 +0,0 @@
|
||||||
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
|
|
||||||
* Use of this file is governed by the BSD 3-clause license that
|
|
||||||
* can be found in the LICENSE.txt file in the project root.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This is the default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} used for
|
|
||||||
* error reporting and recovery in ANTLR parsers.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import Foundation
|
|
||||||
|
|
||||||
public class DefaultErrorStrategy: ANTLRErrorStrategy {
|
|
||||||
/**
|
|
||||||
* Indicates whether the error strategy is currently "recovering from an
|
|
||||||
* error". This is used to suppress reporting multiple error messages while
|
|
||||||
* attempting to recover from a detected syntax error.
|
|
||||||
*
|
|
||||||
* @see #inErrorRecoveryMode
|
|
||||||
*/
|
|
||||||
internal var errorRecoveryMode: Bool = false
|
|
||||||
|
|
||||||
/** The index into the input stream where the last error occurred.
|
|
||||||
* This is used to prevent infinite loops where an error is found
|
|
||||||
* but no token is consumed during recovery...another error is found,
|
|
||||||
* ad nauseam. This is a failsafe mechanism to guarantee that at least
|
|
||||||
* one token/tree node is consumed for two errors.
|
|
||||||
*/
|
|
||||||
internal var lastErrorIndex: Int = -1
|
|
||||||
|
|
||||||
internal var lastErrorStates: IntervalSet?
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>The default implementation simply calls {@link #endErrorCondition} to
|
|
||||||
* ensure that the handler is not in error recovery mode.</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func reset(_ recognizer: Parser) {
|
|
||||||
endErrorCondition(recognizer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called to enter error recovery mode when a recognition
|
|
||||||
* exception is reported.
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
*/
|
|
||||||
internal func beginErrorCondition(_ recognizer: Parser) {
|
|
||||||
errorRecoveryMode = true
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func inErrorRecoveryMode(_ recognizer: Parser) -> Bool {
|
|
||||||
return errorRecoveryMode
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called to leave error recovery mode after recovering from
|
|
||||||
* a recognition exception.
|
|
||||||
*
|
|
||||||
* @param recognizer
|
|
||||||
*/
|
|
||||||
internal func endErrorCondition(_ recognizer: Parser) {
|
|
||||||
errorRecoveryMode = false
|
|
||||||
lastErrorStates = nil
|
|
||||||
lastErrorIndex = -1
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>The default implementation simply calls {@link #endErrorCondition}.</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func reportMatch(_ recognizer: Parser) {
|
|
||||||
endErrorCondition(recognizer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>The default implementation returns immediately if the handler is already
|
|
||||||
* in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
|
|
||||||
* and dispatches the reporting task based on the runtime type of {@code e}
|
|
||||||
* according to the following table.</p>
|
|
||||||
*
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link org.antlr.v4.runtime.NoViableAltException}: Dispatches the call to
|
|
||||||
* {@link #reportNoViableAlternative}</li>
|
|
||||||
* <li>{@link org.antlr.v4.runtime.InputMismatchException}: Dispatches the call to
|
|
||||||
* {@link #reportInputMismatch}</li>
|
|
||||||
* <li>{@link org.antlr.v4.runtime.FailedPredicateException}: Dispatches the call to
|
|
||||||
* {@link #reportFailedPredicate}</li>
|
|
||||||
* <li>All other types: calls {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} to report
|
|
||||||
* the exception</li>
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func reportError(_ recognizer: Parser,
|
|
||||||
_ e: AnyObject) {
|
|
||||||
// if we've already reported an error and have not matched a token
|
|
||||||
// yet successfully, don't report any errors.
|
|
||||||
if inErrorRecoveryMode(recognizer) {
|
|
||||||
|
|
||||||
return // don't report spurious errors
|
|
||||||
}
|
|
||||||
beginErrorCondition(recognizer)
|
|
||||||
//TODO: exception handler
|
|
||||||
if e is NoViableAltException {
    try! reportNoViableAlternative(recognizer, e as! NoViableAltException)
} else if e is InputMismatchException {
    reportInputMismatch(recognizer, e as! InputMismatchException)
} else if e is FailedPredicateException {
    reportFailedPredicate(recognizer, e as! FailedPredicateException)
} else {
    errPrint("unknown recognition error type: " + String(describing: type(of: e)))
    let re = (e as! RecognitionException<ParserATNSimulator>)
    recognizer.notifyErrorListeners(re.getOffendingToken(), re.message ?? "", e)
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>The default implementation resynchronizes the parser by consuming tokens
|
|
||||||
* until we find one in the resynchronization set--loosely the set of tokens
|
|
||||||
* that can follow the current rule.</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func recover(_ recognizer: Parser, _ e: AnyObject) throws {
|
|
||||||
// print("recover in "+recognizer.getRuleInvocationStack()+
|
|
||||||
// " index="+getTokenStream(recognizer).index()+
|
|
||||||
// ", lastErrorIndex="+
|
|
||||||
// lastErrorIndex+
|
|
||||||
// ", states="+lastErrorStates);
|
|
||||||
if let lastErrorStates = lastErrorStates ,
|
|
||||||
lastErrorIndex == getTokenStream(recognizer).index() &&
|
|
||||||
lastErrorStates.contains(recognizer.getState()) {
|
|
||||||
// uh oh, another error at same token index and previously-visited
|
|
||||||
// state in ATN; must be a case where LT(1) is in the recovery
|
|
||||||
// token set so nothing got consumed. Consume a single token
|
|
||||||
// at least to prevent an infinite loop; this is a failsafe.
|
|
||||||
// errPrint("seen error condition before index="+
|
|
||||||
// lastErrorIndex+", states="+lastErrorStates);
|
|
||||||
// errPrint("FAILSAFE consumes "+recognizer.getTokenNames()[getTokenStream(recognizer).LA(1)]);
|
|
||||||
try recognizer.consume()
|
|
||||||
}
|
|
||||||
lastErrorIndex = getTokenStream(recognizer).index()
|
|
||||||
if lastErrorStates == nil {
|
|
||||||
lastErrorStates = try IntervalSet()
|
|
||||||
}
|
|
||||||
try lastErrorStates!.add(recognizer.getState())
|
|
||||||
let followSet: IntervalSet = try getErrorRecoverySet(recognizer)
|
|
||||||
try consumeUntil(recognizer, followSet)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy#sync} makes sure
|
|
||||||
* that the current lookahead symbol is consistent with what we were expecting
|
|
||||||
* at this point in the ATN. You can call this anytime but ANTLR only
|
|
||||||
* generates code to check before subrules/loops and each iteration.
|
|
||||||
*
|
|
||||||
* <p>Implements Jim Idle's magic sync mechanism in closures and optional
|
|
||||||
* subrules. E.g.,</p>
|
|
||||||
*
|
|
||||||
* <pre>
|
|
||||||
* a : sync ( stuff sync )* ;
|
|
||||||
* sync : {consume to what can follow sync} ;
|
|
||||||
* </pre>
|
|
||||||
*
|
|
||||||
* At the start of a sub rule upon error, {@link #sync} performs single
|
|
||||||
* token deletion, if possible. If it can't do that, it bails on the current
|
|
||||||
* rule and uses the default error recovery, which consumes until the
|
|
||||||
* resynchronization set of the current rule.
|
|
||||||
*
|
|
||||||
* <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
|
|
||||||
* with an empty alternative), then the expected set includes what follows
|
|
||||||
* the subrule.</p>
|
|
||||||
*
|
|
||||||
* <p>During loop iteration, it consumes until it sees a token that can start a
|
|
||||||
* sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
|
|
||||||
* stay in the loop as long as possible.</p>
|
|
||||||
*
|
|
||||||
* <p><strong>ORIGINS</strong></p>
|
|
||||||
*
|
|
||||||
* <p>Previous versions of ANTLR did a poor job of their recovery within loops.
|
|
||||||
* A single mismatched token or missing token would force the parser to bail
|
|
||||||
* out of the entire rules surrounding the loop. So, for rule</p>
|
|
||||||
*
|
|
||||||
* <pre>
|
|
||||||
* classDef : 'class' ID '{' member* '}'
|
|
||||||
* </pre>
|
|
||||||
*
|
|
||||||
* input with an extra token between members would force the parser to
|
|
||||||
* consume until it found the next class definition rather than the next
|
|
||||||
* member definition of the current class.
|
|
||||||
*
|
|
||||||
* <p>This functionality cost a little bit of effort because the parser has to
|
|
||||||
* compare token set at the start of the loop and at each iteration. If for
|
|
||||||
* some reason speed is suffering for you, you can turn off this
|
|
||||||
* functionality by simply overriding this method as a blank { }.</p>
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func sync(_ recognizer: Parser) throws {
|
|
||||||
let s: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]!
|
|
||||||
// errPrint("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName());
|
|
||||||
// If already recovering, don't try to sync
|
|
||||||
if inErrorRecoveryMode(recognizer) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
let tokens: TokenStream = getTokenStream(recognizer)
|
|
||||||
let la: Int = try tokens.LA(1)
|
|
||||||
|
|
||||||
// try cheaper subset first; might get lucky. seems to shave a wee bit off
|
|
||||||
//let set : IntervalSet = recognizer.getATN().nextTokens(s)
|
|
||||||
|
|
||||||
if try recognizer.getATN().nextTokens(s).contains(CommonToken.EPSILON) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if try recognizer.getATN().nextTokens(s).contains(la) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch s.getStateType() {
|
|
||||||
case ATNState.BLOCK_START: fallthrough
|
|
||||||
case ATNState.STAR_BLOCK_START: fallthrough
|
|
||||||
case ATNState.PLUS_BLOCK_START: fallthrough
|
|
||||||
case ATNState.STAR_LOOP_ENTRY:
|
|
||||||
// report error and recover if possible
|
|
||||||
if try singleTokenDeletion(recognizer) != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
throw try ANTLRException.recognition(e: InputMismatchException(recognizer))
|
|
||||||
|
|
||||||
case ATNState.PLUS_LOOP_BACK: fallthrough
|
|
||||||
case ATNState.STAR_LOOP_BACK:
|
|
||||||
// errPrint("at loop back: "+s.getClass().getSimpleName());
|
|
||||||
try reportUnwantedToken(recognizer)
|
|
||||||
let expecting: IntervalSet = try recognizer.getExpectedTokens()
|
|
||||||
let whatFollowsLoopIterationOrRule: IntervalSet =
|
|
||||||
try expecting.or(try getErrorRecoverySet(recognizer)) as! IntervalSet
|
|
||||||
try consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
|
|
||||||
break
|
|
||||||
|
|
||||||
default:
|
|
||||||
// do nothing if we can't identify the exact kind of ATN state
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This is called by {@link #reportError} when the exception is a
|
|
||||||
* {@link org.antlr.v4.runtime.NoViableAltException}.
|
|
||||||
*
|
|
||||||
* @see #reportError
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param e the recognition exception
|
|
||||||
*/
|
|
||||||
internal func reportNoViableAlternative(_ recognizer: Parser,
|
|
||||||
_ e: NoViableAltException) throws {
|
|
||||||
let tokens: TokenStream? = getTokenStream(recognizer)
|
|
||||||
var input: String
|
|
||||||
if let tokens = tokens {
|
|
||||||
if e.getStartToken().getType() == CommonToken.EOF {
|
|
||||||
input = "<EOF>"
|
|
||||||
} else {
|
|
||||||
input = try tokens.getText(e.getStartToken(), e.getOffendingToken())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
input = "<unknown input>"
|
|
||||||
}
|
|
||||||
let msg: String = "no viable alternative at input " + escapeWSAndQuote(input)
|
|
||||||
recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This is called by {@link #reportError} when the exception is an
|
|
||||||
* {@link org.antlr.v4.runtime.InputMismatchException}.
|
|
||||||
*
|
|
||||||
* @see #reportError
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param e the recognition exception
|
|
||||||
*/
|
|
||||||
internal func reportInputMismatch(_ recognizer: Parser,
|
|
||||||
_ e: InputMismatchException) {
|
|
||||||
let msg: String = "mismatched input " + getTokenErrorDisplay(e.getOffendingToken()) +
|
|
||||||
" expecting " + e.getExpectedTokens()!.toString(recognizer.getVocabulary())
|
|
||||||
recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This is called by {@link #reportError} when the exception is a
|
|
||||||
* {@link org.antlr.v4.runtime.FailedPredicateException}.
|
|
||||||
*
|
|
||||||
* @see #reportError
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @param e the recognition exception
|
|
||||||
*/
|
|
||||||
internal func reportFailedPredicate(_ recognizer: Parser,
|
|
||||||
_ e: FailedPredicateException) {
|
|
||||||
let ruleName: String = recognizer.getRuleNames()[recognizer._ctx!.getRuleIndex()]
|
|
||||||
let msg: String = "rule " + ruleName + " " + e.message! // e.getMessage()
|
|
||||||
recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called to report a syntax error which requires the removal
|
|
||||||
* of a token from the input stream. At the time this method is called, the
|
|
||||||
* erroneous symbol is the current {@code LT(1)} symbol and has not yet been
|
|
||||||
* removed from the input stream. When this method returns,
|
|
||||||
* {@code recognizer} is in error recovery mode.
|
|
||||||
*
|
|
||||||
* <p>This method is called when {@link #singleTokenDeletion} identifies
|
|
||||||
* single-token deletion as a viable recovery strategy for a mismatched
|
|
||||||
* input error.</p>
|
|
||||||
*
|
|
||||||
* <p>The default implementation simply returns if the handler is already in
|
|
||||||
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
|
|
||||||
* enter error recovery mode, followed by calling
|
|
||||||
* {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
*/
|
|
||||||
internal func reportUnwantedToken(_ recognizer: Parser) throws {
|
|
||||||
if inErrorRecoveryMode(recognizer) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
beginErrorCondition(recognizer)
|
|
||||||
|
|
||||||
let t: Token = try recognizer.getCurrentToken()
|
|
||||||
let tokenName: String = getTokenErrorDisplay(t)
|
|
||||||
let expecting: IntervalSet = try getExpectedTokens(recognizer)
|
|
||||||
let msg: String = "extraneous input " + tokenName + " expecting " +
|
|
||||||
expecting.toString(recognizer.getVocabulary())
|
|
||||||
recognizer.notifyErrorListeners(t, msg, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method is called to report a syntax error which requires the
|
|
||||||
* insertion of a missing token into the input stream. At the time this
|
|
||||||
* method is called, the missing token has not yet been inserted. When this
|
|
||||||
* method returns, {@code recognizer} is in error recovery mode.
|
|
||||||
*
|
|
||||||
* <p>This method is called when {@link #singleTokenInsertion} identifies
|
|
||||||
* single-token insertion as a viable recovery strategy for a mismatched
|
|
||||||
* input error.</p>
|
|
||||||
*
|
|
||||||
* <p>The default implementation simply returns if the handler is already in
|
|
||||||
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
|
|
||||||
* enter error recovery mode, followed by calling
|
|
||||||
* {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
*/
|
|
||||||
internal func reportMissingToken(_ recognizer: Parser) throws {
|
|
||||||
if inErrorRecoveryMode(recognizer) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
beginErrorCondition(recognizer)
|
|
||||||
|
|
||||||
let t: Token = try recognizer.getCurrentToken()
|
|
||||||
let expecting: IntervalSet = try getExpectedTokens(recognizer)
|
|
||||||
let msg: String = "missing " + expecting.toString(recognizer.getVocabulary()) +
|
|
||||||
" at " + getTokenErrorDisplay(t)
|
|
||||||
|
|
||||||
recognizer.notifyErrorListeners(t, msg, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* <p>The default implementation attempts to recover from the mismatched input
|
|
||||||
* by using single token insertion and deletion as described below. If the
|
|
||||||
* recovery attempt fails, this method throws an
|
|
||||||
* {@link org.antlr.v4.runtime.InputMismatchException}.</p>
|
|
||||||
*
|
|
||||||
* <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
|
|
||||||
*
|
|
||||||
* <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
|
|
||||||
* right token, however, then assume {@code LA(1)} is some extra spurious
|
|
||||||
* token and delete it. Then consume and return the next token (which was
|
|
||||||
* the {@code LA(2)} token) as the successful result of the match operation.</p>
|
|
||||||
*
|
|
||||||
* <p>This recovery strategy is implemented by {@link #singleTokenDeletion}.</p>
|
|
||||||
*
|
|
||||||
* <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
|
|
||||||
*
|
|
||||||
* <p>If current token (at {@code LA(1)}) is consistent with what could come
|
|
||||||
* after the expected {@code LA(1)} token, then assume the token is missing
|
|
||||||
* and use the parser's {@link org.antlr.v4.runtime.TokenFactory} to create it on the fly. The
|
|
||||||
* "insertion" is performed by returning the created token as the successful
|
|
||||||
* result of the match operation.</p>
|
|
||||||
*
|
|
||||||
* <p>This recovery strategy is implemented by {@link #singleTokenInsertion}.</p>
|
|
||||||
*
|
|
||||||
* <p><strong>EXAMPLE</strong></p>
|
|
||||||
*
|
|
||||||
* <p>For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
|
|
||||||
* the parser returns from the nested call to {@code expr}, it will have
|
|
||||||
* call chain:</p>
|
|
||||||
*
|
|
||||||
* <pre>
|
|
||||||
* stat → expr → atom
|
|
||||||
* </pre>
|
|
||||||
*
|
|
||||||
* and it will be trying to match the {@code ')'} at this point in the
|
|
||||||
* derivation:
|
|
||||||
*
|
|
||||||
* <pre>
|
|
||||||
* => ID '=' '(' INT ')' ('+' atom)* ';'
|
|
||||||
* ^
|
|
||||||
* </pre>
|
|
||||||
*
|
|
||||||
* The attempt to match {@code ')'} will fail when it sees {@code ';'} and
|
|
||||||
* call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
|
|
||||||
* is in the set of tokens that can follow the {@code ')'} token reference
|
|
||||||
* in rule {@code atom}. It can assume that you forgot the {@code ')'}.
|
|
||||||
*/
|
|
||||||
|
|
||||||
public func recoverInline(_ recognizer: Parser) throws -> Token {
|
|
||||||
// SINGLE TOKEN DELETION
|
|
||||||
let matchedSymbol: Token? = try singleTokenDeletion(recognizer)
|
|
||||||
if matchedSymbol != nil {
|
|
||||||
// we have deleted the extra token.
|
|
||||||
// now, move past ttype token as if all were ok
|
|
||||||
try recognizer.consume()
|
|
||||||
return matchedSymbol!
|
|
||||||
}
|
|
||||||
|
|
||||||
// SINGLE TOKEN INSERTION
|
|
||||||
if try singleTokenInsertion(recognizer) {
|
|
||||||
return try getMissingSymbol(recognizer)
|
|
||||||
}
|
|
||||||
throw try ANTLRException.recognition(e: InputMismatchException(recognizer))
|
|
||||||
// throw try ANTLRException.InputMismatch(e: InputMismatchException(recognizer) )
|
|
||||||
//RuntimeException("InputMismatchException")
|
|
||||||
// even that didn't work; must throw the exception
|
|
||||||
//throwException() /* throw InputMismatchException(recognizer); */
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method implements the single-token insertion inline error recovery
|
|
||||||
* strategy. It is called by {@link #recoverInline} if the single-token
|
|
||||||
* deletion strategy fails to recover from the mismatched input. If this
|
|
||||||
* method returns {@code true}, {@code recognizer} will be in error recovery
|
|
||||||
* mode.
|
|
||||||
*
|
|
||||||
* <p>This method determines whether or not single-token insertion is viable by
|
|
||||||
* checking if the {@code LA(1)} input symbol could be successfully matched
|
|
||||||
* if it were instead the {@code LA(2)} symbol. If this method returns
|
|
||||||
* {@code true}, the caller is responsible for creating and inserting a
|
|
||||||
* token with the correct type to produce this behavior.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @return {@code true} if single-token insertion is a viable recovery
|
|
||||||
* strategy for the current mismatched input, otherwise {@code false}
|
|
||||||
*/
|
|
||||||
internal func singleTokenInsertion(_ recognizer: Parser) throws -> Bool {
|
|
||||||
let currentSymbolType: Int = try getTokenStream(recognizer).LA(1)
|
|
||||||
// if current token is consistent with what could come after current
|
|
||||||
// ATN state, then we know we're missing a token; error recovery
|
|
||||||
// is free to conjure up and insert the missing token
|
|
||||||
let currentState: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]!
|
|
||||||
let next: ATNState = currentState.transition(0).target
|
|
||||||
let atn: ATN = recognizer.getInterpreter().atn
|
|
||||||
let expectingAtLL2: IntervalSet = try atn.nextTokens(next, recognizer._ctx)
|
|
||||||
// print("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames()));
|
|
||||||
if expectingAtLL2.contains(currentSymbolType) {
|
|
||||||
try reportMissingToken(recognizer)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This method implements the single-token deletion inline error recovery
|
|
||||||
* strategy. It is called by {@link #recoverInline} to attempt to recover
|
|
||||||
* from mismatched input. If this method returns null, the parser and error
|
|
||||||
* handler state will not have changed. If this method returns non-null,
|
|
||||||
* {@code recognizer} will <em>not</em> be in error recovery mode since the
|
|
||||||
* returned token was a successful match.
|
|
||||||
*
|
|
||||||
* <p>If the single-token deletion is successful, this method calls
|
|
||||||
* {@link #reportUnwantedToken} to report the error, followed by
|
|
||||||
* {@link org.antlr.v4.runtime.Parser#consume} to actually "delete" the extraneous token. Then,
|
|
||||||
* before returning {@link #reportMatch} is called to signal a successful
|
|
||||||
* match.</p>
|
|
||||||
*
|
|
||||||
* @param recognizer the parser instance
|
|
||||||
* @return the successfully matched {@link org.antlr.v4.runtime.Token} instance if single-token
|
|
||||||
* deletion successfully recovers from the mismatched input, otherwise
|
|
||||||
* {@code null}
|
|
||||||
*/
|
|
||||||
internal func singleTokenDeletion(_ recognizer: Parser) throws -> Token? {
|
|
||||||
let nextTokenType: Int = try getTokenStream(recognizer).LA(2)
|
|
||||||
let expecting: IntervalSet = try getExpectedTokens(recognizer)
|
|
||||||
if expecting.contains(nextTokenType) {
|
|
||||||
try reportUnwantedToken(recognizer)
|
|
||||||
/*
|
|
||||||
errPrint("recoverFromMismatchedToken deleting "+
|
|
||||||
((TokenStream)getTokenStream(recognizer)).LT(1)+
|
|
||||||
" since "+((TokenStream)getTokenStream(recognizer)).LT(2)+
|
|
||||||
" is what we want");
|
|
||||||
*/
|
|
||||||
try recognizer.consume() // simply delete extra token
|
|
||||||
// we want to return the token we're actually matching
|
|
||||||
let matchedSymbol: Token = try recognizer.getCurrentToken()
|
|
||||||
reportMatch(recognizer) // we know current token is correct
|
|
||||||
return matchedSymbol
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Conjure up a missing token during error recovery.
|
|
||||||
*
|
|
||||||
* The recognizer attempts to recover from single missing
|
|
||||||
* symbols. But, actions might refer to that missing symbol.
|
|
||||||
* For example, x=ID {f($x);}. The action clearly assumes
|
|
||||||
* that there has been an identifier matched previously and that
|
|
||||||
* $x points at that token. If that token is missing, but
|
|
||||||
* the next token in the stream is what we want we assume that
|
|
||||||
* this token is missing and we keep going. Because we
|
|
||||||
* have to return some token to replace the missing token,
|
|
||||||
* we have to conjure one up. This method gives the user control
|
|
||||||
* over the tokens returned for missing tokens. Mostly,
|
|
||||||
* you will want to create something special for identifier
|
|
||||||
* tokens. For literals such as '{' and ',', the default
|
|
||||||
* action in the parser or tree parser works. It simply creates
|
|
||||||
* a CommonToken of the appropriate type. The text will be the token.
|
|
||||||
* If you change what tokens must be created by the lexer,
|
|
||||||
* override this method to create the appropriate tokens.
|
|
||||||
*/
|
|
||||||
|
|
||||||
internal func getTokenStream(_ recognizer: Parser) -> TokenStream {
|
|
||||||
return recognizer.getInputStream() as! TokenStream
|
|
||||||
}
|
|
||||||
|
|
||||||
internal func getMissingSymbol(_ recognizer: Parser) throws -> Token {
|
|
||||||
let currentSymbol: Token = try recognizer.getCurrentToken()
|
|
||||||
let expecting: IntervalSet = try getExpectedTokens(recognizer)
|
|
||||||
let expectedTokenType: Int = expecting.getMinElement() // get any element
|
|
||||||
var tokenText: String
|
|
||||||
if expectedTokenType == CommonToken.EOF {
|
|
||||||
tokenText = "<missing EOF>"
|
|
||||||
} else {
|
|
||||||
tokenText = "<missing " + recognizer.getVocabulary().getDisplayName(expectedTokenType) + ">"
|
|
||||||
}
|
|
||||||
var current: Token = currentSymbol
|
|
||||||
let lookback: Token? = try getTokenStream(recognizer).LT(-1)
|
|
||||||
if current.getType() == CommonToken.EOF && lookback != nil {
|
|
||||||
current = lookback!
|
|
||||||
}
|
|
||||||
|
|
||||||
let token = recognizer.getTokenFactory().create((current.getTokenSource(), current.getTokenSource()!.getInputStream()), expectedTokenType, tokenText,
|
|
||||||
CommonToken.DEFAULT_CHANNEL,
|
|
||||||
-1, -1,
|
|
||||||
current.getLine(), current.getCharPositionInLine())
|
|
||||||
|
|
||||||
return token
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
internal func getExpectedTokens(_ recognizer: Parser) throws -> IntervalSet {
|
|
||||||
return try recognizer.getExpectedTokens()
|
|
||||||
}
|
|
||||||
|
|
||||||
/** How should a token be displayed in an error message? The default
|
|
||||||
* is to display just the text, but during development you might
|
|
||||||
* want to have a lot of information spit out. Override in that case
|
|
||||||
* to use t.toString() (which, for CommonToken, dumps everything about
|
|
||||||
* the token). This is better than forcing you to override a method in
|
|
||||||
* your token objects because you don't have to go modify your lexer
|
|
||||||
* so that it creates a new Java type.
|
|
||||||
*/
|
|
||||||
internal func getTokenErrorDisplay(_ t: Token?) -> String {
|
|
||||||
if t == nil {
|
|
||||||
return "<no token>"
|
|
||||||
}
|
|
||||||
var s: String? = getSymbolText(t!)
|
|
||||||
if s == nil {
|
|
||||||
if getSymbolType(t!) == CommonToken.EOF {
|
|
||||||
s = "<EOF>"
|
|
||||||
} else {
|
|
||||||
s = "<\(getSymbolType(t!))>"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return escapeWSAndQuote(s!)
|
|
||||||
}
|
|
||||||
|
|
||||||
internal func getSymbolText(_ symbol: Token) -> String {
|
|
||||||
return symbol.getText()!
|
|
||||||
}
|
|
||||||
|
|
||||||
internal func getSymbolType(_ symbol: Token) -> Int {
|
|
||||||
return symbol.getType()
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
internal func escapeWSAndQuote(_ s: String) -> String {
|
|
||||||
var s = s
|
|
||||||
s = s.replaceAll("\n", replacement: "\\n")
|
|
||||||
s = s.replaceAll("\r", replacement: "\\r")
|
|
||||||
s = s.replaceAll("\t", replacement: "\\t")
|
|
||||||
return "'" + s + "'"
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Compute the error recovery set for the current rule. During
|
|
||||||
* rule invocation, the parser pushes the set of tokens that can
|
|
||||||
* follow that rule reference on the stack; this amounts to
|
|
||||||
* computing FIRST of what follows the rule reference in the
|
|
||||||
* enclosing rule. See LinearApproximator.FIRST().
|
|
||||||
* This local follow set only includes tokens
|
|
||||||
* from within the rule; i.e., the FIRST computation done by
|
|
||||||
* ANTLR stops at the end of a rule.
|
|
||||||
*
|
|
||||||
* EXAMPLE
|
|
||||||
*
|
|
||||||
* When you find a "no viable alt exception", the input is not
|
|
||||||
* consistent with any of the alternatives for rule r. The best
|
|
||||||
* thing to do is to consume tokens until you see something that
|
|
||||||
* can legally follow a call to r *or* any rule that called r.
|
|
||||||
* You don't want the exact set of viable next tokens because the
|
|
||||||
* input might just be missing a token--you might consume the
|
|
||||||
* rest of the input looking for one of the missing tokens.
|
|
||||||
*
|
|
||||||
* Consider grammar:
|
|
||||||
*
|
|
||||||
* a : '[' b ']'
|
|
||||||
* | '(' b ')'
|
|
||||||
* ;
|
|
||||||
* b : c '^' INT ;
|
|
||||||
* c : ID
|
|
||||||
* | INT
|
|
||||||
* ;
|
|
||||||
*
|
|
||||||
* At each rule invocation, the set of tokens that could follow
|
|
||||||
* that rule is pushed on a stack. Here are the various
|
|
||||||
* context-sensitive follow sets:
|
|
||||||
*
|
 * FOLLOW(b1_in_a) = FIRST(']') = ']'
 * FOLLOW(b2_in_a) = FIRST(')') = ')'
 * FOLLOW(c_in_b) = FIRST('^') = '^'
 *
 * Upon erroneous input "[]", the call chain is
 *
 * a -> b -> c
 *
 * and, hence, the follow context stack is:
 *
 * depth   follow set   start of rule execution
 * 0       <EOF>        a (from main())
 * 1       ']'          b
 * 2       '^'          c
 *
 * Notice that ')' is not included, because b would have to have
 * been called from a different context in rule a for ')' to be
 * included.
 *
 * For error recovery, we cannot consider FOLLOW(c)
 * (context-sensitive or otherwise). We need the combined set of
 * all context-sensitive FOLLOW sets--the set of all tokens that
 * could follow any reference in the call chain. We need to
 * resync to one of those tokens. Note that FOLLOW(c)='^' and if
 * we resync'd to that token, we'd consume until EOF. We need to
 * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
 * In this case, for input "[]", LA(1) is ']' and in the set, so we would
 * not consume anything. After printing an error, rule c would
 * return normally. Rule b would not find the required '^' though.
 * At this point, it gets a mismatched token error and throws an
 * exception (since LA(1) is not in the viable following token
 * set). The rule exception handler tries to recover, but finds
 * the same recovery set and doesn't consume anything. Rule b
 * exits normally returning to rule a. Now it finds the ']' (and
 * with the successful match exits errorRecovery mode).
 *
 * So, you can see that the parser walks up the call chain looking
 * for the token that was a member of the recovery set.
 *
 * Errors are not generated in errorRecovery mode.
 *
 * ANTLR's error recovery mechanism is based upon original ideas:
 *
 * "Algorithms + Data Structures = Programs" by Niklaus Wirth
 *
 * and
 *
 * "A note on error recovery in recursive descent parsers":
 * http://portal.acm.org/citation.cfm?id=947902.947905
 *
 * Later, Josef Grosch had some good ideas:
 *
 * "Efficient and Comfortable Error Recovery in Recursive Descent
 * Parsers":
 * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
 *
 * Like Grosch I implement context-sensitive FOLLOW sets that are combined
 * at run-time upon error to avoid overhead during parsing.
 */
internal func getErrorRecoverySet(_ recognizer: Parser) throws -> IntervalSet {
    let atn: ATN = recognizer.getInterpreter().atn
    var ctx: RuleContext? = recognizer._ctx
    let recoverSet: IntervalSet = try IntervalSet()
    while let ctxWrap = ctx, ctxWrap.invokingState >= 0 {
        // compute what follows who invoked us
        let invokingState: ATNState = atn.states[ctxWrap.invokingState]!
        let rt: RuleTransition = invokingState.transition(0) as! RuleTransition
        let follow: IntervalSet = try atn.nextTokens(rt.followState)
        try recoverSet.addAll(follow)
        ctx = ctxWrap.parent
    }
    try recoverSet.remove(CommonToken.EPSILON)
    // print("recover set "+recoverSet.toString(recognizer.getTokenNames()));
    return recoverSet
}

/** Consume tokens until one matches the given token set. */
internal func consumeUntil(_ recognizer: Parser, _ set: IntervalSet) throws {
    // errPrint("consumeUntil("+set.toString(recognizer.getTokenNames())+")");
    var ttype: Int = try getTokenStream(recognizer).LA(1)
    while ttype != CommonToken.EOF && !set.contains(ttype) {
        //print("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
        // getTokenStream(recognizer).consume();
        try recognizer.consume()
        ttype = try getTokenStream(recognizer).LA(1)
    }
}
}
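Taken together, the two helpers above form the resynchronization step described in the comment: compute the combined context-sensitive FOLLOW set, then skip tokens until one of its members (or EOF) appears. A minimal sketch of such a wrapper follows, assuming it lives on the same error strategy; the name `resyncAfterError` is illustrative and not part of the runtime.

// Illustrative sketch only -- not part of this file.
// For the erroneous input "[]" from the example above, the combined set is
// {']', '^'}, so LA(1)==']' is already in the set and nothing is consumed.
internal func resyncAfterError(_ recognizer: Parser) throws {
    let followSet = try getErrorRecoverySet(recognizer)  // combined FOLLOW of the call chain
    try consumeUntil(recognizer, followSet)              // consume until a member of the set (or EOF)
}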
@ -1,16 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/** This signifies any kind of mismatched input exceptions such as
 * when the current input does not match the expected token.
 */
public class InputMismatchException: RecognitionException<ParserATNSimulator> {
    public init(_ recognizer: Parser) throws {
        super.init(recognizer, recognizer.getInputStream()!, recognizer._ctx)
        self.setOffendingToken(try recognizer.getCurrentToken())
    }
}
@ -1,216 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * A simple stream of symbols whose values are represented as integers. This
 * interface provides <em>marked ranges</em> with support for a minimum level
 * of buffering necessary to implement arbitrary lookahead during prediction.
 * For more information on marked ranges, see {@link #mark}.
 *
 * <p><strong>Initializing Methods:</strong> Some methods in this interface have
 * unspecified behavior if no call to an initializing method has occurred after
 * the stream was constructed. The following is a list of initializing methods:</p>
 *
 * <ul>
 * <li>{@link #LA}</li>
 * <li>{@link #consume}</li>
 * <li>{@link #size}</li>
 * </ul>
 */
public protocol IntStream: class {
    /**
     * The value returned by {@link #LA LA()} when the end of the stream is
     * reached.
     */
    //let EOF : Int = -1;

    /**
     * The value returned by {@link #getSourceName} when the actual name of the
     * underlying source is not known.
     */
    //let UNKNOWN_SOURCE_NAME : String = "<unknown>";

    /**
     * Consumes the current symbol in the stream. This method has the following
     * effects:
     *
     * <ul>
     * <li><strong>Forward movement:</strong> The value of {@link #index index()}
     * before calling this method is less than the value of {@code index()}
     * after calling this method.</li>
     * <li><strong>Ordered lookahead:</strong> The value of {@code LA(1)} before
     * calling this method becomes the value of {@code LA(-1)} after calling
     * this method.</li>
     * </ul>
     *
     * Note that calling this method does not guarantee that {@code index()} is
     * incremented by exactly 1, as that would preclude the ability to implement
     * filtering streams (e.g. {@link org.antlr.v4.runtime.CommonTokenStream} which distinguishes
     * between "on-channel" and "off-channel" tokens).
     *
     * @throws IllegalStateException if an attempt is made to consume the
     * end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling
     * {@code consume}).
     */
    func consume() throws

    /**
     * Gets the value of the symbol at offset {@code i} from the current
     * position. When {@code i==1}, this method returns the value of the current
     * symbol in the stream (which is the next symbol to be consumed). When
     * {@code i==-1}, this method returns the value of the previously read
     * symbol in the stream. It is not valid to call this method with
     * {@code i==0}, but the specific behavior is unspecified because this
     * method is frequently called from performance-critical code.
     *
     * <p>This method is guaranteed to succeed if any of the following are true:</p>
     *
     * <ul>
     * <li>{@code i>0}</li>
     * <li>{@code i==-1} and {@link #index index()} returns a value greater
     * than the value of {@code index()} after the stream was constructed
     * and {@code LA(1)} was called in that order. Specifying the current
     * {@code index()} relative to the index after the stream was created
     * allows for filtering implementations that do not return every symbol
     * from the underlying source. Specifying the call to {@code LA(1)}
     * allows for lazily initialized streams.</li>
     * <li>{@code LA(i)} refers to a symbol consumed within a marked region
     * that has not yet been released.</li>
     * </ul>
     *
     * <p>If {@code i} represents a position at or beyond the end of the stream,
     * this method returns {@link #EOF}.</p>
     *
     * <p>The return value is unspecified if {@code i<0} and fewer than {@code -i}
     * calls to {@link #consume consume()} have occurred from the beginning of
     * the stream before calling this method.</p>
     *
     * @throws UnsupportedOperationException if the stream does not support
     * retrieving the value of the specified symbol
     */
    func LA(_ i: Int) throws -> Int

    /**
     * A mark provides a guarantee that {@link #seek seek()} operations will be
     * valid over a "marked range" extending from the index where {@code mark()}
     * was called to the current {@link #index index()}. This allows the use of
     * streaming input sources by specifying the minimum buffering requirements
     * to support arbitrary lookahead during prediction.
     *
     * <p>The returned mark is an opaque handle (type {@code int}) which is passed
     * to {@link #release release()} when the guarantees provided by the marked
     * range are no longer necessary. When calls to
     * {@code mark()}/{@code release()} are nested, the marks must be released
     * in reverse order of which they were obtained. Since marked regions are
     * used during performance-critical sections of prediction, the specific
     * behavior of invalid usage is unspecified (i.e. a mark is not released, or
     * a mark is released twice, or marks are not released in reverse order from
     * which they were created).</p>
     *
     * <p>The behavior of this method is unspecified if no call to an
     * {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream was
     * constructed.</p>
     *
     * <p>This method does not change the current position in the input stream.</p>
     *
     * <p>The following example shows the use of {@link #mark mark()},
     * {@link #release release(mark)}, {@link #index index()}, and
     * {@link #seek seek(index)} as part of an operation to safely work within a
     * marked region, then restore the stream position to its original value and
     * release the mark.</p>
     * <pre>
     * IntStream stream = ...;
     * int index = -1;
     * int mark = stream.mark();
     * try {
     *     index = stream.index();
     *     // perform work here...
     * } finally {
     *     if (index != -1) {
     *         stream.seek(index);
     *     }
     *     stream.release(mark);
     * }
     * </pre>
     *
     * @return An opaque marker which should be passed to
     * {@link #release release()} when the marked range is no longer required.
     */
    func mark() -> Int

    /**
     * This method releases a marked range created by a call to
     * {@link #mark mark()}. Calls to {@code release()} must appear in the
     * reverse order of the corresponding calls to {@code mark()}. If a mark is
     * released twice, or if marks are not released in reverse order of the
     * corresponding calls to {@code mark()}, the behavior is unspecified.
     *
     * <p>For more information and an example, see {@link #mark}.</p>
     *
     * @param marker A marker returned by a call to {@code mark()}.
     * @see #mark
     */
    func release(_ marker: Int) throws

    /**
     * Return the index into the stream of the input symbol referred to by
     * {@code LA(1)}.
     *
     * <p>The behavior of this method is unspecified if no call to an
     * {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream was
     * constructed.</p>
     */
    func index() -> Int

    /**
     * Set the input cursor to the position indicated by {@code index}. If the
     * specified index lies past the end of the stream, the operation behaves as
     * though {@code index} was the index of the EOF symbol. After this method
     * returns without throwing an exception, then at least one of the following
     * will be true.
     *
     * <ul>
     * <li>{@link #index index()} will return the index of the first symbol
     * appearing at or after the specified {@code index}. Specifically,
     * implementations which filter their sources should automatically
     * adjust {@code index} forward the minimum amount required for the
     * operation to target a non-ignored symbol.</li>
     * <li>{@code LA(1)} returns {@link #EOF}</li>
     * </ul>
     *
     * This operation is guaranteed to not throw an exception if {@code index}
     * lies within a marked region. For more information on marked regions, see
     * {@link #mark}. The behavior of this method is unspecified if no call to
     * an {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream
     * was constructed.
     *
     * @param index The absolute index to seek to.
     *
     * @throws IllegalArgumentException if {@code index} is less than 0
     * @throws UnsupportedOperationException if the stream does not support
     * seeking to the specified index
     */
    func seek(_ index: Int) throws

    /**
     * Returns the total number of symbols in the stream, including a single EOF
     * symbol.
     *
     * @throws UnsupportedOperationException if the size of the stream is
     * unknown.
     */
    func size() -> Int

    /**
     * Gets the name of the underlying symbol source. This method returns a
     * non-null, non-empty string. If such a name is not known, this method
     * returns {@link #UNKNOWN_SOURCE_NAME}.
     */
    func getSourceName() -> String
}
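For readers of the Swift runtime, the Java-flavored <pre> example above translates directly. A minimal sketch, assuming `stream` is some concrete `IntStream` implementation and that the work inside the marked region only needs the protocol methods shown; the function name `workWithinMarkedRegion` is illustrative only.

// Illustrative sketch of the mark()/index()/seek()/release() pattern.
func workWithinMarkedRegion(_ stream: IntStream) throws {
    let mark = stream.mark()        // open a marked range at the current position
    let index = stream.index()      // remember where we started
    defer {
        try? stream.seek(index)     // restore the original position...
        try? stream.release(mark)   // ...then release marks in reverse order of acquisition
    }
    // perform lookahead work here...
    _ = try stream.LA(1)
}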
@ -1,63 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * This class extends {@link org.antlr.v4.runtime.ParserRuleContext} by allowing the value of
 * {@link #getRuleIndex} to be explicitly set for the context.
 *
 * <p>
 * {@link org.antlr.v4.runtime.ParserRuleContext} does not include field storage for the rule index
 * since the context classes created by the code generator override the
 * {@link #getRuleIndex} method to return the correct value for that context.
 * Since the parser interpreter does not use the context classes generated for a
 * parser, this class (with slightly more memory overhead per node) is used to
 * provide equivalent functionality.</p>
 */
public class InterpreterRuleContext: ParserRuleContext {
    /** This is the backing field for {@link #getRuleIndex}. */
    private var ruleIndex: Int = -1

    public override init() {
        super.init()
    }

    /**
     * Constructs a new {@link org.antlr.v4.runtime.InterpreterRuleContext} with the specified
     * parent, invoking state, and rule index.
     *
     * @param parent The parent context.
     * @param invokingStateNumber The invoking state number.
     * @param ruleIndex The rule index for the current context.
     */
    public init(_ parent: ParserRuleContext?,
                _ invokingStateNumber: Int,
                _ ruleIndex: Int) {
        self.ruleIndex = ruleIndex
        super.init(parent, invokingStateNumber)
    }

    override
    public func getRuleIndex() -> Int {
        return ruleIndex
    }

    /** Copy a {@link org.antlr.v4.runtime.ParserRuleContext} or {@link org.antlr.v4.runtime.InterpreterRuleContext}
     * stack to a {@link org.antlr.v4.runtime.InterpreterRuleContext} tree.
     * Return {@code null} if {@code ctx} is null.
     */
    public static func fromParserRuleContext(_ ctx: ParserRuleContext?) -> InterpreterRuleContext? {
        guard let ctx = ctx else {
            return nil
        }
        let dup: InterpreterRuleContext = InterpreterRuleContext()
        dup.copyFrom(ctx)
        dup.ruleIndex = ctx.getRuleIndex()
        dup.parent = fromParserRuleContext(ctx.getParent() as? ParserRuleContext)
        return dup
    }
}
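As a usage note (a sketch, not part of the file): `fromParserRuleContext` is what lets tooling snapshot a parser's current rule-invocation stack as a plain `InterpreterRuleContext` tree. Here `parser` stands for any running `Parser`, following the `_ctx` access used elsewhere in the runtime.

// Illustrative only: copy the current context chain into an interpreter-context tree.
let snapshot: InterpreterRuleContext? = InterpreterRuleContext.fromParserRuleContext(parser._ctx)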
@ -1,26 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * Represents the type of recognizer an ATN applies to.
 *
 * @author Sam Harwell
 */
public enum ATNType: Int {

    /**
     * A lexer grammar.
     */
    case lexer = 0

    /**
     * A parser grammar.
     */
    case parser

}
@ -1,19 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 *
 * @author Sam Harwell
 */
public class AbstractPredicateTransition: Transition {

    /*public override init(_ target : ATNState) {
        super.init(target);
    }*/

}
@ -1,67 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * This class represents profiling event information for an ambiguity.
 * Ambiguities are decisions where a particular input resulted in an SLL
 * conflict, followed by LL prediction also reaching a conflict state
 * (indicating a true ambiguity in the grammar).
 *
 * <p>
 * This event may be reported during SLL prediction in cases where the
 * conflicting SLL configuration set provides sufficient information to
 * determine that the SLL conflict is truly an ambiguity. For example, if none
 * of the ATN configurations in the conflicting SLL configuration set have
 * traversed a global follow transition (i.e.
 * {@link org.antlr.v4.runtime.atn.ATNConfig#reachesIntoOuterContext} is 0 for all configurations), then
 * the result of SLL prediction for that input is known to be equivalent to the
 * result of LL prediction for that input.</p>
 *
 * <p>
 * In some cases, the minimum represented alternative in the conflicting LL
 * configuration set is not equal to the minimum represented alternative in the
 * conflicting SLL configuration set. Grammars and inputs which result in this
 * scenario are unable to use {@link org.antlr.v4.runtime.atn.PredictionMode#SLL}, which in turn means
 * they cannot use the two-stage parsing strategy to improve parsing performance
 * for that input.</p>
 *
 * @see org.antlr.v4.runtime.atn.ParserATNSimulator#reportAmbiguity
 * @see org.antlr.v4.runtime.ANTLRErrorListener#reportAmbiguity
 *
 * @since 4.3
 */
public class AmbiguityInfo: DecisionEventInfo {
    /** The set of alternative numbers for this decision event that lead to a valid parse. */
    public var ambigAlts: BitSet

    /**
     * Constructs a new instance of the {@link org.antlr.v4.runtime.atn.AmbiguityInfo} class with the
     * specified detailed ambiguity information.
     *
     * @param decision The decision number
     * @param configs The final configuration set identifying the ambiguous
     * alternatives for the current input
     * @param ambigAlts The set of alternatives in the decision that lead to a valid parse.
     * @param input The input token stream
     * @param startIndex The start index for the current prediction
     * @param stopIndex The index at which the ambiguity was identified during
     * prediction
     * @param fullCtx {@code true} if the ambiguity was identified during LL
     * prediction; otherwise, {@code false} if the ambiguity was identified
     * during SLL prediction
     */
    public init(_ decision: Int,
                _ configs: ATNConfigSet,
                _ ambigAlts: BitSet,
                _ input: TokenStream, _ startIndex: Int, _ stopIndex: Int,
                _ fullCtx: Bool) {
        self.ambigAlts = ambigAlts
        super.init(decision, configs, input, startIndex, stopIndex, fullCtx)
    }
}
@ -1,18 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 *
 * @author Sam Harwell
 */
public final class BasicBlockStartState: BlockStartState {
    override
    public func getStateType() -> Int {
        return BlockStartState.BLOCK_START
    }
}
@ -1,20 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 *
 * @author Sam Harwell
 */
public final class BasicState: ATNState {

    override
    public func getStateType() -> Int {
        return ATNState.BASIC
    }

}
@ -1,17 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/** Terminal node of a simple {@code (a|b|c)} block. */
public final class BlockEndState: ATNState {
    public var startState: BlockStartState?

    override
    public func getStateType() -> Int {
        return ATNState.BLOCK_END
    }
}
@ -1,11 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/** The start of a regular {@code (...)} block. */
public class BlockStartState: DecisionState {
    public var endState: BlockEndState?
}
@ -1,44 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * This class represents profiling event information for a context sensitivity.
 * Context sensitivities are decisions where a particular input resulted in an
 * SLL conflict, but LL prediction produced a single unique alternative.
 *
 * <p>
 * In some cases, the unique alternative identified by LL prediction is not
 * equal to the minimum represented alternative in the conflicting SLL
 * configuration set. Grammars and inputs which result in this scenario are
 * unable to use {@link org.antlr.v4.runtime.atn.PredictionMode#SLL}, which in turn means they cannot use
 * the two-stage parsing strategy to improve parsing performance for that
 * input.</p>
 *
 * @see org.antlr.v4.runtime.atn.ParserATNSimulator#reportContextSensitivity
 * @see org.antlr.v4.runtime.ANTLRErrorListener#reportContextSensitivity
 *
 * @since 4.3
 */
public class ContextSensitivityInfo: DecisionEventInfo {
    /**
     * Constructs a new instance of the {@link org.antlr.v4.runtime.atn.ContextSensitivityInfo} class
     * with the specified detailed context sensitivity information.
     *
     * @param decision The decision number
     * @param configs The final configuration set containing the unique
     * alternative identified by full-context prediction
     * @param input The input token stream
     * @param startIndex The start index for the current prediction
     * @param stopIndex The index at which the context sensitivity was
     * identified during full-context prediction
     */
    public init(_ decision: Int,
                _ configs: ATNConfigSet,
                _ input: TokenStream, _ startIndex: Int, _ stopIndex: Int) {
        super.init(decision, configs, input, startIndex, stopIndex, true)
    }
}
@ -1,74 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * This is the base class for gathering detailed information about prediction
 * events which occur during parsing.
 *
 * Note that we could record the parser call stack at the time this event
 * occurred but in the presence of left recursive rules, the stack is kind of
 * meaningless. It's better to look at the individual configurations for their
 * individual stacks. Of course that is a {@link org.antlr.v4.runtime.atn.PredictionContext} object
 * not a parse tree node and so it does not have information about the extent
 * (start...stop) of the various subtrees. Examining the stack tops of all
 * configurations provides the return states for the rule invocations.
 * From there you can get the enclosing rule.
 *
 * @since 4.3
 */
public class DecisionEventInfo {
    /**
     * The invoked decision number which this event is related to.
     *
     * @see org.antlr.v4.runtime.atn.ATN#decisionToState
     */
    public let decision: Int

    /**
     * The configuration set containing additional information relevant to the
     * prediction state when the current event occurred, or {@code null} if no
     * additional information is relevant or available.
     */
    public let configs: ATNConfigSet?

    /**
     * The input token stream which is being parsed.
     */
    public let input: TokenStream

    /**
     * The token index in the input stream at which the current prediction was
     * originally invoked.
     */
    public let startIndex: Int

    /**
     * The token index in the input stream at which the current event occurred.
     */
    public let stopIndex: Int

    /**
     * {@code true} if the current event occurred during LL prediction;
     * otherwise, {@code false} if the event occurred during SLL prediction.
     */
    public let fullCtx: Bool

    public init(_ decision: Int,
                _ configs: ATNConfigSet?,
                _ input: TokenStream,
                _ startIndex: Int,
                _ stopIndex: Int,
                _ fullCtx: Bool) {
        self.decision = decision
        self.fullCtx = fullCtx
        self.stopIndex = stopIndex
        self.input = input
        self.startIndex = startIndex
        self.configs = configs
    }
}
@ -1,244 +0,0 @@
/* Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

/**
 * This class contains profiling information gathered for a particular decision.
 *
 * <p>
 * Parsing performance in ANTLR 4 is heavily influenced by both static factors
 * (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the
 * choice of input and the state of the DFA cache at the time profiling
 * operations are started). For best results, gather and use aggregate
 * statistics from a large sample of inputs representing the inputs expected in
 * production before using the results to make changes in the grammar.</p>
 *
 * @since 4.3
 */
public class DecisionInfo: CustomStringConvertible {
    /**
     * The decision number, which is an index into {@link org.antlr.v4.runtime.atn.ATN#decisionToState}.
     */
    public final var decision: Int

    /**
     * The total number of times {@link org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict} was
     * invoked for this decision.
     */
    public var invocations: Int64 = 0

    /**
     * The total time spent in {@link org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict} for
     * this decision, in nanoseconds.
     *
     * <p>
     * The value of this field contains the sum of differential results obtained
     * by {@link System#nanoTime()}, and is not adjusted to compensate for JIT
     * and/or garbage collection overhead. For best accuracy, use a modern JVM
     * implementation that provides precise results from
     * {@link System#nanoTime()}, and perform profiling in a separate process
     * which is warmed up by parsing the input prior to profiling. If desired,
     * call {@link org.antlr.v4.runtime.atn.ATNSimulator#clearDFA} to reset the DFA cache to its initial
     * state before starting the profiling measurement pass.</p>
     */
    public var timeInPrediction: Int64 = 0

    /**
     * The sum of the lookahead required for SLL prediction for this decision.
     * Note that SLL prediction is used before LL prediction for performance
     * reasons even when {@link org.antlr.v4.runtime.atn.PredictionMode#LL} or
     * {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
     */
    public var SLL_TotalLook: Int64 = 0

    /**
     * Gets the minimum lookahead required for any single SLL prediction to
     * complete for this decision, by reaching a unique prediction, reaching an
     * SLL conflict state, or encountering a syntax error.
     */
    public var SLL_MinLook: Int64 = 0

    /**
     * Gets the maximum lookahead required for any single SLL prediction to
     * complete for this decision, by reaching a unique prediction, reaching an
     * SLL conflict state, or encountering a syntax error.
     */
    public var SLL_MaxLook: Int64 = 0

    /**
     * Gets the {@link org.antlr.v4.runtime.atn.LookaheadEventInfo} associated with the event where the
     * {@link #SLL_MaxLook} value was set.
     */
    public var SLL_MaxLookEvent: LookaheadEventInfo!

    /**
     * The sum of the lookahead required for LL prediction for this decision.
     * Note that LL prediction is only used when SLL prediction reaches a
     * conflict state.
     */
    public var LL_TotalLook: Int64 = 0

    /**
     * Gets the minimum lookahead required for any single LL prediction to
     * complete for this decision. An LL prediction completes when the algorithm
     * reaches a unique prediction, a conflict state (for
     * {@link org.antlr.v4.runtime.atn.PredictionMode#LL}), an ambiguity state (for
     * {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION}), or a syntax error.
     */
    public var LL_MinLook: Int64 = 0

    /**
     * Gets the maximum lookahead required for any single LL prediction to
     * complete for this decision. An LL prediction completes when the algorithm
     * reaches a unique prediction, a conflict state (for
     * {@link org.antlr.v4.runtime.atn.PredictionMode#LL}), an ambiguity state (for
     * {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION}), or a syntax error.
     */
    public var LL_MaxLook: Int64 = 0

    /**
     * Gets the {@link org.antlr.v4.runtime.atn.LookaheadEventInfo} associated with the event where the
     * {@link #LL_MaxLook} value was set.
     */
    public var LL_MaxLookEvent: LookaheadEventInfo!

    /**
     * A collection of {@link org.antlr.v4.runtime.atn.ContextSensitivityInfo} instances describing the
     * context sensitivities encountered during LL prediction for this decision.
     *
     * @see org.antlr.v4.runtime.atn.ContextSensitivityInfo
     */
    public final var contextSensitivities: Array<ContextSensitivityInfo> = Array<ContextSensitivityInfo>()

    /**
     * A collection of {@link org.antlr.v4.runtime.atn.ErrorInfo} instances describing the parse errors
     * identified during calls to {@link org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict} for
     * this decision.
     *
     * @see org.antlr.v4.runtime.atn.ErrorInfo
     */
    public final var errors: Array<ErrorInfo> = Array<ErrorInfo>()

    /**
     * A collection of {@link org.antlr.v4.runtime.atn.AmbiguityInfo} instances describing the
     * ambiguities encountered during LL prediction for this decision.
     *
     * @see org.antlr.v4.runtime.atn.AmbiguityInfo
     */
    public final var ambiguities: Array<AmbiguityInfo> = Array<AmbiguityInfo>()

    /**
     * A collection of {@link org.antlr.v4.runtime.atn.PredicateEvalInfo} instances describing the
     * results of evaluating individual predicates during prediction for this
     * decision.
     *
     * @see org.antlr.v4.runtime.atn.PredicateEvalInfo
     */
    public final var predicateEvals: Array<PredicateEvalInfo> = Array<PredicateEvalInfo>()

    /**
     * The total number of ATN transitions required during SLL prediction for
     * this decision. An ATN transition is determined by the number of times the
     * DFA does not contain an edge that is required for prediction, resulting
     * in on-the-fly computation of that edge.
     *
     * <p>
     * If DFA caching of SLL transitions is employed by the implementation, ATN
     * computation may cache the computed edge for efficient lookup during
     * future parsing of this decision. Otherwise, the SLL parsing algorithm
     * will use ATN transitions exclusively.</p>
     *
     * @see #SLL_DFATransitions
     * @see org.antlr.v4.runtime.atn.ParserATNSimulator#computeTargetState
     * @see org.antlr.v4.runtime.atn.LexerATNSimulator#computeTargetState
     */
    public var SLL_ATNTransitions: Int64 = 0

    /**
     * The total number of DFA transitions required during SLL prediction for
     * this decision.
     *
     * <p>If the ATN simulator implementation does not use DFA caching for SLL
     * transitions, this value will be 0.</p>
     *
     * @see org.antlr.v4.runtime.atn.ParserATNSimulator#getExistingTargetState
     * @see org.antlr.v4.runtime.atn.LexerATNSimulator#getExistingTargetState
     */
    public var SLL_DFATransitions: Int64 = 0

    /**
     * Gets the total number of times SLL prediction completed in a conflict
     * state, resulting in fallback to LL prediction.
     *
     * <p>Note that this value is not related to whether or not
     * {@link org.antlr.v4.runtime.atn.PredictionMode#SLL} may be used successfully with a particular
     * grammar. If the ambiguity resolution algorithm applied to the SLL
     * conflicts for this decision produces the same result as LL prediction for
     * this decision, {@link org.antlr.v4.runtime.atn.PredictionMode#SLL} would produce the same overall
     * parsing result as {@link org.antlr.v4.runtime.atn.PredictionMode#LL}.</p>
     */
    public var LL_Fallback: Int64 = 0

    /**
     * The total number of ATN transitions required during LL prediction for
     * this decision. An ATN transition is determined by the number of times the
     * DFA does not contain an edge that is required for prediction, resulting
     * in on-the-fly computation of that edge.
     *
     * <p>
     * If DFA caching of LL transitions is employed by the implementation, ATN
     * computation may cache the computed edge for efficient lookup during
     * future parsing of this decision. Otherwise, the LL parsing algorithm will
     * use ATN transitions exclusively.</p>
     *
     * @see #LL_DFATransitions
     * @see org.antlr.v4.runtime.atn.ParserATNSimulator#computeTargetState
     * @see org.antlr.v4.runtime.atn.LexerATNSimulator#computeTargetState
     */
    public var LL_ATNTransitions: Int64 = 0

    /**
     * The total number of DFA transitions required during LL prediction for
     * this decision.
     *
     * <p>If the ATN simulator implementation does not use DFA caching for LL
     * transitions, this value will be 0.</p>
     *
     * @see org.antlr.v4.runtime.atn.ParserATNSimulator#getExistingTargetState
     * @see org.antlr.v4.runtime.atn.LexerATNSimulator#getExistingTargetState
     */
    public var LL_DFATransitions: Int64 = 0

    /**
     * Constructs a new instance of the {@link org.antlr.v4.runtime.atn.DecisionInfo} class to contain
     * statistics for a particular decision.
     *
     * @param decision The decision number
     */
    public init(_ decision: Int) {
        self.decision = decision
    }

    public var description: String {
        let desc: StringBuilder = StringBuilder()
        desc.append("{")
        desc.append("decision=\(decision)")
        desc.append(", contextSensitivities=\(contextSensitivities.count)")
        desc.append(", errors=\(errors.count)")
        desc.append(", ambiguities=\(ambiguities.count)")
        desc.append(", SLL_lookahead=\(SLL_TotalLook)")
        desc.append(", SLL_ATNTransitions=\(SLL_ATNTransitions)")
        desc.append(", SLL_DFATransitions=\(SLL_DFATransitions)")
        desc.append(", LL_Fallback=\(LL_Fallback)")
        desc.append(", LL_lookahead=\(LL_TotalLook)")
        desc.append(", LL_ATNTransitions=\(LL_ATNTransitions)")
        desc.append("}")

        return desc.toString()
    }

}
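A brief sketch of how these counters read in practice. `DecisionInfo` values are normally filled in by the profiling ATN simulator rather than by hand; the manual updates below only illustrate how the fields relate to the `description` output.

// Illustrative only: a statistics bucket for decision 0.
let info = DecisionInfo(0)
info.invocations += 1     // one call to adaptivePredict...
info.SLL_TotalLook += 2   // ...which needed 2 tokens of SLL lookahead
print(info)               // {decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, ...}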