Merge pull request #57 from parrt/master

Lots of new stuff, pulled from my main development repo
commit 2454182a38
@@ -87,6 +87,7 @@ build.sysclasspath=ignore
 </classpath>
 </java>

+<!--
 <echo>gunit grammars</echo>
 <java classname="org.antlr.Tool" fork="true" failonerror="false" maxmemory="300m"
 dir="${basedir}/gunit/src/org/antlr/v4/gunit">

@@ -100,6 +101,7 @@ build.sysclasspath=ignore
 <pathelement path="${java.class.path}"/>
 </classpath>
 </java>
+-->
 </target>

 <target name="compile" depends="antlr" description="Compile for generic OS">

@@ -108,7 +110,7 @@ build.sysclasspath=ignore
 <copy todir="${build.dir}/src" >
 <fileset dir="${basedir}/tool/src/"/>
 <fileset dir="${basedir}/runtime/Java/src/"/>
-<fileset dir="${basedir}/gunit/src/"/>
+<!-- <fileset dir="${basedir}/gunit/src/"/> -->
 </copy>
 <replace dir="${build.dir}/src" token="@version@" value="${version}"/>
 <javac

@@ -161,12 +163,14 @@ build.sysclasspath=ignore
 <include name="**/*.st"/>
 <include name="**/*.stg"/>
 </fileset>
+<!--
 <fileset dir="${basedir}/gunit/src/">
 <include name="**/*.java"/>
 <include name="**/*.g"/>
 <include name="**/*.st"/>
 <include name="**/*.stg"/>
 </fileset>
+-->
 </copy>

 <copy todir="${install.root.dir}">

@@ -23,17 +23,17 @@
 <scope>compile</scope>
 </dependency>
 <dependency>
-<groupId>org.abego</groupId>
-<artifactId>treelayout.core</artifactId>
-<version>1.0</version>
-<scope>system</scope>
-<systemPath>${project.basedir}/lib/org.abego.treelayout.core.jar</systemPath>
+<groupId>org.abego.treelayout</groupId>
+<artifactId>org.abego.treelayout.core</artifactId>
+<version>1.0.1</version>
+<scope>compile</scope>
 </dependency>
 </dependencies>

 <build>

 <sourceDirectory>src</sourceDirectory>
+<resources/>

 <plugins>
 <plugin>

@@ -28,6 +28,8 @@
 */
 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;

@@ -210,7 +212,9 @@ public class ANTLRInputStream implements CharStream {
 }

 @Override
-public String substring(int start, int stop) {
+public String getText(Interval interval) {
+int start = interval.a;
+int stop = interval.b;
 if ( stop >= n ) stop = n-1;
 int count = stop - start + 1;
 if ( start >= n ) return "";

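
A minimal migration sketch for the substring-to-getText change above. It is
self-contained and uses only runtime classes touched by this commit; the
inclusive Interval [a, b] selects the same characters the old (start, stop)
pair did:

    import org.antlr.v4.runtime.ANTLRInputStream;
    import org.antlr.v4.runtime.misc.Interval;

    public class GetTextDemo {
        public static void main(String[] args) {
            ANTLRInputStream input = new ANTLRInputStream("int x = 1;");
            // Before this commit: input.substring(0, 2)
            // After: getText with an inclusive interval
            System.out.println(input.getText(Interval.of(0, 2))); // prints "int"
        }
    }
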
@@ -29,7 +29,13 @@

 package org.antlr.v4.runtime;

-import java.util.*;
+import org.antlr.v4.runtime.misc.Interval;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Set;

 /** Buffer all input tokens but do on-demand fetching of new tokens from
 * lexer. Useful when the parser or lexer has to set context/mode info before
@@ -235,17 +241,23 @@ public class BufferedTokenStream<T extends Token> implements TokenStream {

 /** Grab *all* tokens from stream and return string */
 @Override
-public String toString() {
+public String toString() { return getText(); }
+
+/** Get the text of all tokens in this buffer. */
+public String getText() {
 if ( p == -1 ) setup();
 fill();
-return toString(0, tokens.size()-1);
+return getText(Interval.of(0,size()-1));
 }

 @Override
-public String toString(int start, int stop) {
+public String getText(Interval interval) {
+int start = interval.a;
+int stop = interval.b;
 if ( start<0 || stop<0 ) return "";
 if ( p == -1 ) setup();
 if ( stop>=tokens.size() ) stop = tokens.size()-1;

 StringBuilder buf = new StringBuilder();
 for (int i = start; i <= stop; i++) {
 T t = tokens.get(i);
@@ -256,9 +268,12 @@ public class BufferedTokenStream<T extends Token> implements TokenStream {
 }

 @Override
-public String toString(Token start, Token stop) {
+public String getText(RuleContext ctx) { return getText(ctx.getSourceInterval()); }
+
+@Override
+public String getText(Token start, Token stop) {
 if ( start!=null && stop!=null ) {
-return toString(start.getTokenIndex(), stop.getTokenIndex());
+return getText(Interval.of(start.getTokenIndex(), stop.getTokenIndex()));
 }
 return null;
 }

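
Taken together, the hunks above replace BufferedTokenStream's toString
overloads with a getText family. A usage sketch, assuming a hypothetical
generated lexer MyLexer (CommonTokenStream extends BufferedTokenStream, and
Interval is org.antlr.v4.runtime.misc.Interval):

    CommonTokenStream tokens = new CommonTokenStream(new MyLexer(input));
    tokens.fill();
    String whole   = tokens.getText();                             // entire buffer
    String byIndex = tokens.getText(Interval.of(0, 3));            // tokens 0..3 inclusive
    String byToken = tokens.getText(tokens.get(0), tokens.get(3)); // same span, by Token
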
@@ -28,6 +28,8 @@
 */
 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
+
 /** A source of characters for an ANTLR lexer */
 public interface CharStream extends IntStream {
 public static final int EOF = -1;

@@ -37,6 +39,7 @@ public interface CharStream extends IntStream {
 /** For unbuffered streams, you can't use this; primarily I'm providing
 * a useful interface for action code. Just make sure actions don't
 * use this on streams that don't support it.
+* @param interval
 */
-public String substring(int start, int stop);
+public String getText(Interval interval);
 }

@@ -28,6 +28,8 @@
 */
 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
+
 import java.io.Serializable;

 public class CommonToken implements WritableToken, Serializable {

@@ -109,7 +111,7 @@ public class CommonToken implements WritableToken, Serializable {
 if ( input==null ) return null;
 int n = input.size();
 if ( start<n && stop<n) {
-return input.substring(start,stop);
+return input.getText(Interval.of(start,stop));
 }
 else {
 return "<EOF>";

@@ -165,7 +165,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
 // If already recovering, don't try to sync
 if ( errorRecoveryMode ) return;

-SymbolStream<Token> tokens = recognizer.getInputStream();
+TokenStream tokens = recognizer.getInputStream();
 int la = tokens.LA(1);

 // try cheaper subset first; might get lucky. seems to shave a wee bit off

@@ -201,11 +201,11 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
 NoViableAltException e)
 throws RecognitionException
 {
-SymbolStream<Token> tokens = recognizer.getInputStream();
+TokenStream tokens = recognizer.getInputStream();
 String input;
 if (tokens instanceof TokenStream) {
 if ( e.startToken.getType()==Token.EOF ) input = "<EOF>";
-else input = ((TokenStream)tokens).toString(e.startToken, e.offendingToken);
+else input = ((TokenStream)tokens).getText(e.startToken, e.offendingToken);
 }
 else {
 input = "<unknown input>";

@@ -31,6 +31,7 @@ package org.antlr.v4.runtime;

 import org.antlr.v4.runtime.atn.ATNConfigSet;
 import org.antlr.v4.runtime.dfa.DFA;
+import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.IntervalSet;
 import org.antlr.v4.runtime.misc.NotNull;

@@ -42,7 +43,7 @@ public class DiagnosticErrorListener extends BaseErrorListener<Token> {
 {
 recognizer.notifyErrorListeners("reportAmbiguity d=" + dfa.decision +
 ": ambigAlts=" + ambigAlts + ", input='" +
-recognizer.getInputString(startIndex, stopIndex) + "'");
+recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)) + "'");
 }

 @Override

@@ -53,7 +54,7 @@ public class DiagnosticErrorListener extends BaseErrorListener<Token> {
 {
 recognizer.notifyErrorListeners("reportAttemptingFullContext d=" +
 dfa.decision + ", input='" +
-recognizer.getInputString(startIndex, stopIndex) + "'");
+recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)) + "'");
 }

 @Override

@@ -62,6 +63,6 @@ public class DiagnosticErrorListener extends BaseErrorListener<Token> {
 {
 recognizer.notifyErrorListeners("reportContextSensitivity d=" +
 dfa.decision + ", input='" +
-recognizer.getInputString(startIndex, stopIndex) + "'");
+recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)) + "'");
 }
 }

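
All three report methods above migrate the same way; the pattern, pulled out
on its own (recognizer is the Parser handed to the listener; Interval is
org.antlr.v4.runtime.misc.Interval):

    // Before: recognizer.getInputString(startIndex, stopIndex)
    // After:  go through the token stream with an inclusive Interval
    String input = recognizer.getTokenStream()
                             .getText(Interval.of(startIndex, stopIndex));
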
@@ -29,10 +29,10 @@
 package org.antlr.v4.runtime;

 import org.antlr.v4.runtime.atn.LexerATNSimulator;
+import org.antlr.v4.runtime.misc.Interval;

 import java.util.ArrayDeque;
 import java.util.EmptyStackException;
-import java.util.List;

 /** A lexer is recognizer that draws input symbols from a character stream.
 * lexer grammars result in a subclass of this object. A Lexer object

@@ -308,7 +308,7 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>

 public void notifyListeners(LexerNoViableAltException e) {
 String msg = "token recognition error at: '"+
-_input.substring(_tokenStartCharIndex, _input.index())+"'";
+_input.getText(Interval.of(_tokenStartCharIndex, _input.index()))+"'";

 ANTLRErrorListener<? super Integer> listener = getErrorListenerDispatch();
 listener.error(this, null, _tokenStartLine, _tokenStartCharPositionInLine, msg, e);

@@ -30,6 +30,7 @@
 package org.antlr.v4.runtime;

 import org.antlr.v4.runtime.atn.ATNConfigSet;
+import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.Utils;

 public class LexerNoViableAltException extends RecognitionException {

@@ -57,7 +58,7 @@ public class LexerNoViableAltException extends RecognitionException {
 public String toString() {
 String symbol = "";
 if (startIndex >= 0 && startIndex < input.size()) {
-symbol = getInputStream().substring(startIndex, startIndex);
+symbol = getInputStream().getText(Interval.of(startIndex,startIndex));
 symbol = Utils.escapeWhitespace(symbol, false);
 }

@@ -30,8 +30,10 @@ package org.antlr.v4.runtime;

 import org.antlr.v4.runtime.atn.ATNConfigSet;

-/** The parser could not decide which path in the decision to take based
- * upon the remaining input.
+/** Indicates that the parser could not decide which of two or more paths
+ * to take based upon the remaining input. It tracks the starting token
+ * of the offending input and also knows where the parser was
+ * in the various paths when the error. Reported by reportNoViableAlternative()
 */
 public class NoViableAltException extends RecognitionException {
 /** Which configurations did we try at input.index() that couldn't match input.LT(1)? */

@@ -44,7 +46,7 @@ public class NoViableAltException extends RecognitionException {
 */
 public Token startToken;

-public <Symbol extends Token> NoViableAltException(Parser recognizer) { // LL(1) error
+public NoViableAltException(Parser recognizer) { // LL(1) error
 this(recognizer,recognizer.getInputStream(),
 recognizer.getCurrentToken(),
 recognizer.getCurrentToken(),

@@ -52,8 +54,8 @@ public class NoViableAltException extends RecognitionException {
 recognizer._ctx);
 }

-public <Symbol> NoViableAltException(Parser recognizer,
-SymbolStream<Symbol> input,
+public NoViableAltException(Parser recognizer,
+TokenStream input,
 Token startToken,
 Token offendingToken,
 ATNConfigSet deadEndConfigs,

@@ -92,7 +92,7 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
 */
 protected ParserRuleContext<Token> _ctx;

-protected boolean _buildParseTrees;
+protected boolean _buildParseTrees = true;

 protected TraceListener _tracer;

@@ -295,18 +295,6 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
 this._input = input;
 }

-public String getInputString(int start) {
-return getInputString(start, getInputStream().index());
-}
-
-public String getInputString(int start, int stop) {
-SymbolStream<Token> input = getInputStream();
-if ( input instanceof TokenStream ) {
-return ((TokenStream)input).toString(start,stop);
-}
-return "n/a";
-}
-
 /** Match needs to return the current input symbol, which gets put
 * into the label for the associated token ref; e.g., x=ID.
 */

@@ -297,16 +297,6 @@ public class ParserRuleContext<Symbol extends Token> extends RuleContext {
 return Interval.of(start.getTokenIndex(), stop.getTokenIndex());
 }

-/** Return the text matched by this context and below in the parse
- * tree. It includes tokens from this.start .. this.stop inclusive.
- * It includes hidden channel tokens between start, stop. The
- * edge tokens are always on-channel tokens.
- */
-public String getText(TokenStream tokens) {
-Interval range = getSourceInterval();
-return range==Interval.INVALID ? null : tokens.toString(range.a, range.b);
-}
-
 public Symbol getStart() { return start; }
 public Symbol getStop() { return stop; }

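
The deleted ParserRuleContext.getText(TokenStream) has two replacements
visible elsewhere in this commit: TokenStream.getText(RuleContext) for text
recovered from the token buffer, and the new RuleContext.getText() (next
hunk) for text reassembled from the parse tree. A migration sketch, assuming
ctx is a rule context and tokens is its BufferedTokenStream:

    // Before: String s = ctx.getText(tokens);
    String fromBuffer = tokens.getText(ctx); // includes hidden-channel tokens
    String fromTree   = ctx.getText();       // only tokens added to the tree
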
@@ -224,6 +224,27 @@ public class RuleContext implements ParseTree.RuleNode {
 @Override
 public RuleContext getPayload() { return this; }

+/** Return the combined text of all child nodes. This method only considers
+ * tokens which have been added to the parse tree.
+ * <p>
+ * Since tokens on hidden channels (e.g. whitespace or comments) are not
+ * added to the parse trees, they will not appear in the output of this
+ * method.
+ */
+@Override
+public String getText() {
+if (getChildCount() == 0) {
+return "";
+}
+
+StringBuilder builder = new StringBuilder();
+for (int i = 0; i < getChildCount(); i++) {
+builder.append(getChild(i).getText());
+}
+
+return builder.toString();
+}
+
 public int getRuleIndex() { return -1; }

 @Override

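
The hidden-channel caveat in the new Javadoc is worth a concrete
illustration (hypothetical grammar that sends whitespace to a hidden
channel, input "int x = 1;"):

    // ctx.getText()       -> "intx=1;"    hidden tokens never reach the tree
    // tokens.getText(ctx) -> "int x = 1;" buffer keeps every token in range
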
@@ -1,51 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2011 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.v4.runtime;
-
-/** A stream of either tokens or tree nodes */
-public interface SymbolStream<T> extends IntStream {
-/** Get the symbol at absolute index i; 0..n-1.
- * This is only valid if the underlying stream implementation buffers
- * all of the incoming objects.
- *
- * @throws UnsupportedOperationException if the index {@code i} is outside
- * the marked region and the stream does not support accessing symbols by
- * index outside of marked regions.
- */
-public T get(int i);
-
-/** Get symbol at current input pointer + {@code k} ahead where {@code k=1}
- * is next symbol. k<0 indicates objects in the past. So -1 is previous
- * Object and -2 is two Objects ago. {@code LT(0)} is undefined. For i>=n,
- * return an object representing EOF. Return {@code null} for {@code LT(0)}
- * and any index that results in an absolute index that is negative.
- */
-T LT(int k);
-}

@@ -29,8 +29,10 @@

 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
+
 /** A stream of tokens accessing tokens from a TokenSource */
-public interface TokenStream extends SymbolStream<Token> {
+public interface TokenStream extends IntStream {
 /** Get Token at current input pointer + i ahead where i=1 is next Token.
 * i<0 indicates tokens in the past. So -1 is previous token and -2 is
 * two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken.

@@ -38,14 +40,8 @@ public interface TokenStream extends SymbolStream<Token> {
 * that is negative.
 * TODO (Sam): Throw exception for invalid k?
 */
-@Override
 public Token LT(int k);

-/** How far ahead has the stream been asked to look? The return
- * value is a valid index from 0..n-1.
- */
-// int range();
-
 /** Get a token at an absolute index i; 0..n-1. This is really only
 * needed for profiling and debugging and token stream rewriting.
 * If you don't want to buffer up tokens, then this method makes no

@@ -53,7 +49,6 @@ public interface TokenStream extends SymbolStream<Token> {
 * I believe DebugTokenStream can easily be altered to not use
 * this method, removing the dependency.
 */
-@Override
 public Token get(int i);

 /** Where is this stream pulling tokens from? This is not the name, but

@@ -61,18 +56,25 @@ public interface TokenStream extends SymbolStream<Token> {
 */
 public TokenSource getTokenSource();

-/** Return the text of all tokens from start to stop, inclusive.
- * If the stream does not buffer all the tokens then it can just
- * return "" or null; Users should not access $ruleLabel.text in
- * an action of course in that case.
+/** Return the text of all tokens from within the interval.
+ * If the stream does not buffer all the tokens then it must
+ * throw UnsupportedOperationException;
+ * Users should not access $ruleLabel.text in an action of course in
+ * that case.
+ * @param interval
 */
-public String toString(int start, int stop);
+public String getText(Interval interval);
+
+public String getText();
+
+public String getText(RuleContext ctx);

 /** Because the user is not required to use a token with an index stored
 * in it, we must provide a means for two token objects themselves to
 * indicate the start/end location. Most often this will just delegate
-* to the other toString(int,int). This is also parallel with
-* the TreeNodeStream.toString(Object,Object).
+* to the other getText(Interval).
+* If the stream does not buffer all the tokens then it must
+* throw UnsupportedOperationException;
 */
-public String toString(Token start, Token stop);
+public String getText(Token start, Token stop);
 }

@@ -1,6 +1,6 @@
 /*
 [The "BSD license"]
-Copyright (c) 2011 Terence Parr
+Copyright (c) 2012 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
@@ -28,23 +28,38 @@
 */
 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.Nullable;

-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;

-/** Useful for dumping out the input stream after doing some
- * augmentation or other manipulations.
+/** Useful for rewriting out a buffered input token stream after doing some
+ * augmentation or other manipulations on it.
 *
 * You can insert stuff, replace, and delete chunks. Note that the
 * operations are done lazily--only if you convert the buffer to a
-* String. This is very efficient because you are not moving data around
-* all the time. As the buffer of tokens is converted to strings, the
-* toString() method(s) check to see if there is an operation at the
-* current index. If so, the operation is done and then normal String
+* String with getText(). This is very efficient because you are not moving
+* data around all the time. As the buffer of tokens is converted to strings,
+* the getText() method(s) scan the input token stream and check
+* to see if there is an operation at the current index.
+* If so, the operation is done and then normal String
 * rendering continues on the buffer. This is like having multiple Turing
 * machine instruction streams (programs) operating on a single input tape. :)
 *
-* Since the operations are done lazily at toString-time, operations do not
+* This rewriter makes no modifications to the token stream. It does not
+* ask the stream to fill itself up nor does it advance the input cursor.
+* The token stream index() will return the same value before and after
+* any getText() call.
+*
+* The rewriter only works on tokens that you have in the buffer and
+* ignores the current input cursor. If you are buffering tokens on-demand,
+* calling getText() halfway through the input will only do rewrites
+* for those tokens in the first half of the file.
+*
+* Since the operations are done lazily at getText-time, operations do not
 * screw up the token index values. That is, an insert operation at token
 * index i does not change the index values for tokens i+1..n-1.
 *

@@ -56,19 +71,18 @@ import java.util.*;
 *
 * CharStream input = new ANTLRFileStream("input");
 * TLexer lex = new TLexer(input);
-* TokenRewriteStream tokens = new TokenRewriteStream(lex);
+* CommonTokenStream tokens = new CommonTokenStream(lex);
 * T parser = new T(tokens);
+* TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
 * parser.startRule();
 *
-* Then in the rules, you can execute
+* Then in the rules, you can execute (assuming rewriter is visible):
 * Token t,u;
 * ...
-* input.insertAfter(t, "text to put after t");}
-* input.insertAfter(u, "text after u");}
+* rewriter.insertAfter(t, "text to put after t");}
+* rewriter.insertAfter(u, "text after u");}
 * System.out.println(tokens.toString());
 *
-* Actually, you have to cast the 'input' to a TokenRewriteStream. :(
-*
 * You can also have multiple "instruction streams" and get multiple
 * rewrites from a single pass over the input. Just name the instruction
 * streams and use that name again when printing the buffer. This could be

@@ -83,7 +97,7 @@ import java.util.*;
 * If you don't use named rewrite streams, a "default" stream is used as
 * the first example shows.
 */
-public class TokenRewriteStream extends CommonTokenStream {
+public class TokenStreamRewriter {
 public static final String DEFAULT_PROGRAM_NAME = "default";
 public static final int PROGRAM_INIT_SIZE = 100;
 public static final int MIN_TOKEN_INDEX = 0;

@@ -164,33 +178,28 @@ public class TokenRewriteStream extends CommonTokenStream {
 }
 }

+/** Our source stream */
+protected final TokenStream tokens;
+
 /** You may have multiple, named streams of rewrite operations.
 * I'm calling these things "programs."
 * Maps String (name) -> rewrite (List)
 */
-protected Map<String, List<RewriteOperation>> programs = null;
+protected final Map<String, List<RewriteOperation>> programs;

 /** Map String (program name) -> Integer index */
-protected Map<String, Integer> lastRewriteTokenIndexes = null;
+protected final Map<String, Integer> lastRewriteTokenIndexes;

-public TokenRewriteStream() {
-init();
-}
-
-protected void init() {
+public TokenStreamRewriter(TokenStream tokens) {
+this.tokens = tokens;
 programs = new HashMap<String, List<RewriteOperation>>();
-programs.put(DEFAULT_PROGRAM_NAME, new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE));
+programs.put(DEFAULT_PROGRAM_NAME,
+new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE));
 lastRewriteTokenIndexes = new HashMap<String, Integer>();
 }

-public TokenRewriteStream(TokenSource tokenSource) {
-super(tokenSource);
-init();
-}
-
-public TokenRewriteStream(TokenSource tokenSource, int channel) {
-super(tokenSource, channel);
-init();
+public final TokenStream getTokenStream() {
+return tokens;
 }

 public void rollback(int instructionIndex) {

@@ -340,44 +349,37 @@ public class TokenRewriteStream extends CommonTokenStream {
 return is;
 }

-public String toOriginalString() {
-fill();
-return toOriginalString(MIN_TOKEN_INDEX, size()-1);
+/** Return the text from the original tokens altered per the
+ * instructions given to this rewriter.
+ */
+public String getText() {
+return getText(DEFAULT_PROGRAM_NAME, Interval.of(0,tokens.size()-1));
 }

-public String toOriginalString(int start, int end) {
-StringBuilder buf = new StringBuilder();
-for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
-if ( get(i).getType()!=Token.EOF ) buf.append(get(i).getText());
-}
-return buf.toString();
-}
-
-@Override
-public String toString() {
-fill();
-return toString(MIN_TOKEN_INDEX, size()-1);
-}
-
-public String toString(String programName) {
-fill();
-return toString(programName, MIN_TOKEN_INDEX, size()-1);
-}
-
-@Override
-public String toString(int start, int end) {
-return toString(DEFAULT_PROGRAM_NAME, start, end);
-}
-
-public String toString(String programName, int start, int end) {
+/** Return the text associated with the tokens in the interval from the
+ * original token stream but with the alterations given to this rewriter.
+ * The interval refers to the indexes in the original token stream.
+ * We do not alter the token stream in any way, so the indexes
+ * and intervals are still consistent. Includes any operations done
+ * to the first and last token in the interval. So, if you did an
+ * insertBefore on the first token, you would get that insertion.
+ * The same is true if you do an insertAfter the stop token.
+ */
+public String getText(Interval interval) {
+return getText(DEFAULT_PROGRAM_NAME, interval);
+}
+
+public String getText(String programName, Interval interval) {
 List<RewriteOperation> rewrites = programs.get(programName);
+int start = interval.a;
+int stop = interval.b;

 // ensure start/end are in range
-if ( end>tokens.size()-1 ) end = tokens.size()-1;
+if ( stop>tokens.size()-1 ) stop = tokens.size()-1;
 if ( start<0 ) start = 0;

 if ( rewrites==null || rewrites.isEmpty() ) {
-return toOriginalString(start,end); // no instructions to execute
+return tokens.getText(interval); // no instructions to execute
 }
 StringBuilder buf = new StringBuilder();

@@ -386,7 +388,7 @@ public class TokenRewriteStream extends CommonTokenStream {

 // Walk buffer, executing instructions and emitting tokens
 int i = start;
-while ( i <= end && i < tokens.size() ) {
+while ( i <= stop && i < tokens.size() ) {
 RewriteOperation op = indexToOp.get(i);
 indexToOp.remove(i); // remove so any left have index size-1
 Token t = tokens.get(i);

@@ -403,12 +405,10 @@ public class TokenRewriteStream extends CommonTokenStream {
 // include stuff after end if it's last index in buffer
 // So, if they did an insertAfter(lastValidIndex, "foo"), include
 // foo if end==lastValidIndex.
-if ( end==tokens.size()-1 ) {
+if ( stop==tokens.size()-1 ) {
 // Scan any remaining operations after last token
 // should be included (they will be inserts).
-Iterator<RewriteOperation> it = indexToOp.values().iterator();
-while (it.hasNext()) {
-RewriteOperation op = it.next();
+for (RewriteOperation op : indexToOp.values()) {
 if ( op.index >= tokens.size()-1 ) buf.append(op.text);
 }
 }

@@ -569,10 +569,6 @@ public class TokenRewriteStream extends CommonTokenStream {
 return x+y;
 }

-protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind) {
-return getKindOfOps(rewrites, kind, rewrites.size());
-}
-
 /** Get all operations before an index of a particular kind */
 protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind, int before) {
 List<T> ops = new ArrayList<T>();

@@ -586,15 +582,4 @@ public class TokenRewriteStream extends CommonTokenStream {
 return ops;
 }

-public String toDebugString() {
-return toDebugString(MIN_TOKEN_INDEX, size()-1);
-}
-
-public String toDebugString(int start, int end) {
-StringBuilder buf = new StringBuilder();
-for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
-buf.append(get(i));
-}
-return buf.toString();
-}
 }

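
A complete sketch of the new rewriter workflow described in the updated
Javadoc. TLexer/TParser stand in for hypothetical generated classes, exactly
as in the Javadoc itself; the rewriter records edit instructions and only
applies them when getText() renders the buffer:

    CharStream input = new ANTLRFileStream("input");
    TLexer lex = new TLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lex);
    TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
    TParser parser = new TParser(tokens);
    parser.startRule();
    // Edits are instructions, not mutations; token indexes stay valid:
    rewriter.insertBefore(tokens.get(0), "// generated\n");
    System.out.println(rewriter.getText()); // original text plus edits
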
@@ -29,6 +29,8 @@

 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;

@@ -195,7 +197,11 @@ public class UnbufferedCharStream implements CharStream {
 }

 @Override
-public String substring(int start, int stop) {
-return null; // map to buffer indexes
+public String getText(Interval interval) {
+if (interval.a < bufferStartIndex || interval.b >= bufferStartIndex + n) {
+throw new UnsupportedOperationException();
+}
+
+return new String(data, interval.a, interval.length());
 }
 }

@@ -29,6 +29,7 @@

 package org.antlr.v4.runtime;

+import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.LookaheadStream;

 /** A token stream that pulls tokens from the source on-demand and

@@ -78,13 +79,41 @@ public class UnbufferedTokenStream<T extends Token>
 public TokenSource getTokenSource() { return tokenSource; }

 @Override
-public String toString(int start, int stop) {
-throw new UnsupportedOperationException("unbuffered stream can't give strings");
+public String getText(Interval interval) {
+int bufferStartIndex = currentElementIndex - p;
+int bufferStopIndex = bufferStartIndex + data.size() - 1;
+
+int start = interval.a;
+int stop = interval.b;
+if (start < bufferStartIndex || stop > bufferStopIndex) {
+throw new UnsupportedOperationException();
+}
+
+StringBuilder buf = new StringBuilder();
+for (int i = start; i <= stop; i++) {
+T t = data.get(i - bufferStartIndex);
+buf.append(t.getText());
+}
+
+return buf.toString();
 }

 @Override
-public String toString(Token start, Token stop) {
-throw new UnsupportedOperationException("unbuffered stream can't give strings");
+public String getText() {
+return getText(Interval.of(0,index()));
+}
+
+@Override
+public String getText(RuleContext ctx) {
+return getText(ctx.getSourceInterval());
+}
+
+@Override
+public String getText(Token start, Token stop) {
+if ( start!=null && stop!=null ) {
+return getText(Interval.of(start.getTokenIndex(), stop.getTokenIndex()));
+}
+return null;
 }

 @Override

@@ -92,7 +121,13 @@ public class UnbufferedTokenStream<T extends Token>

 @Override
 public T get(int i) {
-throw new UnsupportedOperationException("Absolute token indexes are meaningless in an unbuffered stream");
+int bufferStartIndex = currentElementIndex - p;
+int bufferStopIndex = bufferStartIndex + data.size() - 1;
+if (i < bufferStartIndex || i > bufferStopIndex) {
+throw new UnsupportedOperationException();
+}
+
+return data.get(i - bufferStartIndex);
 }

 @Override

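
Both unbuffered streams now serve absolute indexes only while they sit in the
live buffer window. A self-contained sketch of the window arithmetic that
get()/getText() above apply (field names mirror the diff; the concrete values
are illustrative):

    public class WindowCheckSketch {
        public static void main(String[] args) {
            int currentElementIndex = 10; // absolute index of current element
            int p = 3;                    // its offset within the buffer
            int size = 8;                 // data.size()

            int bufferStartIndex = currentElementIndex - p;    // 7
            int bufferStopIndex = bufferStartIndex + size - 1; // 14

            int i = 5; // requested absolute index
            if (i < bufferStartIndex || i > bufferStopIndex) {
                System.out.println("index " + i + " is gone: UnsupportedOperationException");
            } else {
                System.out.println("index " + i + " maps to data[" + (i - bufferStartIndex) + "]");
            }
        }
    }
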
@@ -29,9 +29,15 @@

 package org.antlr.v4.runtime.atn;

-import org.antlr.v4.runtime.*;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.IntStream;
+import org.antlr.v4.runtime.Lexer;
+import org.antlr.v4.runtime.LexerNoViableAltException;
+import org.antlr.v4.runtime.RuleContext;
+import org.antlr.v4.runtime.Token;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.dfa.DFAState;
+import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.NotNull;
 import org.antlr.v4.runtime.misc.Nullable;

@@ -581,7 +587,7 @@ public class LexerATNSimulator extends ATNSimulator {
 if ( dfa_debug ) {
 System.out.format("no edge for %s\n", getTokenName(input.LA(1)));
 System.out.format("ATN exec upon %s at DFA state %d = %s\n",
-input.substring(startIndex, input.index()), s.stateNumber, s.configset);
+input.getText(Interval.of(startIndex, input.index())), s.stateNumber, s.configset);
 }

 int ttype = execATN(input, s.configset, s);

@@ -713,7 +719,7 @@ public class LexerATNSimulator extends ATNSimulator {
 /** Get the text of the current token */
 @NotNull
 public String getText(@NotNull CharStream input) {
-return input.substring(this.startIndex, input.index());
+return input.getText(Interval.of(startIndex, input.index()));
 }

 public int getLine() {

@ -34,10 +34,11 @@ import org.antlr.v4.runtime.NoViableAltException;
|
||||||
import org.antlr.v4.runtime.Parser;
|
import org.antlr.v4.runtime.Parser;
|
||||||
import org.antlr.v4.runtime.ParserRuleContext;
|
import org.antlr.v4.runtime.ParserRuleContext;
|
||||||
import org.antlr.v4.runtime.RuleContext;
|
import org.antlr.v4.runtime.RuleContext;
|
||||||
import org.antlr.v4.runtime.SymbolStream;
|
|
||||||
import org.antlr.v4.runtime.Token;
|
import org.antlr.v4.runtime.Token;
|
||||||
|
import org.antlr.v4.runtime.TokenStream;
|
||||||
import org.antlr.v4.runtime.dfa.DFA;
|
import org.antlr.v4.runtime.dfa.DFA;
|
||||||
import org.antlr.v4.runtime.dfa.DFAState;
|
import org.antlr.v4.runtime.dfa.DFAState;
|
||||||
|
import org.antlr.v4.runtime.misc.Interval;
|
||||||
import org.antlr.v4.runtime.misc.IntervalSet;
|
import org.antlr.v4.runtime.misc.IntervalSet;
|
||||||
import org.antlr.v4.runtime.misc.NotNull;
|
import org.antlr.v4.runtime.misc.NotNull;
|
||||||
import org.antlr.v4.runtime.misc.Nullable;
|
import org.antlr.v4.runtime.misc.Nullable;
|
||||||
|
@ -285,7 +286,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
public void reset() {
|
public void reset() {
|
||||||
}
|
}
|
||||||
|
|
||||||
public int adaptivePredict(@NotNull SymbolStream<? extends Symbol> input, int decision,
|
public int adaptivePredict(@NotNull TokenStream input, int decision,
|
||||||
@Nullable ParserRuleContext<?> outerContext)
|
@Nullable ParserRuleContext<?> outerContext)
|
||||||
{
|
{
|
||||||
predict_calls++;
|
predict_calls++;
|
||||||
|
@ -311,7 +312,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public int predictATN(@NotNull DFA dfa, @NotNull SymbolStream<? extends Symbol> input,
|
public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
|
||||||
@Nullable ParserRuleContext<?> outerContext)
|
@Nullable ParserRuleContext<?> outerContext)
|
||||||
{
|
{
|
||||||
if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;
|
if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;
|
||||||
|
@ -343,7 +344,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
}
|
}
|
||||||
|
|
||||||
public int execDFA(@NotNull DFA dfa, @NotNull DFAState s0,
|
public int execDFA(@NotNull DFA dfa, @NotNull DFAState s0,
|
||||||
@NotNull SymbolStream<? extends Symbol> input, int startIndex,
|
@NotNull TokenStream input, int startIndex,
|
||||||
@Nullable ParserRuleContext<?> outerContext)
|
@Nullable ParserRuleContext<?> outerContext)
|
||||||
{
|
{
|
||||||
if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;
|
if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;
|
||||||
|
@ -396,8 +397,9 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
if ( dfa_debug && t>=0 ) System.out.println("no edge for "+parser.getTokenNames()[t]);
|
if ( dfa_debug && t>=0 ) System.out.println("no edge for "+parser.getTokenNames()[t]);
|
||||||
int alt;
|
int alt;
|
||||||
if ( dfa_debug ) {
|
if ( dfa_debug ) {
|
||||||
|
Interval interval = Interval.of(startIndex, parser.getTokenStream().index());
|
||||||
System.out.println("ATN exec upon "+
|
System.out.println("ATN exec upon "+
|
||||||
parser.getInputString(startIndex) +
|
parser.getTokenStream().getText(interval) +
|
||||||
" at DFA state "+s.stateNumber);
|
" at DFA state "+s.stateNumber);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -499,7 +501,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
|
|
||||||
*/
|
*/
|
||||||
public int execATN(@NotNull DFA dfa, @NotNull DFAState s0,
|
public int execATN(@NotNull DFA dfa, @NotNull DFAState s0,
|
||||||
@NotNull SymbolStream<? extends Symbol> input, int startIndex,
|
@NotNull TokenStream input, int startIndex,
|
||||||
ParserRuleContext<?> outerContext)
|
ParserRuleContext<?> outerContext)
|
||||||
{
|
{
|
||||||
if ( debug ) System.out.println("execATN decision "+dfa.decision+" exec LA(1)=="+ getLookaheadName(input));
|
if ( debug ) System.out.println("execATN decision "+dfa.decision+" exec LA(1)=="+ getLookaheadName(input));
|
||||||
|
@ -626,7 +628,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
public ATNConfigSet execATNWithFullContext(DFA dfa,
|
public ATNConfigSet execATNWithFullContext(DFA dfa,
|
||||||
DFAState D, // how far we got before failing over
|
DFAState D, // how far we got before failing over
|
||||||
@NotNull ATNConfigSet s0,
|
@NotNull ATNConfigSet s0,
|
||||||
@NotNull SymbolStream<? extends Symbol> input, int startIndex,
|
@NotNull TokenStream input, int startIndex,
|
||||||
ParserRuleContext<?> outerContext,
|
ParserRuleContext<?> outerContext,
|
||||||
int nalts,
|
int nalts,
|
||||||
boolean greedy)
|
boolean greedy)
|
||||||
|
@ -1293,7 +1295,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
return String.valueOf(t);
|
return String.valueOf(t);
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getLookaheadName(SymbolStream<? extends Symbol> input) {
|
public String getLookaheadName(TokenStream input) {
|
||||||
return getTokenName(input.LA(1));
|
return getTokenName(input.LA(1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1318,7 +1320,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
|
||||||
}
|
}
|
||||||
|
|
||||||
@NotNull
|
@NotNull
|
||||||
public NoViableAltException noViableAlt(@NotNull SymbolStream<? extends Symbol> input,
|
public NoViableAltException noViableAlt(@NotNull TokenStream input,
|
||||||
@NotNull ParserRuleContext<?> outerContext,
|
@NotNull ParserRuleContext<?> outerContext,
|
||||||
@NotNull ATNConfigSet configs,
|
@NotNull ATNConfigSet configs,
|
||||||
int startIndex)
|
int startIndex)
|
||||||
@@ -1405,16 +1407,18 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
 
 	public void reportAttemptingFullContext(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
 		if ( debug || retry_debug ) {
+			Interval interval = Interval.of(startIndex, stopIndex);
 			System.out.println("reportAttemptingFullContext decision="+dfa.decision+":"+configs+
-							   ", input="+parser.getInputString(startIndex, stopIndex));
+							   ", input="+parser.getTokenStream().getText(interval));
 		}
 		if ( parser!=null ) parser.getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, configs);
 	}
 
 	public void reportContextSensitivity(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
 		if ( debug || retry_debug ) {
+			Interval interval = Interval.of(startIndex, stopIndex);
 			System.out.println("reportContextSensitivity decision="+dfa.decision+":"+configs+
-							   ", input="+parser.getInputString(startIndex, stopIndex));
+							   ", input="+parser.getTokenStream().getText(interval));
 		}
 		if ( parser!=null ) parser.getErrorListenerDispatch().reportContextSensitivity(parser, dfa, startIndex, stopIndex, configs);
 	}

@@ -1441,9 +1445,10 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
 //			}
 //			i++;
 //		}
+			Interval interval = Interval.of(startIndex, stopIndex);
 			System.out.println("reportAmbiguity "+
 							   ambigAlts+":"+configs+
-							   ", input="+parser.getInputString(startIndex, stopIndex));
+							   ", input="+parser.getTokenStream().getText(interval));
 		}
 		if ( parser!=null ) parser.getErrorListenerDispatch().reportAmbiguity(parser, dfa, startIndex, stopIndex,
 																		ambigAlts, configs);
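The three report hunks above migrate the simulator from Parser.getInputString(start, stop) to the Interval-based TokenStream.getText(). A minimal sketch of the new call pattern, using only runtime types visible in this diff (the helper name and variables are illustrative):

    import org.antlr.v4.runtime.TokenStream;
    import org.antlr.v4.runtime.misc.Interval;

    // Fetch the original text spanned by two token indexes, inclusive.
    static String textBetween(TokenStream tokens, int startIndex, int stopIndex) {
        Interval interval = Interval.of(startIndex, stopIndex); // token-index range
        return tokens.getText(interval);                        // replaces toString(start, stop)
    }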
@@ -57,6 +57,9 @@ import java.lang.reflect.Method;
  *  [input-filename]
  */
 public class TestRig {
+	public static final String LEXER_START_RULE_NAME = "tokens";
+
 	public static void main(String[] args) throws Exception {
 		String grammarName;
 		String startRuleName;

@@ -69,10 +72,12 @@ public class TestRig {
 		boolean diagnostics = false;
 		String encoding = null;
 		if ( args.length < 2 ) {
-			System.err.println("java org.antlr.v4.runtime.misc.TestRig GrammarName startRuleName" +
-							   " [-tokens] [-print] [-gui] [-ps file.ps] [-encoding encodingname]" +
-							   " [-trace] [-diagnostics]"+
+			System.err.println("java org.antlr.v4.runtime.misc.TestRig GrammarName startRuleName\n" +
+							   " [-tokens] [-print] [-gui] [-ps file.ps] [-encoding encodingname]\n" +
+							   " [-trace] [-diagnostics]\n"+
 							   " [input-filename]");
+			System.err.println("Use startRuleName='tokens' if GrammarName is a lexer grammar.");
+			System.err.println("Omitting input-filename makes rig read from stdin.");
 			return;
 		}
 		int i=0;

@@ -121,16 +126,11 @@ public class TestRig {
 		}
 //		System.out.println("exec "+grammarName+"."+startRuleName);
 		String lexerName = grammarName+"Lexer";
-		String parserName = grammarName+"Parser";
 		ClassLoader cl = Thread.currentThread().getContextClassLoader();
 		Class lexerClass = cl.loadClass(lexerName);
 		if ( lexerClass==null ) {
 			System.err.println("Can't load "+lexerName);
 		}
-		Class parserClass = cl.loadClass(parserName);
-		if ( parserClass==null ) {
-			System.err.println("Can't load "+parserName);
-		}
 
 		InputStream is = System.in;
 		if ( inputFile!=null ) {

@@ -158,6 +158,13 @@ public class TestRig {
 			}
 		}
 
+		if ( startRuleName.equals(LEXER_START_RULE_NAME) ) return;
+
+		String parserName = grammarName+"Parser";
+		Class parserClass = cl.loadClass(parserName);
+		if ( parserClass==null ) {
+			System.err.println("Can't load "+parserName);
+		}
 		Constructor<Parser> parserCtor = parserClass.getConstructor(TokenStream.class);
 		Parser parser = parserCtor.newInstance(tokens);
 
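With the early return above, TestRig can now drive a lexer grammar with no parser class on the classpath. A hypothetical invocation ("My" and "input.txt" are made-up names for illustration):

    // Equivalent to: java org.antlr.v4.runtime.misc.TestRig My tokens -tokens input.txt
    public static void lexerOnlyRun() throws Exception {
        TestRig.main(new String[] {
            "My",                          // rig loads MyLexer; MyParser need not exist
            TestRig.LEXER_START_RULE_NAME, // "tokens" => stop after lexing
            "-tokens",                     // print the token stream
            "input.txt"
        });
    }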
@@ -84,6 +84,9 @@ public interface ParseTree extends SyntaxTree {
 			return visitor.visitTerminal(this);
 		}
 
+		@Override
+		public String getText() { return symbol.getText(); }
+
 		@Override
 		public String toStringTree(Parser parser) {
 			return toString();

@@ -135,6 +138,12 @@ public interface ParseTree extends SyntaxTree {
 	/** The ParseTreeVisitor needs a double dispatch method */
 	public <T> T accept(ParseTreeVisitor<? extends T> visitor);
 
+	/** Return the combined text of all leaf nodes. Does not get any
+	 *  off-channel tokens (if any) so won't return whitespace and
+	 *  comments if they are sent to parser on hidden channel.
+	 */
+	public String getText();
+
 	/** Specialize toStringTree so that it can print out more information
 	 *  based upon the parser.
 	 */
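The new ParseTree.getText() concatenates leaf-token text only, so hidden-channel whitespace and comments are dropped; pulling the same span back out of the token stream keeps them. A hedged sketch of the distinction (ctx and parser stand for any generated context and parser):

    String leaves = ctx.getText(); // leaf tokens only, no hidden-channel text
    // Same span via the token stream includes whitespace/comments between tokens:
    String original = parser.getTokenStream().getText(
        Interval.of(ctx.start.getTokenIndex(), ctx.stop.getTokenIndex()));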
@@ -1,17 +1,23 @@
+Parsers
+
+* Full context LL(*) not SLL(*)
+* Adaptive, takes all but indirect left-recursion
+
 Actions/scopes
 
 * no global scopes. no scope[n].
 
 Trees
 
+* no ASTs
+* no tree grammars
+* parse trees created by default
 * moved methods to Trees
-* Tree->AST
-* added parse trees
 
-no -> and other tree construct in tree grammars
 
 Lexers
 
 * Added [Abc] notation
 
 * unicode rule/token names
 
+* -> skip notation
@@ -1,8 +1,25 @@
 grammar T;
-s : e ';' ;
-e : e '*' e
-  | ID
-  | INT
+@members {
+public static class LeafListener extends TBaseListener {
+    public void exitCall(TParser.CallContext ctx) {
+        System.out.printf("%s %s",ctx.e().start.getText(),
+                          ctx.eList());
+    }
+    public void exitInt(TParser.IntContext ctx) {
+        System.out.println(ctx.INT().getSymbol().getText());
+    }
+}
+}
+s
+@init {setBuildParseTree(true);}
+@after { System.out.println($r.ctx.toStringTree(this)); ParseTreeWalker walker = new ParseTreeWalker();
+         walker.walk(new LeafListener(), $r.ctx);}
+    : r=e ;
+e : e '(' eList ')' # Call
+  | INT             # Int
   ;
-INT : '0'..'9'+;
-WS : (' '|'\n') {skip();} ;
+eList : e (',' e)* ;
+MULT: '*' ;
+ADD : '+' ;
+INT : [0-9]+ ;
+WS : [ \t\n]+ -> skip ;
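The rewritten grammar T above exercises the new `#` alternative-label syntax; `->` is now reserved for lexer commands such as `-> skip`, so parser alternatives take `#` labels instead. Each label still yields a Name+Context class plus enter/exit hooks on the generated listener, which is what the @members LeafListener relies on. A small hedged variation on that listener (TBaseListener/TParser are the classes generated from grammar T):

    // One exit hook per labeled alternative; '# Call' yields CallContext/exitCall.
    public class CallPrinter extends TBaseListener {
        @Override
        public void exitCall(TParser.CallContext ctx) {
            System.out.println("call on " + ctx.e().start.getText());
        }
    }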
@@ -538,12 +538,12 @@ TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label>!=null?Integer.valueOf(<ctx(t)>.
 
 RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>!=null?(<ctx(r)>.<r.label>.start):null)"
 RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>!=null?(<ctx(r)>.<r.label>.stop):null)"
-RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>!=null?_input.toString(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop):null)"
+RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>!=null?_input.getText(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop):null)"
 RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"
 
 ThisRulePropertyRef_start(r) ::= "_localctx.start"
 ThisRulePropertyRef_stop(r) ::= "_localctx.stop"
-ThisRulePropertyRef_text(r) ::= "_input.toString(_localctx.start, _input.LT(-1))"
+ThisRulePropertyRef_text(r) ::= "_input.getText(_localctx.start, _input.LT(-1))"
 ThisRulePropertyRef_ctx(r) ::= "_localctx"
 
 NonLocalAttrRef(s) ::= "((<s.ruleName; format=\"cap\">Context)getInvokingContext(<s.ruleIndex>)).<s.name>"
@@ -74,6 +74,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.Date;
 import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 public class Tool {
 	public String VERSION = "4.0-"+new Date();

@@ -166,8 +167,7 @@ public class Tool {
 	public ErrorManager errMgr = new ErrorManager(this);
 	public LogManager logMgr = new LogManager();
 
-	List<ANTLRToolListener> listeners =
-		Collections.synchronizedList(new ArrayList<ANTLRToolListener>());
+	List<ANTLRToolListener> listeners = new CopyOnWriteArrayList<ANTLRToolListener>();
 
 	/** Track separately so if someone adds a listener, it's the only one
 	 *  instead of it and the default stderr listener.
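CopyOnWriteArrayList iterates over an immutable snapshot, so the tool can notify listeners while another thread (or a listener itself) registers a new one, without the ConcurrentModificationException a synchronizedList iterator can throw. A minimal sketch of the property being bought (listener type simplified to Runnable for illustration):

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    List<Runnable> listeners = new CopyOnWriteArrayList<Runnable>();
    for (Runnable l : listeners) { // walks a snapshot taken when iteration starts
        l.run();
        // a concurrent listeners.add(...) here is safe; this loop keeps
        // iterating the old snapshot and never throws
    }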
@@ -359,7 +359,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
 		TokenStream tokens = input.getTokenStream();
 		// find =>
 		for (int i=stop; i>=start; i--) {
-			if ( tokens.get(i).getType()==RARROW ) {
+			if ( tokens.get(i).getType()==POUND ) {
 				altAST.setTokenStopIndex(i-1);
 				return;
 			}
@@ -4,7 +4,7 @@ public class MutableInt extends Number implements Comparable<Number> {
 	public int v;
 
 	public MutableInt(int v) { this.v = v; }
-	@Override public int compareTo(Number o) { return v; }
+	@Override public int compareTo(Number o) { return v-o.intValue(); }
 	@Override public int intValue() { return v; }
 	@Override public long longValue() { return v; }
 	@Override public float floatValue() { return v; }
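The old compareTo ignored its argument, so any positive MutableInt compared as greater than everything. Returning the difference restores the Comparable contract: the sign of the result orders the operands (with the usual caveat that the subtraction can overflow at extreme int values). A quick check of the fixed behavior:

    MutableInt three = new MutableInt(3);
    assert three.compareTo(5) < 0;  // 3-5 = -2: correctly less-than
    assert three.compareTo(3) == 0; // equal values compare equal
    assert three.compareTo(1) > 0;  // old code also returned 3 here, but only by accident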
@@ -535,7 +535,7 @@ ruleAltList
 
 labeledAlt
 	:	alternative
-		( RARROW! id! {((AltAST)$alternative.tree).altLabel=$id.tree;}
+		( POUND! id! {((AltAST)$alternative.tree).altLabel=$id.tree;}
 		)?
 	;
 
@@ -31,6 +31,7 @@ package org.antlr.v4.semantics;
 
 import org.antlr.v4.analysis.LeftRecursiveRuleTransformer;
 import org.antlr.v4.parse.ANTLRParser;
+import org.antlr.v4.runtime.Token;
 import org.antlr.v4.tool.*;
 import org.antlr.v4.tool.ast.GrammarAST;
 

@@ -116,7 +117,7 @@ public class SemanticPipeline {
 		}
 		else {
 			assignTokenTypes(g, collector.tokensDefs,
-							 collector.tokenIDRefs, collector.strings);
+							 collector.tokenIDRefs, collector.terminals);
 		}
 
 		// CHECK RULE REFS NOW (that we've defined rules in grammar)

@@ -163,7 +164,7 @@ public class SemanticPipeline {
 	}
 
 	void assignTokenTypes(Grammar g, List<GrammarAST> tokensDefs,
-						  List<GrammarAST> tokenIDs, Set<String> strings)
+						  List<GrammarAST> tokenIDs, List<GrammarAST> terminals)
 	{
 		//Grammar G = g.getOutermostGrammar(); // put in root, even if imported
 

@@ -174,6 +175,9 @@ public class SemanticPipeline {
 				String lit = alias.getChild(1).getText();
 				g.defineTokenAlias(name, lit);
 			}
+			else {
+				g.defineTokenName(alias.getText());
+			}
 		}
 
 		// DEFINE TOKEN TYPES FOR X : 'x' ; RULES

@@ -187,10 +191,25 @@ public class SemanticPipeline {
 		*/
 
 		// DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
-		for (GrammarAST idAST : tokenIDs) { g.defineTokenName(idAST.getText()); }
+		for (GrammarAST idAST : tokenIDs) {
+			if (g.getTokenType(idAST.getText()) == Token.INVALID_TYPE) {
+				g.tool.errMgr.grammarError(ErrorType.IMPLICIT_TOKEN_DEFINITION, g.fileName, idAST.token, idAST.getText());
+			}
+
+			g.defineTokenName(idAST.getText());
+		}
+
+		// VERIFY TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
+		for (GrammarAST termAST : terminals) {
+			if (termAST.getType() != ANTLRParser.STRING_LITERAL) {
+				continue;
+			}
+
+			if (g.getTokenType(termAST.getText()) == Token.INVALID_TYPE) {
+				g.tool.errMgr.grammarError(ErrorType.IMPLICIT_STRING_DEFINITION, g.fileName, termAST.token, termAST.getText());
+			}
+		}
 
-		// DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
-		for (String s : strings) { g.defineStringLiteral(s); }
 		g.tool.log("semantics", "tokens="+g.tokenNameToTypeMap);
 		g.tool.log("semantics", "strings="+g.stringLiteralToTypeMap);
 	}
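After this change, a parser grammar that references a token it never declares draws error 125 (and a bare string literal in a non-combined grammar draws error 126) instead of silently minting a token type; that is why the test grammars later in this diff all gain tokens{...} sections. A hypothetical grammar string in the style those tests use:

    String grammar =
        "parser grammar P;\n" +
        "tokens{ID;}\n" +   // omit this line and IMPLICIT_TOKEN_DEFINITION (125) fires
        "a : ID ;\n";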
@@ -131,7 +131,7 @@ public class ErrorManager {
 			locationValid = true;
 		}
 
-		messageFormatST.add("id", msg.errorType.ordinal());
+		messageFormatST.add("id", msg.errorType.code);
 		messageFormatST.add("text", messageST);
 
 		if (locationValid) reportST.add("location", locationST);
@@ -144,6 +144,9 @@ public enum ErrorType {
 	RULE_WITH_TOO_FEW_ALT_LABELS(122, "rule <arg>: must label all alternatives or none", ErrorSeverity.ERROR),
 	ALT_LABEL_REDEF(123, "rule alt label <arg> redefined in rule <arg2>, originally in <arg3>", ErrorSeverity.ERROR),
 	ALT_LABEL_CONFLICTS_WITH_RULE(124, "rule alt label <arg> conflicts with rule <arg2>", ErrorSeverity.ERROR),
+	IMPLICIT_TOKEN_DEFINITION(125, "implicit definition of token <arg> in parser", ErrorSeverity.WARNING),
+	IMPLICIT_STRING_DEFINITION(126, "cannot create implicit token for string literal <arg> in non-combined grammar", ErrorSeverity.ERROR),
 
 	/** Documentation comment is unterminated */
 	//UNTERMINATED_DOC_COMMENT(, "", ErrorSeverity.ERROR),
 

@@ -185,16 +188,18 @@ public enum ErrorType {
 
 	;
 
-	public String msg;
-	public int code; // unique, deterministic unchanging error code once we release
-	public ErrorSeverity severity;
-	public Boolean abortsAnalysis;
-	public Boolean abortsCodegen;
+	public final String msg;
+	public final int code; // unique, deterministic unchanging error code once we release
+	public final ErrorSeverity severity;
+	public final Boolean abortsAnalysis;
+	public final Boolean abortsCodegen;
 
 	ErrorType(int code, String msg, ErrorSeverity severity) {
 		this.code = code;
 		this.msg = msg;
 		this.severity = severity;
+		this.abortsAnalysis = false;
+		this.abortsCodegen = false;
 	}
 
 //	ErrorType(String msg, ErrorSeverity severity, boolean abortsAnalysis) {
@@ -30,8 +30,14 @@
 package org.antlr.v4.tool.interp;
 
 import org.antlr.v4.Tool;
-import org.antlr.v4.runtime.*;
-import org.antlr.v4.runtime.atn.*;
+import org.antlr.v4.runtime.Parser;
+import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.atn.ATN;
+import org.antlr.v4.runtime.atn.ATNState;
+import org.antlr.v4.runtime.atn.DecisionState;
+import org.antlr.v4.runtime.atn.ParserATNSimulator;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.misc.NotNull;
 import org.antlr.v4.runtime.misc.Nullable;

@@ -80,14 +86,14 @@ public class ParserInterpreter {
 		atnSimulator = new ParserATNSimulator<Token>(new DummyParser(g, input), g.atn);
 	}
 
-	public int predictATN(@NotNull DFA dfa, @NotNull SymbolStream<Token> input,
+	public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
 						  @Nullable ParserRuleContext outerContext,
 						  boolean useContext)
 	{
 		return atnSimulator.predictATN(dfa, input, outerContext);
 	}
 
-	public int adaptivePredict(@NotNull SymbolStream<Token> input, int decision,
+	public int adaptivePredict(@NotNull TokenStream input, int decision,
 							   @Nullable ParserRuleContext outerContext)
 	{
 		return atnSimulator.adaptivePredict(input, decision, outerContext);
@@ -35,12 +35,12 @@ import org.antlr.v4.automata.ATNPrinter;
 import org.antlr.v4.automata.LexerATNFactory;
 import org.antlr.v4.automata.ParserATNFactory;
 import org.antlr.v4.codegen.CodeGenerator;
-import org.antlr.v4.misc.Utils;
 import org.antlr.v4.runtime.ANTLRInputStream;
 import org.antlr.v4.runtime.CharStream;
 import org.antlr.v4.runtime.CommonToken;
 import org.antlr.v4.runtime.CommonTokenStream;
 import org.antlr.v4.runtime.Lexer;
+import org.antlr.v4.runtime.RuleContext;
 import org.antlr.v4.runtime.Token;
 import org.antlr.v4.runtime.TokenSource;
 import org.antlr.v4.runtime.TokenStream;

@@ -50,9 +50,11 @@ import org.antlr.v4.runtime.atn.ATNState;
 import org.antlr.v4.runtime.atn.DecisionState;
 import org.antlr.v4.runtime.atn.LexerATNSimulator;
 import org.antlr.v4.runtime.dfa.DFA;
+import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.Nullable;
 import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.tool.ANTLRMessage;
+import org.antlr.v4.tool.DefaultToolListener;
 import org.antlr.v4.tool.DOTGenerator;
 import org.antlr.v4.tool.Grammar;
 import org.antlr.v4.tool.GrammarSemanticsMessage;

@@ -382,12 +384,12 @@ public abstract class BaseTest {
 
 
 	/** Return true if all is ok, no errors */
-	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, String... extraOptions) {
+	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean defaultListener, String... extraOptions) {
 		boolean allIsWell = true;
 		System.out.println("dir "+tmpdir);
 		mkdir(tmpdir);
 		writeFile(tmpdir, fileName, grammarStr);
-		try {
+		ErrorQueue equeue = new ErrorQueue();
 		final List<String> options = new ArrayList<String>();
 		Collections.addAll(options, extraOptions);
 		options.add("-o");

@@ -395,14 +397,24 @@ public abstract class BaseTest {
 		options.add("-lib");
 		options.add(tmpdir);
 		options.add(new File(tmpdir,grammarFileName).toString());
+		try {
 		final String[] optionsA = new String[options.size()];
 		options.toArray(optionsA);
-		ErrorQueue equeue = new ErrorQueue();
 		Tool antlr = newTool(optionsA);
 		antlr.addListener(equeue);
+		if (defaultListener) {
+			antlr.addListener(new DefaultToolListener(antlr));
+		}
 		antlr.processGrammarsOnCommandLine();
-		if ( equeue.errors.size()>0 ) {
+		}
+		catch (Exception e) {
 			allIsWell = false;
+			System.err.println("problems building grammar: "+e);
+			e.printStackTrace(System.err);
+		}
+
+		allIsWell = equeue.errors.isEmpty();
+		if ( !defaultListener && !equeue.errors.isEmpty() ) {
 			System.err.println("antlr reports errors from "+options);
 			for (int i = 0; i < equeue.errors.size(); i++) {
 				ANTLRMessage msg = equeue.errors.get(i);

@@ -412,12 +424,14 @@ public abstract class BaseTest {
 			System.out.println(grammarStr);
 			System.out.println("###");
 		}
+		if ( !defaultListener && !equeue.warnings.isEmpty() ) {
+			System.err.println("antlr reports warnings from "+options);
+			for (int i = 0; i < equeue.warnings.size(); i++) {
+				ANTLRMessage msg = equeue.warnings.get(i);
+				System.err.println(msg);
 			}
-		catch (Exception e) {
-			allIsWell = false;
-			System.err.println("problems building grammar: "+e);
-			e.printStackTrace(System.err);
 		}
 
 		return allIsWell;
 	}
 

@@ -477,10 +491,24 @@ public abstract class BaseTest {
 													@Nullable String parserName,
 													String lexerName,
 													String... extraOptions)
+	{
+		return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions);
+	}
+
+	/** Return true if all is well */
+	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
+													String grammarStr,
+													@Nullable String parserName,
+													String lexerName,
+													boolean defaultListener,
+													String... extraOptions)
 	{
 		boolean allIsWell =
-			antlr(grammarFileName, grammarFileName, grammarStr, extraOptions);
-		boolean ok;
+			antlr(grammarFileName, grammarFileName, grammarStr, defaultListener, extraOptions);
+		if (!allIsWell) {
+			return false;
+		}
+
 		List<String> files = new ArrayList<String>();
 		if ( lexerName!=null ) {
 			files.add(lexerName+".java");

@@ -498,8 +526,7 @@ public abstract class BaseTest {
 				files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseParseListener.java");
 			}
 		}
-		ok = compile(files.toArray(new String[files.size()]));
-		if ( !ok ) { allIsWell = false; }
+		allIsWell = compile(files.toArray(new String[files.size()]));
 		return allIsWell;
 	}
 

@@ -1097,12 +1124,22 @@ public abstract class BaseTest {
 		}
 
 		@Override
-		public String toString(int start, int stop) {
-			return null;
+		public String getText() {
+			throw new UnsupportedOperationException("can't give strings");
 		}
 
 		@Override
-		public String toString(Token start, Token stop) {
+		public String getText(Interval interval) {
+			throw new UnsupportedOperationException("can't give strings");
+		}
+
+		@Override
+		public String getText(RuleContext ctx) {
+			throw new UnsupportedOperationException("can't give strings");
+		}
+
+		@Override
+		public String getText(Token start, Token stop) {
 			return null;
 		}
 	}
 
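The defaultListener flag threads through the harness so a test can route messages to ANTLR's normal console listener instead of asserting on the collected queue. A hedged usage sketch from inside a BaseTest subclass (grammar contents elided):

    // Build T.g4 but let DefaultToolListener print warnings rather than
    // having the harness dump them as suspected failures.
    boolean ok = rawGenerateAndBuildRecognizer(
        "T.g4", grammarStr, "TParser", "TLexer",
        true /* defaultListener */);
    assertTrue(ok);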
@@ -360,14 +360,14 @@ public class TestATNConstruction extends BaseTest {
 	@Test public void testNestedAstar() throws Exception {
 		Grammar g = new Grammar(
 			"parser grammar P;\n"+
-			"a : (',' ID*)*;");
+			"a : (COMMA ID*)*;");
 		String expecting =
 			"RuleStart_a_0->StarLoopEntry_13\n" +
 			"StarLoopEntry_13->StarBlockStart_11\n" +
 			"StarLoopEntry_13->s14\n" +
 			"StarBlockStart_11->s2\n" +
 			"s14->RuleStop_a_1\n" +
-			"s2-','->StarLoopEntry_8\n" +
+			"s2-COMMA->StarLoopEntry_8\n" +
 			"RuleStop_a_1-EOF->s16\n" +
 			"StarLoopEntry_8->StarBlockStart_6\n" +
 			"StarLoopEntry_8->s9\n" +
@@ -118,22 +118,22 @@ public class TestActionTranslation extends BaseTest {
 
 	@Test public void testRuleRefs() throws Exception {
 		String action = "$lab.start; $c.text;";
-		String expected = "(((AContext)_localctx).lab!=null?(((AContext)_localctx).lab.start):null); (((AContext)_localctx).c!=null?_input.toString(((AContext)_localctx).c.start,((AContext)_localctx).c.stop):null);";
+		String expected = "(((AContext)_localctx).lab!=null?(((AContext)_localctx).lab.start):null); (((AContext)_localctx).c!=null?_input.getText(((AContext)_localctx).c.start,((AContext)_localctx).c.stop):null);";
 		testActions(attributeTemplate, "inline", action, expected);
 	}
 
 	@Test public void testRefToTextAttributeForCurrentRule() throws Exception {
 		String action = "$a.text; $text";
 		String expected =
-			"(_localctx.a!=null?_input.toString(_localctx.a.start,_localctx.a.stop):" +
-			"null); _input.toString(_localctx.start, _input.LT(-1))";
+			"(_localctx.a!=null?_input.getText(_localctx.a.start,_localctx.a.stop):" +
+			"null); _input.getText(_localctx.start, _input.LT(-1))";
 		testActions(attributeTemplate, "init", action, expected);
 		expected =
-			"_input.toString(_localctx.start, _input.LT(-1)); _input.toString(_localctx.start, _input.LT(-1))";
+			"_input.getText(_localctx.start, _input.LT(-1)); _input.getText(_localctx.start, _input.LT(-1))";
 		testActions(attributeTemplate, "inline", action, expected);
 		expected =
-			"(_localctx.a!=null?_input.toString(_localctx.a.start,_localctx.a.stop):null);" +
-			" _input.toString(_localctx.start, _input.LT(-1))";
+			"(_localctx.a!=null?_input.getText(_localctx.a.start,_localctx.a.stop):null);" +
+			" _input.getText(_localctx.start, _input.LT(-1))";
 		testActions(attributeTemplate, "finally", action, expected);
 	}
 
@@ -9,6 +9,7 @@ public class TestAttributeChecks extends BaseTest {
 	String attributeTemplate =
 		"parser grammar A;\n"+
 		"@members {<members>}\n" +
+		"tokens{ID;}\n" +
 		"a[int x] returns [int y]\n" +
 		"@init {<init>}\n" +
 		" : id=ID ids+=ID lab=b[34] {\n" +

@@ -24,8 +25,8 @@ public class TestAttributeChecks extends BaseTest {
 		"d : ;\n";
 
 	String[] membersChecks = {
-		"$a", "error(29): A.g4:2:11: unknown attribute reference a in $a\n",
-		"$a.y", "error(29): A.g4:2:11: unknown attribute reference a in $a.y\n",
+		"$a", "error(63): A.g4:2:11: unknown attribute reference a in $a\n",
+		"$a.y", "error(63): A.g4:2:11: unknown attribute reference a in $a.y\n",
 	};
 
 	String[] initChecks = {

@@ -36,8 +37,8 @@ public class TestAttributeChecks extends BaseTest {
 		"$lab.e", "",
 		"$ids", "",
 
-		"$c", "error(29): A.g4:4:8: unknown attribute reference c in $c\n",
-		"$a.q", "error(31): A.g4:4:10: unknown attribute q for rule a in $a.q\n",
+		"$c", "error(63): A.g4:5:8: unknown attribute reference c in $c\n",
+		"$a.q", "error(65): A.g4:5:10: unknown attribute q for rule a in $a.q\n",
 	};
 
 	String[] inlineChecks = {

@@ -58,19 +59,19 @@ public class TestAttributeChecks extends BaseTest {
 	};
 
 	String[] bad_inlineChecks = {
-		"$lab", "error(33): A.g4:6:4: missing attribute access on rule reference lab in $lab\n",
-		"$q", "error(29): A.g4:6:4: unknown attribute reference q in $q\n",
-		"$q.y", "error(29): A.g4:6:4: unknown attribute reference q in $q.y\n",
-		"$q = 3", "error(29): A.g4:6:4: unknown attribute reference q in $q\n",
-		"$q = 3;", "error(29): A.g4:6:4: unknown attribute reference q in $q = 3;\n",
-		"$q.y = 3;", "error(29): A.g4:6:4: unknown attribute reference q in $q.y = 3;\n",
-		"$q = $blort;", "error(29): A.g4:6:4: unknown attribute reference q in $q = $blort;\n" +
-						"error(29): A.g4:6:9: unknown attribute reference blort in $blort\n",
-		"$a.ick", "error(31): A.g4:6:6: unknown attribute ick for rule a in $a.ick\n",
-		"$a.ick = 3;", "error(31): A.g4:6:6: unknown attribute ick for rule a in $a.ick = 3;\n",
-		"$b.d", "error(30): A.g4:6:6: cannot access rule d's parameter: $b.d\n", // can't see rule ref's arg
-		"$d.text", "error(29): A.g4:6:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref
-		"$lab.d", "error(30): A.g4:6:8: cannot access rule d's parameter: $lab.d\n",
+		"$lab", "error(67): A.g4:7:4: missing attribute access on rule reference lab in $lab\n",
+		"$q", "error(63): A.g4:7:4: unknown attribute reference q in $q\n",
+		"$q.y", "error(63): A.g4:7:4: unknown attribute reference q in $q.y\n",
+		"$q = 3", "error(63): A.g4:7:4: unknown attribute reference q in $q\n",
+		"$q = 3;", "error(63): A.g4:7:4: unknown attribute reference q in $q = 3;\n",
+		"$q.y = 3;", "error(63): A.g4:7:4: unknown attribute reference q in $q.y = 3;\n",
+		"$q = $blort;", "error(63): A.g4:7:4: unknown attribute reference q in $q = $blort;\n" +
+						"error(63): A.g4:7:9: unknown attribute reference blort in $blort\n",
+		"$a.ick", "error(65): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n",
+		"$a.ick = 3;", "error(65): A.g4:7:6: unknown attribute ick for rule a in $a.ick = 3;\n",
+		"$b.d", "error(64): A.g4:7:6: cannot access rule d's parameter: $b.d\n", // can't see rule ref's arg
+		"$d.text", "error(63): A.g4:7:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref
+		"$lab.d", "error(64): A.g4:7:8: cannot access rule d's parameter: $lab.d\n",
 	};
 
 	String[] finallyChecks = {

@@ -84,20 +85,20 @@ public class TestAttributeChecks extends BaseTest {
 		"$id.text", "",
 		"$ids", "",
 
-		"$lab", "error(33): A.g4:9:14: missing attribute access on rule reference lab in $lab\n",
-		"$q", "error(29): A.g4:9:14: unknown attribute reference q in $q\n",
-		"$q.y", "error(29): A.g4:9:14: unknown attribute reference q in $q.y\n",
-		"$q = 3", "error(29): A.g4:9:14: unknown attribute reference q in $q\n",
-		"$q = 3;", "error(29): A.g4:9:14: unknown attribute reference q in $q = 3;\n",
-		"$q.y = 3;", "error(29): A.g4:9:14: unknown attribute reference q in $q.y = 3;\n",
-		"$q = $blort;", "error(29): A.g4:9:14: unknown attribute reference q in $q = $blort;\n" +
-						"error(29): A.g4:9:19: unknown attribute reference blort in $blort\n",
-		"$a.ick", "error(31): A.g4:9:16: unknown attribute ick for rule a in $a.ick\n",
-		"$a.ick = 3;", "error(31): A.g4:9:16: unknown attribute ick for rule a in $a.ick = 3;\n",
-		"$b.e", "error(29): A.g4:9:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts
-		"$b.d", "error(29): A.g4:9:14: unknown attribute reference b in $b.d\n",
-		"$c.text", "error(29): A.g4:9:14: unknown attribute reference c in $c.text\n",
-		"$lab.d", "error(30): A.g4:9:18: cannot access rule d's parameter: $lab.d\n",
+		"$lab", "error(67): A.g4:10:14: missing attribute access on rule reference lab in $lab\n",
+		"$q", "error(63): A.g4:10:14: unknown attribute reference q in $q\n",
+		"$q.y", "error(63): A.g4:10:14: unknown attribute reference q in $q.y\n",
+		"$q = 3", "error(63): A.g4:10:14: unknown attribute reference q in $q\n",
+		"$q = 3;", "error(63): A.g4:10:14: unknown attribute reference q in $q = 3;\n",
+		"$q.y = 3;", "error(63): A.g4:10:14: unknown attribute reference q in $q.y = 3;\n",
+		"$q = $blort;", "error(63): A.g4:10:14: unknown attribute reference q in $q = $blort;\n" +
+						"error(63): A.g4:10:19: unknown attribute reference blort in $blort\n",
+		"$a.ick", "error(65): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n",
+		"$a.ick = 3;", "error(65): A.g4:10:16: unknown attribute ick for rule a in $a.ick = 3;\n",
+		"$b.e", "error(63): A.g4:10:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts
+		"$b.d", "error(63): A.g4:10:14: unknown attribute reference b in $b.d\n",
+		"$c.text", "error(63): A.g4:10:14: unknown attribute reference c in $c.text\n",
+		"$lab.d", "error(64): A.g4:10:18: cannot access rule d's parameter: $lab.d\n",
 	};
 
 	String[] dynMembersChecks = {

@@ -200,6 +201,7 @@ public class TestAttributeChecks extends BaseTest {
 	@Test public void testTokenRef() throws RecognitionException {
 		String grammar =
 			"parser grammar S;\n" +
+			"tokens{ID;}\n" +
 			"a : x=ID {Token t = $x; t = $ID;} ;\n";
 		String expected =
 			"";
@@ -37,6 +37,7 @@ public class TestBasicSemanticErrors extends BaseTest {
 		"parser grammar U;\n" +
 		"options { foo=bar; k=\"3\";}\n" +
 		"tokens {\n" +
+		"        ID;\n" +
 		"        f='fkj';\n" +
 		"        S = 'a';\n" +
 		"}\n" +

@@ -50,18 +51,18 @@ public class TestBasicSemanticErrors extends BaseTest {
 		"b : ( options { ick=bar; greedy=true; } : ID )+ ;\n" +
 		"c : ID<blue> ID<x=y> ;",
 		// YIELDS
-		"warning(48): U.g4:2:10: illegal option foo\n" +
-		"warning(48): U.g4:2:19: illegal option k\n" +
-		"error(26): U.g4:4:8: token names must start with an uppercase letter: f\n" +
-		"error(25): U.g4:4:8: can't assign string value to token name f in non-combined grammar\n" +
-		"error(25): U.g4:5:8: can't assign string value to token name S in non-combined grammar\n" +
-		"warning(48): U.g4:8:10: illegal option x\n" +
-		"error(20): U.g4:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
-		"error(20): U.g4:7:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
-		"warning(48): U.g4:11:10: illegal option blech\n" +
-		"warning(48): U.g4:11:21: illegal option greedy\n" +
-		"warning(48): U.g4:14:16: illegal option ick\n" +
-		"warning(48): U.g4:15:16: illegal option x\n",
+		"warning(83): U.g4:2:10: illegal option foo\n" +
+		"warning(83): U.g4:2:19: illegal option k\n" +
+		"error(60): U.g4:5:8: token names must start with an uppercase letter: f\n" +
+		"error(59): U.g4:5:8: can't assign string value to token name f in non-combined grammar\n" +
+		"error(59): U.g4:6:8: can't assign string value to token name S in non-combined grammar\n" +
+		"warning(83): U.g4:9:10: illegal option x\n" +
+		"error(54): U.g4:9:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
+		"error(54): U.g4:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
+		"warning(83): U.g4:12:10: illegal option blech\n" +
+		"warning(83): U.g4:12:21: illegal option greedy\n" +
+		"warning(83): U.g4:15:16: illegal option ick\n" +
+		"warning(83): U.g4:16:16: illegal option x\n",
 	};
 
 	@Test public void testU() { super.testErrors(U, false); }
@@ -501,29 +501,34 @@ public class TestCompositeGrammars extends BaseTest {
 		ErrorQueue equeue = new ErrorQueue();
 		String slave =
 			"parser grammar T;\n" +
+			"tokens{T;}\n" +
 			"x : T ;\n" ;
 		mkdir(tmpdir);
 		writeFile(tmpdir, "T.g4", slave);
 		slave =
 			"parser grammar S;\n" +
 			"import T;\n" +
+			"tokens{S;}\n" +
 			"y : S ;\n" ;
 		mkdir(tmpdir);
 		writeFile(tmpdir, "S.g4", slave);
 
 		slave =
 			"parser grammar C;\n" +
+			"tokens{C;}\n" +
 			"i : C ;\n" ;
 		mkdir(tmpdir);
 		writeFile(tmpdir, "C.g4", slave);
 		slave =
 			"parser grammar B;\n" +
+			"tokens{B;}\n" +
 			"j : B ;\n" ;
 		mkdir(tmpdir);
 		writeFile(tmpdir, "B.g4", slave);
 		slave =
 			"parser grammar A;\n" +
 			"import B,C;\n" +
+			"tokens{A;}\n" +
 			"k : A ;\n" ;
 		mkdir(tmpdir);
 		writeFile(tmpdir, "A.g4", slave);

@@ -531,12 +536,13 @@ public class TestCompositeGrammars extends BaseTest {
 		String master =
 			"grammar M;\n" +
 			"import S,A;\n" +
+			"tokens{M;}\n" +
 			"a : M ;\n" ;
 		writeFile(tmpdir, "M.g4", master);
 		Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);
 
-		assertEquals(equeue.errors.toString(), "[]");
-		assertEquals(equeue.warnings.toString(), "[]");
+		assertEquals("[]", equeue.errors.toString());
+		assertEquals("[]", equeue.warnings.toString());
 		String expectedTokenIDToTypeMap = "{EOF=-1, M=3, S=4, T=5, A=6, B=7, C=8}";
 		String expectedStringLiteralToTypeMap = "{}";
 		String expectedTypeToTokenList = "[M, S, T, A, B, C]";

@@ -653,7 +659,7 @@ public class TestCompositeGrammars extends BaseTest {
 			"s : a ;\n" +
 			"B : 'b' ;" + // defines B from inherited token space
 			"WS : (' '|'\\n') {skip();} ;\n" ;
-		boolean ok = antlr("M.g4", "M.g4", master);
+		boolean ok = antlr("M.g4", "M.g4", master, false);
 		boolean expecting = true; // should be ok
 		assertEquals(expecting, ok);
 	}
@@ -252,13 +252,13 @@ public class TestLeftRecursion extends BaseTest {
 		"s : q=e {System.out.println($e.v);} ;\n" +
 		"\n" +
 		"e returns [int v]\n" +
-		" : a=e op='*' b=e {$v = $a.v * $b.v;} -> mult\n" +
-		" | a=e '+' b=e {$v = $a.v + $b.v;} -> add\n" +
-		" | INT {$v = $INT.int;} -> anInt\n" +
-		" | '(' x=e ')' {$v = $x.v;} -> parens\n" +
-		" | x=e '++' {$v = $x.v+1;} -> inc\n" +
-		" | e '--' -> dec\n" +
-		" | ID {$v = 3;} -> anID\n" +
+		" : a=e op='*' b=e {$v = $a.v * $b.v;} # mult\n" +
+		" | a=e '+' b=e {$v = $a.v + $b.v;} # add\n" +
+		" | INT {$v = $INT.int;} # anInt\n" +
+		" | '(' x=e ')' {$v = $x.v;} # parens\n" +
+		" | x=e '++' {$v = $x.v+1;} # inc\n" +
+		" | e '--' # dec\n" +
+		" | ID {$v = 3;} # anID\n" +
 		" ; \n" +
 		"\n" +
 		"ID : 'a'..'z'+ ;\n" +

@@ -304,18 +304,18 @@ public class TestLeftRecursion extends BaseTest {
 		"// START:stat\n" +
 		"prog: stat ;\n" +
 		"\n" +
-		"stat: expr NEWLINE -> printExpr\n" +
-		" | ID '=' expr NEWLINE -> assign\n" +
-		" | NEWLINE -> blank\n" +
+		"stat: expr NEWLINE # printExpr\n" +
+		" | ID '=' expr NEWLINE # assign\n" +
+		" | NEWLINE # blank\n" +
 		" ;\n" +
 		"// END:stat\n" +
 		"\n" +
 		"// START:expr\n" +
-		"expr: expr ('*'|'/') expr -> MulDiv\n" +
-		" | expr ('+'|'-') expr -> AddSub\n" +
-		" | INT -> int\n" +
-		" | ID -> id\n" +
-		" | '(' expr ')' -> parens\n" +
+		"expr: expr ('*'|'/') expr # MulDiv\n" +
+		" | expr ('+'|'-') expr # AddSub\n" +
+		" | INT # int\n" +
+		" | ID # id\n" +
+		" | '(' expr ')' # parens\n" +
 		" ;\n" +
 		"// END:expr\n" +
 		"\n" +
@@ -178,8 +178,8 @@ public class TestListeners extends BaseTest {
 			" walker.walk(new LeafListener(), $r.ctx);" +
 			"}\n" +
 			" : r=e ;\n" +
-			"e : e '(' eList ')' -> Call\n" +
-			" | INT -> Int\n" +
+			"e : e '(' eList ')' # Call\n" +
+			" | INT # Int\n" +
 			" ; \n" +
 			"eList : e (',' e)* ;\n" +
 			"MULT: '*' ;\n" +
@@ -35,7 +35,7 @@ public class TestNonGreedyLoops extends BaseTest {
 	@Test public void testNongreedyLoopOnEndIsNop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"any : .* ;\n"+
 			"INT : '0'..'9'+ ;\n" +
 			"ID : 'a'..'z'+ ;\n" +

@@ -58,7 +58,7 @@ public class TestNonGreedyLoops extends BaseTest {
 	@Test public void testNongreedyPlusLoopOnEndIsNop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"any : .+ ;\n"+ // .+ on end of rule always gives no viable alt. can't bypass but can't match
 			"INT : '0'..'9'+ ;\n" +
 			"ID : 'a'..'z'+ ;\n" +

@@ -196,7 +196,7 @@ public class TestNonGreedyLoops extends BaseTest {
 	@Test public void testNongreedyLoopCantSeeEOF() throws Exception {
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : block EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : block EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"block : '{' .* '}' ;\n"+
 			"EQ : '=' ;\n" +
 			"INT : '0'..'9'+ ;\n" +

@@ -228,7 +228,7 @@ public class TestNonGreedyLoops extends BaseTest {
 	@Test public void testNongreedyLoop() throws Exception {
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"ifstat : 'if' '(' .* ')' block ;\n" +
 			"block : '{' '}' ;\n"+
 			"EQ : '=' ;\n" +

@@ -267,7 +267,7 @@ public class TestNonGreedyLoops extends BaseTest {
 	@Test public void testNongreedyLoopPassingThroughAnotherNongreedy() throws Exception {
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"ifstat : 'if' '(' .* ')' block ;\n" +
 			"block : '{' (block|.)* '}' ;\n"+
 			"EQ : '=' ;\n" +

@@ -330,7 +330,7 @@ public class TestNonGreedyLoops extends BaseTest {
 		// EOF on end means LL(*) can identify when to stop the loop.
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : stat* ID '=' ID ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : stat* ID '=' ID ';' EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"stat : 'if' '(' INT ')' stat\n" +
 			" | 'return' INT ';'\n" +
 			" | ID '=' (INT|ID) ';'\n" +

@@ -392,7 +392,7 @@ public class TestNonGreedyLoops extends BaseTest {
 			"grammar T;\n" +
 			"random : s ;" + // call s so s isn't followed by EOF directly
 			"s @after {dumpDFA();} : (options {greedy=false;} : stat)* ID '=' ID ';'\n" +
-			" {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			" {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"stat : 'if' '(' INT ')' stat\n" +
 			" | 'return' INT ';'\n" +
 			" | ID '=' (INT|ID) ';'\n" +

@@ -450,7 +450,7 @@ public class TestNonGreedyLoops extends BaseTest {
 	@Test public void testHTMLTags() throws Exception {
 		String grammar =
 			"grammar T;\n" +
-			"s @after {dumpDFA();} : (item)+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+			"s @after {dumpDFA();} : (item)+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 			"item : tag | . ;\n" +
 			"tag : '<' '/'? .* '>' ;\n" +
 			"EQ : '=' ;\n" +
@ -52,7 +52,7 @@ public class TestParserExec extends BaseTest {
|
||||||
@Test public void testBasic() throws Exception {
|
@Test public void testBasic() throws Exception {
|
||||||
String grammar =
|
String grammar =
|
||||||
"grammar T;\n" +
|
"grammar T;\n" +
|
||||||
"a : ID INT {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
|
"a : ID INT {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
|
||||||
"ID : 'a'..'z'+ ;\n" +
|
"ID : 'a'..'z'+ ;\n" +
|
||||||
"INT : '0'..'9'+;\n" +
|
"INT : '0'..'9'+;\n" +
|
||||||
"WS : (' '|'\\n') {skip();} ;\n";
|
"WS : (' '|'\\n') {skip();} ;\n";
|
||||||
|
@ -80,7 +80,7 @@ public class TestParserExec extends BaseTest {
|
||||||
@Test public void testAPlus() throws Exception {
|
@Test public void testAPlus() throws Exception {
|
||||||
String grammar =
|
String grammar =
|
||||||
"grammar T;\n" +
|
"grammar T;\n" +
|
||||||
"a : ID+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
|
"a : ID+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
|
||||||
"ID : 'a'..'z'+ ;\n" +
|
"ID : 'a'..'z'+ ;\n" +
|
||||||
"WS : (' '|'\\n') {skip();} ;\n";
|
"WS : (' '|'\\n') {skip();} ;\n";
|
||||||
|
|
||||||
|
@ -93,7 +93,7 @@ public class TestParserExec extends BaseTest {
|
||||||
@Test public void testAorAPlus() throws Exception {
|
@Test public void testAorAPlus() throws Exception {
|
||||||
String grammar =
|
String grammar =
|
||||||
"grammar T;\n" +
|
"grammar T;\n" +
|
||||||
"a : (ID|ID)+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
|
"a : (ID|ID)+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
|
||||||
"ID : 'a'..'z'+ ;\n" +
|
"ID : 'a'..'z'+ ;\n" +
|
||||||
"WS : (' '|'\\n') {skip();} ;\n";
|
"WS : (' '|'\\n') {skip();} ;\n";
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@ public class TestParserExec extends BaseTest {
|
||||||
@Test public void testAStar() throws Exception {
|
@Test public void testAStar() throws Exception {
|
||||||
String grammar =
|
String grammar =
|
||||||
"grammar T;\n" +
|
"grammar T;\n" +
|
||||||
"a : ID* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
|
"a : ID* {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
|
||||||
"ID : 'a'..'z'+ ;\n" +
|
"ID : 'a'..'z'+ ;\n" +
|
||||||
"WS : (' '|'\\n') {skip();} ;\n";
|
"WS : (' '|'\\n') {skip();} ;\n";
|
||||||
|
|
||||||
|
@ -121,7 +121,7 @@ public class TestParserExec extends BaseTest {
|
||||||
@Test public void testAorAStar() throws Exception {
|
@Test public void testAorAStar() throws Exception {
|
||||||
String grammar =
|
String grammar =
|
||||||
"grammar T;\n" +
|
"grammar T;\n" +
|
||||||
"a : (ID|ID)* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
|
"a : (ID|ID)* {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
|
||||||
"ID : 'a'..'z'+ ;\n" +
|
"ID : 'a'..'z'+ ;\n" +
|
||||||
"WS : (' '|'\\n') {skip();} ;\n";
|
"WS : (' '|'\\n') {skip();} ;\n";
|
||||||
|
|
||||||
|
@ -136,7 +136,7 @@ public class TestParserExec extends BaseTest {
|
||||||
@Test public void testAorBPlus() throws Exception {
|
@Test public void testAorBPlus() throws Exception {
|
||||||
String grammar =
|
String grammar =
|
||||||
"grammar T;\n" +
|
"grammar T;\n" +
|
||||||
"a : (ID|INT{;})+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
|
"a : (ID|INT{;})+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
|
||||||
"ID : 'a'..'z'+ ;\n" +
|
"ID : 'a'..'z'+ ;\n" +
|
||||||
"INT : '0'..'9'+;\n" +
|
"INT : '0'..'9'+;\n" +
|
||||||
"WS : (' '|'\\n') {skip();} ;\n";
|
"WS : (' '|'\\n') {skip();} ;\n";
|
||||||
|
@@ -149,7 +149,7 @@ public class TestParserExec extends BaseTest {
 @Test public void testAorBStar() throws Exception {
 String grammar =
 "grammar T;\n" +
-"a : (ID|INT{;})* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
+"a : (ID|INT{;})* {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
 "ID : 'a'..'z'+ ;\n" +
 "INT : '0'..'9'+;\n" +
 "WS : (' '|'\\n') {skip();} ;\n";
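All of the TestParserExec hunks above make the same mechanical API change: embedded actions stop calling the two-int toString(start, stop) overload on the input stream and instead hand getText an explicit org.antlr.v4.runtime.misc.Interval. A minimal sketch of the pattern outside a grammar action; the class and method names are hypothetical, and `input` stands in for the generated parser's _input field:

import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.misc.Interval;

class IntervalMigrationSketch {
    // Text of every token matched so far, as the actions above print it.
    static String matchedSoFar(TokenStream input) {
        // before this commit: input.toString(0, input.index() - 1)
        // after: an Interval makes the inclusive range explicit at the call site
        return input.getText(Interval.of(0, input.index() - 1));
    }
}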
@@ -433,7 +433,7 @@ public class TestPerformance extends BaseTest {
 extraOptions.add("-atn");
 }
 String[] extraOptionsArray = extraOptions.toArray(new String[extraOptions.size()]);
-boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", extraOptionsArray);
+boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", true, extraOptionsArray);
 assertTrue(success);
 }
 
@@ -20,9 +20,9 @@ public class TestSymbolIssues extends BaseTest {
 "\n" +
 "ID : 'a'..'z'+ ID ;",
 // YIELDS
-"warning(48): A.g4:2:10: illegal option opt\n" +
-"warning(48): A.g4:2:21: illegal option k\n" +
-"error(59): A.g4:7:1: redefinition of header action\n" +
+"warning(83): A.g4:2:10: illegal option opt\n" +
+"warning(83): A.g4:2:21: illegal option k\n" +
+"error(94): A.g4:7:1: redefinition of header action\n" +
 "warning(51): A.g4:2:10: illegal option opt\n" +
 "error(19): A.g4:11:0: rule a redefinition\n" +
 "error(60): A.g4:5:1: redefinition of members action\n" +
@@ -34,7 +34,7 @@ public class TestSymbolIssues extends BaseTest {
 static String[] B = {
 // INPUT
 "parser grammar B;\n" +
-"tokens { X='x'; Y; }\n" +
+"tokens { ID; FOO; X='x'; Y; }\n" +
 "\n" +
 "a : s=ID b+=ID X=ID '.' ;\n" +
 "\n" +
@@ -42,16 +42,18 @@ public class TestSymbolIssues extends BaseTest {
 "\n" +
 "s : FOO ;",
 // YIELDS
-"error(25): B.g4:2:9: can't assign string value to token name X in non-combined grammar\n" +
-"error(35): B.g4:4:4: label s conflicts with rule with same name\n" +
-"error(35): B.g4:4:9: label b conflicts with rule with same name\n" +
-"error(36): B.g4:4:15: label X conflicts with token with same name\n" +
-"error(40): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n"
+"error(59): B.g4:2:18: can't assign string value to token name X in non-combined grammar\n" +
+"error(69): B.g4:4:4: label s conflicts with rule with same name\n" +
+"error(69): B.g4:4:9: label b conflicts with rule with same name\n" +
+"error(70): B.g4:4:15: label X conflicts with token with same name\n" +
+"error(75): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n" +
+"error(126): B.g4:4:20: cannot create implicit token for string literal '.' in non-combined grammar\n"
 };
 
 static String[] D = {
 // INPUT
 "parser grammar D;\n" +
+"tokens{ID;}\n" +
 "a[int j] \n" +
 " : i=ID j=ID ;\n" +
 "\n" +
@@ -61,8 +63,8 @@ public class TestSymbolIssues extends BaseTest {
 " : ID ;",
 
 // YIELDS
-"error(37): D.g4:3:21: label j conflicts with rule a's return value or parameter with same name\n" +
-"error(41): D.g4:5:0: rule b's argument i conflicts a return value with same name\n"
+"error(72): D.g4:4:21: label j conflicts with rule a's return value or parameter with same name\n" +
+"error(76): D.g4:6:0: rule b's argument i conflicts a return value with same name\n"
 };
 
 static String[] E = {
@@ -78,10 +80,10 @@ public class TestSymbolIssues extends BaseTest {
 "a : A ;\n",
 
 // YIELDS
-"error(73): E.g4:4:8: cannot redefine B; token name already defined\n" +
-"error(73): E.g4:5:4: cannot redefine C; token name already defined\n" +
-"error(73): E.g4:6:8: cannot redefine D; token name already defined\n" +
-"error(72): E.g4:7:8: cannot alias X='e'; string already assigned to E\n"
+"error(108): E.g4:4:8: cannot redefine B; token name already defined\n" +
+"error(108): E.g4:5:4: cannot redefine C; token name already defined\n" +
+"error(108): E.g4:6:8: cannot redefine D; token name already defined\n" +
+"error(107): E.g4:7:8: cannot alias X='e'; string already assigned to E\n"
 };
 
 @Test public void testA() { super.testErrors(A, false); }
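For readers skimming the TestSymbolIssues hunks above: each fixture is a two-element String array, the grammar source first and the exact diagnostic dump second, compared verbatim by super.testErrors(...); that is why every renumbered error code forces a test edit. A minimal sketch of the shape; the class name SymbolIssuesFixtureSketch and the F fixture are invented for illustration:

class SymbolIssuesFixtureSketch {
    // Element [0] is the grammar to compile (INPUT); element [1] is the
    // exact, line-by-line diagnostic output the tool must produce (YIELDS).
    static final String[] F = {
        // INPUT
        "parser grammar F;\n" +
        "tokens{ID;}\n" +
        "a : ID ;",
        // YIELDS
        "" // empty: this grammar is expected to compile cleanly
    };
    // exercised as super.testErrors(F, false) inside a BaseTest subclass
}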
@@ -1,806 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2011 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.v4.test;
-
-import org.antlr.v4.runtime.TokenRewriteStream;
-import org.antlr.v4.tool.LexerGrammar;
-import org.antlr.v4.tool.interp.LexerInterpreter;
-import org.junit.Test;
-
-public class TestTokenRewriteStream extends BaseTest {
-
-/** Public default constructor used by TestRig */
-public TestTokenRewriteStream() {
-}
-
-@Test public void testInsertBeforeIndex0() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-LexerInterpreter lexInterp = new LexerInterpreter(g, "abc");
-TokenRewriteStream tokens = new TokenRewriteStream(lexInterp);
-tokens.insertBefore(0, "0");
-String result = tokens.toString();
-String expecting = "0abc";
-assertEquals(expecting, result);
-}
-
-@Test public void testInsertAfterLastIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.insertAfter(2, "x");
-String result = tokens.toString();
-String expecting = "abcx";
-assertEquals(expecting, result);
-}
-
-@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(1, "x");
-tokens.insertAfter(1, "x");
-String result = tokens.toString();
-String expecting = "axbxc";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceIndex0() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(0, "x");
-String result = tokens.toString();
-String expecting = "xbc";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceLastIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, "x");
-String result = tokens.toString();
-String expecting = "abx";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceMiddleIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, "x");
-String result = tokens.toString();
-String expecting = "axc";
-assertEquals(expecting, result);
-}
-
-@Test public void testToStringStartStop() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"ID : 'a'..'z'+;\n" +
-"INT : '0'..'9'+;\n" +
-"SEMI : ';';\n" +
-"MUL : '*';\n" +
-"ASSIGN : '=';\n" +
-"WS : ' '+;\n");
-// Tokens: 0123456789
-// Input:  x = 3 * 0;
-String input = "x = 3 * 0;";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
-
-String result = tokens.toOriginalString();
-String expecting = "x = 3 * 0;";
-assertEquals(expecting, result);
-
-result = tokens.toString();
-expecting = "x = 0;";
-assertEquals(expecting, result);
-
-result = tokens.toString(0,9);
-expecting = "x = 0;";
-assertEquals(expecting, result);
-
-result = tokens.toString(4,8);
-expecting = "0";
-assertEquals(expecting, result);
-}
-
-@Test public void testToStringStartStop2() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"ID : 'a'..'z'+;\n" +
-"INT : '0'..'9'+;\n" +
-"SEMI : ';';\n" +
-"ASSIGN : '=';\n" +
-"PLUS : '+';\n" +
-"MULT : '*';\n" +
-"WS : ' '+;\n");
-// Tokens: 012345678901234567
-// Input:  x = 3 * 0 + 2 * 0;
-String input = "x = 3 * 0 + 2 * 0;";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-
-String result = tokens.toOriginalString();
-String expecting = "x = 3 * 0 + 2 * 0;";
-assertEquals(expecting, result);
-
-tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
-result = tokens.toString();
-expecting = "x = 0 + 2 * 0;";
-assertEquals(expecting, result);
-
-result = tokens.toString(0,17);
-expecting = "x = 0 + 2 * 0;";
-assertEquals(expecting, result);
-
-result = tokens.toString(4,8);
-expecting = "0";
-assertEquals(expecting, result);
-
-result = tokens.toString(0,8);
-expecting = "x = 0";
-assertEquals(expecting, result);
-
-result = tokens.toString(12,16);
-expecting = "2 * 0";
-assertEquals(expecting, result);
-
-tokens.insertAfter(17, "// comment");
-result = tokens.toString(12,18);
-expecting = "2 * 0;// comment";
-assertEquals(expecting, result);
-
-result = tokens.toString(0,8); // try again after insert at end
-expecting = "x = 0";
-assertEquals(expecting, result);
-}
-
-
-@Test public void test2ReplaceMiddleIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, "x");
-tokens.replace(1, "y");
-String result = tokens.toString();
-String expecting = "ayc";
-assertEquals(expecting, result);
-}
-
-@Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(0, "_");
-tokens.replace(1, "x");
-tokens.replace(1, "y");
-String result = tokens.toString();
-String expecting = "_ayc";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, "x");
-tokens.delete(1);
-String result = tokens.toString();
-String expecting = "ac";
-assertEquals(expecting, result);
-}
-
-@Test public void testInsertInPriorReplace() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(0, 2, "x");
-tokens.insertBefore(1, "0");
-Exception exc = null;
-try {
-tokens.toString();
-}
-catch (IllegalArgumentException iae) {
-exc = iae;
-}
-String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">";
-assertNotNull(exc);
-assertEquals(expecting, exc.getMessage());
-}
-
-@Test public void testInsertThenReplaceSameIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(0, "0");
-tokens.replace(0, "x"); // supercedes insert at 0
-String result = tokens.toString();
-String expecting = "0xbc";
-assertEquals(expecting, result);
-}
-
-@Test public void test2InsertMiddleIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(1, "x");
-tokens.insertBefore(1, "y");
-String result = tokens.toString();
-String expecting = "ayxbc";
-assertEquals(expecting, result);
-}
-
-@Test public void test2InsertThenReplaceIndex0() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(0, "x");
-tokens.insertBefore(0, "y");
-tokens.replace(0, "z");
-String result = tokens.toString();
-String expecting = "yxzbc";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, "x");
-tokens.insertBefore(2, "y");
-String result = tokens.toString();
-String expecting = "abyx";
-assertEquals(expecting, result);
-}
-
-@Test public void testInsertThenReplaceLastIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(2, "y");
-tokens.replace(2, "x");
-String result = tokens.toString();
-String expecting = "abyx";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, "x");
-tokens.insertAfter(2, "y");
-String result = tokens.toString();
-String expecting = "abxy";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 4, "x");
-tokens.insertBefore(2, "y");
-String result = tokens.toString();
-String expecting = "abyxba";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 4, "x");
-tokens.insertBefore(4, "y"); // no effect; within range of a replace
-Exception exc = null;
-try {
-tokens.toString();
-}
-catch (IllegalArgumentException iae) {
-exc = iae;
-}
-String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">";
-assertNotNull(exc);
-assertEquals(expecting, exc.getMessage());
-}
-
-@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 4, "x");
-tokens.insertAfter(4, "y");
-String result = tokens.toString();
-String expecting = "abxyba";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceAll() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(0, 6, "x");
-String result = tokens.toString();
-String expecting = "x";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceSubsetThenFetch() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 4, "xyz");
-String result = tokens.toString(0,6);
-String expecting = "abxyzba";
-assertEquals(expecting, result);
-}
-
-@Test public void testReplaceThenReplaceSuperset() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 4, "xyz");
-tokens.replace(3, 5, "foo"); // overlaps, error
-Exception exc = null;
-try {
-tokens.toString();
-}
-catch (IllegalArgumentException iae) {
-exc = iae;
-}
-String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
-assertNotNull(exc);
-assertEquals(expecting, exc.getMessage());
-}
-
-@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcccba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 4, "xyz");
-tokens.replace(1, 3, "foo"); // overlap, error
-Exception exc = null;
-try {
-tokens.toString();
-}
-catch (IllegalArgumentException iae) {
-exc = iae;
-}
-String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
-assertNotNull(exc);
-assertEquals(expecting, exc.getMessage());
-}
-
-@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcba";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 2, "xyz");
-tokens.replace(0, 3, "foo");
-String result = tokens.toString();
-String expecting = "fooa";
-assertEquals(expecting, result);
-}
-
-// June 2, 2008 I rewrote core of rewrite engine; just adding lots more tests here
-
-@Test public void testCombineInserts() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(0, "x");
-tokens.insertBefore(0, "y");
-String result = tokens.toString();
-String expecting = "yxabc";
-assertEquals(expecting, result);
-}
-
-@Test public void testCombine3Inserts() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(1, "x");
-tokens.insertBefore(0, "y");
-tokens.insertBefore(1, "z");
-String result = tokens.toString();
-String expecting = "yazxbc";
-assertEquals(expecting, result);
-}
-
-@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(0, 2, "foo");
-tokens.insertBefore(0, "z"); // combine with left edge of rewrite
-String result = tokens.toString();
-String expecting = "zfoo";
-assertEquals(expecting, result);
-}
-
-@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.delete(0, 2);
-tokens.insertBefore(0, "z"); // combine with left edge of rewrite
-String result = tokens.toString();
-String expecting = "z"; // make sure combo is not znull
-assertEquals(expecting, result);
-}
-
-@Test public void testDisjointInserts() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(1, "x");
-tokens.insertBefore(2, "y");
-tokens.insertBefore(0, "z");
-String result = tokens.toString();
-String expecting = "zaxbyc";
-assertEquals(expecting, result);
-}
-
-@Test public void testOverlappingReplace() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, 2, "foo");
-tokens.replace(0, 3, "bar"); // wipes prior nested replace
-String result = tokens.toString();
-String expecting = "bar";
-assertEquals(expecting, result);
-}
-
-@Test public void testOverlappingReplace2() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(0, 3, "bar");
-tokens.replace(1, 2, "foo"); // cannot split earlier replace
-Exception exc = null;
-try {
-tokens.toString();
-}
-catch (IllegalArgumentException iae) {
-exc = iae;
-}
-String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">";
-assertNotNull(exc);
-assertEquals(expecting, exc.getMessage());
-}
-
-@Test public void testOverlappingReplace3() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, 2, "foo");
-tokens.replace(0, 2, "bar"); // wipes prior nested replace
-String result = tokens.toString();
-String expecting = "barc";
-assertEquals(expecting, result);
-}
-
-@Test public void testOverlappingReplace4() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, 2, "foo");
-tokens.replace(1, 3, "bar"); // wipes prior nested replace
-String result = tokens.toString();
-String expecting = "abar";
-assertEquals(expecting, result);
-}
-
-@Test public void testDropIdenticalReplace() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(1, 2, "foo");
-tokens.replace(1, 2, "foo"); // drop previous, identical
-String result = tokens.toString();
-String expecting = "afooc";
-assertEquals(expecting, result);
-}
-
-@Test public void testDropPrevCoveredInsert() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(1, "foo");
-tokens.replace(1, 2, "foo"); // kill prev insert
-String result = tokens.toString();
-String expecting = "afoofoo";
-assertEquals(expecting, result);
-}
-
-@Test public void testLeaveAloneDisjointInsert() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(1, "x");
-tokens.replace(2, 3, "foo");
-String result = tokens.toString();
-String expecting = "axbfoo";
-assertEquals(expecting, result);
-}
-
-@Test public void testLeaveAloneDisjointInsert2() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abcc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.replace(2, 3, "foo");
-tokens.insertBefore(1, "x");
-String result = tokens.toString();
-String expecting = "axbfoo";
-assertEquals(expecting, result);
-}
-
-@Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
-LexerGrammar g = new LexerGrammar(
-"lexer grammar t;\n"+
-"A : 'a';\n" +
-"B : 'b';\n" +
-"C : 'c';\n");
-String input = "abc";
-LexerInterpreter lexEngine = new LexerInterpreter(g, input);
-TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-tokens.fill();
-tokens.insertBefore(2, "y");
-tokens.delete(2);
-String result = tokens.toString();
-String expecting = "aby";
-assertEquals(expecting, result);
-}
-
-}
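The file swap below is the heart of this commit: the deleted TestTokenRewriteStream exercised a TokenRewriteStream that was both token buffer and edit log, while the new TestTokenStreamRewriter layers a TokenStreamRewriter over a plain CommonTokenStream, with toString()/toString(start,stop) becoming getText()/getText(Interval). A minimal sketch of the migrated setup, condensed from the first test in the new file; only the class name RewriterMigrationSketch is mine:

import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.TokenStreamRewriter;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;

class RewriterMigrationSketch {
    static String demo() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n" +
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        // buffer all tokens first; the rewriter no longer owns the stream
        CommonTokenStream stream = new CommonTokenStream(new LexerInterpreter(g, "abc"));
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(0, "0");
        return tokens.getText(); // "0abc"; was tokens.toString() on TokenRewriteStream
    }
}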
@ -0,0 +1,879 @@
|
||||||
|
/*
|
||||||
|
[The "BSD license"]
|
||||||
|
Copyright (c) 2011 Terence Parr
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions
|
||||||
|
are met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
3. The name of the author may not be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||||
|
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||||
|
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||||
|
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||||
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||||
|
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||||
|
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
package org.antlr.v4.test;
|
||||||
|
|
||||||
|
import org.antlr.v4.runtime.CommonTokenStream;
|
||||||
|
import org.antlr.v4.runtime.TokenStreamRewriter;
|
||||||
|
import org.antlr.v4.runtime.misc.Interval;
|
||||||
|
import org.antlr.v4.tool.LexerGrammar;
|
||||||
|
import org.antlr.v4.tool.interp.LexerInterpreter;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
public class TestTokenStreamRewriter extends BaseTest {
|
||||||
|
|
||||||
|
/** Public default constructor used by TestRig */
|
||||||
|
public TestTokenStreamRewriter() {
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testInsertBeforeIndex0() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, "abc");
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(0, "0");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "0abc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testInsertAfterLastIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertAfter(2, "x");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "abcx";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(1, "x");
|
||||||
|
tokens.insertAfter(1, "x");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "axbxc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceIndex0() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(0, "x");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "xbc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceLastIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(2, "x");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "abx";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceMiddleIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(1, "x");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "axc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testToStringStartStop() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"ID : 'a'..'z'+;\n" +
|
||||||
|
"INT : '0'..'9'+;\n" +
|
||||||
|
"SEMI : ';';\n" +
|
||||||
|
"MUL : '*';\n" +
|
||||||
|
"ASSIGN : '=';\n" +
|
||||||
|
"WS : ' '+;\n");
|
||||||
|
// Tokens: 0123456789
|
||||||
|
// Input: x = 3 * 0;
|
||||||
|
String input = "x = 3 * 0;";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(4, 8, "0");
|
||||||
|
stream.fill();
|
||||||
|
// replace 3 * 0 with 0
|
||||||
|
|
||||||
|
String result = tokens.getTokenStream().getText();
|
||||||
|
String expecting = "x = 3 * 0;";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText();
|
||||||
|
expecting = "x = 0;";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(0, 9));
|
||||||
|
expecting = "x = 0;";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(4, 8));
|
||||||
|
expecting = "0";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testToStringStartStop2() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"ID : 'a'..'z'+;\n" +
|
||||||
|
"INT : '0'..'9'+;\n" +
|
||||||
|
"SEMI : ';';\n" +
|
||||||
|
"ASSIGN : '=';\n" +
|
||||||
|
"PLUS : '+';\n" +
|
||||||
|
"MULT : '*';\n" +
|
||||||
|
"WS : ' '+;\n");
|
||||||
|
// Tokens: 012345678901234567
|
||||||
|
// Input: x = 3 * 0 + 2 * 0;
|
||||||
|
String input = "x = 3 * 0 + 2 * 0;";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
|
||||||
|
String result = tokens.getTokenStream().getText();
|
||||||
|
String expecting = "x = 3 * 0 + 2 * 0;";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
tokens.replace(4, 8, "0");
|
||||||
|
stream.fill();
|
||||||
|
// replace 3 * 0 with 0
|
||||||
|
result = tokens.getText();
|
||||||
|
expecting = "x = 0 + 2 * 0;";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(0, 17));
|
||||||
|
expecting = "x = 0 + 2 * 0;";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(4, 8));
|
||||||
|
expecting = "0";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(0, 8));
|
||||||
|
expecting = "x = 0";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(12, 16));
|
||||||
|
expecting = "2 * 0";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
tokens.insertAfter(17, "// comment");
|
||||||
|
result = tokens.getText(Interval.of(12, 18));
|
||||||
|
expecting = "2 * 0;// comment";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
|
||||||
|
result = tokens.getText(Interval.of(0, 8));
|
||||||
|
stream.fill();
|
||||||
|
// try again after insert at end
|
||||||
|
expecting = "x = 0";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test public void test2ReplaceMiddleIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(1, "x");
|
||||||
|
tokens.replace(1, "y");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "ayc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(0, "_");
|
||||||
|
tokens.replace(1, "x");
|
||||||
|
tokens.replace(1, "y");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "_ayc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(1, "x");
|
||||||
|
tokens.delete(1);
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "ac";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testInsertInPriorReplace() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(0, 2, "x");
|
||||||
|
tokens.insertBefore(1, "0");
|
||||||
|
Exception exc = null;
|
||||||
|
try {
|
||||||
|
tokens.getText();
|
||||||
|
}
|
||||||
|
catch (IllegalArgumentException iae) {
|
||||||
|
exc = iae;
|
||||||
|
}
|
||||||
|
String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">";
|
||||||
|
assertNotNull(exc);
|
||||||
|
assertEquals(expecting, exc.getMessage());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testInsertThenReplaceSameIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(0, "0");
|
||||||
|
tokens.replace(0, "x");
|
||||||
|
stream.fill();
|
||||||
|
// supercedes insert at 0
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "0xbc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void test2InsertMiddleIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(1, "x");
|
||||||
|
tokens.insertBefore(1, "y");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "ayxbc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void test2InsertThenReplaceIndex0() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(0, "x");
|
||||||
|
tokens.insertBefore(0, "y");
|
||||||
|
tokens.replace(0, "z");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "yxzbc";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(2, "x");
|
||||||
|
tokens.insertBefore(2, "y");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "abyx";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testInsertThenReplaceLastIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.insertBefore(2, "y");
|
||||||
|
tokens.replace(2, "x");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "abyx";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
|
||||||
|
LexerGrammar g = new LexerGrammar(
|
||||||
|
"lexer grammar t;\n"+
|
||||||
|
"A : 'a';\n" +
|
||||||
|
"B : 'b';\n" +
|
||||||
|
"C : 'c';\n");
|
||||||
|
String input = "abc";
|
||||||
|
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||||
|
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||||
|
stream.fill();
|
||||||
|
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||||
|
tokens.replace(2, "x");
|
||||||
|
tokens.insertAfter(2, "y");
|
||||||
|
String result = tokens.getText();
|
||||||
|
String expecting = "abxy";
|
||||||
|
assertEquals(expecting, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "x");
    tokens.insertBefore(2, "y");
    String result = tokens.getText();
    String expecting = "abyxba";
    assertEquals(expecting, result);
}

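// Here insertBefore(4, ...) falls inside the replaced range 2..4, so
// getText() must throw IllegalArgumentException with a descriptive message.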
@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "x");
    tokens.insertBefore(4, "y"); // no effect; within range of a replace
    stream.fill();
    Exception exc = null;
    try {
        tokens.getText();
    }
    catch (IllegalArgumentException iae) {
        exc = iae;
    }
    String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">";
    assertNotNull(exc);
    assertEquals(expecting, exc.getMessage());
}

@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "x");
    tokens.insertAfter(4, "y");
    String result = tokens.getText();
    String expecting = "abxyba";
    assertEquals(expecting, result);
}

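// A single replace spanning every token collapses the whole stream to the
// replacement text.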
@Test public void testReplaceAll() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(0, 6, "x");
    String result = tokens.getText();
    String expecting = "x";
    assertEquals(expecting, result);
}

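// getText(Interval) applies pending rewrite ops when rendering a sub-range.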
@Test public void testReplaceSubsetThenFetch() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "xyz");
    String result = tokens.getText(Interval.of(0, 6));
    String expecting = "abxyzba";
    assertEquals(expecting, result);
}

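// Partially overlapping replaces are illegal. As with inserts inside a
// replaced range, the error surfaces lazily at getText(), not at replace().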
@Test public void testReplaceThenReplaceSuperset() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "xyz");
    tokens.replace(3, 5, "foo"); // overlaps, error
    stream.fill();
    Exception exc = null;
    try {
        tokens.getText();
    }
    catch (IllegalArgumentException iae) {
        exc = iae;
    }
    String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
    assertNotNull(exc);
    assertEquals(expecting, exc.getMessage());
}

@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcccba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 4, "xyz");
    tokens.replace(1, 3, "foo"); // overlap, error
    stream.fill();
    Exception exc = null;
    try {
        tokens.getText();
    }
    catch (IllegalArgumentException iae) {
        exc = iae;
    }
    String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
    assertNotNull(exc);
    assertEquals(expecting, exc.getMessage());
}

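// A later replace that fully covers an earlier one is legal and simply
// wipes out the nested op.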
@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcba";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 2, "xyz");
    tokens.replace(0, 3, "foo");
    String result = tokens.getText();
    String expecting = "fooa";
    assertEquals(expecting, result);
}

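// Multiple inserts at the same index combine into one; the most recently
// queued text ends up leftmost ("yx" ahead of "abc").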
@Test public void testCombineInserts() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(0, "x");
    tokens.insertBefore(0, "y");
    String result = tokens.getText();
    String expecting = "yxabc";
    assertEquals(expecting, result);
}

@Test public void testCombine3Inserts() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "x");
    tokens.insertBefore(0, "y");
    tokens.insertBefore(1, "z");
    String result = tokens.getText();
    String expecting = "yazxbc";
    assertEquals(expecting, result);
}

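// An insert at the left edge of a replace or delete survives and renders
// ahead of the rewritten region.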
@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(0, 2, "foo");
    tokens.insertBefore(0, "z"); // combine with left edge of rewrite
    stream.fill();
    String result = tokens.getText();
    String expecting = "zfoo";
    assertEquals(expecting, result);
}

@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.delete(0, 2);
    tokens.insertBefore(0, "z"); // combine with left edge of rewrite
    stream.fill();
    String result = tokens.getText();
    String expecting = "z"; // make sure combo is not znull
    stream.fill();
    assertEquals(expecting, result);
}

@Test public void testDisjointInserts() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "x");
    tokens.insertBefore(2, "y");
    tokens.insertBefore(0, "z");
    String result = tokens.getText();
    String expecting = "zaxbyc";
    assertEquals(expecting, result);
}

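// The four testOverlappingReplace variants map out which overlaps are
// tolerated: a replace that covers an earlier one wipes it out, while a
// later replace that would split an earlier, wider one is an error.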
@Test public void testOverlappingReplace() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(0, 3, "bar"); // wipes prior nested replace
    stream.fill();
    String result = tokens.getText();
    String expecting = "bar";
    assertEquals(expecting, result);
}

@Test public void testOverlappingReplace2() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(0, 3, "bar");
    tokens.replace(1, 2, "foo"); // cannot split earlier replace
    stream.fill();
    Exception exc = null;
    try {
        tokens.getText();
    }
    catch (IllegalArgumentException iae) {
        exc = iae;
    }
    String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">";
    assertNotNull(exc);
    assertEquals(expecting, exc.getMessage());
}

@Test public void testOverlappingReplace3() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(0, 2, "bar"); // wipes prior nested replace
    stream.fill();
    String result = tokens.getText();
    String expecting = "barc";
    assertEquals(expecting, result);
}

@Test public void testOverlappingReplace4() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(1, 3, "bar"); // wipes prior nested replace
    stream.fill();
    String result = tokens.getText();
    String expecting = "abar";
    assertEquals(expecting, result);
}

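// Two identical replaces over the same range collapse into a single op.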
@Test public void testDropIdenticalReplace() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(1, 2, "foo"); // drop previous, identical
    stream.fill();
    String result = tokens.getText();
    String expecting = "afooc";
    assertEquals(expecting, result);
}

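// Note: despite the test name, the expected "afoofoo" shows the earlier
// insert surviving alongside the covering replace.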
@Test public void testDropPrevCoveredInsert() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "foo");
    tokens.replace(1, 2, "foo"); // kill prev insert
    stream.fill();
    String result = tokens.getText();
    String expecting = "afoofoo";
    assertEquals(expecting, result);
}

@Test public void testLeaveAloneDisjointInsert() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "x");
    tokens.replace(2, 3, "foo");
    String result = tokens.getText();
    String expecting = "axbfoo";
    assertEquals(expecting, result);
}

@Test public void testLeaveAloneDisjointInsert2() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 3, "foo");
    tokens.insertBefore(1, "x");
    String result = tokens.getText();
    String expecting = "axbfoo";
    assertEquals(expecting, result);
}

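// An insertBefore queued on a token survives a later delete of that token.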
@Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(2, "y");
    tokens.delete(2);
    String result = tokens.getText();
    String expecting = "aby";
    assertEquals(expecting, result);
}

}

@@ -8,37 +8,37 @@ public class TestToolSyntaxErrors extends BaseTest {
     "grammar A;\n" +
     "",
     // YIELDS
-    "error(64): A.g4::: grammar A has no rules\n",
+    "error(99): A.g4::: grammar A has no rules\n",

     "A;",
-    "error(16): <string>:1:0: 'A' came as a complete surprise to me\n",
+    "error(50): <string>:1:0: 'A' came as a complete surprise to me\n",

     "grammar ;",
-    "error(16): <string>:1:8: ';' came as a complete surprise to me while looking for an identifier\n",
+    "error(50): <string>:1:8: ';' came as a complete surprise to me while looking for an identifier\n",

     "grammar A\n" +
     "a : ID ;\n",
-    "error(16): <string>:2:0: missing SEMI at 'a'\n",
+    "error(50): <string>:2:0: missing SEMI at 'a'\n",

     "grammar A;\n" +
     "a : ID ;;\n"+
     "b : B ;",
-    "error(16): A.g4:2:8: ';' came as a complete surprise to me\n",
+    "error(50): A.g4:2:8: ';' came as a complete surprise to me\n",

     "grammar A;;\n" +
     "a : ID ;\n",
-    "error(16): A;.g4:1:10: ';' came as a complete surprise to me\n",
+    "error(50): A;.g4:1:10: ';' came as a complete surprise to me\n",

     "grammar A;\n" +
     "a @init : ID ;\n",
-    "error(16): A.g4:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n",
+    "error(50): A.g4:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n",

     "grammar A;\n" +
     "a ( A | B ) D ;\n" +
     "b : B ;",
-    "error(16): A.g4:2:3: '(' came as a complete surprise to me while matching rule preamble\n" +
-    "error(16): A.g4:2:11: mismatched input ')' expecting SEMI while matching a rule\n" +
-    "error(16): A.g4:2:15: mismatched input ';' expecting COLON while matching a lexer rule\n",
+    "error(50): A.g4:2:3: '(' came as a complete surprise to me while matching rule preamble\n" +
+    "error(50): A.g4:2:11: mismatched input ')' expecting SEMI while matching a rule\n" +
+    "error(50): A.g4:2:15: mismatched input ';' expecting COLON while matching a lexer rule\n",
     };

     @Test public void testA() { super.testErrors(A, true); }
@@ -48,7 +48,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "grammar A;\n" +
     "a : : A ;\n" +
     "b : B ;",
-    "error(16): A.g4:2:4: ':' came as a complete surprise to me while matching alternative\n",
+    "error(50): A.g4:2:4: ':' came as a complete surprise to me while matching alternative\n",
     };
     super.testErrors(pair, true);
 }
@@ -58,7 +58,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "grammar A;\n" +
     "a : A \n" +
     "b : B ;",
-    "error(16): A.g4:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n",
+    "error(50): A.g4:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n",
     };
     super.testErrors(pair, true);
 }
@@ -68,7 +68,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "lexer grammar A;\n" +
     "A : 'a' \n" +
     "B : 'b' ;",
-    "error(16): A.g4:3:0: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n",
+    "error(50): A.g4:3:0: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n",
     };
     super.testErrors(pair, true);
 }
@@ -78,7 +78,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "grammar A;\n" +
     "a : A \n" +
     "b[int i] returns [int y] : B ;",
-    "error(16): A.g4:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n"
+    "error(50): A.g4:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n"
     };
     super.testErrors(pair, true);
 }
@@ -90,7 +90,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     " catch [Exception e] {...}\n" +
     "b : B ;\n",

-    "error(16): A.g4:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n"
+    "error(50): A.g4:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n"
     };
     super.testErrors(pair, true);
 }
@@ -101,7 +101,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "a : A \n" +
     " catch [Exception e] {...}\n",

-    "error(16): A.g4:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n"
+    "error(50): A.g4:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n"
     };
     super.testErrors(pair, true);
 }
@@ -112,7 +112,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "a @ options {k=1;} : A ;\n" +
     "b : B ;",

-    "error(16): A.g4:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n"
+    "error(50): A.g4:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n"
     };
     super.testErrors(pair, true);
 }
@@ -123,7 +123,7 @@ public class TestToolSyntaxErrors extends BaseTest {
     "a } : A ;\n" +
     "b : B ;",

-    "error(16): A.g4:2:2: '}' came as a complete surprise to me while matching rule preamble\n"
+    "error(50): A.g4:2:2: '}' came as a complete surprise to me while matching rule preamble\n"
     };
     super.testErrors(pair, true);
 }
@@ -135,8 +135,8 @@ public class TestToolSyntaxErrors extends BaseTest {
     "mode foo;\n" +
     "b : B ;",

-    "error(16): A.g4:4:0: 'b' came as a complete surprise to me\n" +
-    "error(16): A.g4:4:6: mismatched input ';' expecting COLON while matching a lexer rule\n"
+    "error(50): A.g4:4:0: 'b' came as a complete surprise to me\n" +
+    "error(50): A.g4:4:6: mismatched input ';' expecting COLON while matching a lexer rule\n"
     };
     super.testErrors(pair, true);
 }