forked from jasder/antlr
Merge remote-tracking branch 'parrt/master' into basetest-error-reporting
This commit is contained in: commit 9d92446125
@@ -1,6 +1,8 @@
package org.antlr.v4.gunit;

import org.antlr.runtime.*;
import org.antlr.runtime.ANTLRFileStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RuleReturnScope;
import org.antlr.runtime.tree.BufferedTreeNodeStream;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeNodeStream;
@@ -8,17 +10,19 @@ import org.antlr.stringtemplate.AutoIndentWriter;
import org.antlr.stringtemplate.StringTemplate;
import org.antlr.stringtemplate.StringTemplateGroup;

import java.io.*;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.List;

public class Gen {
    // TODO: don't hardcode
    public static final String TEMPLATE_FILE =
        "/Users/parrt/antlr/code/antlr4/main/gunit/resources/org/antlr/v4/gunit/jUnit.stg";
        "/Users/parrt/antlr/code/antlr4/gunit/resources/org/antlr/v4/gunit/jUnit.stg";

    public static void main(String[] args) throws Exception {
        if ( args.length==0 ) System.exit(0);
        if ( args.length==0 ) {help(); System.exit(0);}
        String outputDirName = ".";
        String fileName = args[0];
        if ( args[0].equals("-o") ) {
@@ -23,17 +23,17 @@
      <scope>compile</scope>
    </dependency>
    <dependency>
      <groupId>org.abego</groupId>
      <artifactId>treelayout.core</artifactId>
      <version>1.0</version>
      <scope>system</scope>
      <systemPath>${project.basedir}/lib/org.abego.treelayout.core.jar</systemPath>
      <groupId>org.abego.treelayout</groupId>
      <artifactId>org.abego.treelayout.core</artifactId>
      <version>1.0.1</version>
      <scope>compile</scope>
    </dependency>
  </dependencies>

  <build>

    <sourceDirectory>src</sourceDirectory>
    <resources/>

    <plugins>
      <plugin>
@@ -29,6 +29,10 @@

package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

/** How to emit recognition errors */
@@ -72,4 +76,30 @@ public interface ANTLRErrorListener<Symbol> {
               int charPositionInLine,
               String msg,
               @Nullable RecognitionException e);

/** Called when the parser detects a true ambiguity: an input sequence can be matched
 * literally by two or more passes through the grammar. ANTLR resolves the ambiguity in
 * favor of the alternative appearing first in the grammar. The start and stop index are
 * zero-based absolute indices into the token stream. ambigAlts is a set of alternative numbers
 * that can match the input sequence. This method is only called when we are parsing with
 * full context.
 */
void reportAmbiguity(@NotNull Parser recognizer,
                     DFA dfa, int startIndex, int stopIndex, @NotNull IntervalSet ambigAlts,
                     @NotNull ATNConfigSet configs);

void reportAttemptingFullContext(@NotNull Parser recognizer,
                                 @NotNull DFA dfa,
                                 int startIndex, int stopIndex,
                                 @NotNull ATNConfigSet configs);

/** Called by the parser when it finds a conflict that is resolved by retrying the parse
 * with full context. This is not a warning; it simply notifies you that your grammar
 * is more complicated than Strong LL can handle. The parser moved up to full-context
 * parsing for that input sequence.
 */
void reportContextSensitivity(@NotNull Parser recognizer,
                              @NotNull DFA dfa,
                              int startIndex, int stopIndex,
                              @NotNull ATNConfigSet configs);
}
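A minimal usage sketch (editor's addition, not part of this commit): with the listener interface above, a client subclasses BaseErrorListener (introduced later in this diff) and overrides only the callback it cares about. The addErrorListener call is an assumption based on the surrounding Recognizer API.

    // Hypothetical wiring; any generated parser would work here.
    class AmbiguityLogger extends BaseErrorListener<Token> {
        @Override
        public void reportAmbiguity(Parser recognizer, DFA dfa,
                                    int startIndex, int stopIndex,
                                    IntervalSet ambigAlts, ATNConfigSet configs) {
            // startIndex/stopIndex are zero-based token indices; ambigAlts lists
            // every alternative that can match the input span.
            System.err.println("ambiguous alts "+ambigAlts+" at tokens "+
                               startIndex+".."+stopIndex);
        }
    }
    // parser.addErrorListener(new AmbiguityLogger());  // assumed Recognizer method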
@@ -1,10 +1,5 @@
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.SemanticContext;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

@@ -114,30 +109,4 @@ public interface ANTLRErrorStrategy {
void reportError(@NotNull Parser recognizer,
                 @Nullable RecognitionException e)
    throws RecognitionException;

/** Called when the parser detects a true ambiguity: an input sequence can be matched
 * literally by two or more passes through the grammar. ANTLR resolves the ambiguity in
 * favor of the alternative appearing first in the grammar. The start and stop index are
 * zero-based absolute indices into the token stream. ambigAlts is a set of alternative numbers
 * that can match the input sequence. This method is only called when we are parsing with
 * full context.
 */
void reportAmbiguity(@NotNull Parser recognizer,
                     DFA dfa, int startIndex, int stopIndex, @NotNull IntervalSet ambigAlts,
                     @NotNull ATNConfigSet configs);

void reportAttemptingFullContext(@NotNull Parser recognizer,
                                 @NotNull DFA dfa,
                                 int startIndex, int stopIndex,
                                 @NotNull ATNConfigSet configs);

/** Called by the parser when it finds a conflict that is resolved by retrying the parse
 * with full context. This is not a warning; it simply notifies you that your grammar
 * is more complicated than Strong LL can handle. The parser moved up to full-context
 * parsing for that input sequence.
 */
void reportContextSensitivity(@NotNull Parser recognizer,
                              @NotNull DFA dfa,
                              int startIndex, int stopIndex,
                              @NotNull ATNConfigSet configs);
}
@@ -28,6 +28,8 @@
 */
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
@@ -210,7 +212,9 @@ public class ANTLRInputStream implements CharStream {
    }

    @Override
    public String substring(int start, int stop) {
    public String getText(Interval interval) {
        int start = interval.a;
        int stop = interval.b;
        if ( stop >= n ) stop = n-1;
        int count = stop - start + 1;
        if ( start >= n ) return "";
@@ -1,6 +1,6 @@
/*
 [The "BSD license"]
 Copyright (c) 2011 Terence Parr
 Copyright (c) 2012 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
@@ -26,26 +26,53 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.antlr.v4.runtime;

/** A stream of either tokens or tree nodes */
public interface SymbolStream<T> extends IntStream {
    /** Get the symbol at absolute index i; 0..n-1.
     * This is only valid if the underlying stream implementation buffers
     * all of the incoming objects.
     *
     * @throws UnsupportedOperationException if the index {@code i} is outside
     * the marked region and the stream does not support accessing symbols by
     * index outside of marked regions.
     */
    public T get(int i);
import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntervalSet;

    /** Get symbol at current input pointer + {@code k} ahead where {@code k=1}
     * is next symbol. k<0 indicates objects in the past. So -1 is previous
     * Object and -2 is two Objects ago. {@code LT(0)} is undefined. For i>=n,
     * return an object representing EOF. Return {@code null} for {@code LT(0)}
     * and any index that results in an absolute index that is negative.
     */
    T LT(int k);
/**
 *
 * @author Sam Harwell
 */
public class BaseErrorListener<Symbol> implements ANTLRErrorListener<Symbol> {

    @Override
    public <T extends Symbol> void error(Recognizer<T, ?> recognizer,
                                         T offendingSymbol,
                                         int line,
                                         int charPositionInLine,
                                         String msg,
                                         RecognitionException e)
    {
    }

    @Override
    public void reportAmbiguity(Parser recognizer,
                                DFA dfa,
                                int startIndex,
                                int stopIndex,
                                IntervalSet ambigAlts,
                                ATNConfigSet configs)
    {
    }

    @Override
    public void reportAttemptingFullContext(Parser recognizer,
                                            DFA dfa,
                                            int startIndex,
                                            int stopIndex,
                                            ATNConfigSet configs)
    {
    }

    @Override
    public void reportContextSensitivity(Parser recognizer,
                                         DFA dfa,
                                         int startIndex,
                                         int stopIndex,
                                         ATNConfigSet configs)
    {
    }
}
@@ -29,7 +29,13 @@
package org.antlr.v4.runtime;

import java.util.*;
import org.antlr.v4.runtime.misc.Interval;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;

/** Buffer all input tokens but do on-demand fetching of new tokens from
 * lexer. Useful when the parser or lexer has to set context/mode info before
@@ -235,30 +241,39 @@ public class BufferedTokenStream<T extends Token> implements TokenStream {

    /** Grab *all* tokens from stream and return string */
    @Override
    public String toString() {
        if ( p == -1 ) setup();
        fill();
        return toString(0, tokens.size()-1);
    }
    public String toString() { return getText(); }

    /** Get the text of all tokens in this buffer. */
    public String getText() {
        if ( p == -1 ) setup();
        fill();
        return getText(Interval.of(0,size()-1));
    }

    @Override
    public String toString(int start, int stop) {
    public String getText(Interval interval) {
        int start = interval.a;
        int stop = interval.b;
        if ( start<0 || stop<0 ) return "";
        if ( p == -1 ) setup();
        if ( stop>=tokens.size() ) stop = tokens.size()-1;
        StringBuilder buf = new StringBuilder();
        for (int i = start; i <= stop; i++) {
            T t = tokens.get(i);
            if ( t.getType()==Token.EOF ) break;
            buf.append(t.getText());
        }
        return buf.toString();

        StringBuilder buf = new StringBuilder();
        for (int i = start; i <= stop; i++) {
            T t = tokens.get(i);
            if ( t.getType()==Token.EOF ) break;
            buf.append(t.getText());
        }
        return buf.toString();
    }

    @Override
    public String getText(RuleContext ctx) { return getText(ctx.getSourceInterval()); }

    @Override
    public String toString(Token start, Token stop) {
    public String getText(Token start, Token stop) {
        if ( start!=null && stop!=null ) {
            return toString(start.getTokenIndex(), stop.getTokenIndex());
            return getText(Interval.of(start.getTokenIndex(), stop.getTokenIndex()));
        }
        return null;
    }
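A hedged example of the new Interval-based accessors (editor's sketch, not from the commit; "TLexer" stands in for any generated lexer):

    CharStream input = new ANTLRInputStream("int x = 1;");
    CommonTokenStream tokens = new CommonTokenStream(new TLexer(input));
    tokens.fill();                                         // force-buffer every token
    String everything = tokens.getText();                  // whole buffer
    String firstFive  = tokens.getText(Interval.of(0, 4)); // tokens 0..4, inclusive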
@@ -28,6 +28,8 @@
 */
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;

/** A source of characters for an ANTLR lexer */
public interface CharStream extends IntStream {
    public static final int EOF = -1;
@@ -37,6 +39,7 @@ public interface CharStream extends IntStream {
    /** For unbuffered streams, you can't use this; primarily I'm providing
     * a useful interface for action code. Just make sure actions don't
     * use this on streams that don't support it.
     * @param interval
     */
    public String substring(int start, int stop);
    public String getText(Interval interval);
}
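For illustration (editor's addition): the same Interval convention applies to character streams, where the indices are character offsets rather than token indices.

    // Sketch only; ANTLRInputStream implements CharStream earlier in this diff.
    CharStream chars = new ANTLRInputStream("int x = 1;");
    String firstThree = chars.getText(Interval.of(0, 2)); // "int" - chars 0..2, inclusive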
@@ -28,6 +28,8 @@
 */
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;

import java.io.Serializable;

public class CommonToken implements WritableToken, Serializable {
@@ -109,7 +111,7 @@ public class CommonToken implements WritableToken, Serializable {
    if ( input==null ) return null;
    int n = input.size();
    if ( start<n && stop<n) {
        return input.substring(start,stop);
        return input.getText(Interval.of(start,stop));
    }
    else {
        return "<EOF>";
@@ -32,7 +32,7 @@ package org.antlr.v4.runtime;
 *
 * @author Sam Harwell
 */
public class ConsoleErrorListener implements ANTLRErrorListener<Object> {
public class ConsoleErrorListener extends BaseErrorListener<Object> {
    public static final ConsoleErrorListener INSTANCE = new ConsoleErrorListener();

    @Override
@@ -29,8 +29,14 @@

package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.BlockStartState;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.runtime.atn.RuleTransition;
import org.antlr.v4.runtime.atn.StarLoopEntryState;
import org.antlr.v4.runtime.atn.StarLoopbackState;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;

@@ -159,7 +165,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
    // If already recovering, don't try to sync
    if ( errorRecoveryMode ) return;

    SymbolStream<Token> tokens = recognizer.getInputStream();
    TokenStream tokens = recognizer.getInputStream();
    int la = tokens.LA(1);

    // try cheaper subset first; might get lucky. seems to shave a wee bit off
@@ -195,11 +201,11 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
                                       NoViableAltException e)
    throws RecognitionException
{
    SymbolStream<Token> tokens = recognizer.getInputStream();
    TokenStream tokens = recognizer.getInputStream();
    String input;
    if (tokens instanceof TokenStream) {
        if ( e.startToken.getType()==Token.EOF ) input = "<EOF>";
        else input = ((TokenStream)tokens).toString(e.startToken, e.offendingToken);
        else input = ((TokenStream)tokens).getText(e.startToken, e.offendingToken);
    }
    else {
        input = "<unknown input>";
@@ -549,25 +555,4 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
        ttype = recognizer.getInputStream().LA(1);
    }
}

@Override
public void reportAmbiguity(@NotNull Parser recognizer,
                            DFA dfa, int startIndex, int stopIndex, @NotNull IntervalSet ambigAlts,
                            @NotNull ATNConfigSet configs)
{
}

@Override
public void reportAttemptingFullContext(@NotNull Parser recognizer,
                                        @NotNull DFA dfa,
                                        int startIndex, int stopIndex,
                                        @NotNull ATNConfigSet configs)
{
}

@Override
public void reportContextSensitivity(@NotNull Parser recognizer, @NotNull DFA dfa,
                                     int startIndex, int stopIndex, @NotNull ATNConfigSet configs)
{
}
}
@@ -31,17 +31,19 @@ package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;

public class DiagnosticErrorStrategy extends DefaultErrorStrategy {
public class DiagnosticErrorListener extends BaseErrorListener<Token> {
    @Override
    public void reportAmbiguity(@NotNull Parser recognizer,
                                DFA dfa, int startIndex, int stopIndex, @NotNull IntervalSet ambigAlts,
                                @NotNull ATNConfigSet configs)
    {
        recognizer.notifyErrorListeners("reportAmbiguity d=" + dfa.decision + ": ambigAlts=" + ambigAlts + ":" + configs + ", input='" +
            recognizer.getInputString(startIndex, stopIndex) + "'");
        recognizer.notifyErrorListeners("reportAmbiguity d=" + dfa.decision +
            ": ambigAlts=" + ambigAlts + ", input='" +
            recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)) + "'");
    }

    @Override
@@ -50,15 +52,17 @@ public class DiagnosticErrorStrategy extends DefaultErrorStrategy {
                                            int startIndex, int stopIndex,
                                            @NotNull ATNConfigSet configs)
    {
        recognizer.notifyErrorListeners("reportAttemptingFullContext d=" + dfa.decision + ": " + configs + ", input='" +
            recognizer.getInputString(startIndex, stopIndex) + "'");
        recognizer.notifyErrorListeners("reportAttemptingFullContext d=" +
            dfa.decision + ", input='" +
            recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)) + "'");
    }

    @Override
    public void reportContextSensitivity(@NotNull Parser recognizer, @NotNull DFA dfa,
                                         int startIndex, int stopIndex, @NotNull ATNConfigSet configs)
    {
        recognizer.notifyErrorListeners("reportContextSensitivity d=" + dfa.decision + ": " + configs + ", input='" +
            recognizer.getInputString(startIndex, stopIndex) + "'");
        recognizer.notifyErrorListeners("reportContextSensitivity d=" +
            dfa.decision + ", input='" +
            recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)) + "'");
    }
}
@@ -29,10 +29,10 @@
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.misc.Interval;

import java.util.ArrayDeque;
import java.util.EmptyStackException;
import java.util.List;

/** A lexer is a recognizer that draws input symbols from a character stream.
 * Lexer grammars result in a subclass of this object. A Lexer object
@@ -308,11 +308,10 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>

    public void notifyListeners(LexerNoViableAltException e) {
        String msg = "token recognition error at: '"+
            _input.substring(_tokenStartCharIndex, _input.index())+"'";
        List<? extends ANTLRErrorListener<? super Integer>> listeners = getErrorListeners();
        for (ANTLRErrorListener<? super Integer> listener : listeners) {
            listener.error(this, null, _tokenStartLine, _tokenStartCharPositionInLine, msg, e);
        }
            _input.getText(Interval.of(_tokenStartCharIndex, _input.index()))+"'";

        ANTLRErrorListener<? super Integer> listener = getErrorListenerDispatch();
        listener.error(this, null, _tokenStartLine, _tokenStartCharPositionInLine, msg, e);
    }

    public String getCharErrorDisplay(int c) {
@@ -30,6 +30,7 @@
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.Utils;

public class LexerNoViableAltException extends RecognitionException {
@@ -57,7 +58,7 @@ public class LexerNoViableAltException extends RecognitionException {
    public String toString() {
        String symbol = "";
        if (startIndex >= 0 && startIndex < input.size()) {
            symbol = getInputStream().substring(startIndex, startIndex);
            symbol = getInputStream().getText(Interval.of(startIndex,startIndex));
            symbol = Utils.escapeWhitespace(symbol, false);
        }

@@ -41,10 +41,10 @@ public class NoViableAltException extends RecognitionException {
     * not be buffering tokens so get a reference to it. (At the
     * time the error occurred, of course the stream needs to keep a
     * buffer of all of the tokens, but later we might not have access to those.)
     */
     */
    public Token startToken;

    public <Symbol extends Token> NoViableAltException(Parser recognizer) { // LL(1) error
    public NoViableAltException(Parser recognizer) { // LL(1) error
        this(recognizer,recognizer.getInputStream(),
             recognizer.getCurrentToken(),
             recognizer.getCurrentToken(),
@@ -52,12 +52,12 @@ public class NoViableAltException extends RecognitionException {
             recognizer._ctx);
    }

    public <Symbol> NoViableAltException(Parser recognizer,
                                         SymbolStream<Symbol> input,
                                         Token startToken,
                                         Token offendingToken,
                                         ATNConfigSet deadEndConfigs,
                                         ParserRuleContext<?> ctx)
    public NoViableAltException(Parser recognizer,
                                TokenStream input,
                                Token startToken,
                                Token offendingToken,
                                ATNConfigSet deadEndConfigs,
                                ParserRuleContext<?> ctx)
    {
        super(recognizer, input, ctx);
        this.deadEndConfigs = deadEndConfigs;
@@ -45,18 +45,18 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
    public class TraceListener implements ParseListener<Token> {
        @Override
        public void enterNonLRRule(ParserRuleContext<Token> ctx) {
            System.out.println("enter " + getRuleNames()[ctx.ruleIndex] + ", LT(1)=" + _input.LT(1).getText());
            System.out.println("enter " + getRuleNames()[ctx.getRuleIndex()] + ", LT(1)=" + _input.LT(1).getText());
        }

        @Override
        public void exitEveryRule(ParserRuleContext<Token> ctx) {
            System.out.println("exit "+getRuleNames()[ctx.ruleIndex]+", LT(1)="+_input.LT(1).getText());
            System.out.println("exit "+getRuleNames()[ctx.getRuleIndex()]+", LT(1)="+_input.LT(1).getText());
        }

        @Override
        public void visitTerminal(ParserRuleContext<Token> parent, Token token) {
            System.out.println("consume "+token+" rule "+
                getRuleNames()[parent.ruleIndex]+
                getRuleNames()[parent.getRuleIndex()]+
                " alt="+parent.altNum);
        }
    }
@@ -295,18 +295,6 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
        this._input = input;
    }

    public String getInputString(int start) {
        return getInputString(start, getInputStream().index());
    }

    public String getInputString(int start, int stop) {
        SymbolStream<Token> input = getInputStream();
        if ( input instanceof TokenStream ) {
            return ((TokenStream)input).toString(start,stop);
        }
        return "n/a";
    }

    /** Match needs to return the current input symbol, which gets put
     * into the label for the associated token ref; e.g., x=ID.
     */
@@ -327,10 +315,9 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
            line = ((Token) offendingToken).getLine();
            charPositionInLine = ((Token) offendingToken).getCharPositionInLine();
        }
        List<? extends ANTLRErrorListener<? super Token>> listeners = getErrorListeners();
        for (ANTLRErrorListener<? super Token> listener : listeners) {
            listener.error(this, offendingToken, line, charPositionInLine, msg, e);
        }

        ANTLRErrorListener<? super Token> listener = getErrorListenerDispatch();
        listener.error(this, offendingToken, line, charPositionInLine, msg, e);
    }

    /** Consume the current symbol and return it. E.g., given the following
@@ -381,12 +368,12 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
    public void enterRule(ParserRuleContext<Token> localctx, int ruleIndex) {
        _ctx = localctx;
        _ctx.start = _input.LT(1);
        _ctx.ruleIndex = ruleIndex;
        if (_buildParseTrees) addContextToParseTree();
        if ( _parseListeners != null) triggerEnterRuleEvent();
    }

    public void exitRule() {
        _ctx.stop = _input.LT(-1);
        // trigger event on _ctx, before it reverts to parent
        if ( _parseListeners != null) triggerExitRuleEvent();
        _ctx = (ParserRuleContext<Token>)_ctx.parent;
@@ -408,10 +395,10 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator<Token>
    public void pushNewRecursionContext(ParserRuleContext<Token> localctx, int ruleIndex) {
        _ctx = localctx;
        _ctx.start = _input.LT(1);
        _ctx.ruleIndex = ruleIndex;
    }

    public void unrollRecursionContexts(ParserRuleContext<Token> _parentctx) {
        _ctx.stop = _input.LT(-1);
        ParserRuleContext<Token> retctx = _ctx; // save current ctx (return value)

        // unroll so _ctx is as it was before call to recursive method
@@ -30,6 +30,7 @@ package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.runtime.tree.ParseTree;
@@ -102,12 +103,15 @@ public class ParserRuleContext<Symbol extends Token> extends RuleContext {

    public Symbol start, stop;

    /** Set during parsing to identify which rule parser is in. */
    public int ruleIndex;

    /** Set during parsing to identify which alt of rule parser is in. */
    public int altNum;

    /**
     * The exception which forced this rule to return. If the rule successfully
     * completed, this is {@code null}.
     */
    public RecognitionException exception;

    public ParserRuleContext() { }

    /** COPY a ctx (I'm deliberately not using copy constructor) */
@@ -119,7 +123,6 @@ public class ParserRuleContext<Symbol extends Token> extends RuleContext {

        this.start = ctx.start;
        this.stop = ctx.stop;
        this.ruleIndex = ctx.ruleIndex;
    }

    public ParserRuleContext(@Nullable ParserRuleContext<Symbol> parent, int invokingStateNumber, int stateNumber) {
@@ -289,7 +292,10 @@ public class ParserRuleContext<Symbol extends Token> extends RuleContext {
    public int getChildCount() { return children!=null ? children.size() : 0; }

    @Override
    public int getRuleIndex() { return ruleIndex; }
    public Interval getSourceInterval() {
        if ( start==null || stop==null ) return Interval.INVALID;
        return Interval.of(start.getTokenIndex(), stop.getTokenIndex());
    }

    public Symbol getStart() { return start; }
    public Symbol getStop() { return stop; }
@@ -0,0 +1,97 @@
/*
 [The "BSD license"]
 Copyright (c) 2012 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntervalSet;

import java.util.Collection;

/**
 *
 * @author Sam Harwell
 */
public class ProxyErrorListener<Symbol> implements ANTLRErrorListener<Symbol> {
    private final Collection<? extends ANTLRErrorListener<? super Symbol>> delegates;

    public ProxyErrorListener(Collection<? extends ANTLRErrorListener<? super Symbol>> delegates) {
        this.delegates = delegates;
    }

    @Override
    public <T extends Symbol> void error(Recognizer<T, ?> recognizer,
                                         T offendingSymbol,
                                         int line,
                                         int charPositionInLine,
                                         String msg,
                                         RecognitionException e)
    {
        for (ANTLRErrorListener<? super Symbol> listener : delegates) {
            listener.error(recognizer, offendingSymbol, line, charPositionInLine, msg, e);
        }
    }

    @Override
    public void reportAmbiguity(Parser recognizer,
                                DFA dfa,
                                int startIndex,
                                int stopIndex,
                                IntervalSet ambigAlts,
                                ATNConfigSet configs)
    {
        for (ANTLRErrorListener<? super Symbol> listener : delegates) {
            listener.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, ambigAlts, configs);
        }
    }

    @Override
    public void reportAttemptingFullContext(Parser recognizer,
                                            DFA dfa,
                                            int startIndex,
                                            int stopIndex,
                                            ATNConfigSet configs)
    {
        for (ANTLRErrorListener<? super Symbol> listener : delegates) {
            listener.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, configs);
        }
    }

    @Override
    public void reportContextSensitivity(Parser recognizer,
                                         DFA dfa,
                                         int startIndex,
                                         int stopIndex,
                                         ATNConfigSet configs)
    {
        for (ANTLRErrorListener<? super Symbol> listener : delegates) {
            listener.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, configs);
        }
    }
}
@@ -120,6 +120,10 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
        return new ArrayList<ANTLRErrorListener<? super Symbol>>(_listeners);
    }

    public ANTLRErrorListener<? super Symbol> getErrorListenerDispatch() {
        return new ProxyErrorListener<Symbol>(getErrorListeners());
    }

    // subclass needs to override these if there are sempreds or actions
    // that the ATN interp needs to execute
    public boolean sempred(@Nullable RuleContext _localctx, int ruleIndex, int actionIndex) {
@@ -208,7 +208,12 @@ public class RuleContext implements ParseTree.RuleNode {
        return invokingState == -1;
    }

    // satisfy the ParseTree interface
    // satisfy the ParseTree / SyntaxTree interface

    @Override
    public Interval getSourceInterval() {
        return Interval.INVALID;
    }

    @Override
    public RuleContext getRuleContext() { return this; }
@@ -219,6 +224,27 @@ public class RuleContext implements ParseTree.RuleNode {
    @Override
    public RuleContext getPayload() { return this; }

    /** Return the combined text of all child nodes. This method only considers
     * tokens which have been added to the parse tree.
     * <p>
     * Since tokens on hidden channels (e.g. whitespace or comments) are not
     * added to the parse trees, they will not appear in the output of this
     * method.
     */
    @Override
    public String getText() {
        if (getChildCount() == 0) {
            return "";
        }

        StringBuilder builder = new StringBuilder();
        for (int i = 0; i < getChildCount(); i++) {
            builder.append(getChild(i).getText());
        }

        return builder.toString();
    }
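A short illustration of the caveat in the comment above (editor's addition): because getText() concatenates only tokens that made it into the tree, whitespace from hidden channels is dropped.

    // For input "int x ;" a rule context covering all three tokens yields:
    // ctx.getText()                            -> "intx;"   (hidden whitespace omitted)
    // tokens.getText(ctx.getSourceInterval())  -> "int x ;" (raw stream text)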

    public int getRuleIndex() { return -1; }

    @Override
@@ -231,14 +257,6 @@ public class RuleContext implements ParseTree.RuleNode {
        return 0;
    }

    @Override
    public Interval getSourceInterval() {
        if ( getChildCount()==0 ) return Interval.INVALID;
        int start = getChild(0).getSourceInterval().a;
        int stop = getChild(getChildCount()-1).getSourceInterval().b;
        return new Interval(start, stop);
    }

    @Override
    public <T> T accept(ParseTreeVisitor<? extends T> visitor) { return visitor.visitChildren(this); }
@@ -28,9 +28,13 @@
 */
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.Nullable;

import java.util.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Useful for dumping out the input stream after doing some
 * augmentation or other manipulations.
@@ -354,30 +358,35 @@ public class TokenRewriteStream extends CommonTokenStream {
    }

    @Override
    public String toString() {
        fill();
        return toString(MIN_TOKEN_INDEX, size()-1);
    }
    public String toString() { return getText(); }

    public String toString(String programName) {
        fill();
        return toString(programName, MIN_TOKEN_INDEX, size()-1);
        return toString(programName, Interval.of(0, size() - 1));
    }

    @Override
    public String toString(int start, int end) {
        return toString(DEFAULT_PROGRAM_NAME, start, end);
    public String getText() {
        fill();
        return getText(Interval.of(0,size()-1));
    }

    public String toString(String programName, int start, int end) {
    @Override
    public String getText(Interval interval) {
        return toString(DEFAULT_PROGRAM_NAME, interval);
    }

    public String toString(String programName, Interval interval) {
        List<RewriteOperation> rewrites = programs.get(programName);
        int start = interval.a;
        int stop = interval.b;

        // ensure start/end are in range
        if ( end>tokens.size()-1 ) end = tokens.size()-1;
        if ( stop>tokens.size()-1 ) stop = tokens.size()-1;
        if ( start<0 ) start = 0;

        if ( rewrites==null || rewrites.isEmpty() ) {
            return toOriginalString(start,end); // no instructions to execute
            return toOriginalString(start,stop); // no instructions to execute
        }
        StringBuilder buf = new StringBuilder();
@@ -386,7 +395,7 @@ public class TokenRewriteStream extends CommonTokenStream {

        // Walk buffer, executing instructions and emitting tokens
        int i = start;
        while ( i <= end && i < tokens.size() ) {
        while ( i <= stop && i < tokens.size() ) {
            RewriteOperation op = indexToOp.get(i);
            indexToOp.remove(i); // remove so any left have index size-1
            Token t = tokens.get(i);
@@ -403,12 +412,10 @@ public class TokenRewriteStream extends CommonTokenStream {
        // include stuff after end if it's last index in buffer
        // So, if they did an insertAfter(lastValidIndex, "foo"), include
        // foo if end==lastValidIndex.
        if ( end==tokens.size()-1 ) {
        if ( stop==tokens.size()-1 ) {
            // Scan any remaining operations after last token
            // should be included (they will be inserts).
            Iterator<RewriteOperation> it = indexToOp.values().iterator();
            while (it.hasNext()) {
                RewriteOperation op = it.next();
            for (RewriteOperation op : indexToOp.values()) {
                if ( op.index >= tokens.size()-1 ) buf.append(op.text);
            }
        }
@@ -29,8 +29,10 @@

package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;

/** A stream of tokens accessing tokens from a TokenSource */
public interface TokenStream extends SymbolStream<Token> {
public interface TokenStream extends IntStream {
    /** Get Token at current input pointer + i ahead where i=1 is next Token.
     * i<0 indicates tokens in the past. So -1 is previous token and -2 is
     * two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken.
@@ -38,14 +40,8 @@ public interface TokenStream extends SymbolStream<Token> {
     * that is negative.
     * TODO (Sam): Throw exception for invalid k?
     */
    @Override
    public Token LT(int k);

    /** How far ahead has the stream been asked to look? The return
     * value is a valid index from 0..n-1.
     */
    // int range();

    /** Get a token at an absolute index i; 0..n-1. This is really only
     * needed for profiling and debugging and token stream rewriting.
     * If you don't want to buffer up tokens, then this method makes no
@@ -53,7 +49,6 @@ public interface TokenStream extends SymbolStream<Token> {
     * I believe DebugTokenStream can easily be altered to not use
     * this method, removing the dependency.
     */
    @Override
    public Token get(int i);

    /** Where is this stream pulling tokens from? This is not the name, but
@@ -61,18 +56,25 @@ public interface TokenStream extends SymbolStream<Token> {
     */
    public TokenSource getTokenSource();

    /** Return the text of all tokens from start to stop, inclusive.
     * If the stream does not buffer all the tokens then it can just
     * return "" or null; Users should not access $ruleLabel.text in
     * an action of course in that case.
    /** Return the text of all tokens from within the interval.
     * If the stream does not buffer all the tokens then it must
     * throw UnsupportedOperationException.
     * Users should, of course, not access $ruleLabel.text in an action in
     * that case.
     * @param interval
     */
    public String toString(int start, int stop);
    public String getText(Interval interval);

    public String getText();

    public String getText(RuleContext ctx);

    /** Because the user is not required to use a token with an index stored
     * in it, we must provide a means for two token objects themselves to
     * indicate the start/end location. Most often this will just delegate
     * to the other toString(int,int). This is also parallel with
     * the TreeNodeStream.toString(Object,Object).
     * to the other getText(Interval).
     * If the stream does not buffer all the tokens then it must
     * throw UnsupportedOperationException.
     */
    public String toString(Token start, Token stop);
    public String getText(Token start, Token stop);
}
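One clarifying sketch (editor's addition): the two-token overload is a convenience over the Interval form for tokens that carry their stream indices.

    // Equivalent calls, assuming both tokens came from this buffered stream:
    String a = stream.getText(start, stop);
    String b = stream.getText(Interval.of(start.getTokenIndex(), stop.getTokenIndex()));
    // a.equals(b) for any buffering TokenStream implementation shown in this diff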
@@ -0,0 +1,557 @@
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.Nullable;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Useful for rewriting out a buffered input token stream after doing some
 * augmentation or other manipulations on it.
 *
 * You can insert stuff, replace, and delete chunks. Note that the
 * operations are done lazily--only if you convert the buffer to a
 * String with getText(). This is very efficient because you are not moving
 * data around all the time. As the buffer of tokens is converted to strings,
 * the getText() method(s) scan the input token stream and check
 * to see if there is an operation at the current index.
 * If so, the operation is done and then normal String
 * rendering continues on the buffer. This is like having multiple Turing
 * machine instruction streams (programs) operating on a single input tape. :)
 *
 * This rewriter makes no modifications to the token stream. It does not
 * ask the stream to fill itself up nor does it advance the input cursor.
 * The token stream index() will return the same value before and after
 * any getText() call.
 *
 * The rewriter only works on tokens that you have in the buffer and
 * ignores the current input cursor. If you are buffering tokens on-demand,
 * calling getText() halfway through the input will only do rewrites
 * for those tokens in the first half of the file.
 *
 * Since the operations are done lazily at getText-time, operations do not
 * screw up the token index values. That is, an insert operation at token
 * index i does not change the index values for tokens i+1..n-1.
 *
 * Because operations never actually alter the buffer, you may always get
 * the original token stream back without undoing anything. Since
 * the instructions are queued up, you can easily simulate transactions and
 * roll back any changes if there is an error just by removing instructions.
 * For example,
 *
 *   CharStream input = new ANTLRFileStream("input");
 *   TLexer lex = new TLexer(input);
 *   CommonTokenStream tokens = new CommonTokenStream(lex);
 *   T parser = new T(tokens);
 *   TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
 *   parser.startRule();
 *
 * Then in the rules, you can execute (assuming rewriter is visible):
 *   Token t,u;
 *   ...
 *   rewriter.insertAfter(t, "text to put after t");
 *   rewriter.insertAfter(u, "text after u");
 *   System.out.println(tokens.toString());
 *
 * You can also have multiple "instruction streams" and get multiple
 * rewrites from a single pass over the input. Just name the instruction
 * streams and use that name again when printing the buffer. This could be
 * useful for generating a C file and also its header file--all from the
 * same buffer:
 *
 *   tokens.insertAfter("pass1", t, "text to put after t");
 *   tokens.insertAfter("pass2", u, "text after u");
 *   System.out.println(tokens.toString("pass1"));
 *   System.out.println(tokens.toString("pass2"));
 *
 * If you don't use named rewrite streams, a "default" stream is used as
 * the first example shows.
 */
public class TokenStreamRewriter {
    public static final String DEFAULT_PROGRAM_NAME = "default";
    public static final int PROGRAM_INIT_SIZE = 100;
    public static final int MIN_TOKEN_INDEX = 0;

    // Define the rewrite operation hierarchy

    public class RewriteOperation {
        /** What index into rewrites List are we? */
        protected int instructionIndex;
        /** Token buffer index. */
        protected int index;
        protected Object text;

        protected RewriteOperation(int index) {
            this.index = index;
        }

        protected RewriteOperation(int index, Object text) {
            this.index = index;
            this.text = text;
        }

        /** Execute the rewrite operation by possibly adding to the buffer.
         * Return the index of the next token to operate on.
         */
        public int execute(StringBuilder buf) {
            return index;
        }

        @Override
        public String toString() {
            String opName = getClass().getName();
            int $index = opName.indexOf('$');
            opName = opName.substring($index+1, opName.length());
            return "<"+opName+"@"+tokens.get(index)+
                   ":\""+text+"\">";
        }
    }

    class InsertBeforeOp extends RewriteOperation {
        public InsertBeforeOp(int index, Object text) {
            super(index,text);
        }

        @Override
        public int execute(StringBuilder buf) {
            buf.append(text);
            if ( tokens.get(index).getType()!=Token.EOF ) {
                buf.append(tokens.get(index).getText());
            }
            return index+1;
        }
    }

    /** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
     * instructions.
     */
    class ReplaceOp extends RewriteOperation {
        protected int lastIndex;

        public ReplaceOp(int from, int to, Object text) {
            super(from,text);
            lastIndex = to;
        }

        @Override
        public int execute(StringBuilder buf) {
            if ( text!=null ) {
                buf.append(text);
            }
            return lastIndex+1;
        }

        @Override
        public String toString() {
            if ( text==null ) {
                return "<DeleteOp@"+tokens.get(index)+
                       ".."+tokens.get(lastIndex)+">";
            }
            return "<ReplaceOp@"+tokens.get(index)+
                   ".."+tokens.get(lastIndex)+":\""+text+"\">";
        }
    }

    /** Our source stream */
    protected final TokenStream tokens;

    /** You may have multiple, named streams of rewrite operations.
     * I'm calling these things "programs."
     * Maps String (name) -> rewrite (List)
     */
    protected final Map<String, List<RewriteOperation>> programs;

    /** Map String (program name) -> Integer index */
    protected final Map<String, Integer> lastRewriteTokenIndexes;

    public TokenStreamRewriter(TokenStream tokens) {
        this.tokens = tokens;
        programs = new HashMap<String, List<RewriteOperation>>();
        programs.put(DEFAULT_PROGRAM_NAME,
                     new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE));
        lastRewriteTokenIndexes = new HashMap<String, Integer>();
    }

    public final TokenStream getTokenStream() {
        return tokens;
    }

    public void rollback(int instructionIndex) {
        rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
    }

    /** Rollback the instruction stream for a program so that
     * the indicated instruction (via instructionIndex) is no
     * longer in the stream. UNTESTED!
     */
    public void rollback(String programName, int instructionIndex) {
        List<RewriteOperation> is = programs.get(programName);
        if ( is!=null ) {
            programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex));
        }
    }

    public void deleteProgram() {
        deleteProgram(DEFAULT_PROGRAM_NAME);
    }

    /** Reset the program so that no instructions exist */
    public void deleteProgram(String programName) {
        rollback(programName, MIN_TOKEN_INDEX);
    }

    public void insertAfter(Token t, Object text) {
        insertAfter(DEFAULT_PROGRAM_NAME, t, text);
    }

    public void insertAfter(int index, Object text) {
        insertAfter(DEFAULT_PROGRAM_NAME, index, text);
    }

    public void insertAfter(String programName, Token t, Object text) {
        insertAfter(programName,t.getTokenIndex(), text);
    }

    public void insertAfter(String programName, int index, Object text) {
        // to insert after, just insert before next index (even if past end)
        insertBefore(programName,index+1, text);
    }

    public void insertBefore(Token t, Object text) {
        insertBefore(DEFAULT_PROGRAM_NAME, t, text);
    }

    public void insertBefore(int index, Object text) {
        insertBefore(DEFAULT_PROGRAM_NAME, index, text);
    }

    public void insertBefore(String programName, Token t, Object text) {
        insertBefore(programName, t.getTokenIndex(), text);
    }

    public void insertBefore(String programName, int index, Object text) {
        RewriteOperation op = new InsertBeforeOp(index,text);
        List<RewriteOperation> rewrites = getProgram(programName);
        op.instructionIndex = rewrites.size();
        rewrites.add(op);
    }

    public void replace(int index, Object text) {
        replace(DEFAULT_PROGRAM_NAME, index, index, text);
    }

    public void replace(int from, int to, Object text) {
        replace(DEFAULT_PROGRAM_NAME, from, to, text);
    }

    public void replace(Token indexT, Object text) {
        replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
    }

    public void replace(Token from, Token to, Object text) {
        replace(DEFAULT_PROGRAM_NAME, from, to, text);
    }

    public void replace(String programName, int from, int to, @Nullable Object text) {
        if ( from > to || from<0 || to<0 || to >= tokens.size() ) {
            throw new IllegalArgumentException("replace: range invalid: "+from+".."+to+"(size="+tokens.size()+")");
        }
        RewriteOperation op = new ReplaceOp(from, to, text);
        List<RewriteOperation> rewrites = getProgram(programName);
        op.instructionIndex = rewrites.size();
        rewrites.add(op);
    }

    public void replace(String programName, Token from, Token to, @Nullable Object text) {
        replace(programName,
                from.getTokenIndex(),
                to.getTokenIndex(),
                text);
    }

    public void delete(int index) {
        delete(DEFAULT_PROGRAM_NAME, index, index);
    }

    public void delete(int from, int to) {
        delete(DEFAULT_PROGRAM_NAME, from, to);
    }

    public void delete(Token indexT) {
        delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
    }

    public void delete(Token from, Token to) {
        delete(DEFAULT_PROGRAM_NAME, from, to);
    }

    public void delete(String programName, int from, int to) {
        replace(programName,from,to,null);
    }

    public void delete(String programName, Token from, Token to) {
        replace(programName,from,to,null);
    }
    public int getLastRewriteTokenIndex() {
        return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
    }

    protected int getLastRewriteTokenIndex(String programName) {
        Integer I = lastRewriteTokenIndexes.get(programName);
        if ( I==null ) {
            return -1;
        }
        return I;
    }

    protected void setLastRewriteTokenIndex(String programName, int i) {
        lastRewriteTokenIndexes.put(programName, i);
    }

    protected List<RewriteOperation> getProgram(String name) {
        List<RewriteOperation> is = programs.get(name);
        if ( is==null ) {
            is = initializeProgram(name);
        }
        return is;
    }

    private List<RewriteOperation> initializeProgram(String name) {
        List<RewriteOperation> is = new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE);
        programs.put(name, is);
        return is;
    }

    /** Return the text from the original tokens altered per the
     * instructions given to this rewriter.
     */
    public String getText() {
        return getText(DEFAULT_PROGRAM_NAME, Interval.of(0,tokens.size()-1));
    }

    /** Return the text associated with the tokens in the interval from the
     * original token stream but with the alterations given to this rewriter.
     * The interval refers to the indexes in the original token stream.
     * We do not alter the token stream in any way, so the indexes
     * and intervals are still consistent. Includes any operations done
     * to the first and last token in the interval. So, if you did an
     * insertBefore on the first token, you would get that insertion.
     * The same is true if you do an insertAfter the stop token.
     */
    public String getText(Interval interval) {
        return getText(DEFAULT_PROGRAM_NAME, interval);
    }

    public String getText(String programName, Interval interval) {
        List<RewriteOperation> rewrites = programs.get(programName);
        int start = interval.a;
        int stop = interval.b;

        // ensure start/end are in range
        if ( stop>tokens.size()-1 ) stop = tokens.size()-1;
        if ( start<0 ) start = 0;

        if ( rewrites==null || rewrites.isEmpty() ) {
            return tokens.getText(interval); // no instructions to execute
        }
        StringBuilder buf = new StringBuilder();

        // First, optimize instruction stream
        Map<Integer, RewriteOperation> indexToOp = reduceToSingleOperationPerIndex(rewrites);

        // Walk buffer, executing instructions and emitting tokens
        int i = start;
        while ( i <= stop && i < tokens.size() ) {
            RewriteOperation op = indexToOp.get(i);
            indexToOp.remove(i); // remove so any left have index size-1
            Token t = tokens.get(i);
            if ( op==null ) {
                // no operation at that index, just dump token
                if ( t.getType()!=Token.EOF ) buf.append(t.getText());
                i++; // move to next token
            }
            else {
                i = op.execute(buf); // execute operation and skip
            }
        }

        // include stuff after end if it's last index in buffer
        // So, if they did an insertAfter(lastValidIndex, "foo"), include
        // foo if end==lastValidIndex.
        if ( stop==tokens.size()-1 ) {
            // Scan any remaining operations after last token
            // should be included (they will be inserts).
            for (RewriteOperation op : indexToOp.values()) {
                if ( op.index >= tokens.size()-1 ) buf.append(op.text);
            }
        }
        return buf.toString();
    }

    /** We need to combine operations and report invalid operations (like
     * overlapping replaces that are not completely nested). Inserts to
     * same index need to be combined etc... Here are the cases:
     *
     * I.i.u I.j.v                           leave alone, nonoverlapping
     * I.i.u I.i.v                           combine: Iivu
     *
     * R.i-j.u R.x-y.v | i-j in x-y          delete first R
     * R.i-j.u R.i-j.v                       delete first R
     * R.i-j.u R.x-y.v | x-y in i-j          ERROR
     * R.i-j.u R.x-y.v | boundaries overlap  ERROR
     *
     * Delete special case of replace (text==null):
     * D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
     *
     * I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before
     *                                       we're not deleting i)
     * I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
     * R.x-y.v I.i.u   | i in x-y            ERROR
     * R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
     * R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
     *
     * I.i.u = insert u before op @ index i
     * R.x-y.u = replace x-y indexed tokens with u
     *
     * First we need to examine replaces. For any replace op:
     *
     *   1. wipe out any insertions before op within that range.
     *   2. Drop any replace op before that is contained completely within
     *      that range.
     *   3. Throw exception upon boundary overlap with any previous replace.
     *
     * Then we can deal with inserts:
     *
     *   1. for any inserts to same index, combine even if not adjacent.
     *   2. for any prior replace with same left boundary, combine this
     *      insert with replace and delete this replace.
     *   3. throw exception if index in same range as previous replace
     *
     * Don't actually delete; make op null in list. Easier to walk list.
     * Later we can throw as we add to index -> op map.
     *
     * Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
     * inserted stuff would be before the replace range. But, if you
     * add tokens in front of a method body '{' and then delete the method
     * body, I think the stuff before the '{' you added should disappear too.
     *
     * Return a map from token index to operation.
     */
    protected Map<Integer, RewriteOperation> reduceToSingleOperationPerIndex(List<RewriteOperation> rewrites) {
//        System.out.println("rewrites="+rewrites);

        // WALK REPLACES
        for (int i = 0; i < rewrites.size(); i++) {
            RewriteOperation op = rewrites.get(i);
            if ( op==null ) continue;
            if ( !(op instanceof ReplaceOp) ) continue;
            ReplaceOp rop = (ReplaceOp)rewrites.get(i);
            // Wipe prior inserts within range
            List<? extends InsertBeforeOp> inserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
            for (InsertBeforeOp iop : inserts) {
                if ( iop.index == rop.index ) {
                    // E.g., insert before 2, delete 2..2; update replace
                    // text to include insert before, kill insert
                    rewrites.set(iop.instructionIndex, null);
                    rop.text = iop.text.toString() + (rop.text!=null?rop.text.toString():"");
                }
                else if ( iop.index > rop.index && iop.index <= rop.lastIndex ) {
                    // delete insert as it's a no-op.
                    rewrites.set(iop.instructionIndex, null);
                }
            }
            // Drop any prior replaces contained within
            List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
            for (ReplaceOp prevRop : prevReplaces) {
                if ( prevRop.index>=rop.index && prevRop.lastIndex <= rop.lastIndex ) {
                    // delete replace as it's a no-op.
                    rewrites.set(prevRop.instructionIndex, null);
                    continue;
                }
                // throw exception unless disjoint or identical
                boolean disjoint =
                    prevRop.lastIndex<rop.index || prevRop.index > rop.lastIndex;
                boolean same =
                    prevRop.index==rop.index && prevRop.lastIndex==rop.lastIndex;
                // Delete special case of replace (text==null):
                // D.i-j.u D.x-y.v | boundaries overlap    combine to min(left)..max(right)
                if ( prevRop.text==null && rop.text==null && !disjoint ) {
                    //System.out.println("overlapping deletes: "+prevRop+", "+rop);
                    rewrites.set(prevRop.instructionIndex, null); // kill first delete
                    rop.index = Math.min(prevRop.index, rop.index);
                    rop.lastIndex = Math.max(prevRop.lastIndex, rop.lastIndex);
                    System.out.println("new rop "+rop);
                }
                else if ( !disjoint && !same ) {
                    throw new IllegalArgumentException("replace op boundaries of "+rop+
                                                       " overlap with previous "+prevRop);
                }
            }
        }

        // WALK INSERTS
        for (int i = 0; i < rewrites.size(); i++) {
            RewriteOperation op = rewrites.get(i);
            if ( op==null ) continue;
            if ( !(op instanceof InsertBeforeOp) ) continue;
            InsertBeforeOp iop = (InsertBeforeOp)rewrites.get(i);
            // combine current insert with prior if any at same index
            List<? extends InsertBeforeOp> prevInserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
            for (InsertBeforeOp prevIop : prevInserts) {
                if ( prevIop.index == iop.index ) { // combine objects
                    // convert to strings...we're in process of toString'ing
                    // whole token buffer so no lazy eval issue with any templates
                    iop.text = catOpText(iop.text,prevIop.text);
                    // delete redundant prior insert
                    rewrites.set(prevIop.instructionIndex, null);
                }
            }
            // look for replaces where iop.index is in range; error
            List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
            for (ReplaceOp rop : prevReplaces) {
                if ( iop.index == rop.index ) {
                    rop.text = catOpText(iop.text,rop.text);
                    rewrites.set(i, null); // delete current insert
                    continue;
                }
                if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
                    throw new IllegalArgumentException("insert op "+iop+
                                                       " within boundaries of previous "+rop);
                }
            }
        }
//        System.out.println("rewrites after="+rewrites);
        Map<Integer, RewriteOperation> m = new HashMap<Integer, RewriteOperation>();
        for (int i = 0; i < rewrites.size(); i++) {
            RewriteOperation op = rewrites.get(i);
            if ( op==null ) continue; // ignore deleted ops
            if ( m.get(op.index)!=null ) {
                throw new Error("should only be one op per index");
            }
            m.put(op.index, op);
        }
        //System.out.println("index to op: "+m);
        return m;
    }

    protected String catOpText(Object a, Object b) {
        String x = "";
        String y = "";
        if ( a!=null ) x = a.toString();
        if ( b!=null ) y = b.toString();
        return x+y;
    }

    /** Get all operations before an index of a particular kind */
    protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind, int before) {
        List<T> ops = new ArrayList<T>();
        for (int i=0; i<before && i<rewrites.size(); i++) {
            RewriteOperation op = rewrites.get(i);
            if ( op==null ) continue; // ignore deleted
            if ( kind.isInstance(op) ) {
                ops.add((T)op);
            }
        }
        return ops;
    }

}
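A minimal usage sketch (not part of the commit) of how the reduction rules above play out at runtime; it assumes the surrounding class is the runtime's token-stream rewriter with the usual insertBefore/replace/getText API:

    TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
    rewriter.insertBefore(2, "x");      // I.2.x
    rewriter.replace(2, 2, "y");        // R.2-2.y: combines with I.2.x into R.2-2.xy
    String result = rewriter.getText(); // token 2 is rendered as "xy"
    // Two overlapping, non-nested replaces would instead trigger the
    // IllegalArgumentException thrown in reduceToSingleOperationPerIndex above.
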
@@ -29,6 +29,8 @@

package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

@@ -195,7 +197,11 @@ public class UnbufferedCharStream implements CharStream {
    }

    @Override
    public String substring(int start, int stop) {
        return null; // map to buffer indexes
    public String getText(Interval interval) {
        if (interval.a < bufferStartIndex || interval.b >= bufferStartIndex + n) {
            throw new UnsupportedOperationException();
        }

        return new String(data, interval.a, interval.length());
    }
}
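Worth noting (not part of the commit): the new getText can only serve intervals still inside the buffered window, so callers must ask for text before the window slides past the characters they want. A hypothetical helper, assuming the Interval import above:

    static String currentChar(CharStream input) {
        int here = input.index();
        return input.getText(Interval.of(here, here)); // ok: still inside the window
        // an interval before bufferStartIndex throws UnsupportedOperationException
    }
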

@@ -29,6 +29,7 @@

package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.LookaheadStream;

/** A token stream that pulls tokens from the source on-demand and

@@ -77,22 +78,56 @@ public class UnbufferedTokenStream<T extends Token>
    @Override
    public TokenSource getTokenSource() { return tokenSource; }

    @Override
    public String toString(int start, int stop) {
        throw new UnsupportedOperationException("unbuffered stream can't give strings");
    }
    @Override
    public String getText(Interval interval) {
        int bufferStartIndex = currentElementIndex - p;
        int bufferStopIndex = bufferStartIndex + data.size() - 1;

    @Override
    public String toString(Token start, Token stop) {
        throw new UnsupportedOperationException("unbuffered stream can't give strings");
    }
        int start = interval.a;
        int stop = interval.b;
        if (start < bufferStartIndex || stop > bufferStopIndex) {
            throw new UnsupportedOperationException();
        }

    @Override
        StringBuilder buf = new StringBuilder();
        for (int i = start; i <= stop; i++) {
            T t = data.get(i - bufferStartIndex);
            buf.append(t.getText());
        }

        return buf.toString();
    }

    @Override
    public String getText() {
        return getText(Interval.of(0,index()));
    }

    @Override
    public String getText(RuleContext ctx) {
        return getText(ctx.getSourceInterval());
    }

    @Override
    public String getText(Token start, Token stop) {
        if ( start!=null && stop!=null ) {
            return getText(Interval.of(start.getTokenIndex(), stop.getTokenIndex()));
        }
        return null;
    }

    @Override
    public int LA(int i) { return LT(i).getType(); }

    @Override
    public T get(int i) {
        throw new UnsupportedOperationException("Absolute token indexes are meaningless in an unbuffered stream");
        int bufferStartIndex = currentElementIndex - p;
        int bufferStopIndex = bufferStartIndex + data.size() - 1;
        if (i < bufferStartIndex || i > bufferStopIndex) {
            throw new UnsupportedOperationException();
        }

        return data.get(i - bufferStartIndex);
    }

    @Override
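Similarly for tokens (sketch, not from the commit): text is retrievable only while the tokens sit in the sliding window, so a rule's matched text is best pulled through the new interval overloads right after the rule finishes:

    static String matchedText(UnbufferedTokenStream<Token> tokens, RuleContext ctx) {
        return tokens.getText(ctx); // delegates to getText(ctx.getSourceInterval())
    }
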
@@ -60,6 +60,10 @@ public abstract class ATNSimulator {
        int p = 0;
        atn.grammarType = toInt(data[p++]);
        atn.maxTokenType = toInt(data[p++]);

        //
        // STATES
        //
        int nstates = toInt(data[p++]);
        for (int i=1; i<=nstates; i++) {
            int stype = toInt(data[p++]);

@@ -75,6 +79,10 @@ public abstract class ATNSimulator {
            }
            atn.addState(s);
        }

        //
        // RULES
        //
        int nrules = toInt(data[p++]);
        if ( atn.grammarType == ATN.LEXER ) {
            atn.ruleToTokenType = new int[nrules];

@@ -92,11 +100,19 @@ public abstract class ATNSimulator {
                atn.ruleToActionIndex[i] = actionIndex;
            }
        }

        //
        // MODES
        //
        int nmodes = toInt(data[p++]);
        for (int i=0; i<nmodes; i++) {
            int s = toInt(data[p++]);
            atn.modeToStartState.add((TokensStartState)atn.states.get(s));
        }

        //
        // SETS
        //
        int nsets = toInt(data[p++]);
        for (int i=1; i<=nsets; i++) {
            int nintervals = toInt(data[p]);

@@ -108,6 +124,10 @@ public abstract class ATNSimulator {
                p += 2;
            }
        }

        //
        // EDGES
        //
        int nedges = toInt(data[p++]);
        for (int i=1; i<=nedges; i++) {
            int src = toInt(data[p]);

@@ -125,6 +145,10 @@ public abstract class ATNSimulator {
            srcState.addTransition(trans);
            p += 6;
        }

        //
        // DECISIONS
        //
        int ndecisions = toInt(data[p++]);
        for (int i=1; i<=ndecisions; i++) {
            int s = toInt(data[p++]);

@@ -134,6 +158,7 @@ public abstract class ATNSimulator {
            decState.decision = i-1;
            decState.isGreedy = isGreedy==1;
        }

        return atn;
    }
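Read back-to-back, the deserializer hunks above imply this overall layout for the serialized ATN (a summary inferred from the code, not a normative spec):

    // data[] = grammarType, maxTokenType,
    //          nstates,    <state records>,
    //          nrules,     <rule records: tokenType/actionIndex for lexer grammars>,
    //          nmodes,     <one TokensStartState index per mode>,
    //          nsets,      <interval sets, two ints per interval>,
    //          nedges,     <6-int edge records starting with src, trg, edgeType>,
    //          ndecisions, <decision state indexes; decision = i-1, plus isGreedy flag>
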
@@ -114,29 +114,34 @@ public class LL1Analyzer {

        int n = s.getNumberOfTransitions();
        for (int i=0; i<n; i++) {
            Transition t = s.transition(i);
            if ( t.getClass() == RuleTransition.class ) {
                RuleContext newContext =
                    new RuleContext(ctx, s.stateNumber);
                _LOOK(t.target, newContext, look, lookBusy, seeThruPreds);
            }
            else if ( t.isEpsilon() && seeThruPreds ) {
                _LOOK(t.target, ctx, look, lookBusy, seeThruPreds);
            }
            else if ( t.getClass() == WildcardTransition.class ) {
                look.addAll( IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType) );
            }
            else {
            Transition t = s.transition(i);
            if ( t.getClass() == RuleTransition.class ) {
                RuleContext newContext =
                    new RuleContext(ctx, s.stateNumber);
                _LOOK(t.target, newContext, look, lookBusy, seeThruPreds);
            }
            else if ( t instanceof PredicateTransition ) {
                if ( seeThruPreds ) {
                    _LOOK(t.target, ctx, look, lookBusy, seeThruPreds);
                }
            }
            else if ( t.isEpsilon() ) {
                _LOOK(t.target, ctx, look, lookBusy, seeThruPreds);
            }
            else if ( t.getClass() == WildcardTransition.class ) {
                look.addAll( IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType) );
            }
            else {
//                System.out.println("adding "+ t);
                IntervalSet set = t.label();
                if (set != null) {
                    if (t instanceof NotSetTransition) {
                        set = set.complement(IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType));
                    }
                    look.addAll(set);
                }
            }
        }
    }
                IntervalSet set = t.label();
                if (set != null) {
                    if (t instanceof NotSetTransition) {
                        set = set.complement(IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType));
                    }
                    look.addAll(set);
                }
            }
        }
    }

    }
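A worked sketch (not from the commit) of what the restructured loop computes. For one alternative labeled by a plain token and another by a negated set, FIRST is the union of the plain label with the complement taken in the NotSetTransition branch; X and Y below are hypothetical token-type constants:

    IntervalSet first = new IntervalSet();
    first.addAll(IntervalSet.of(X, X));                  // branch labeled 'x'
    IntervalSet notY = IntervalSet.of(Y, Y).complement(  // branch labeled ~'y'
        IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType));
    first.addAll(notY);
    // Predicate transitions contribute only when seeThruPreds is set.
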
@@ -29,9 +29,15 @@

package org.antlr.v4.runtime.atn;

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.IntStream;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.LexerNoViableAltException;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.dfa.DFAState;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

@@ -241,7 +247,8 @@ public class LexerATNSimulator extends ATNSimulator {
            t = input.LA(1);
        }

        return failOrAccept(prevAccept, input, prevAccept.state.configset, t);
        ATNConfigSet reach = prevAccept.state != null ? prevAccept.state.configset : null;
        return failOrAccept(prevAccept, input, reach, t);
    }

    protected int execATN(@NotNull CharStream input, @NotNull ATNConfigSet s0, @Nullable DFAState ds0) {

@@ -580,7 +587,7 @@ public class LexerATNSimulator extends ATNSimulator {
        if ( dfa_debug ) {
            System.out.format("no edge for %s\n", getTokenName(input.LA(1)));
            System.out.format("ATN exec upon %s at DFA state %d = %s\n",
                input.substring(startIndex, input.index()), s.stateNumber, s.configset);
                input.getText(Interval.of(startIndex, input.index())), s.stateNumber, s.configset);
        }

        int ttype = execATN(input, s.configset, s);

@@ -712,7 +719,7 @@ public class LexerATNSimulator extends ATNSimulator {
    /** Get the text of the current token */
    @NotNull
    public String getText(@NotNull CharStream input) {
        return input.substring(this.startIndex, input.index());
        return input.getText(Interval.of(startIndex, input.index()));
    }

    public int getLine() {
@@ -34,10 +34,11 @@ import org.antlr.v4.runtime.NoViableAltException;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.SymbolStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.dfa.DFAState;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

@@ -285,7 +286,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
    public void reset() {
    }

    public int adaptivePredict(@NotNull SymbolStream<? extends Symbol> input, int decision,
    public int adaptivePredict(@NotNull TokenStream input, int decision,
                               @Nullable ParserRuleContext<?> outerContext)
    {
        predict_calls++;

@@ -311,7 +312,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
        }
    }

    public int predictATN(@NotNull DFA dfa, @NotNull SymbolStream<? extends Symbol> input,
    public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
                          @Nullable ParserRuleContext<?> outerContext)
    {
        if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;

@@ -343,7 +344,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
    }

    public int execDFA(@NotNull DFA dfa, @NotNull DFAState s0,
                       @NotNull SymbolStream<? extends Symbol> input, int startIndex,
                       @NotNull TokenStream input, int startIndex,
                       @Nullable ParserRuleContext<?> outerContext)
    {
        if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;

@@ -396,8 +397,9 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
        if ( dfa_debug && t>=0 ) System.out.println("no edge for "+parser.getTokenNames()[t]);
        int alt;
        if ( dfa_debug ) {
            Interval interval = Interval.of(startIndex, parser.getTokenStream().index());
            System.out.println("ATN exec upon "+
                               parser.getInputString(startIndex) +
                               parser.getTokenStream().getText(interval) +
                               " at DFA state "+s.stateNumber);
        }

@@ -499,7 +501,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {

     */
    public int execATN(@NotNull DFA dfa, @NotNull DFAState s0,
                       @NotNull SymbolStream<? extends Symbol> input, int startIndex,
                       @NotNull TokenStream input, int startIndex,
                       ParserRuleContext<?> outerContext)
    {
        if ( debug ) System.out.println("execATN decision "+dfa.decision+" exec LA(1)=="+ getLookaheadName(input));

@@ -626,7 +628,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
    public ATNConfigSet execATNWithFullContext(DFA dfa,
                                               DFAState D, // how far we got before failing over
                                               @NotNull ATNConfigSet s0,
                                               @NotNull SymbolStream<? extends Symbol> input, int startIndex,
                                               @NotNull TokenStream input, int startIndex,
                                               ParserRuleContext<?> outerContext,
                                               int nalts,
                                               boolean greedy)

@@ -960,7 +962,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
            if ( debug ) System.out.println("Loop back; push "+config.state.stateNumber+", stack="+config.context);
        }
        else if ( config.state.getClass()==LoopEndState.class ) {
            if ( debug ) System.out.println("Loop end; pop, stack="+config.context);
            if ( debug ) System.out.println("Loop end; pop, stack=" + config.context);
            RuleContext p = config.context;
            LoopEndState end = (LoopEndState) config.state;
            while ( !p.isEmpty() && p.invokingState == end.loopBackStateNumber ) {

@@ -1293,7 +1295,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
        return String.valueOf(t);
    }

    public String getLookaheadName(SymbolStream<? extends Symbol> input) {
    public String getLookaheadName(TokenStream input) {
        return getTokenName(input.LA(1));
    }

@@ -1318,7 +1320,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
    }

    @NotNull
    public NoViableAltException noViableAlt(@NotNull SymbolStream<? extends Symbol> input,
    public NoViableAltException noViableAlt(@NotNull TokenStream input,
                                            @NotNull ParserRuleContext<?> outerContext,
                                            @NotNull ATNConfigSet configs,
                                            int startIndex)

@@ -1405,18 +1407,20 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {

    public void reportAttemptingFullContext(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
        if ( debug || retry_debug ) {
            System.out.println("reportAttemptingFullContext decision="+dfa.decision+":"+configs+
                               ", input="+parser.getInputString(startIndex, stopIndex));
            Interval interval = Interval.of(startIndex, stopIndex);
            System.out.println("reportAttemptingFullContext decision="+dfa.decision+":"+configs+
                               ", input="+parser.getTokenStream().getText(interval));
        }
        if ( parser!=null ) parser.getErrorHandler().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, configs);
        if ( parser!=null ) parser.getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, configs);
    }

    public void reportContextSensitivity(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
        if ( debug || retry_debug ) {
            Interval interval = Interval.of(startIndex, stopIndex);
            System.out.println("reportContextSensitivity decision="+dfa.decision+":"+configs+
                               ", input="+parser.getInputString(startIndex, stopIndex));
                               ", input="+parser.getTokenStream().getText(interval));
        }
        if ( parser!=null ) parser.getErrorHandler().reportContextSensitivity(parser, dfa, startIndex, stopIndex, configs);
        if ( parser!=null ) parser.getErrorListenerDispatch().reportContextSensitivity(parser, dfa, startIndex, stopIndex, configs);
    }

    /** If context sensitive parsing, we know it's ambiguity not conflict */

@@ -1441,11 +1445,12 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
//            }
//            i++;
//        }
            Interval interval = Interval.of(startIndex, stopIndex);
            System.out.println("reportAmbiguity "+
                               ambigAlts+":"+configs+
                               ", input="+parser.getInputString(startIndex, stopIndex));
                               ", input="+parser.getTokenStream().getText(interval));
        }
        if ( parser!=null ) parser.getErrorHandler().reportAmbiguity(parser, dfa, startIndex, stopIndex,
        if ( parser!=null ) parser.getErrorListenerDispatch().reportAmbiguity(parser, dfa, startIndex, stopIndex,
                                                                     ambigAlts, configs);
    }
}
@@ -91,7 +91,13 @@ public abstract class Transition {
    @NotNull
    public ATNState target;

    protected Transition(@NotNull ATNState target) { this.target = target; }
    protected Transition(@NotNull ATNState target) {
        if (target == null) {
            throw new NullPointerException("target cannot be null.");
        }

        this.target = target;
    }

    public int getSerializationType() { return 0; }
@@ -49,11 +49,10 @@ public class Interval {
    /** Interval objects are used readonly so share all with the
     *  same single value a==b up to some max size.  Use an array as a perfect hash.
     *  Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new
     *  Interval object with a..a in it.  On Java.g, 218623 IntervalSets
     *  Interval object with a..a in it.  On Java.g4, 218623 IntervalSets
     *  have a..a (set with 1 element).
     */
    public static Interval create(int a, int b) {
        //return new Interval(a,b);
    public static Interval of(int a, int b) {
        // cache just a..a
        if ( a!=b || a<0 || a>INTERVAL_POOL_MAX_VALUE ) {
            return new Interval(a,b);

@@ -64,6 +63,14 @@ public class Interval {
        return cache[a];
    }

    /** return number of elements between a and b inclusively. x..x is length 1.
     *  if b < a, then length is 0. 9..10 has length 2.
     */
    public int length() {
        if ( b<a ) return 0;
        return b-a+1;
    }

    @Override
    public boolean equals(Object o) {
        if ( o==null ) {

@@ -112,12 +119,12 @@ public class Interval {

    /** Return the interval computed from combining this and other */
    public Interval union(Interval other) {
        return Interval.create(Math.min(a,other.a), Math.max(b,other.b));
        return Interval.of(Math.min(a, other.a), Math.max(b, other.b));
    }

    /** Return the interval in common between this and o */
    public Interval intersection(Interval other) {
        return Interval.create(Math.max(a,other.a), Math.min(b,other.b));
        return Interval.of(Math.max(a, other.a), Math.min(b, other.b));
    }

    /** Return the interval with elements from this not in other;

@@ -129,13 +136,13 @@ public class Interval {
        Interval diff = null;
        // other.a to left of this.a (or same)
        if ( other.startsBeforeNonDisjoint(this) ) {
            diff = Interval.create(Math.max(this.a,other.b+1),
                               this.b);
            diff = Interval.of(Math.max(this.a, other.b + 1),
                               this.b);
        }
        // other.a to right of this.a
        else if ( other.startsAfterNonDisjoint(this) ) {
            diff = Interval.create(this.a, other.a-1);
            diff = Interval.of(this.a, other.a - 1);
        }
        return diff;
    }
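A quick sketch (not part of the commit) exercising the renamed factory and the new length() method, grounded in the doc comments above:

    assert Interval.of(3, 7).union(Interval.of(6, 10)).equals(Interval.of(3, 10));
    assert Interval.of(9, 10).length() == 2; // "9..10 has length 2"
    assert Interval.of(10, 9).length() == 0; // b < a
    // Small a..a intervals come out of the shared pool rather than being reallocated.
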
@@ -28,9 +28,15 @@
 */
package org.antlr.v4.runtime.misc;

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;

import java.util.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Set;

/** A set of integers that relies on ranges being common to do
 *  "run-length-encoded" like compression (if you view an IntSet like

@@ -111,7 +117,7 @@ public class IntervalSet implements IntSet {
     *  {1..5, 6..7, 10..20}.  Adding 4..8 yields {1..8, 10..20}.
     */
    public void add(int a, int b) {
        add(Interval.create(a,b));
        add(Interval.of(a, b));
    }

    // copy on write so we can cache a..a intervals and sets of that
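An example (not from the commit) of the documented merge behavior, now routed through Interval.of:

    IntervalSet s = ...;  // holds {1..5, 10..20}
    s.add(6, 7);          // {1..5, 6..7, 10..20} per the doc comment above
    s.add(4, 8);          // {1..8, 10..20}: overlapping ranges combine
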
@@ -29,7 +29,15 @@

package org.antlr.v4.runtime.misc;

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DiagnosticErrorListener;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;

import java.io.FileInputStream;
import java.io.InputStream;

@@ -45,9 +53,13 @@ import java.lang.reflect.Method;
 *  [-print]
 *  [-tokens] [-gui] [-ps file.ps]
 *  [-trace]
 *  [-diagnostics]
 *  [input-filename]
 */
public class TestRig {

    public static final String LEXER_START_RULE_NAME = "tokens";

    public static void main(String[] args) throws Exception {
        String grammarName;
        String startRuleName;

@@ -57,11 +69,15 @@ public class TestRig {
        String psFile = null;
        boolean showTokens = false;
        boolean trace = false;
        boolean diagnostics = false;
        String encoding = null;
        if ( args.length < 2 ) {
            System.err.println("java org.antlr.v4.runtime.misc.TestRig GrammarName startRuleName" +
                               " [-tokens] [-print] [-gui] [-ps file.ps] [-encoding encodingname] [-trace]"+
                               " [input-filename]");
            System.err.println("java org.antlr.v4.runtime.misc.TestRig GrammarName startRuleName\n" +
                               "  [-tokens] [-print] [-gui] [-ps file.ps] [-encoding encodingname]\n" +
                               "  [-trace] [-diagnostics]\n"+
                               "  [input-filename]");
            System.err.println("Use startRuleName='tokens' if GrammarName is a lexer grammar.");
            System.err.println("Omitting input-filename makes rig read from stdin.");
            return;
        }
        int i=0;

@@ -88,6 +104,9 @@ public class TestRig {
        else if ( arg.equals("-trace") ) {
            trace = true;
        }
        else if ( arg.equals("-diagnostics") ) {
            diagnostics = true;
        }
        else if ( arg.equals("-encoding") ) {
            if ( i>=args.length ) {
                System.err.println("missing encoding on -encoding");

@@ -107,16 +126,11 @@ public class TestRig {
        }
//        System.out.println("exec "+grammarName+"."+startRuleName);
        String lexerName = grammarName+"Lexer";
        String parserName = grammarName+"Parser";
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        Class lexerClass = cl.loadClass(lexerName);
        if ( lexerClass==null ) {
            System.err.println("Can't load "+lexerName);
        }
        Class parserClass = cl.loadClass(parserName);
        if ( parserClass==null ) {
            System.err.println("Can't load "+parserName);
        }

        InputStream is = System.in;
        if ( inputFile!=null ) {

@@ -144,10 +158,17 @@ public class TestRig {
            }
        }

        if ( startRuleName.equals(LEXER_START_RULE_NAME) ) return;

        String parserName = grammarName+"Parser";
        Class parserClass = cl.loadClass(parserName);
        if ( parserClass==null ) {
            System.err.println("Can't load "+parserName);
        }
        Constructor<Parser> parserCtor = parserClass.getConstructor(TokenStream.class);
        Parser parser = parserCtor.newInstance(tokens);

        parser.setErrorHandler(new DiagnosticErrorStrategy());
        if ( diagnostics ) parser.addErrorListener(new DiagnosticErrorListener());

        if ( printTree || gui || psFile!=null ) {
            parser.setBuildParseTree(true);
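With the new flag wired in above, a typical run looks like this (sketch; the grammar name and input file are placeholders):

    $ java org.antlr.v4.runtime.misc.TestRig T s -diagnostics input.txt

This installs a DiagnosticErrorListener so ambiguity, full-context retry, and context-sensitivity reports from the ATN simulator are printed while rule s parses.
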
@@ -84,6 +84,9 @@ public interface ParseTree extends SyntaxTree {
            return visitor.visitTerminal(this);
        }

        @Override
        public String getText() { return symbol.getText(); }

        @Override
        public String toStringTree(Parser parser) {
            return toString();

@@ -135,6 +138,12 @@ public interface ParseTree extends SyntaxTree {
    /** The ParseTreeVisitor needs a double dispatch method */
    public <T> T accept(ParseTreeVisitor<? extends T> visitor);

    /** Return the combined text of all leaf nodes. Does not get any
     *  off-channel tokens (if any) so won't return whitespace and
     *  comments if they are sent to parser on hidden channel.
     */
    public String getText();

    /** Specialize toStringTree so that it can print out more information
     *  based upon the parser.
     */

@@ -41,7 +41,7 @@ public interface SyntaxTree extends Tree {
     *  node is a leaf, then the interval represents a single token.
     *
     *  If source interval is unknown, this does not return null.
     *  It returns an interval of length 0.
     *  It returns Interval.INVALID.
     */
    Interval getSourceInterval();
}
@@ -1,6 +1,8 @@
grammar T;
s : f f EOF;
f : | x;
x : 'a' 'b';
s : e ';' ;
e : e '*' e
  | ID
  | INT
  ;
INT : '0'..'9'+;
WS : (' '|'\n') {skip();} ;
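// A note on the revised grammar (not from the commit): as written,
// e : e '*' e | ID | INT is the classic ambiguous binary-operator shape.
// ANTLR's left-recursion transformation (see the recRule template change
// later in this diff) rewrites it with precedence arguments, so input such as
//
//   a * b * 2 ;
//
// parses deterministically. Also note ID is referenced but never defined by a
// lexer rule, exactly the case the new IMPLICIT_TOKEN_DEFINITION warning
// (code 125, later in this diff) flags.
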
@@ -7,7 +7,7 @@ import org.antlr.v4.runtime.atn.ParserATNSimulator;
import java.io.File;

/** Parse a java file or directory of java files using the generated parser
 *  ANTLR builds from java.g
 *  ANTLR builds from java.g4
 */
class TestJava {
    public static long lexerTime = 0;
@@ -29,7 +29,7 @@

import org.antlr.v4.runtime.ANTLRFileStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DiagnosticErrorStrategy;
import org.antlr.v4.runtime.DiagnosticErrorListener;

public class TestR {
    public static void main(String[] args) throws Exception {

@@ -41,7 +41,7 @@ public class TestR {
//        }
        RParser p = new RParser(tokens);
        p.setBuildParseTree(true);
        p.setErrorHandler(new DiagnosticErrorStrategy());
        p.addErrorListener(new DiagnosticErrorListener());
        p.prog();
    }
}
@@ -7,7 +7,7 @@ import org.antlr.v4.runtime.atn.ParserATNSimulator;
import java.io.File;

/** Parse a java file or directory of java files using the generated parser
 *  ANTLR builds from java.g
 *  ANTLR builds from java.g4
 */
class TestYang {
    public static long lexerTime = 0;
@@ -37,7 +37,7 @@ recRule(ruleName, precArgDef, argName, primaryAlts, opAlts, setResultAction,
        userRetvals, leftRecursiveRuleRefLabels) ::=
<<
<ruleName>[<precArgDef>]<if(userRetvals)> returns [<userRetvals>]<endif>
    :   ( <primaryAlts:{alt | <alt.altText> }; separator="\n    | ">
    :   ( {} <primaryAlts:{alt | <alt.altText> }; separator="\n    | ">
        )
        ( <opAlts; separator="\n      | ">
        )*
@@ -117,7 +117,7 @@ Parser(parser, funcs, atn, sempredFuncs, superclass) ::= <<

Parser_(parser, funcs, atn, sempredFuncs, ctor, extras, superclass) ::= <<
@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
public class <parser.name> extends <superclass> {
public <if(parser.abstractRecognizer)>abstract <endif>class <parser.name> extends <superclass> {
    <if(parser.tokens)>
    public static final int
        <parser.tokens:{k | <k>=<parser.tokens.(k)>}; separator=", ", wrap, anchor>;

@@ -229,11 +229,11 @@ RuleFunction(currentRule,code,locals,ruleCtx,altLabelCtxs,namedActions,finallyAc
        int _alt;
        <endif>
        <code>
        _localctx.stop = _input.LT(-1);
        <postamble; separator="\n">
        <namedActions.after>
    }
    catch (RecognitionException re) {
        _localctx.exception = re;
        _errHandler.reportError(this, re);
        _errHandler.recover(this, re);
    }

@@ -265,11 +265,11 @@ LeftRecursiveRuleFunction(currentRule,code,locals,ruleCtx,altLabelCtxs,
        int _alt;
        <endif>
        <code>
        _localctx.stop = _input.LT(-1);
        <postamble; separator="\n">
        <namedActions.after>
    }
    catch (RecognitionException re) {
        _localctx.exception = re;
        _errHandler.reportError(this, re);
        _errHandler.recover(this, re);
    }

@@ -538,12 +538,12 @@ TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label>!=null?Integer.valueOf(<ctx(t)>.

RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>!=null?(<ctx(r)>.<r.label>.start):null)"
RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>!=null?(<ctx(r)>.<r.label>.stop):null)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>!=null?_input.toString(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop):null)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>!=null?_input.getText(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop):null)"
RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"

ThisRulePropertyRef_start(r) ::= "_localctx.start"
ThisRulePropertyRef_stop(r) ::= "_localctx.stop"
ThisRulePropertyRef_text(r) ::= "_input.toString(_localctx.start, _input.LT(-1))"
ThisRulePropertyRef_text(r) ::= "_input.getText(_localctx.start, _input.LT(-1))"
ThisRulePropertyRef_ctx(r) ::= "_localctx"

NonLocalAttrRef(s) ::= "((<s.ruleName; format=\"cap\">Context)getInvokingContext(<s.ruleIndex>)).<s.name>"

@@ -608,6 +608,7 @@ public static class <struct.name> extends <superClass><if(interfaces)> implement
        super(parent, state);
        <struct.ctorAttrs:{a | this.<a.name> = <a.name>;}; separator="\n">
    }
    @Override public int getRuleIndex() { return RULE_<struct.derivedFromName>; }
    <if(struct.provideCopyFrom)> <! don't need copy unless we have subclasses !>
    public <struct.name>() { }
    public void copyFrom(<struct.name> ctx) {

@@ -638,7 +639,7 @@ public void <if(method.isEnter)>enter<else>exit<endif>Rule(ParseTreeListener\<<I

VisitorDispatchMethod(method) ::= <<
@Override
public \<T> T accept(ParseTreeVisitor\<? extends T> visitor) {
public \<T> T accept(ParseTreeVisitor\<? extends T> visitor) {
    if ( visitor instanceof <parser.grammarName>Visitor ) return ((<parser.grammarName>Visitor\<T>)visitor).visit<struct.derivedFromName; format="cap">(this);
    else return null;
}

@@ -713,7 +714,7 @@ import org.antlr.v4.runtime.misc.*;

Lexer(lexer, atn, actionFuncs, sempredFuncs) ::= <<
@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
public class <lexer.name> extends Lexer {
public <if(lexer.abstractRecognizer)>abstract <endif>class <lexer.name> extends Lexer {
    public static final int
        <lexer.tokens:{k | <k>=<lexer.tokens.(k)>}; separator=", ", wrap, anchor>;
    <rest(lexer.modes):{m| public static final int <m> = <i>;}; separator="\n">
@@ -70,13 +70,21 @@ import java.io.StringWriter;
import java.io.Writer;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class Tool {
    public String VERSION = "4.0-"+new Date();

    public static final String GRAMMAR_EXTENSION = ".g4";
    public static final String LEGACY_GRAMMAR_EXTENSION = ".g";

    public static final List<String> ALL_GRAMMAR_EXTENSIONS =
        Collections.unmodifiableList(Arrays.asList(GRAMMAR_EXTENSION, LEGACY_GRAMMAR_EXTENSION));

    public static enum OptionArgType { NONE, STRING } // NONE implies boolean
    public static class Option {
        String fieldName;

@@ -116,6 +124,7 @@ public class Tool {
    public boolean gen_listener = true;
    public boolean gen_parse_listener = false;
    public boolean gen_visitor = false;
    public boolean abstract_recognizer = false;

    public static Option[] optionDefs = {
        new Option("outputDirectory", "-o", OptionArgType.STRING, "specify output directory where all output is generated"),

@@ -133,6 +142,7 @@ public class Tool {
        new Option("gen_parse_listener", "-no-parse-listener", "don't generate parse listener (default)"),
        new Option("gen_visitor", "-visitor", "generate parse tree visitor"),
        new Option("gen_visitor", "-no-visitor", "don't generate parse tree visitor (default)"),
        new Option("abstract_recognizer", "-abstract", "generate abstract recognizer classes"),

        new Option("saveLexer", "-Xsave-lexer", "save temp lexer file created for combined grammars"),
        new Option("launch_ST_inspector", "-XdbgST", "launch StringTemplate visualizer on generated code"),

@@ -157,8 +167,7 @@ public class Tool {
    public ErrorManager errMgr = new ErrorManager(this);
    public LogManager logMgr = new LogManager();

    List<ANTLRToolListener> listeners =
        Collections.synchronizedList(new ArrayList<ANTLRToolListener>());
    List<ANTLRToolListener> listeners = new CopyOnWriteArrayList<ANTLRToolListener>();

    /** Track separately so if someone adds a listener, it's the only one
     *  instead of it and the default stderr listener.

@@ -382,16 +391,31 @@ public class Tool {
        return null;
    }

    /** Try current dir then dir of g then lib dir */
    public GrammarRootAST loadImportedGrammar(Grammar g, String fileName) throws IOException {
        g.tool.log("grammar", "load "+fileName + " from " + g.fileName);
        File importedFile = getImportedGrammarFile(g, fileName);
    /**
     * Try current dir then dir of g then lib dir
     * @param g
     * @param name The imported grammar name.
     */
    public Grammar loadImportedGrammar(Grammar g, String name) throws IOException {
        g.tool.log("grammar", "load " + name + " from " + g.fileName);
        File importedFile = null;
        for (String extension : ALL_GRAMMAR_EXTENSIONS) {
            importedFile = getImportedGrammarFile(g, name + extension);
            if (importedFile != null) {
                break;
            }
        }

        if ( importedFile==null ) {
            errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_FILE, fileName, g.fileName);
            errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, name, g.fileName);
            return null;
        }

        ANTLRFileStream in = new ANTLRFileStream(importedFile.getAbsolutePath());
        return load(in);
        GrammarRootAST root = load(in);
        Grammar imported = createGrammar(root);
        imported.fileName = importedFile.getAbsolutePath();
        return imported;
    }

|
||||
|
@ -447,13 +471,13 @@ public class Tool {
|
|||
* files. If the outputDir set by -o is not present it will be created.
|
||||
* The final filename is sensitive to the output directory and
|
||||
* the directory where the grammar file was found. If -o is /tmp
|
||||
* and the original grammar file was foo/t.g then output files
|
||||
* and the original grammar file was foo/t.g4 then output files
|
||||
* go in /tmp/foo.
|
||||
*
|
||||
* The output dir -o spec takes precedence if it's absolute.
|
||||
* E.g., if the grammar file dir is absolute the output dir is given
|
||||
* precendence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
|
||||
* output (assuming t.g holds T.java).
|
||||
* precendence. "-o /tmp /usr/lib/t.g4" results in "/tmp/T.java" as
|
||||
* output (assuming t.g4 holds T.java).
|
||||
*
|
||||
* If no -o is specified, then just write to the directory where the
|
||||
* grammar file was found.
|
||||
|
@ -465,7 +489,7 @@ public class Tool {
|
|||
return new StringWriter();
|
||||
}
|
||||
// output directory is a function of where the grammar file lives
|
||||
// for subdir/T.g, you get subdir here. Well, depends on -o etc...
|
||||
// for subdir/T.g4, you get subdir here. Well, depends on -o etc...
|
||||
// But, if this is a .tokens file, then we force the output to
|
||||
// be the base output directory (or current directory if there is not a -o)
|
||||
//
|
||||
|
@ -531,9 +555,9 @@ public class Tool {
|
|||
fileDirectory = fileNameWithPath.substring(0, fileNameWithPath.lastIndexOf(File.separatorChar));
|
||||
}
|
||||
if ( haveOutputDir ) {
|
||||
// -o /tmp /var/lib/t.g => /tmp/T.java
|
||||
// -o subdir/output /usr/lib/t.g => subdir/output/T.java
|
||||
// -o . /usr/lib/t.g => ./T.java
|
||||
// -o /tmp /var/lib/t.g4 => /tmp/T.java
|
||||
// -o subdir/output /usr/lib/t.g4 => subdir/output/T.java
|
||||
// -o . /usr/lib/t.g4 => ./T.java
|
||||
if (fileDirectory != null &&
|
||||
(new File(fileDirectory).isAbsolute() ||
|
||||
fileDirectory.startsWith("~"))) { // isAbsolute doesn't count this :(
|
||||
|
@ -541,7 +565,7 @@ public class Tool {
|
|||
outputDir = new File(outputDirectory);
|
||||
}
|
||||
else {
|
||||
// -o /tmp subdir/t.g => /tmp/subdir/t.g
|
||||
// -o /tmp subdir/t.g4 => /tmp/subdir/t.g4
|
||||
if (fileDirectory != null) {
|
||||
outputDir = new File(outputDirectory, fileDirectory);
|
||||
}
|
||||
|
|
|
@@ -133,6 +133,11 @@ public class ATNSerializer {
            if ( s==null ) continue; // might be optimized away
            for (int i=0; i<s.getNumberOfTransitions(); i++) {
                Transition t = s.transition(i);

                if (atn.states.get(t.target.stateNumber) == null) {
                    throw new IllegalStateException("Cannot serialize a transition to a removed state.");
                }

                int src = s.stateNumber;
                int trg = t.target.stateNumber;
                int edgeType = Transition.serializationTypes.get(t.getClass());
@@ -257,7 +257,7 @@ public class CodeGenerator {
        }
    }

    /** Generate TParser.java and TLexer.java from T.g if combined, else
    /** Generate TParser.java and TLexer.java from T.g4 if combined, else
     *  just use T.java as output regardless of type.
     */
    public String getRecognizerFileName() {
@@ -49,6 +49,7 @@ public class Lexer extends OutputModelObject {
    public String[] tokenNames;
    public Set<String> ruleNames;
    public Collection<String> modes;
    public boolean abstractRecognizer;

    @ModelElement public SerializedATN atn;
    @ModelElement public LinkedHashMap<Rule, RuleActionFunction> actionFuncs =

@@ -89,6 +90,7 @@ public class Lexer extends OutputModelObject {
            }
        }
        ruleNames = g.rules.keySet();
        abstractRecognizer = g.isAbstract();
    }

}
@@ -47,6 +47,7 @@ public class Parser extends OutputModelObject {
    public Set<String> ruleNames;
    public Collection<Rule> rules;
    public ParserFile file;
    public boolean abstractRecognizer;

    @ModelElement public List<RuleFunction> funcs = new ArrayList<RuleFunction>();
    @ModelElement public SerializedATN atn;

@@ -89,5 +90,7 @@ public class Parser extends OutputModelObject {
        } else {
            superclass = new DefaultParserSuperClass();
        }

        abstractRecognizer = g.isAbstract();
    }
}
@@ -128,17 +128,18 @@ outerAlternative returns [boolean isLeftRec]
      {otherAlt((AltAST)$start, currentOuterAltNumber);}
    ;

binary
    : ^( ALT recurse (op=token)+ {setTokenPrec($op.t, currentOuterAltNumber);} recurse ACTION? )
// (ALT (= a e) (= op (SET '*' '/')) (= b e) {}) (ALT INT {}) (ALT '(' (= x e) ')' {})
binaryMultipleOp
    : ^( ALT recurse bops recurse ACTION? )
    ;

binaryMultipleOp
    : ^( ALT recurse
         ( ^( BLOCK ( ^( ALT (op=token)+ {setTokenPrec($op.t, currentOuterAltNumber);} ) )+ )
         | ^(SET (op=token)+ {setTokenPrec($op.t, currentOuterAltNumber);})
         )
         recurse ACTION?
       )
bops: ^(ASSIGN ID bops)
    | ^( BLOCK ( ^( ALT (op=token)+ {setTokenPrec($op.t, currentOuterAltNumber);} ) )+ )
    | ^(SET (op=token)+ {setTokenPrec($op.t, currentOuterAltNumber);})
    ;

binary
    : ^( ALT recurse (op=token)+ {setTokenPrec($op.t, currentOuterAltNumber);} recurse ACTION? )
    ;

ternary

@@ -222,4 +223,4 @@ atom
    | ^(WILDCARD elementOptions)
    | WILDCARD
    | ^(DOT ID element)
    ;
    ;
@@ -137,7 +137,7 @@ public class TokenVocabParser {
    }

    /** Return a File descriptor for vocab file.  Look in library or
     *  in -o output path.  antlr -o foo T.g U.g where U needs T.tokens
     *  in -o output path.  antlr -o foo T.g4 U.g4 where U needs T.tokens
     *  won't work unless we look in foo too. If we do not find the
     *  file in the lib directory then must assume that the .tokens file
     *  is going to be generated as part of this build and we have defined
@@ -74,6 +74,7 @@ public class BasicSemanticChecks extends GrammarTreeVisitor {
            add("TokenLabelType");
            add("superClass");
            add("filter");
            add("abstract");
        }
    };

@@ -83,6 +84,7 @@ public class BasicSemanticChecks extends GrammarTreeVisitor {
            add("tokenVocab");
            add("TokenLabelType");
            add("superClass");
            add("abstract");
        }
    };

@@ -31,6 +31,7 @@ package org.antlr.v4.semantics;

import org.antlr.v4.analysis.LeftRecursiveRuleTransformer;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.*;
import org.antlr.v4.tool.ast.GrammarAST;

@@ -116,7 +117,7 @@ public class SemanticPipeline {
        }
        else {
            assignTokenTypes(g, collector.tokensDefs,
                             collector.tokenIDRefs, collector.strings);
                             collector.tokenIDRefs, collector.terminals);
        }

        // CHECK RULE REFS NOW (that we've defined rules in grammar)

@@ -163,7 +164,7 @@ public class SemanticPipeline {
    }

    void assignTokenTypes(Grammar g, List<GrammarAST> tokensDefs,
                          List<GrammarAST> tokenIDs, Set<String> strings)
                          List<GrammarAST> tokenIDs, List<GrammarAST> terminals)
    {
        //Grammar G = g.getOutermostGrammar(); // put in root, even if imported

@@ -174,6 +175,9 @@ public class SemanticPipeline {
                String lit = alias.getChild(1).getText();
                g.defineTokenAlias(name, lit);
            }
            else {
                g.defineTokenName(alias.getText());
            }
        }

        // DEFINE TOKEN TYPES FOR X : 'x' ; RULES

@@ -187,10 +191,25 @@ public class SemanticPipeline {
         */

        // DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
        for (GrammarAST idAST : tokenIDs) { g.defineTokenName(idAST.getText()); }
        for (GrammarAST idAST : tokenIDs) {
            if (g.getTokenType(idAST.getText()) == Token.INVALID_TYPE) {
                g.tool.errMgr.grammarError(ErrorType.IMPLICIT_TOKEN_DEFINITION, g.fileName, idAST.token, idAST.getText());
            }

            g.defineTokenName(idAST.getText());
        }

        // VERIFY TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
        for (GrammarAST termAST : terminals) {
            if (termAST.getType() != ANTLRParser.STRING_LITERAL) {
                continue;
            }

            if (g.getTokenType(termAST.getText()) == Token.INVALID_TYPE) {
                g.tool.errMgr.grammarError(ErrorType.IMPLICIT_STRING_DEFINITION, g.fileName, termAST.token, termAST.getText());
            }
        }

        // DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
        for (String s : strings) { g.defineStringLiteral(s); }
        g.tool.log("semantics", "tokens="+g.tokenNameToTypeMap);
        g.tool.log("semantics", "strings="+g.stringLiteralToTypeMap);
    }
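An illustration (not from the commit) of what the two new checks above flag. Given a parser-only grammar such as:

    parser grammar P;
    s : ID ';' ;   // ID has no lexer rule or tokens{} entry

the token-ref loop issues warning 125 (implicit definition of token ID in parser), and the ';' literal hits error 126 (cannot create implicit token for string literal in non-combined grammar), matching the new ErrorType entries later in this diff.
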
@@ -131,7 +131,7 @@ public class ErrorManager {
            locationValid = true;
        }

        messageFormatST.add("id", msg.errorType.ordinal());
        messageFormatST.add("id", msg.errorType.code);
        messageFormatST.add("text", messageST);

        if (locationValid) reportST.add("location", locationST);
@@ -129,7 +129,7 @@ public enum ErrorType {
    //TOKEN_VOCAB_IN_DELEGATE(, "tokenVocab option ignored in imported grammar <arg>", ErrorSeverity.ERROR),
    OPTIONS_IN_DELEGATE(109, "options ignored in imported grammar <arg>", ErrorSeverity.WARNING),
    // TOKEN_ALIAS_IN_DELEGATE(, "can't assign string to token name <arg> to string in imported grammar <arg2>", ErrorSeverity.ERROR),
    CANNOT_FIND_IMPORTED_FILE(110, "can't find or load grammar <arg> from <arg2>", ErrorSeverity.ERROR),
    CANNOT_FIND_IMPORTED_GRAMMAR(110, "can't find or load grammar <arg> from <arg2>", ErrorSeverity.ERROR),
    INVALID_IMPORT(111, "<arg.typeString> grammar <arg.name> cannot import <arg2.typeString> grammar <arg2.name>", ErrorSeverity.ERROR),
    IMPORTED_TOKENS_RULE_EMPTY(112, "", ErrorSeverity.ERROR),
    IMPORT_NAME_CLASH(113, "<arg.typeString> grammar <arg.name> and imported <arg2.typeString> grammar <arg2.name> both generate <arg2.recognizerName>", ErrorSeverity.ERROR),

@@ -144,12 +144,15 @@ public enum ErrorType {
    RULE_WITH_TOO_FEW_ALT_LABELS(122, "rule <arg>: must label all alternatives or none", ErrorSeverity.ERROR),
    ALT_LABEL_REDEF(123, "rule alt label <arg> redefined in rule <arg2>, originally in <arg3>", ErrorSeverity.ERROR),
    ALT_LABEL_CONFLICTS_WITH_RULE(124, "rule alt label <arg> conflicts with rule <arg2>", ErrorSeverity.ERROR),
    IMPLICIT_TOKEN_DEFINITION(125, "implicit definition of token <arg> in parser", ErrorSeverity.WARNING),
    IMPLICIT_STRING_DEFINITION(126, "cannot create implicit token for string literal <arg> in non-combined grammar", ErrorSeverity.ERROR),

    /** Documentation comment is unterminated */
    //UNTERMINATED_DOC_COMMENT(, "", ErrorSeverity.ERROR),

    // Dependency sorting errors
    //
    /** t1.g -> t2.g -> t3.g -> t1.g */
    /** t1.g4 -> t2.g4 -> t3.g4 -> t1.g4 */
    CIRCULAR_DEPENDENCY(130, "your grammars contain a circular dependency and cannot be sorted into a valid build order", ErrorSeverity.ERROR),

    // Simple informational messages

@@ -185,16 +188,18 @@ public enum ErrorType {

    ;

    public String msg;
    public int code; // unique, deterministic unchanging error code once we release
    public ErrorSeverity severity;
    public Boolean abortsAnalysis;
    public Boolean abortsCodegen;
    public final String msg;
    public final int code; // unique, deterministic unchanging error code once we release
    public final ErrorSeverity severity;
    public final Boolean abortsAnalysis;
    public final Boolean abortsCodegen;

    ErrorType(int code, String msg, ErrorSeverity severity) {
        this.code = code;
        this.msg = msg;
        this.severity = severity;
        this.abortsAnalysis = false;
        this.abortsCodegen = false;
    }

//    ErrorType(String msg, ErrorSeverity severity, boolean abortsAnalysis) {
@@ -49,11 +49,24 @@ import org.antlr.v4.runtime.misc.IntSet;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.tool.ast.*;
import org.antlr.v4.tool.ast.ActionAST;
import org.antlr.v4.tool.ast.GrammarAST;
import org.antlr.v4.tool.ast.GrammarASTErrorNode;
import org.antlr.v4.tool.ast.GrammarASTWithOptions;
import org.antlr.v4.tool.ast.GrammarRootAST;
import org.antlr.v4.tool.ast.PredAST;
import org.antlr.v4.tool.ast.TerminalAST;

import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class Grammar implements AttributeResolver {
    public static final String GRAMMAR_FROM_STRING_NAME = "<string>";

@@ -232,20 +245,17 @@ public class Grammar implements AttributeResolver {
            importedGrammarName = t.getText();
            tool.log("grammar", "import " + t.getText());
        }
        GrammarAST grammarAST = null;
        Grammar g;
        try {
            grammarAST = tool.loadImportedGrammar(this, importedGrammarName + ".g");
            g = tool.loadImportedGrammar(this, importedGrammarName);
        }
        catch (IOException ioe) {
            tool.errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_FILE, ioe, importedGrammarName+".g");
            tool.errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, ioe,
                                  importedGrammarName);
            continue;
        }
        // did it come back as error node or missing?
        if ( grammarAST==null || grammarAST instanceof GrammarASTErrorNode) return;
        GrammarRootAST ast = (GrammarRootAST)grammarAST;
        Grammar g = tool.createGrammar(ast);
        File f = tool.getImportedGrammarFile(this, importedGrammarName+".g");
        g.fileName = f.getAbsolutePath();
        if ( g == null ) continue;
        g.parent = this;
        importedGrammars.add(g);
        g.loadImportedGrammars(); // recursively pursue any imports in this import

@@ -362,6 +372,11 @@ public class Grammar implements AttributeResolver {
        return parent.getOutermostGrammar();
    }

    public boolean isAbstract() {
        return Boolean.parseBoolean(getOptionString("abstract"))
            || (tool != null && tool.abstract_recognizer);
    }

    /** Get the name of the generated recognizer; may or may not be same
     *  as grammar name.
     *  Recognizer is TParser and TLexer from T if combined, else

@@ -377,9 +392,16 @@ public class Grammar implements AttributeResolver {
            buf.append(g.name);
            buf.append('_');
        }
        if (isAbstract()) {
            buf.append("Abstract");
        }
        buf.append(name);
        qualifiedName = buf.toString();
    }
    else if (isAbstract()) {
        qualifiedName = "Abstract" + name;
    }

    if ( isCombined() || (isLexer() && implicitLexer!=null) )
    {
        suffix = Grammar.getGrammarTypeToFileNameSuffix(getType());
@@ -317,7 +317,14 @@ public class GrammarTransformPipeline {
            (GrammarAST)adaptor.create(ANTLRParser.RULES, "RULES");
        lexerAST.addChild(lexerRulesRoot);
        List<GrammarAST> rulesWeMoved = new ArrayList<GrammarAST>();
        GrammarASTWithOptions[] rules = ((List<?>)combinedRulesRoot.getChildren()).toArray(new GrammarASTWithOptions[0]);
        GrammarASTWithOptions[] rules;
        if (combinedRulesRoot.getChildCount() > 0) {
            rules = ((List<?>)combinedRulesRoot.getChildren()).toArray(new GrammarASTWithOptions[0]);
        }
        else {
            rules = new GrammarASTWithOptions[0];
        }

        if ( rules!=null ) {
            for (GrammarASTWithOptions r : rules) {
                String ruleName = r.getChild(0).getText();
@@ -30,8 +30,14 @@
 package org.antlr.v4.tool.interp;

 import org.antlr.v4.Tool;
-import org.antlr.v4.runtime.*;
-import org.antlr.v4.runtime.atn.*;
+import org.antlr.v4.runtime.Parser;
+import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.atn.ATN;
+import org.antlr.v4.runtime.atn.ATNState;
+import org.antlr.v4.runtime.atn.DecisionState;
+import org.antlr.v4.runtime.atn.ParserATNSimulator;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.misc.NotNull;
 import org.antlr.v4.runtime.misc.Nullable;
@@ -80,14 +86,14 @@ public class ParserInterpreter {
         atnSimulator = new ParserATNSimulator<Token>(new DummyParser(g, input), g.atn);
     }

-    public int predictATN(@NotNull DFA dfa, @NotNull SymbolStream<Token> input,
+    public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
                           @Nullable ParserRuleContext outerContext,
                           boolean useContext)
     {
         return atnSimulator.predictATN(dfa, input, outerContext);
     }

-    public int adaptivePredict(@NotNull SymbolStream<Token> input, int decision,
+    public int adaptivePredict(@NotNull TokenStream input, int decision,
                                @Nullable ParserRuleContext outerContext)
     {
         return atnSimulator.adaptivePredict(input, decision, outerContext);
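With SymbolStream<Token> dropped from the interpreter's entry points, any TokenStream can drive prediction. A minimal, hedged usage sketch (lexer, interp, and decision number 0 are placeholders, not from this commit):

    TokenStream tokens = new CommonTokenStream(lexer);  // lexer assumed built elsewhere
    int alt = interp.adaptivePredict(tokens, 0, null);  // decision 0, no outer context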
@@ -35,16 +35,31 @@ import org.antlr.v4.automata.ATNPrinter;
 import org.antlr.v4.automata.LexerATNFactory;
 import org.antlr.v4.automata.ParserATNFactory;
 import org.antlr.v4.codegen.CodeGenerator;
 import org.antlr.v4.misc.Utils;
-import org.antlr.v4.runtime.*;
+import org.antlr.v4.runtime.ANTLRInputStream;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.CommonToken;
+import org.antlr.v4.runtime.CommonTokenStream;
+import org.antlr.v4.runtime.Lexer;
+import org.antlr.v4.runtime.RuleContext;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenSource;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.WritableToken;
 import org.antlr.v4.runtime.atn.ATN;
 import org.antlr.v4.runtime.atn.ATNState;
 import org.antlr.v4.runtime.atn.DecisionState;
 import org.antlr.v4.runtime.atn.LexerATNSimulator;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.misc.Interval;
 import org.antlr.v4.runtime.misc.Nullable;
 import org.antlr.v4.semantics.SemanticPipeline;
-import org.antlr.v4.tool.*;
+import org.antlr.v4.tool.ANTLRMessage;
+import org.antlr.v4.tool.DefaultToolListener;
+import org.antlr.v4.tool.DOTGenerator;
+import org.antlr.v4.tool.Grammar;
+import org.antlr.v4.tool.GrammarSemanticsMessage;
+import org.antlr.v4.tool.LexerGrammar;
+import org.antlr.v4.tool.Rule;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -56,13 +71,29 @@ import javax.tools.JavaCompiler;
 import javax.tools.JavaFileObject;
 import javax.tools.StandardJavaFileManager;
 import javax.tools.ToolProvider;
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.PipedInputStream;
+import java.io.PipedOutputStream;
+import java.io.PrintStream;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLClassLoader;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
 import java.util.logging.Level;
 import java.util.logging.Logger;
@@ -353,7 +384,7 @@ public abstract class BaseTest {

     /** Return true if all is ok, no errors */
-    protected boolean antlr(String fileName, String grammarFileName, String grammarStr, String... extraOptions) {
+    protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean defaultListener, String... extraOptions) {
         boolean allIsWell = true;
         System.out.println("dir "+tmpdir);
         mkdir(tmpdir);

@@ -371,6 +402,9 @@ public abstract class BaseTest {
             options.toArray(optionsA);
             Tool antlr = newTool(optionsA);
             antlr.addListener(equeue);
+            if (defaultListener) {
+                antlr.addListener(new DefaultToolListener(antlr));
+            }
             antlr.processGrammarsOnCommandLine();
         }
         catch (Exception e) {
@@ -379,8 +413,8 @@ public abstract class BaseTest {
             e.printStackTrace(System.err);
         }

-        if ( equeue.errors.size()>0 ) {
-            allIsWell = false;
+        allIsWell = equeue.errors.isEmpty();
+        if ( !defaultListener && !equeue.errors.isEmpty() ) {
             System.err.println("antlr reports errors from "+options);
             for (int i = 0; i < equeue.errors.size(); i++) {
                 ANTLRMessage msg = equeue.errors.get(i);

@@ -390,6 +424,13 @@ public abstract class BaseTest {
             System.out.println(grammarStr);
             System.out.println("###");
         }
+        if ( !defaultListener && !equeue.warnings.isEmpty() ) {
+            System.err.println("antlr reports warnings from "+options);
+            for (int i = 0; i < equeue.warnings.size(); i++) {
+                ANTLRMessage msg = equeue.warnings.get(i);
+                System.err.println(msg);
+            }
+        }

         return allIsWell;
     }
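The reworked helper takes a boolean selecting whether a DefaultToolListener is attached, which silences the error and warning dumps for tests that expect diagnostics. A hedged call sketch (master is a grammar string, as elsewhere in this class):

    boolean ok = antlr("M.g4", "M.g4", master, false); // false = strict: errors still print and fail the test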
@@ -431,11 +472,11 @@ public abstract class BaseTest {
                                  String input, boolean debug)
     {
         boolean success = rawGenerateAndBuildRecognizer(grammarFileName,
-                                                        grammarStr,
-                                                        parserName,
-                                                        lexerName,
-                                                        "-parse-listener",
-                                                        "-visitor");
+                                                        grammarStr,
+                                                        parserName,
+                                                        lexerName,
+                                                        "-parse-listener",
+                                                        "-visitor");
         assertTrue(success);
         writeFile(tmpdir, "input", input);
         return rawExecRecognizer(parserName,
@@ -450,10 +491,24 @@ public abstract class BaseTest {
                                                     @Nullable String parserName,
                                                     String lexerName,
                                                     String... extraOptions)
+    {
+        return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions);
+    }
+
+    /** Return true if all is well */
+    protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
+                                                    String grammarStr,
+                                                    @Nullable String parserName,
+                                                    String lexerName,
+                                                    boolean defaultListener,
+                                                    String... extraOptions)
     {
         boolean allIsWell =
-            antlr(grammarFileName, grammarFileName, grammarStr, extraOptions);
-        boolean ok;
+            antlr(grammarFileName, grammarFileName, grammarStr, defaultListener, extraOptions);
+        if (!allIsWell) {
+            return false;
+        }

         List<String> files = new ArrayList<String>();
         if ( lexerName!=null ) {
             files.add(lexerName+".java");
@@ -471,8 +526,7 @@ public abstract class BaseTest {
                 files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseParseListener.java");
             }
         }
-        ok = compile(files.toArray(new String[files.size()]));
-        if ( !ok ) { allIsWell = false; }
+        allIsWell = compile(files.toArray(new String[files.size()]));
         return allIsWell;
     }
@@ -612,38 +666,19 @@ public abstract class BaseTest {
             msg = msg.replaceAll("\r","\\\\r");
             msg = msg.replaceAll("\t","\\\\t");

-            // ignore error number
-            if ( expect!=null ) expect = stripErrorNum(expect);
-            actual = stripErrorNum(actual);
             assertEquals("error in: "+msg,expect,actual);
         }
     }

-    // can be multi-line
-    //error(29): A.g:2:11: unknown attribute reference a in $a
-    //error(29): A.g:2:11: unknown attribute reference a in $a
-    String stripErrorNum(String errs) {
-        String[] lines = errs.split("\n");
-        for (int i=0; i<lines.length; i++) {
-            String s = lines[i];
-            int lp = s.indexOf("error(");
-            int rp = s.indexOf(')', lp);
-            if ( lp>=0 && rp>=0 ) {
-                lines[i] = s.substring(0, lp) + s.substring(rp+1, s.length());
-            }
-        }
-        return Utils.join(lines, "\n");
-    }
-
     public String getFilenameFromFirstLineOfGrammar(String line) {
         String fileName = "<string>";
         int grIndex = line.lastIndexOf("grammar");
         int semi = line.lastIndexOf(';');
         if ( grIndex>=0 && semi>=0 ) {
             int space = line.indexOf(' ', grIndex);
-            fileName = line.substring(space+1, semi)+".g";
+            fileName = line.substring(space+1, semi)+Tool.GRAMMAR_EXTENSION;
         }
-        if ( fileName.length()==".g".length() ) fileName = "<string>";
+        if ( fileName.length()==Tool.GRAMMAR_EXTENSION.length() ) fileName = "<string>";
         return fileName;
     }
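For reference, the deleted helper blanked out error numbers so expectations could ignore them; with the codes now stable, the test tables below pin exact numbers such as error(63). A worked example of the old behavior (strings are illustrative):

    String before = "error(29): A.g:2:11: unknown attribute reference a in $a";
    String after  = ": A.g:2:11: unknown attribute reference a in $a"; // what stripErrorNum(before) returned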
@@ -897,7 +932,7 @@ public abstract class BaseTest {
             createParserST =
                 new ST(
                     "        <parserName> parser = new <parserName>(tokens);\n" +
-                    "        parser.setErrorHandler(new DiagnosticErrorStrategy());\n");
+                    "        parser.addErrorListener(new DiagnosticErrorListener());\n");
         }
         outputFileST.add("createParser", createParserST);
         outputFileST.add("parserName", parserName);
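Diagnostic reporting thus moves from the error-handling strategy to the listener chain, so a parser keeps its normal recovery strategy while still reporting ambiguity and context-sensitivity events. A hedged sketch of the shape the generated test harness now takes (TParser and tokens are placeholders):

    TParser parser = new TParser(tokens);
    parser.addErrorListener(new DiagnosticErrorListener()); // replaces setErrorHandler(new DiagnosticErrorStrategy())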
@@ -1089,12 +1124,22 @@ public abstract class BaseTest {
         }

         @Override
-        public String toString(int start, int stop) {
-            return null;
+        public String getText() {
+            throw new UnsupportedOperationException("can't give strings");
         }

         @Override
-        public String toString(Token start, Token stop) {
-            return null;
+        public String getText(Interval interval) {
+            throw new UnsupportedOperationException("can't give strings");
+        }
+
+        @Override
+        public String getText(RuleContext ctx) {
+            throw new UnsupportedOperationException("can't give strings");
+        }
+
+        @Override
+        public String getText(Token start, Token stop) {
+            return null;
         }
     }
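The stub above tracks the runtime's consolidation on getText: where toString(start, stop) took bare indices, the new overloads take an Interval, a RuleContext, or a Token pair. A hedged usage sketch (tokens and ctx are placeholders; the Interval.of factory is assumed from org.antlr.v4.runtime.misc):

    String byIndex = tokens.getText(Interval.of(2, 5)); // text of tokens 2..5 inclusive
    String byRule  = tokens.getText(ctx);               // text spanned by a rule's context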
@@ -360,14 +360,14 @@ public class TestATNConstruction extends BaseTest {
     @Test public void testNestedAstar() throws Exception {
         Grammar g = new Grammar(
             "parser grammar P;\n"+
-            "a : (',' ID*)*;");
+            "a : (COMMA ID*)*;");
         String expecting =
             "RuleStart_a_0->StarLoopEntry_13\n" +
             "StarLoopEntry_13->StarBlockStart_11\n" +
             "StarLoopEntry_13->s14\n" +
             "StarBlockStart_11->s2\n" +
             "s14->RuleStop_a_1\n" +
-            "s2-','->StarLoopEntry_8\n" +
+            "s2-COMMA->StarLoopEntry_8\n" +
             "RuleStop_a_1-EOF->s16\n" +
             "StarLoopEntry_8->StarBlockStart_6\n" +
             "StarLoopEntry_8->s9\n" +
@@ -118,22 +118,22 @@ public class TestActionTranslation extends BaseTest {

     @Test public void testRuleRefs() throws Exception {
         String action = "$lab.start; $c.text;";
-        String expected = "(((AContext)_localctx).lab!=null?(((AContext)_localctx).lab.start):null); (((AContext)_localctx).c!=null?_input.toString(((AContext)_localctx).c.start,((AContext)_localctx).c.stop):null);";
+        String expected = "(((AContext)_localctx).lab!=null?(((AContext)_localctx).lab.start):null); (((AContext)_localctx).c!=null?_input.getText(((AContext)_localctx).c.start,((AContext)_localctx).c.stop):null);";
         testActions(attributeTemplate, "inline", action, expected);
     }

     @Test public void testRefToTextAttributeForCurrentRule() throws Exception {
         String action = "$a.text; $text";
         String expected =
-            "(_localctx.a!=null?_input.toString(_localctx.a.start,_localctx.a.stop):" +
-            "null); _input.toString(_localctx.start, _input.LT(-1))";
+            "(_localctx.a!=null?_input.getText(_localctx.a.start,_localctx.a.stop):" +
+            "null); _input.getText(_localctx.start, _input.LT(-1))";
         testActions(attributeTemplate, "init", action, expected);
         expected =
-            "_input.toString(_localctx.start, _input.LT(-1)); _input.toString(_localctx.start, _input.LT(-1))";
+            "_input.getText(_localctx.start, _input.LT(-1)); _input.getText(_localctx.start, _input.LT(-1))";
         testActions(attributeTemplate, "inline", action, expected);
         expected =
-            "(_localctx.a!=null?_input.toString(_localctx.a.start,_localctx.a.stop):null);" +
-            " _input.toString(_localctx.start, _input.LT(-1))";
+            "(_localctx.a!=null?_input.getText(_localctx.a.start,_localctx.a.stop):null);" +
+            " _input.getText(_localctx.start, _input.LT(-1))";
         testActions(attributeTemplate, "finally", action, expected);
     }
@@ -9,6 +9,7 @@ public class TestAttributeChecks extends BaseTest {
     String attributeTemplate =
         "parser grammar A;\n"+
         "@members {<members>}\n" +
+        "tokens{ID;}\n" +
         "a[int x] returns [int y]\n" +
         "@init {<init>}\n" +
         "  : id=ID ids+=ID lab=b[34] {\n" +

@@ -24,8 +25,8 @@ public class TestAttributeChecks extends BaseTest {
         "d : ;\n";

     String[] membersChecks = {
-        "$a", "error(29): A.g:2:11: unknown attribute reference a in $a\n",
-        "$a.y", "error(29): A.g:2:11: unknown attribute reference a in $a.y\n",
+        "$a", "error(63): A.g4:2:11: unknown attribute reference a in $a\n",
+        "$a.y", "error(63): A.g4:2:11: unknown attribute reference a in $a.y\n",
     };

     String[] initChecks = {
@@ -36,8 +37,8 @@ public class TestAttributeChecks extends BaseTest {
         "$lab.e", "",
         "$ids", "",

-        "$c", "error(29): A.g:4:8: unknown attribute reference c in $c\n",
-        "$a.q", "error(31): A.g:4:10: unknown attribute q for rule a in $a.q\n",
+        "$c", "error(63): A.g4:5:8: unknown attribute reference c in $c\n",
+        "$a.q", "error(65): A.g4:5:10: unknown attribute q for rule a in $a.q\n",
     };

     String[] inlineChecks = {
@@ -58,19 +59,19 @@ public class TestAttributeChecks extends BaseTest {
     };

     String[] bad_inlineChecks = {
-        "$lab", "error(33): A.g:6:4: missing attribute access on rule reference lab in $lab\n",
-        "$q", "error(29): A.g:6:4: unknown attribute reference q in $q\n",
-        "$q.y", "error(29): A.g:6:4: unknown attribute reference q in $q.y\n",
-        "$q = 3", "error(29): A.g:6:4: unknown attribute reference q in $q\n",
-        "$q = 3;", "error(29): A.g:6:4: unknown attribute reference q in $q = 3;\n",
-        "$q.y = 3;", "error(29): A.g:6:4: unknown attribute reference q in $q.y = 3;\n",
-        "$q = $blort;", "error(29): A.g:6:4: unknown attribute reference q in $q = $blort;\n" +
-                        "error(29): A.g:6:9: unknown attribute reference blort in $blort\n",
-        "$a.ick", "error(31): A.g:6:6: unknown attribute ick for rule a in $a.ick\n",
-        "$a.ick = 3;", "error(31): A.g:6:6: unknown attribute ick for rule a in $a.ick = 3;\n",
-        "$b.d", "error(30): A.g:6:6: cannot access rule d's parameter: $b.d\n", // can't see rule ref's arg
-        "$d.text", "error(29): A.g:6:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref
-        "$lab.d", "error(30): A.g:6:8: cannot access rule d's parameter: $lab.d\n",
+        "$lab", "error(67): A.g4:7:4: missing attribute access on rule reference lab in $lab\n",
+        "$q", "error(63): A.g4:7:4: unknown attribute reference q in $q\n",
+        "$q.y", "error(63): A.g4:7:4: unknown attribute reference q in $q.y\n",
+        "$q = 3", "error(63): A.g4:7:4: unknown attribute reference q in $q\n",
+        "$q = 3;", "error(63): A.g4:7:4: unknown attribute reference q in $q = 3;\n",
+        "$q.y = 3;", "error(63): A.g4:7:4: unknown attribute reference q in $q.y = 3;\n",
+        "$q = $blort;", "error(63): A.g4:7:4: unknown attribute reference q in $q = $blort;\n" +
+                        "error(63): A.g4:7:9: unknown attribute reference blort in $blort\n",
+        "$a.ick", "error(65): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n",
+        "$a.ick = 3;", "error(65): A.g4:7:6: unknown attribute ick for rule a in $a.ick = 3;\n",
+        "$b.d", "error(64): A.g4:7:6: cannot access rule d's parameter: $b.d\n", // can't see rule ref's arg
+        "$d.text", "error(63): A.g4:7:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref
+        "$lab.d", "error(64): A.g4:7:8: cannot access rule d's parameter: $lab.d\n",
     };

     String[] finallyChecks = {
@@ -84,20 +85,20 @@ public class TestAttributeChecks extends BaseTest {
         "$id.text", "",
         "$ids", "",

-        "$lab", "error(33): A.g:9:14: missing attribute access on rule reference lab in $lab\n",
-        "$q", "error(29): A.g:9:14: unknown attribute reference q in $q\n",
-        "$q.y", "error(29): A.g:9:14: unknown attribute reference q in $q.y\n",
-        "$q = 3", "error(29): A.g:9:14: unknown attribute reference q in $q\n",
-        "$q = 3;", "error(29): A.g:9:14: unknown attribute reference q in $q = 3;\n",
-        "$q.y = 3;", "error(29): A.g:9:14: unknown attribute reference q in $q.y = 3;\n",
-        "$q = $blort;", "error(29): A.g:9:14: unknown attribute reference q in $q = $blort;\n" +
-                        "error(29): A.g:9:19: unknown attribute reference blort in $blort\n",
-        "$a.ick", "error(31): A.g:9:16: unknown attribute ick for rule a in $a.ick\n",
-        "$a.ick = 3;", "error(31): A.g:9:16: unknown attribute ick for rule a in $a.ick = 3;\n",
-        "$b.e", "error(29): A.g:9:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts
-        "$b.d", "error(29): A.g:9:14: unknown attribute reference b in $b.d\n",
-        "$c.text", "error(29): A.g:9:14: unknown attribute reference c in $c.text\n",
-        "$lab.d", "error(30): A.g:9:18: cannot access rule d's parameter: $lab.d\n",
+        "$lab", "error(67): A.g4:10:14: missing attribute access on rule reference lab in $lab\n",
+        "$q", "error(63): A.g4:10:14: unknown attribute reference q in $q\n",
+        "$q.y", "error(63): A.g4:10:14: unknown attribute reference q in $q.y\n",
+        "$q = 3", "error(63): A.g4:10:14: unknown attribute reference q in $q\n",
+        "$q = 3;", "error(63): A.g4:10:14: unknown attribute reference q in $q = 3;\n",
+        "$q.y = 3;", "error(63): A.g4:10:14: unknown attribute reference q in $q.y = 3;\n",
+        "$q = $blort;", "error(63): A.g4:10:14: unknown attribute reference q in $q = $blort;\n" +
+                        "error(63): A.g4:10:19: unknown attribute reference blort in $blort\n",
+        "$a.ick", "error(65): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n",
+        "$a.ick = 3;", "error(65): A.g4:10:16: unknown attribute ick for rule a in $a.ick = 3;\n",
+        "$b.e", "error(63): A.g4:10:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts
+        "$b.d", "error(63): A.g4:10:14: unknown attribute reference b in $b.d\n",
+        "$c.text", "error(63): A.g4:10:14: unknown attribute reference c in $c.text\n",
+        "$lab.d", "error(64): A.g4:10:18: cannot access rule d's parameter: $lab.d\n",
     };

     String[] dynMembersChecks = {
@@ -105,11 +106,11 @@ public class TestAttributeChecks extends BaseTest {
         "$S::i", "",
         "$S::i=$S::i", "",

-        "$b::f", "error(54): A.g:3:1: unknown dynamic scope: b in $b::f\n",
-        "$S::j", "error(55): A.g:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
-        "$S::j = 3;", "error(55): A.g:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
-        "$S::j = $S::k;", "error(55): A.g:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
-                          "error(55): A.g:3:12: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
+        "$b::f", "error(54): A.g4:3:1: unknown dynamic scope: b in $b::f\n",
+        "$S::j", "error(55): A.g4:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
+        "$S::j = 3;", "error(55): A.g4:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
+        "$S::j = $S::k;", "error(55): A.g4:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
+                          "error(55): A.g4:3:12: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
     };

     String[] dynInitChecks = {
@@ -122,10 +123,10 @@ public class TestAttributeChecks extends BaseTest {
         "$a::z", "",
         "$S", "",

-        "$S::j", "error(55): A.g:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
-        "$S::j = 3;", "error(55): A.g:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
-        "$S::j = $S::k;", "error(55): A.g:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
-                          "error(55): A.g:8:19: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
+        "$S::j", "error(55): A.g4:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
+        "$S::j = 3;", "error(55): A.g4:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
+        "$S::j = $S::k;", "error(55): A.g4:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
+                          "error(55): A.g4:8:19: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
     };

     String[] dynInlineChecks = {
@@ -138,27 +139,27 @@ public class TestAttributeChecks extends BaseTest {
         "$S::i=$S::i", "",
         "$a::z", "",

-        "$S::j", "error(55): A.g:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
-        "$S::j = 3;", "error(55): A.g:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
-        "$S::j = $S::k;", "error(55): A.g:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
-                          "error(55): A.g:10:15: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
-        "$Q[-1]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-1]::y\n",
-        "$Q[-i]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-i]::y\n",
-        "$Q[i]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[i]::y\n",
-        "$Q[0]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[0]::y\n",
-        "$Q[-1]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-1]::y = 23;\n",
-        "$Q[-i]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-i]::y = 23;\n",
-        "$Q[i]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[i]::y = 23;\n",
-        "$Q[0]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[0]::y = 23;\n",
-        "$S[-1]::y", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-1]::y\n",
-        "$S[-i]::y", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-i]::y\n",
-        "$S[i]::y", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[i]::y\n",
-        "$S[0]::y", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[0]::y\n",
-        "$S[-1]::y = 23;", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-1]::y = 23;\n",
-        "$S[-i]::y = 23;", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-i]::y = 23;\n",
-        "$S[i]::y = 23;", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[i]::y = 23;\n",
-        "$S[0]::y = 23;", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[0]::y = 23;\n",
-        "$S[$S::y]::i", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S::y\n"
+        "$S::j", "error(55): A.g4:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
+        "$S::j = 3;", "error(55): A.g4:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
+        "$S::j = $S::k;", "error(55): A.g4:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
+                          "error(55): A.g4:10:15: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
+        "$Q[-1]::y", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[-1]::y\n",
+        "$Q[-i]::y", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[-i]::y\n",
+        "$Q[i]::y", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[i]::y\n",
+        "$Q[0]::y", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[0]::y\n",
+        "$Q[-1]::y = 23;", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[-1]::y = 23;\n",
+        "$Q[-i]::y = 23;", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[-i]::y = 23;\n",
+        "$Q[i]::y = 23;", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[i]::y = 23;\n",
+        "$Q[0]::y = 23;", "error(54): A.g4:10:4: unknown dynamic scope: Q in $Q[0]::y = 23;\n",
+        "$S[-1]::y", "error(55): A.g4:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-1]::y\n",
+        "$S[-i]::y", "error(55): A.g4:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-i]::y\n",
+        "$S[i]::y", "error(55): A.g4:10:10: unknown dynamically-scoped attribute for scope S: y in $S[i]::y\n",
+        "$S[0]::y", "error(55): A.g4:10:10: unknown dynamically-scoped attribute for scope S: y in $S[0]::y\n",
+        "$S[-1]::y = 23;", "error(55): A.g4:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-1]::y = 23;\n",
+        "$S[-i]::y = 23;", "error(55): A.g4:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-i]::y = 23;\n",
+        "$S[i]::y = 23;", "error(55): A.g4:10:10: unknown dynamically-scoped attribute for scope S: y in $S[i]::y = 23;\n",
+        "$S[0]::y = 23;", "error(55): A.g4:10:10: unknown dynamically-scoped attribute for scope S: y in $S[0]::y = 23;\n",
+        "$S[$S::y]::i", "error(55): A.g4:10:10: unknown dynamically-scoped attribute for scope S: y in $S::y\n"
     };

     String[] dynFinallyChecks = {
@@ -171,10 +172,10 @@ public class TestAttributeChecks extends BaseTest {
         "$S::i=$S::i", "",
         "$a::z", "",

-        "$S::j", "error(55): A.g:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
-        "$S::j = 3;", "error(55): A.g:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
-        "$S::j = $S::k;", "error(55): A.g:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
-                          "error(55): A.g:12:25: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
+        "$S::j", "error(55): A.g4:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j\n",
+        "$S::j = 3;", "error(55): A.g4:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n",
+        "$S::j = $S::k;", "error(55): A.g4:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" +
+                          "error(55): A.g4:12:25: unknown dynamically-scoped attribute for scope S: k in $S::k\n",
     };

     @Test public void testMembersActions() throws RecognitionException {
@@ -200,6 +201,7 @@ public class TestAttributeChecks extends BaseTest {
     @Test public void testTokenRef() throws RecognitionException {
         String grammar =
             "parser grammar S;\n" +
+            "tokens{ID;}\n" +
             "a : x=ID {Token t = $x; t = $ID;} ;\n";
         String expected =
             "";
@@ -37,6 +37,7 @@ public class TestBasicSemanticErrors extends BaseTest {
         "parser grammar U;\n" +
         "options { foo=bar; k=\"3\";}\n" +
         "tokens {\n" +
+        "        ID;\n" +
         "        f='fkj';\n" +
         "        S = 'a';\n" +
         "}\n" +
@@ -50,18 +51,18 @@ public class TestBasicSemanticErrors extends BaseTest {
         "b : ( options { ick=bar; greedy=true; } : ID )+ ;\n" +
         "c : ID<blue> ID<x=y> ;",
         // YIELDS
-        "warning(47): U.g:2:10: illegal option foo\n" +
-        "warning(47): U.g:2:19: illegal option k\n" +
-        ": U.g:4:8: token names must start with an uppercase letter: f\n" +
-        ": U.g:4:8: can't assign string value to token name f in non-combined grammar\n" +
-        ": U.g:5:8: can't assign string value to token name S in non-combined grammar\n" +
-        "warning(47): U.g:8:10: illegal option x\n" +
-        ": U.g:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
-        ": U.g:7:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
-        "warning(47): U.g:11:10: illegal option blech\n" +
-        "warning(47): U.g:11:21: illegal option greedy\n" +
-        "warning(47): U.g:14:16: illegal option ick\n" +
-        "warning(47): U.g:15:16: illegal option x\n",
+        "warning(83): U.g4:2:10: illegal option foo\n" +
+        "warning(83): U.g4:2:19: illegal option k\n" +
+        "error(60): U.g4:5:8: token names must start with an uppercase letter: f\n" +
+        "error(59): U.g4:5:8: can't assign string value to token name f in non-combined grammar\n" +
+        "error(59): U.g4:6:8: can't assign string value to token name S in non-combined grammar\n" +
+        "warning(83): U.g4:9:10: illegal option x\n" +
+        "error(54): U.g4:9:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
+        "error(54): U.g4:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
+        "warning(83): U.g4:12:10: illegal option blech\n" +
+        "warning(83): U.g4:12:21: illegal option greedy\n" +
+        "warning(83): U.g4:15:16: illegal option ick\n" +
+        "warning(83): U.g4:16:16: illegal option x\n",
     };

     @Test public void testU() { super.testErrors(U, false); }
@@ -43,7 +43,7 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "a : B . C ;\n"; // not qualified ID
         mkdir(tmpdir);
-        Grammar g = new Grammar(tmpdir + "/S.g", grammar);
+        Grammar g = new Grammar(tmpdir + "/S.g4", grammar);
         g.name = "S";

         ErrorQueue equeue = new ErrorQueue();
@@ -61,14 +61,14 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "a : B {System.out.println(\"S.a\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : a ;\n" +
             "B : 'b' ;" + // defines B from inherited token space
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "b", debug);
         assertEquals("S.a\n", found);
     }
@@ -78,13 +78,13 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "a : '=' 'a' {System.out.println(\"S.a\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : a ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "=a", debug);
         assertEquals("S.a\n", found);
     }
@@ -97,14 +97,14 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : label=a[3] {System.out.println($label.y);} ;\n" +
             "B : 'b' ;" + // defines B from inherited token space
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "b", debug);
         assertEquals("S.a1000\n", found);
     }
@@ -117,14 +117,14 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "a : B {System.out.print(\"S.a\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : a {System.out.println($a.text);} ;\n" +
             "B : 'b' ;" + // defines B from inherited token space
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "b", debug);
         assertEquals("S.ab\n", found);
     }
@@ -137,13 +137,13 @@ public class TestCompositeGrammars extends BaseTest {
             "}\n" +
             "a : B ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" + // uses no rules from the import
             "import S;\n" +
             "s : 'b' {foo();} ;\n" + // gS is import pointer
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "b", debug);
         assertEquals("foo\n", found);
     }
@@ -154,18 +154,18 @@ public class TestCompositeGrammars extends BaseTest {
             "a : b {System.out.println(\"S.a\");} ;\n" +
             "b : B ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String slave2 =
             "parser grammar T;\n" +
             "a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a
-        writeFile(tmpdir, "T.g", slave2);
+        writeFile(tmpdir, "T.g4", slave2);
        String master =
             "grammar M;\n" +
             "import S,T;\n" +
             "s : a ;\n" +
             "B : 'b' ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "b", debug);
         assertEquals("S.a\n", found);
     }
@@ -176,13 +176,13 @@ public class TestCompositeGrammars extends BaseTest {
             "tokens { A; B; C; }\n" +
             "x : A {System.out.println(\"S.x\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String slave2 =
             "parser grammar T;\n" +
             "tokens { C; B; A; }\n" + // reverse order
             "y : A {System.out.println(\"T.y\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "T.g", slave2);
+        writeFile(tmpdir, "T.g4", slave2);
         // The lexer will create rules to match letters a, b, c.
         // The associated token types A, B, C must have the same value
         // and all import'd parsers. Since ANTLR regenerates all imports
@@ -202,7 +202,7 @@ public class TestCompositeGrammars extends BaseTest {
             "A : 'a' ;\n" +
             "C : 'c' ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "aa", debug);
         assertEquals("S.x\n" +
                      "T.y\n", found);
@@ -215,13 +215,13 @@ public class TestCompositeGrammars extends BaseTest {
             "tokens { A; B; C; }\n" +
             "x : A {System.out.println(\"S.x\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String slave2 =
             "parser grammar T;\n" +
             "tokens { C; B; A; }\n" + // reverse order
             "y : A {System.out.println(\"T.y\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "T.g", slave2);
+        writeFile(tmpdir, "T.g4", slave2);

         String master =
             "grammar M;\n" +
@@ -231,8 +231,8 @@ public class TestCompositeGrammars extends BaseTest {
             "A : 'a' ;\n" +
             "C : 'c' ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

         String expectedTokenIDToTypeMap = "{EOF=-1, B=3, A=4, C=5, WS=6}";
         String expectedStringLiteralToTypeMap = "{'c'=5, 'a'=4, 'b'=3}";

@@ -244,7 +244,7 @@ public class TestCompositeGrammars extends BaseTest {

         assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());

-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "aa", debug);
         assertEquals("S.x\n" +
                      "T.y\n", found);
@@ -260,18 +260,18 @@ public class TestCompositeGrammars extends BaseTest {
             "INT : '0'..'9'+ ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);

         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : x INT ;\n";
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

         assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());

-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "s", "x 34 9", debug);
         assertEquals("S.x\n", found);
     }
@@ -284,15 +284,15 @@ public class TestCompositeGrammars extends BaseTest {
             "tokens { A='a'; }\n" +
             "x : A {System.out.println(\"S.x\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);

         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : x ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

         Object expectedArg = "S";
         ErrorType expectedMsgID = ErrorType.OPTIONS_IN_DELEGATE;
@@ -310,15 +310,15 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "options {toke\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);

         String master =
             "grammar M;\n" +
             "import S;\n" +
             "s : x ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

         assertEquals(ErrorType.SYNTAX_ERROR, equeue.errors.get(0).errorType);
     }
@@ -329,13 +329,13 @@ public class TestCompositeGrammars extends BaseTest {
             "a : b {System.out.println(\"S.a\");} ;\n" +
             "b : B ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
             "b : 'b'|'c' ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "a", "c", debug);
         assertEquals("S.a\n", found);
     }
@@ -349,7 +349,7 @@ public class TestCompositeGrammars extends BaseTest {
             " ;\n" +
             "init : '=' INT ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "JavaDecl.g", slave);
+        writeFile(tmpdir, "JavaDecl.g4", slave);
         String master =
             "grammar Java;\n" +
             "import JavaDecl;\n" +
@@ -360,7 +360,7 @@ public class TestCompositeGrammars extends BaseTest {
             "INT : '0'..'9'+ ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
         // for float to work in decl, type must be overridden
-        String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
+        String found = execParser("Java.g4", master, "JavaParser", "JavaLexer",
                                   "prog", "float x = 3;", debug);
         assertEquals("JavaDecl: floatx=3;\n", found);
     }
@@ -371,20 +371,20 @@ public class TestCompositeGrammars extends BaseTest {
             "a : b {System.out.println(\"S.a\");} ;\n" +
             "b : B ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);

         String slave2 =
             "parser grammar T;\n" +
             "tokens { A='x'; }\n" +
             "b : B {System.out.println(\"T.b\");} ;\n";
-        writeFile(tmpdir, "T.g", slave2);
+        writeFile(tmpdir, "T.g4", slave2);

         String master =
             "grammar M;\n" +
             "import S, T;\n" +
             "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "a", "c", debug);
         assertEquals("M.b\n" +
                      "S.a\n", found);
@@ -397,7 +397,7 @@ public class TestCompositeGrammars extends BaseTest {
             "A : 'a' {System.out.println(\"S.A\");} ;\n" +
             "C : 'c' ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "lexer grammar M;\n" +
             "import S;\n" +
@@ -409,7 +409,7 @@ public class TestCompositeGrammars extends BaseTest {
             "[@1,1:1='b',<3>,1:1]\n" +
             "[@2,2:2='c',<6>,1:2]\n" +
             "[@3,3:2='<EOF>',<-1>,1:3]\n";
-        String found = execLexer("M.g", master, "M", "abc", debug);
+        String found = execLexer("M.g4", master, "M", "abc", debug);
         assertEquals(expecting, found);
     }
@@ -419,13 +419,13 @@ public class TestCompositeGrammars extends BaseTest {
             "A : 'a' {System.out.println(\"S.A\");} ;\n" +
             "B : 'b' {System.out.println(\"S.B\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "lexer grammar M;\n" +
             "import S;\n" +
             "A : 'a' B {System.out.println(\"M.A\");} ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execLexer("M.g", master, "M", "ab", debug);
+        String found = execLexer("M.g4", master, "M", "ab", debug);
         assertEquals("M.A\n" +
                      "[@0,0:1='ab',<3>,1:0]\n" +
                      "[@1,2:1='<EOF>',<-1>,1:2]\n", found);
@@ -440,14 +440,14 @@ public class TestCompositeGrammars extends BaseTest {
             "lexer grammar S;\n" +
             "ID : 'a'..'z'+ ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
             "a : A {System.out.println(\"M.a: \"+$A);} ;\n" +
             "A : 'abc' {System.out.println(\"M.A\");} ;\n" +
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
+        String found = execParser("M.g4", master, "MParser", "MLexer",
                                   "a", "abc", debug);

         assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
@@ -464,20 +464,20 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar T;\n" +
             "a : T ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "T.g", slave);
+        writeFile(tmpdir, "T.g4", slave);
         String slave2 =
             "parser grammar S;\n" +
             "import T;\n" +
             "a : S ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave2);
+        writeFile(tmpdir, "S.g4", slave2);

         String master =
             "grammar M;\n" +
             "import S;\n" +
             "a : M ;\n" ;
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

         String expectedTokenIDToTypeMap = "{EOF=-1, M=3}"; // S and T aren't imported; overridden
         String expectedStringLiteralToTypeMap = "{}";
@@ -492,7 +492,7 @@ public class TestCompositeGrammars extends BaseTest {
         assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());

         boolean ok =
-            rawGenerateAndBuildRecognizer("M.g", master, "MParser", null);
+            rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null);
         boolean expecting = true; // should be ok
         assertEquals(expecting, ok);
     }
@@ -501,42 +501,48 @@ public class TestCompositeGrammars extends BaseTest {
         ErrorQueue equeue = new ErrorQueue();
         String slave =
             "parser grammar T;\n" +
             "tokens{T;}\n" +
             "x : T ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "T.g", slave);
+        writeFile(tmpdir, "T.g4", slave);
         slave =
             "parser grammar S;\n" +
             "import T;\n" +
             "tokens{S;}\n" +
             "y : S ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);

         slave =
             "parser grammar C;\n" +
             "tokens{C;}\n" +
             "i : C ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "C.g", slave);
+        writeFile(tmpdir, "C.g4", slave);
         slave =
             "parser grammar B;\n" +
             "tokens{B;}\n" +
             "j : B ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "B.g", slave);
+        writeFile(tmpdir, "B.g4", slave);
         slave =
             "parser grammar A;\n" +
             "import B,C;\n" +
             "tokens{A;}\n" +
             "k : A ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "A.g", slave);
+        writeFile(tmpdir, "A.g4", slave);

         String master =
             "grammar M;\n" +
             "import S,A;\n" +
             "tokens{M;}\n" +
             "a : M ;\n" ;
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

-        assertEquals(equeue.errors.toString(), "[]");
-        assertEquals(equeue.warnings.toString(), "[]");
+        assertEquals("[]", equeue.errors.toString());
+        assertEquals("[]", equeue.warnings.toString());
         String expectedTokenIDToTypeMap = "{EOF=-1, M=3, S=4, T=5, A=6, B=7, C=8}";
         String expectedStringLiteralToTypeMap = "{}";
         String expectedTypeToTokenList = "[M, S, T, A, B, C]";
@@ -548,7 +554,7 @@ public class TestCompositeGrammars extends BaseTest {
                      realElements(g.typeToTokenList).toString());

         boolean ok =
-            rawGenerateAndBuildRecognizer("M.g", master, "MParser", null);
+            rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null);
         boolean expecting = true; // should be ok
         assertEquals(expecting, ok);
     }
@@ -559,20 +565,20 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar T;\n" +
             "x : T ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "T.g", slave);
+        writeFile(tmpdir, "T.g4", slave);
         String slave2 =
             "parser grammar S;\n" + // A, B, C token type order
             "import T;\n" +
             "a : S ;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave2);
+        writeFile(tmpdir, "S.g4", slave2);

         String master =
             "grammar M;\n" +
             "import S;\n" +
             "a : M x ;\n" ; // x MUST BE VISIBLE TO M
-        writeFile(tmpdir, "M.g", master);
-        Grammar g = new Grammar(tmpdir+"/M.g", master, equeue);
+        writeFile(tmpdir, "M.g4", master);
+        Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

         String expectedTokenIDToTypeMap = "{EOF=-1, M=3, T=4}";
         String expectedStringLiteralToTypeMap = "{}";
@@ -597,29 +603,29 @@ public class TestCompositeGrammars extends BaseTest {
             "T3: '3';\n" +
             "T4: '4';\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "L.g", gstr);
+        writeFile(tmpdir, "L.g4", gstr);
         gstr =
             "parser grammar G1;\n" +
             "s: a | b;\n" +
             "a: T1;\n" +
             "b: T2;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "G1.g", gstr);
+        writeFile(tmpdir, "G1.g4", gstr);

         gstr =
             "parser grammar G2;\n" +
             "import G1;\n" +
             "a: T3;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "G2.g", gstr);
+        writeFile(tmpdir, "G2.g4", gstr);
         String G3str =
             "grammar G3;\n" +
             "import G2;\n" +
             "b: T4;\n" ;
         mkdir(tmpdir);
-        writeFile(tmpdir, "G3.g", G3str);
+        writeFile(tmpdir, "G3.g4", G3str);

-        Grammar g = new Grammar(tmpdir+"/G3.g", G3str, equeue);
+        Grammar g = new Grammar(tmpdir+"/G3.g4", G3str, equeue);

         String expectedTokenIDToTypeMap = "{EOF=-1, T4=3, T3=4}";
         String expectedStringLiteralToTypeMap = "{}";
@@ -634,7 +640,7 @@ public class TestCompositeGrammars extends BaseTest {
         assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());

         boolean ok =
-            rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null);
+            rawGenerateAndBuildRecognizer("G3.g4", G3str, "G3Parser", null);
         boolean expecting = true; // should be ok
         assertEquals(expecting, ok);
     }
@@ -644,7 +650,7 @@ public class TestCompositeGrammars extends BaseTest {
             "parser grammar S;\n" +
             "a : B {System.out.print(\"S.a\");} ;\n";
         mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
+        writeFile(tmpdir, "S.g4", slave);
         String master =
             "grammar M;\n" +
             "import S;\n" +
@@ -653,7 +659,7 @@ public class TestCompositeGrammars extends BaseTest {
             "s : a ;\n" +
             "B : 'b' ;" + // defines B from inherited token space
             "WS : (' '|'\\n') {skip();} ;\n" ;
-        boolean ok = antlr("M.g", "M.g", master);
+        boolean ok = antlr("M.g4", "M.g4", master, false);
         boolean expecting = true; // should be ok
         assertEquals(expecting, ok);
     }
@@ -49,13 +49,13 @@ public class TestFullContextParsing extends BaseTest {
             "  : ID | ID {;} ;\n" +
             "ID : 'a'..'z'+ ;\n"+
             "WS : (' '|'\\t'|'\\n')+ {skip();} ;\n";
-        String result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        String result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                                    "abc", true);
         String expecting =
             "Decision 0:\n" +
             "s0-ID->:s1=>1\n"; // not ctx sensitive
         assertEquals(expecting, result);
-        assertEquals("line 1:0 reportAmbiguity d=0: ambigAlts={1..2}:[(1,1,[]), (1,2,[])],conflictingAlts={1..2}, input='abc'\n",
+        assertEquals("line 1:0 reportAmbiguity d=0: ambigAlts={1..2}, input='abc'\n",
                      this.stderrDuringParse);
     }
@@ -70,26 +70,26 @@ public class TestFullContextParsing extends BaseTest {
             "ID : 'a'..'z'+ ;\n"+
             "INT : '0'..'9'+ ;\n"+
             "WS : (' '|'\\t'|'\\n')+ {skip();} ;\n";
-        String result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        String result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                                    "$ 34 abc", true);
         String expecting =
             "Decision 1:\n" +
             "s0-INT->s1\n" +
             "s1-ID->s2^\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:5 reportAttemptingFullContext d=1: [(28,1,[18 10]), (20,2,[10])], input='34abc'\n" +
-                     "line 1:2 reportContextSensitivity d=1: [(20,1,[10])],uniqueAlt=1, input='34'\n",
+        assertEquals("line 1:5 reportAttemptingFullContext d=1, input='34abc'\n" +
+                     "line 1:2 reportContextSensitivity d=1, input='34'\n",
                      this.stderrDuringParse);

-        result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                             "@ 34 abc", true);
         expecting =
             "Decision 1:\n" +
             "s0-INT->s1\n" +
             "s1-ID->s2^\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:5 reportAttemptingFullContext d=1: [(28,1,[22 14]), (24,2,[14])], input='34abc'\n" +
-                     "line 1:5 reportContextSensitivity d=1: [(1,2,[])],uniqueAlt=2, input='34abc'\n",
+        assertEquals("line 1:5 reportAttemptingFullContext d=1, input='34abc'\n" +
+                     "line 1:5 reportContextSensitivity d=1, input='34abc'\n",
                      this.stderrDuringParse);
     }
@@ -104,22 +104,17 @@ public class TestFullContextParsing extends BaseTest {
             "ID : 'a'..'z'+ ;\n"+
             "INT : '0'..'9'+ ;\n"+
             "WS : (' '|'\\t'|'\\n')+ {skip();} ;\n";
-        String result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        String result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                                    "$ 34 abc @ 34 abc", true);
         String expecting =
             "Decision 1:\n" +
             "s0-EOF->:s3=>2\n" +
             "s0-'@'->:s2=>1\n" +
             "s0-'$'->:s1=>1\n" +
             "\n" +
             "Decision 2:\n" +
             "s0-INT->s1\n" +
             "s1-ID->s2^\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:5 reportAttemptingFullContext d=2: [(30,1,[20 10]), (22,2,[10])], input='34abc'\n" +
-                     "line 1:2 reportContextSensitivity d=2: [(22,1,[10])],uniqueAlt=1, input='34'\n" +
-                     "line 1:14 reportAttemptingFullContext d=2: [(30,1,[24 14]), (26,2,[14])], input='34abc'\n" +
-                     "line 1:14 reportContextSensitivity d=2: [(8,2,[18]), (12,2,[18]), (1,2,[])],uniqueAlt=2, input='34abc'\n",
+        assertEquals("line 1:5 reportAttemptingFullContext d=2, input='34abc'\n" +
+                     "line 1:2 reportContextSensitivity d=2, input='34'\n" +
+                     "line 1:14 reportAttemptingFullContext d=2, input='34abc'\n" +
+                     "line 1:14 reportContextSensitivity d=2, input='34abc'\n",
                      this.stderrDuringParse);
     }
@@ -136,13 +131,9 @@ public class TestFullContextParsing extends BaseTest {
             "ID : 'a'..'z'+ ;\n"+
             "WS : (' '|'\\t'|'\\n')+ {skip();} ;\n";
         String input = "{ if x then return }";
-        String result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        String result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                                    input, true);
         String expecting =
             "Decision 0:\n" +
-            "s0-'if'->:s1=>1\n" +
-            "s0-'}'->:s2=>2\n" +
-            "\n" +
-            "Decision 1:\n" +
             "s0-'}'->:s1=>2\n";
         assertEquals(expecting, result);
@@ -150,30 +141,23 @@ public class TestFullContextParsing extends BaseTest {

         input =
             "{ if x then if y then return else foo }";
-        result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                             input, true);
         expecting =
             "Decision 0:\n" +
             "s0-'if'->:s1=>1\n" +
             "s0-'}'->:s2=>2\n" +
             "\n" +
             "Decision 1:\n" +
-            "s0-'else'->:s1=>1\n" +
+            "s0-'else'->s1^\n" +
             "s0-'}'->:s2=>2\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:29 reportAmbiguity d=1: ambigAlts={1..2}:[(25,1,[]), (25,2,[],up=1)],conflictingAlts={1..2},dipsIntoOuterContext, input='else'\n",
+        assertEquals("line 1:29 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 1:38 reportAmbiguity d=1: ambigAlts={1..2}, input='elsefoo}'\n",
                      this.stderrDuringParse);

         input = "{ if x then return else foo }";
-        result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                             input, true);
         expecting =
             "Decision 0:\n" +
             "s0-'if'->:s1=>1\n" +
             "s0-'}'->:s2=>2\n" +
             "\n" +
             "Decision 1:\n" +
-            "s0-'else'->:s1=>1\n";
+            "s0-'else'->s1^\n";
         assertEquals(expecting, result);
         // Technically, this input sequence is not ambiguous because else
         // uniquely predicts going into the optional subrule. else cannot
@@ -181,55 +165,51 @@ public class TestFullContextParsing extends BaseTest {
         // the start of a stat. But, we are using the theory that
         // SLL(1)=LL(1) and so we are avoiding full context parsing
         // by declaring all else clause parsing to be ambiguous.
-        assertEquals("line 1:19 reportAmbiguity d=1: ambigAlts={1..2}:[(25,1,[]), (25,2,[],up=1)],conflictingAlts={1..2},dipsIntoOuterContext, input='else'\n",
+        assertEquals("line 1:19 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 1:19 reportContextSensitivity d=1, input='else'\n",
                      this.stderrDuringParse);

         input = "{ if x then return else foo }";
-        result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                             input, true);
         expecting =
             "Decision 0:\n" +
             "s0-'if'->:s1=>1\n" +
             "s0-'}'->:s2=>2\n" +
             "\n" +
             "Decision 1:\n" +
-            "s0-'else'->:s1=>1\n";
+            "s0-'else'->s1^\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:19 reportAmbiguity d=1: ambigAlts={1..2}:[(25,1,[]), (25,2,[],up=1)],conflictingAlts={1..2},dipsIntoOuterContext, input='else'\n",
+        assertEquals("line 1:19 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 1:19 reportContextSensitivity d=1, input='else'\n",
                      this.stderrDuringParse);

         input =
             "{ if x then return else foo\n" +
             "if x then if y then return else foo }";
-        result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                             input, true);
         expecting =
             "Decision 0:\n" +
             "s0-'if'->:s1=>1\n" +
             "s0-'}'->:s2=>2\n" +
             "\n" +
             "Decision 1:\n" +
-            "s0-'else'->:s1=>1\n" +
+            "s0-'else'->s1^\n" +
             "s0-'}'->:s2=>2\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:19 reportAmbiguity d=1: ambigAlts={1..2}:[(25,1,[]), (25,2,[],up=1)],conflictingAlts={1..2},dipsIntoOuterContext, input='else'\n",
+        assertEquals("line 1:19 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 1:19 reportContextSensitivity d=1, input='else'\n" +
+                     "line 2:27 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 2:36 reportAmbiguity d=1: ambigAlts={1..2}, input='elsefoo}'\n",
                      this.stderrDuringParse);

         input =
             "{ if x then return else foo\n" +
             "if x then if y then return else foo }";
-        result = execParser("T.g", grammar, "TParser", "TLexer", "s",
+        result = execParser("T.g4", grammar, "TParser", "TLexer", "s",
                             input, true);
         expecting =
             "Decision 0:\n" +
             "s0-'if'->:s1=>1\n" +
             "s0-'}'->:s2=>2\n" +
             "\n" +
             "Decision 1:\n" +
-            "s0-'else'->:s1=>1\n" +
+            "s0-'else'->s1^\n" +
             "s0-'}'->:s2=>2\n";
         assertEquals(expecting, result);
-        assertEquals("line 1:19 reportAmbiguity d=1: ambigAlts={1..2}:[(25,1,[]), (25,2,[],up=1)],conflictingAlts={1..2},dipsIntoOuterContext, input='else'\n",
+        assertEquals("line 1:19 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 1:19 reportContextSensitivity d=1, input='else'\n" +
+                     "line 2:27 reportAttemptingFullContext d=1, input='else'\n" +
+                     "line 2:36 reportAmbiguity d=1: ambigAlts={1..2}, input='elsefoo}'\n",
                      this.stderrDuringParse);
     }
@ -255,14 +235,14 @@ public class TestFullContextParsing extends BaseTest {
|
|||
"ID : [a-z]+ ;\n" +
|
||||
"";
|
||||
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "prog", "a(i)<-x", true);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", true);
|
||||
assertEquals("pass.\n", found);
|
||||
|
||||
String expecting =
|
||||
"line 1:4 reportAttemptingFullContext d=1: [(35,1,[27 15 8]), (41,1,[27 15 8]), (49,1,[27 15 8]), (35,2,[27 21 8]), (41,2,[27 21 8]), (49,2,[27 21 8])], input='a(i)<-'\n" +
|
||||
"line 1:7 reportContextSensitivity d=1: [(53,2,[])],uniqueAlt=2, input='a(i)<-x'\n" +
|
||||
"line 1:3 reportAttemptingFullContext d=3: [(35,1,[27 21 8]), (41,2,[27 21 8]), (49,3,[27 21 8])], input='a(i)'\n" +
|
||||
"line 1:7 reportAmbiguity d=3: ambigAlts={2..3}:[(53,2,[]), (53,3,[])],conflictingAlts={2..3}, input='a(i)<-x'\n";
|
||||
"line 1:4 reportAttemptingFullContext d=1, input='a(i)<-'\n" +
|
||||
"line 1:7 reportContextSensitivity d=1, input='a(i)<-x'\n" +
|
||||
"line 1:3 reportAttemptingFullContext d=3, input='a(i)'\n" +
|
||||
"line 1:7 reportAmbiguity d=3: ambigAlts={2..3}, input='a(i)<-x'\n";
|
||||
assertEquals(expecting, this.stderrDuringParse);
|
||||
}
|
||||
|
||||
|
|
|
@@ -15,17 +15,17 @@ public class TestLeftRecursion extends BaseTest {
" ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"s", "x", debug);
String expecting = "(s (a x))\n";
assertEquals(expecting, found);

found = execParser("T.g", grammar, "TParser", "TLexer",
found = execParser("T.g4", grammar, "TParser", "TLexer",
"s", "x y", debug);
expecting = "(s (a (a x) y))\n";
assertEquals(expecting, found);

found = execParser("T.g", grammar, "TParser", "TLexer",
found = execParser("T.g4", grammar, "TParser", "TLexer",
"s", "x y z", debug);
expecting = "(s (a (a (a x) y) z))\n";
assertEquals(expecting, found);
@@ -40,7 +40,7 @@ public class TestLeftRecursion extends BaseTest {
" ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"s", "x y z", debug);
String expecting = "(s (a (a (a x) y) z))\n";
assertEquals(expecting, found);
@@ -228,6 +228,24 @@ public class TestLeftRecursion extends BaseTest {
runTests(grammar, tests, "s");
}

@Test public void testLabelsOnOpSubrule() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" +
"e : a=e op=('*'|'/') b=e {}\n" +
" | INT {}\n" +
" | '(' x=e ')' {}\n" +
" ;\n" +
"INT : '0'..'9'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";
String[] tests = {
"4", "(s (e 4))",
"1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))",
"(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))",
};
runTests(grammar, tests, "s");
}

@Test public void testReturnValueAndActionsAndLabels() throws Exception {
String grammar =
"grammar T;\n" +
@@ -276,8 +294,69 @@ public class TestLeftRecursion extends BaseTest {
runTests(grammar, tests, "s");
}

@Test
public void testAmbigLR() throws Exception {
String grammar =
"// START: g\n" +
"grammar Expr;\n" +
"// END: g\n" +
"\n" +
"// START:stat\n" +
"prog: stat ;\n" +
"\n" +
"stat: expr NEWLINE -> printExpr\n" +
" | ID '=' expr NEWLINE -> assign\n" +
" | NEWLINE -> blank\n" +
" ;\n" +
"// END:stat\n" +
"\n" +
"// START:expr\n" +
"expr: expr ('*'|'/') expr -> MulDiv\n" +
" | expr ('+'|'-') expr -> AddSub\n" +
" | INT -> int\n" +
" | ID -> id\n" +
" | '(' expr ')' -> parens\n" +
" ;\n" +
"// END:expr\n" +
"\n" +
"// show marginal cost of adding a clear/wipe command for memory\n" +
"\n" +
"// START:tokens\n" +
"MUL : '*' ; // assigns token name to '*' used above in grammar\n" +
"DIV : '/' ;\n" +
"ADD : '+' ;\n" +
"SUB : '-' ;\n" +
"ID : [a-zA-Z]+ ; // match identifiers\n" +
"INT : [0-9]+ ; // match integers\n" +
"NEWLINE:'\\r'? '\\n' ; // return newlines to parser (is end-statement signal)\n" +
"WS : [ \\t]+ -> skip ; // toss out whitespace\n" +
"// END:tokens\n";
String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "1\n", true);
assertNull(stderrDuringParse);

result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a = 5\n", true);
assertNull(stderrDuringParse);

result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "b = 6\n", true);
assertNull(stderrDuringParse);

result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a+b*2\n", true);
assertEquals("line 1:1 reportAttemptingFullContext d=3, input='+'\n" +
"line 1:1 reportContextSensitivity d=3, input='+'\n" +
"line 1:3 reportAttemptingFullContext d=3, input='*'\n" +
"line 1:3 reportAmbiguity d=3: ambigAlts={1..2}, input='*'\n",
stderrDuringParse);

result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "(1+2)*3\n", true);
assertEquals("line 1:2 reportAttemptingFullContext d=3, input='+'\n" +
"line 1:2 reportContextSensitivity d=3, input='+'\n" +
"line 1:5 reportAttemptingFullContext d=3, input='*'\n" +
"line 1:5 reportContextSensitivity d=3, input='*'\n",
stderrDuringParse);
}

public void runTests(String grammar, String[] tests, String startRule) {
rawGenerateAndBuildRecognizer("T.g", grammar, "TParser", "TLexer");
rawGenerateAndBuildRecognizer("T.g4", grammar, "TParser", "TLexer");
writeRecognizerAndCompile("TParser",
"TLexer",
startRule,

@@ -37,7 +37,7 @@ public class TestLexerErrors extends BaseTest {
String grammar =
"lexer grammar L;\n" +
"A : 'a' 'b' ;\n";
String tokens = execLexer("L.g", grammar, "L", "x");
String tokens = execLexer("L.g4", grammar, "L", "x");
String expectingTokens =
"[@0,1:0='<EOF>',<-1>,1:1]\n";
assertEquals(expectingTokens, tokens);
@@ -50,7 +50,7 @@ public class TestLexerErrors extends BaseTest {
String grammar =
"lexer grammar L;\n" +
"A : 'a' 'b' ;\n";
String tokens = execLexer("L.g", grammar, "L", "abx");
String tokens = execLexer("L.g4", grammar, "L", "abx");
String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n";
@@ -64,7 +64,7 @@ public class TestLexerErrors extends BaseTest {
String grammar =
"lexer grammar L;\n" +
"A : 'a' 'b' ;\n";
String tokens = execLexer("L.g", grammar, "L", "ax");
String tokens = execLexer("L.g4", grammar, "L", "ax");
String expectingTokens =
"[@0,2:1='<EOF>',<-1>,1:2]\n";
assertEquals(expectingTokens, tokens);
@@ -77,7 +77,7 @@ public class TestLexerErrors extends BaseTest {
String grammar =
"lexer grammar L;\n" +
"A : 'a' 'b' ;\n";
String tokens = execLexer("L.g", grammar, "L", "abax");
String tokens = execLexer("L.g4", grammar, "L", "abax");
String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" +
"[@1,4:3='<EOF>',<-1>,1:4]\n";
@@ -95,7 +95,7 @@ public class TestLexerErrors extends BaseTest {
// The first ab caches the DFA then abx goes through the DFA but
// into the ATN for the x, which fails. Must go back into DFA
// and return to previous dfa accept state
String tokens = execLexer("L.g", grammar, "L", "ababx");
String tokens = execLexer("L.g4", grammar, "L", "ababx");
String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" +
"[@1,2:3='ab',<3>,1:2]\n" +
@@ -116,7 +116,7 @@ public class TestLexerErrors extends BaseTest {
// into the ATN for the c. It marks that hasn't except state
// and then keeps going in the ATN. It fails on the x, but
// uses the previous accepted in the ATN not DFA
String tokens = execLexer("L.g", grammar, "L", "ababcx");
String tokens = execLexer("L.g4", grammar, "L", "ababcx");
String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" +
"[@1,2:4='abc',<4>,1:2]\n" +
@@ -131,7 +131,7 @@ public class TestLexerErrors extends BaseTest {
String grammar =
"lexer grammar L;\n" +
"A : 'abc' ;\n";
String tokens = execLexer("L.g", grammar, "L", "abx");
String tokens = execLexer("L.g4", grammar, "L", "abx");
String expectingTokens =
"[@0,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expectingTokens, tokens);
@@ -142,4 +142,29 @@ public class TestLexerErrors extends BaseTest {

// TEST RECOVERY

/**
* This is a regression test for #45 "NullPointerException in LexerATNSimulator.execDFA".
* https://github.com/antlr/antlr4/issues/46
*/
@Test
public void testLexerExecDFA() throws Exception {
String grammar =
"grammar T;\n" +
"start : ID ':' expr;\n" +
"expr : primary expr? {} | expr '->' ID;\n" +
"primary : ID;\n" +
"ID : [a-z]+;\n" +
"\n";
String result = execLexer("T.g4", grammar, "TLexer", "x : x", false);
String expecting =
"[@0,0:0='x',<5>,1:0]\n" +
"[@1,2:2=':',<4>,1:2]\n" +
"[@2,4:4='x',<5>,1:4]\n" +
"[@3,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expecting, result);
assertEquals("line 1:1 token recognition error at: ' '\n" +
"line 1:3 token recognition error at: ' '\n",
this.stderrDuringParse);
}

}

@@ -7,7 +7,7 @@ public class TestLexerExec extends BaseTest {
String grammar =
"lexer grammar L;\n"+
"QUOTE : '\"' ;\n"; // make sure this compiles
String found = execLexer("L.g", grammar, "L", "\"");
String found = execLexer("L.g4", grammar, "L", "\"");
String expecting =
"[@0,0:0='\"',<3>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n";
@@ -20,7 +20,7 @@ public class TestLexerExec extends BaseTest {
"A : '-' I ;\n" +
"I : '0'..'9'+ ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g", grammar, "L", "34 -21 3");
String found = execLexer("L.g4", grammar, "L", "34 -21 3");
String expecting =
"[@0,0:1='34',<4>,1:0]\n" +
"[@1,3:5='-21',<3>,1:3]\n" +
@@ -34,7 +34,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g", grammar, "L", "34 34");
String found = execLexer("L.g4", grammar, "L", "34 34");
String expecting =
"I\n" +
"I\n" +
@@ -49,7 +49,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"WS : (' '|'\\n') -> skip ;";
String found = execLexer("L.g", grammar, "L", "34 34");
String found = execLexer("L.g4", grammar, "L", "34 34");
String expecting =
"I\n" +
"I\n" +
@@ -64,7 +64,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"WS : '#' -> more ;";
String found = execLexer("L.g", grammar, "L", "34#10");
String found = execLexer("L.g4", grammar, "L", "34#10");
String expecting =
"I\n" +
"I\n" +
@@ -79,7 +79,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"HASH : '#' -> type(HASH) ;";
String found = execLexer("L.g", grammar, "L", "34#");
String found = execLexer("L.g4", grammar, "L", "34#");
String expecting =
"I\n" +
"[@0,0:1='34',<3>,1:0]\n" +
@@ -93,7 +93,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"HASH : '#' -> type(HASH), skip, more ;";
String found = execLexer("L.g", grammar, "L", "34#11");
String found = execLexer("L.g4", grammar, "L", "34#11");
String expecting =
"I\n" +
"I\n" +
@@ -111,7 +111,7 @@ public class TestLexerExec extends BaseTest {
"mode STRING_MODE;\n"+
"STRING : '\"' {popMode();} ;\n"+
"ANY : . {more();} ;\n";
String found = execLexer("L.g", grammar, "L", "\"abc\" \"ab\"");
String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\"");
String expecting =
"[@0,0:4='\"abc\"',<5>,1:0]\n" +
"[@1,6:9='\"ab\"',<5>,1:6]\n" +
@@ -127,7 +127,7 @@ public class TestLexerExec extends BaseTest {
"mode STRING_MODE;\n"+
"STRING : '\"' -> popMode ;\n"+
"ANY : . -> more ;\n";
String found = execLexer("L.g", grammar, "L", "\"abc\" \"ab\"");
String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\"");
String expecting =
"[@0,0:4='\"abc\"',<5>,1:0]\n" +
"[@1,6:9='\"ab\"',<5>,1:6]\n" +
@@ -143,7 +143,7 @@ public class TestLexerExec extends BaseTest {
"mode STRING_MODE;\n"+
"STRING : '\"' -> mode(DEFAULT_MODE) ;\n"+
"ANY : . -> more ;\n";
String found = execLexer("L.g", grammar, "L", "\"abc\" \"ab\"");
String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\"");
String expecting =
"[@0,0:4='\"abc\"',<5>,1:0]\n" +
"[@1,6:9='\"ab\"',<5>,1:6]\n" +
@@ -157,7 +157,7 @@ public class TestLexerExec extends BaseTest {
"KEND : 'end' ;\n" + // has priority
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\n')+ ;";
String found = execLexer("L.g", grammar, "L", "end eend ending a");
String found = execLexer("L.g4", grammar, "L", "end eend ending a");
String expecting =
"[@0,0:2='end',<3>,1:0]\n" +
"[@1,3:3=' ',<5>,1:3]\n" +
@@ -180,7 +180,7 @@ public class TestLexerExec extends BaseTest {
"ID : 'a'..'z'+ ;\n" +
"fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;\n" +
"WS : (' '|'\n')+ ;";
String found = execLexer("L.g", grammar, "L", "x 0 1 a.b a.l");
String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l");
String expecting =
"[@0,0:0='x',<7>,1:0]\n" +
"[@1,1:1=' ',<8>,1:1]\n" +
@@ -205,7 +205,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n" +
"DONE : EOF ;\n" +
"A : 'a';\n";
String found = execLexer("L.g", grammar, "L", "");
String found = execLexer("L.g4", grammar, "L", "");
String expecting =
"[@0,0:-1='<EOF>',<3>,1:0]\n" +
"[@1,0:-1='<EOF>',<-1>,1:0]\n";
@@ -218,12 +218,12 @@ public class TestLexerExec extends BaseTest {
"A : 'a' EOF ;\n"+
"B : 'a';\n"+
"C : 'c';\n";
String found = execLexer("L.g", grammar, "L", "");
String found = execLexer("L.g4", grammar, "L", "");
String expecting =
"[@0,0:-1='<EOF>',<-1>,1:0]\n";
assertEquals(expecting, found);

found = execLexer("L.g", grammar, "L", "a");
found = execLexer("L.g4", grammar, "L", "a");
expecting =
"[@0,0:0='a',<3>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n";
@@ -235,7 +235,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"WS : [ \\n\\u000D] -> skip ;";
String found = execLexer("L.g", grammar, "L", "34\r\n 34");
String found = execLexer("L.g4", grammar, "L", "34\r\n 34");
String expecting =
"I\n" +
"I\n" +
@@ -250,7 +250,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+
"WS : [ \\n\\u000D]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "34\r\n 34");
String found = execLexer("L.g4", grammar, "L", "34\r\n 34");
String expecting =
"I\n" +
"I\n" +
@@ -265,7 +265,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : ~[ab \n] ~[ \ncd]* {System.out.println(\"I\");} ;\n"+
"WS : [ \\n\\u000D]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "xaf");
String found = execLexer("L.g4", grammar, "L", "xaf");
String expecting =
"I\n" +
"[@0,0:2='xaf',<3>,1:0]\n" +
@@ -278,7 +278,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : (~[ab \n]|'a') {System.out.println(\"I\");} ;\n"+
"WS : [ \\n\\u000D]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "a x");
String found = execLexer("L.g4", grammar, "L", "a x");
String expecting =
"I\n" +
"I\n" +
@@ -294,7 +294,7 @@ public class TestLexerExec extends BaseTest {
"I : [0-9]+ {System.out.println(\"I\");} ;\n"+
"ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n"+
"WS : [ \\n\\u0009\r]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "34\r 34 a2 abc \n ");
String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n ");
String expecting =
"I\n" +
"I\n" +
@@ -313,7 +313,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : [0-]+ {System.out.println(\"I\");} ;\n"+
"WS : [ \\n\\u000D]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "00\r\n");
String found = execLexer("L.g4", grammar, "L", "00\r\n");
String expecting =
"I\n" +
"[@0,0:1='00',<3>,1:0]\n" +
@@ -326,7 +326,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"I : [0-9]+ {System.out.println(\"I\");} ;\n"+
"WS : [ \\u]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "34 ");
String found = execLexer("L.g4", grammar, "L", "34 ");
String expecting =
"I\n" +
"[@0,0:1='34',<3>,1:0]\n" +
@@ -339,7 +339,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"DASHBRACK : [\\-\\]]+ {System.out.println(\"DASHBRACK\");} ;\n"+
"WS : [ \\u]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "- ] ");
String found = execLexer("L.g4", grammar, "L", "- ] ");
String expecting =
"DASHBRACK\n" +
"DASHBRACK\n" +
@@ -354,7 +354,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"A : [z-a9]+ {System.out.println(\"A\");} ;\n"+
"WS : [ \\u]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "9");
String found = execLexer("L.g4", grammar, "L", "9");
String expecting =
"A\n" +
"[@0,0:0='9',<3>,1:0]\n" +
@@ -367,7 +367,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"A : [\"a-z]+ {System.out.println(\"A\");} ;\n"+
"WS : [ \n\t]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "b\"a");
String found = execLexer("L.g4", grammar, "L", "b\"a");
String expecting =
"A\n" +
"[@0,0:2='b\"a',<3>,1:0]\n" +
@@ -380,7 +380,7 @@ public class TestLexerExec extends BaseTest {
"lexer grammar L;\n"+
"A : [\"\\\\ab]+ {System.out.println(\"A\");} ;\n"+
"WS : [ \n\t]+ -> skip ;";
String found = execLexer("L.g", grammar, "L", "b\"\\a");
String found = execLexer("L.g4", grammar, "L", "b\"\\a");
String expecting =
"A\n" +
"[@0,0:3='b\"\\a',<3>,1:0]\n" +

@@ -28,7 +28,7 @@ public class TestListeners extends BaseTest {
"INT : [0-9]+ ;\n" +
"ID : [a-z]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1 2", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false);
String expecting = "(a 1 2)\n" +
"1\n" +
"2\n";
@@ -61,13 +61,13 @@ public class TestListeners extends BaseTest {
"INT : [0-9]+ ;\n" +
"ID : [a-z]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1 2", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false);
String expecting =
"(a 1 2)\n" +
"1 2 [1, 2]\n";
assertEquals(expecting, result);

result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abc", false);
result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false);
expecting = "(a abc)\n" +
"[@0,0:2='abc',<6>,1:0]\n";
assertEquals(expecting, result);
@@ -103,12 +103,12 @@ public class TestListeners extends BaseTest {
"INT : [0-9]+ ;\n" +
"ID : [a-z]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1 2", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false);
String expecting = "(a (b 1) (b 2))\n" +
"1 2 1\n";
assertEquals(expecting, result);

result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abc", false);
result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false);
expecting = "(a (b abc))\n" +
"abc\n";
assertEquals(expecting, result);
@@ -145,7 +145,7 @@ public class TestListeners extends BaseTest {
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1+2*3", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1+2*3", false);
String expecting =
"(e (e 1) + (e (e 2) * (e 3)))\n" +
"1\n" +
@@ -186,13 +186,13 @@ public class TestListeners extends BaseTest {
"ADD : '+' ;\n" +
"INT : [0-9]+ ;\n" +
"WS : [ \\t\\n]+ -> skip ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "1(2,3)", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1(2,3)", false);
String expecting =
"(e (e 1) ( (eList (e 2) , (e 3)) ))\n" +
"1\n" +
"2\n" +
"3\n" +
"1 [14 6]\n";
"1 [16 6]\n";
assertEquals(expecting, result);
}
}

@@ -35,19 +35,19 @@ public class TestNonGreedyLoops extends BaseTest {
@Test public void testNongreedyLoopOnEndIsNop() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"any : .* ;\n"+
"INT : '0'..'9'+ ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n')+ {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x", true);
assertEquals("x\n" +
"Decision 0:\n" +
"s0-ID->:s1=>2\n", found);
assertEquals(null, this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"34 x", true);
assertEquals("34x\n" +
"Decision 0:\n" +
@@ -58,12 +58,12 @@ public class TestNonGreedyLoops extends BaseTest {
@Test public void testNongreedyPlusLoopOnEndIsNop() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : any ID EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"any : .+ ;\n"+ // .+ on end of rule always gives no viable alt. can't bypass but can't match
"INT : '0'..'9'+ ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n')+ {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x", true);
assertEquals("x\n" +
"Decision 0:\n" +
@@ -80,7 +80,7 @@ public class TestNonGreedyLoops extends BaseTest {
"INT : '0'..'9'+ ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n')+ {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x", true);
assertEquals("alt 1\n" +
"Decision 0:\n" +
@@ -91,7 +91,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s0-ID->:s1=>2\n", found);
assertEquals(null, this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"34", true);
assertEquals("alt 2\n" +
"Decision 0:\n" +
@@ -102,7 +102,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s0-INT->:s1=>2\n", found);
assertEquals(null, this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"34 x", true);
assertEquals("alt 1\n" +
"Decision 0:\n" +
@@ -125,7 +125,7 @@ public class TestNonGreedyLoops extends BaseTest {
"INT : '0'..'9'+ ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n')+ {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"2 3 x", true);
assertEquals("alt 1\n" +
"Decision 0:\n" +
@@ -139,7 +139,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s0-ID->:s2=>2\n", found);
assertEquals(null, this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"2 3", true);
assertEquals("alt 2\n" +
"Decision 0:\n" +
@@ -151,7 +151,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s0-INT->:s1=>2\n", found);
assertEquals("line 1:0 no viable alternative at input '2'\n", this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a b c 3", true);
assertEquals("alt 2\n" +
"Decision 0:\n" +
@@ -176,34 +176,27 @@ public class TestNonGreedyLoops extends BaseTest {
"INT : '0'..'9'+ ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n')+ {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x", true);
assertEquals("alt 1\n" +
"Decision 0:\n" +
"s0-ID->:s1=>1\n" +
"\n" +
"Decision 1:\n" +
"s0-ID->:s1=>2\n", found);
assertEquals("line 1:0 extraneous input 'x' expecting <EOF>\n", this.stderrDuringParse);
"s0-ID->:s1=>1\n", found);
assertNull(this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"34", true);
assertEquals("alt 1\n" +
"Decision 0:\n" +
"s0-INT->s1\n" +
"s1-EOF->:s2=>1\n" +
"\n" +
"Decision 1:\n" +
"s0-INT->:s1=>2\n", found); // resolves INT EOF to alt 1 from s since ambig 'tween a and b
assertEquals("line 1:2 reportAmbiguity d=0: ambigAlts={1..2}:[(1,1,[]), (1,2,[])],conflictingAlts={1..2}, input='34'\n" +
"line 1:0 extraneous input '34' expecting <EOF>\n",
"s1-EOF->:s2=>1\n", found); // resolves INT EOF to alt 1 from s since ambig 'tween a and b
assertEquals("line 1:2 reportAmbiguity d=0: ambigAlts={1..2}, input='34'\n",
this.stderrDuringParse);
}

@Test public void testNongreedyLoopCantSeeEOF() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : block EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : block EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"block : '{' .* '}' ;\n"+
"EQ : '=' ;\n" +
"INT : '0'..'9'+ ;\n" +
@@ -211,14 +204,14 @@ public class TestNonGreedyLoops extends BaseTest {
"WS : (' '|'\\n')+ {skip();} ;\n";
String input =
"{ }";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("{}\n" +
"Decision 0:\n" +
"s0-'}'->:s1=>2\n", found);
input =
"{a b { }";
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("{ab{}\n" +
"Decision 0:\n" +
@@ -227,7 +220,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s0-ID->:s1=>1\n", found);
input =
"{ } a 2 { }"; // FAILS to match since it terminates loop at first { }
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("", found); // should not print output; resync kills rest of input til '}' then returns normally
}
@@ -235,7 +228,7 @@ public class TestNonGreedyLoops extends BaseTest {
@Test public void testNongreedyLoop() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ifstat : 'if' '(' .* ')' block ;\n" +
"block : '{' '}' ;\n"+
"EQ : '=' ;\n" +
@@ -244,7 +237,7 @@ public class TestNonGreedyLoops extends BaseTest {
"WS : (' '|'\\n')+ {skip();} ;\n";
String input =
"if ( x=34 ) { } ;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("if(x=34){};\n" +
"Decision 0:\n" +
@@ -256,7 +249,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s3-'}'->:s4=>2\n", found);
input =
"if ( ))) ) { } ;";
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("if()))){};\n" +
"Decision 0:\n" +
@@ -266,7 +259,7 @@ public class TestNonGreedyLoops extends BaseTest {
"s3-'}'->:s4=>2\n", found);
input =
"if (() { } a 2) { } ;"; // The first { } should match block so should stop
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("", found); // should not finish to print output
}
@@ -274,7 +267,7 @@ public class TestNonGreedyLoops extends BaseTest {
@Test public void testNongreedyLoopPassingThroughAnotherNongreedy() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : ifstat ';' EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ifstat : 'if' '(' .* ')' block ;\n" +
"block : '{' (block|.)* '}' ;\n"+
"EQ : '=' ;\n" +
@@ -283,7 +276,7 @@ public class TestNonGreedyLoops extends BaseTest {
"WS : (' '|'\\n')+ {skip();} ;\n";
String input =
"if ( x=34 ) { {return a} b 34 } ;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("if(x=34){{returna}b34};\n" +
"Decision 0:\n" +
@@ -309,7 +302,7 @@ public class TestNonGreedyLoops extends BaseTest {

input =
"if ( ()) ) { {return a} b 34 } ;";
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("if(())){{returna}b34};\n" +
"Decision 0:\n" +
@@ -337,7 +330,7 @@ public class TestNonGreedyLoops extends BaseTest {
// EOF on end means LL(*) can identify when to stop the loop.
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : stat* ID '=' ID ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : stat* ID '=' ID ';' EOF {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"stat : 'if' '(' INT ')' stat\n" +
" | 'return' INT ';'\n" +
" | ID '=' (INT|ID) ';'\n" +
@@ -351,7 +344,7 @@ public class TestNonGreedyLoops extends BaseTest {
String input =
"x=1; a=b;";
String found = null;
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("x=1;a=b;\n" +
"Decision 0:\n" +
@@ -360,13 +353,10 @@ public class TestNonGreedyLoops extends BaseTest {
"s2-INT->:s3=>1\n" +
"s2-ID->s4\n" +
"s4-';'->s5\n" +
"s5-EOF->:s6=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-ID->:s1=>3\n", found);
"s5-EOF->:s6=>2\n", found);
input =
"if ( 1 ) { x=3; { return 4; } } return 99; abc=def;";
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("if(1){x=3;{return4;}}return99;abc=def;\n" +
"Decision 0:\n" +
@@ -376,22 +366,10 @@ public class TestNonGreedyLoops extends BaseTest {
"s3-'='->s4\n" +
"s4-ID->s5\n" +
"s5-';'->s6\n" +
"s6-EOF->:s7=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-'{'->:s2=>4\n" +
"s0-'if'->:s1=>1\n" +
"s0-'return'->:s4=>2\n" +
"s0-ID->:s3=>3\n" +
"\n" +
"Decision 2:\n" +
"s0-'{'->:s2=>1\n" +
"s0-'return'->:s3=>1\n" +
"s0-'}'->:s4=>2\n" +
"s0-ID->:s1=>1\n", found);
"s6-EOF->:s7=>2\n", found);
input =
"x=1; a=3;"; // FAILS to match since it can't match last element
execParser("T.g", grammar, "TParser", "TLexer", "s",
execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
// can't match EOF to ID '=' '3' ';'
assertEquals("line 1:9 no viable alternative at input '<EOF>'\n",
@@ -399,7 +377,7 @@ public class TestNonGreedyLoops extends BaseTest {

input =
"x=1; a=b; z=3;"; // FAILS to match since it can't match last element
execParser("T.g", grammar, "TParser", "TLexer", "s",
execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("line 1:14 no viable alternative at input '<EOF>'\n",
this.stderrDuringParse);
@@ -414,7 +392,7 @@ public class TestNonGreedyLoops extends BaseTest {
"grammar T;\n" +
"random : s ;" + // call s so s isn't followed by EOF directly
"s @after {dumpDFA();} : (options {greedy=false;} : stat)* ID '=' ID ';'\n" +
" {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
" {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"stat : 'if' '(' INT ')' stat\n" +
" | 'return' INT ';'\n" +
" | ID '=' (INT|ID) ';'\n" +
@@ -428,7 +406,7 @@ public class TestNonGreedyLoops extends BaseTest {
String input =
"x=1; a=b; x=y;";
String found = null;
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("x=1;a=b;\n" +
"Decision 0:\n" +
@@ -436,13 +414,10 @@ public class TestNonGreedyLoops extends BaseTest {
"s1-'='->s2\n" +
"s2-INT->:s3=>1\n" +
"s2-ID->s4\n" +
"s4-';'->:s5=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-ID->:s1=>3\n", found); // ignores x=1 that follows first a=b assignment
"s4-';'->:s5=>2\n", found); // ignores x=1 that follows first a=b assignment
input =
"if ( 1 ) { x=3; { return 4; } } return 99; abc=def;";
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("if(1){x=3;{return4;}}return99;abc=def;\n" +
"Decision 0:\n" +
@@ -451,29 +426,17 @@ public class TestNonGreedyLoops extends BaseTest {
"s0-ID->s3\n" +
"s3-'='->s4\n" +
"s4-ID->s5\n" +
"s5-';'->:s6=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-'{'->:s2=>4\n" +
"s0-'if'->:s1=>1\n" +
"s0-'return'->:s4=>2\n" +
"s0-ID->:s3=>3\n" +
"\n" +
"Decision 2:\n" +
"s0-'{'->:s2=>1\n" +
"s0-'return'->:s3=>1\n" +
"s0-'}'->:s4=>2\n" +
"s0-ID->:s1=>1\n", found);
"s5-';'->:s6=>2\n", found);
input =
"x=1; a=3;"; // FAILS to match since it can't match either stat
execParser("T.g", grammar, "TParser", "TLexer", "s",
execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
// can't match EOF to ID '=' '0' ';'
assertEquals("line 1:9 no viable alternative at input '<EOF>'\n",
this.stderrDuringParse);
input =
"x=1; a=b; z=3;"; // stops at a=b; ignores z=3;
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
input, true);
assertEquals("x=1;a=b;\n" +
"Decision 0:\n" +
@@ -481,16 +444,13 @@ public class TestNonGreedyLoops extends BaseTest {
"s1-'='->s2\n" +
"s2-INT->:s3=>1\n" +
"s2-ID->s4\n" +
"s4-';'->:s5=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-ID->:s1=>3\n", found); // should not finish all input
"s4-';'->:s5=>2\n", found); // should not finish all input
}

@Test public void testHTMLTags() throws Exception {
String grammar =
"grammar T;\n" +
"s @after {dumpDFA();} : (item)+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"s @after {dumpDFA();} : (item)+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"item : tag | . ;\n" +
"tag : '<' '/'? .* '>' ;\n" +
"EQ : '=' ;\n" +
@@ -501,14 +461,9 @@ public class TestNonGreedyLoops extends BaseTest {
"WS : (' '|'\\n') {skip();} ;\n";

String found = null;
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"<a>foo</a>", true);
assertEquals("<a>foo</a>\n" +
"Decision 0:\n" +
"s0-EOF->:s3=>2\n" +
"s0-'<'->:s1=>1\n" +
"s0-ID->:s2=>1\n" +
"\n" +
"Decision 1:\n" +
"s0-'<'->s1\n" +
"s0-ID->:s5=>2\n" +
@@ -527,20 +482,16 @@ public class TestNonGreedyLoops extends BaseTest {
"Decision 3:\n" +
"s0-'>'->:s2=>2\n" +
"s0-ID->:s1=>1\n", found);
assertEquals("line 1:6 reportAttemptingFullContext d=1: [(20,1,[14 6]), (16,2,[6])], input='<a>foo<'\n" +
"line 1:6 reportAmbiguity d=1: ambigAlts={1..2}:[(26,1,[32 32 32 32 14 6]), (33,1,[14 6]), (22,1,[14 6 10 10]), (26,1,[14 6 10 10]), (33,1,[14 6 10 10]), (20,1,[14 6 10 10 10]), (16,1,[6 10 10 10]), (1,1,[]), (22,2,[14 6 10 10 10 10]), (26,2,[14 6 10 10 10 10]), (33,2,[14 6 10 10 10 10]), (20,2,[14 6 10 10 10 10 10]), (16,2,[6 10 10 10 10 10]), (1,2,[])],conflictingAlts={1..2}, input='<a>foo<'\n" +
"line 1:10 reportAttemptingFullContext d=1: [(20,1,[14 6]), (16,2,[6])], input='</a>'\n" +
"line 1:10 reportAmbiguity d=1: ambigAlts={1..2}:[(35,1,[]), (35,2,[])],conflictingAlts={1..2}, input='</a>'\n" +
"line 1:7 reportAmbiguity d=2: ambigAlts={1..2}:[(26,1,[]), (33,1,[]), (26,2,[]), (33,2,[])],conflictingAlts={1..2}, input='/'\n",
assertEquals("line 1:6 reportAttemptingFullContext d=1, input='<a>foo<'\n" +
"line 1:6 reportAmbiguity d=1: ambigAlts={1..2}, input='<a>foo<'\n" +
"line 1:10 reportAttemptingFullContext d=1, input='</a>'\n" +
"line 1:10 reportAmbiguity d=1: ambigAlts={1..2}, input='</a>'\n" +
"line 1:7 reportAmbiguity d=2: ambigAlts={1..2}, input='/'\n",
this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"<a></a>", true);
assertEquals("<a></a>\n" +
"Decision 0:\n" +
"s0-EOF->:s2=>2\n" +
"s0-'<'->:s1=>1\n" +
"\n" +
"Decision 1:\n" +
"s0-'<'->s1\n" +
"s1-'/'->s2\n" +
@@ -557,13 +508,9 @@ public class TestNonGreedyLoops extends BaseTest {
"Decision 3:\n" +
"s0-'>'->:s2=>2\n" +
"s0-ID->:s1=>1\n", found);
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"</b><a src=\"abc\", width=32>", true);
assertEquals("</b><asrc=\"abc\",width=32>\n" +
"Decision 0:\n" +
"s0-EOF->:s2=>2\n" +
"s0-'<'->:s1=>1\n" +
"\n" +
"Decision 1:\n" +
"s0-'<'->s1\n" +
"s1-'/'->s2\n" +
@@ -609,7 +556,7 @@ public class TestNonGreedyLoops extends BaseTest {

String found = null;
System.out.println(grammar);
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
",=foo <a x= 3>32skidoo<a><img>", true);
assertEquals("<ax=3>\n" +
"<a>\n" +
@@ -626,15 +573,6 @@ public class TestNonGreedyLoops extends BaseTest {
"s3-INT->s3\n" +
"s4-'='->s3\n" +
"\n" +
"Decision 1:\n" + // (tag|header)
"s0-'<'->:s1=>1\n" +
"\n" +
"Decision 2:\n" + // (...)*
"s0-EOF->:s3=>2\n" +
"s0-'<'->:s2=>1\n" +
"s0-','->:s1=>1\n" +
"s0-INT->:s1=>1\n" +
"\n" +
"Decision 3:\n" + // .+
"s0-'x'->:s1=>1\n" +
"s0-'>'->:s2=>2\n" +
@@ -644,7 +582,7 @@ public class TestNonGreedyLoops extends BaseTest {
assertEquals(null,
this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x x<a>", true);
assertEquals("<a>\n" +
"Decision 0:\n" +
@@ -655,22 +593,15 @@ public class TestNonGreedyLoops extends BaseTest {
"s4-ID->s5\n" +
"s5-'>'->:s6=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-'<'->:s1=>1\n" +
"\n" +
"Decision 2:\n" +
"s0-EOF->:s2=>2\n" +
"s0-'x'->:s1=>1\n" +
"\n" +
"Decision 3:\n" +
"s0-'>'->:s2=>2\n" +
"s0-ID->:s1=>1\n", found);
// gets line 1:3 no viable alternative at input '>'. Why??
// oH! it sees .+ and figures it matches > so <> predicts tag CORRECT!
// Seeing '.' in a lookahead prediction can be misleading!!
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x <><a>", true);
assertEquals("null\n" +
assertEquals("<\n" +
"<a>\n" +
"Decision 0:\n" +
"s0-'x'->s1\n" +
@@ -682,14 +613,6 @@ public class TestNonGreedyLoops extends BaseTest {
"s4-'>'->:s7=>2\n" +
"s4-'<'->:s5=>2\n" +
"\n" +
"Decision 1:\n" +
"s0-'<'->:s1=>1\n" +
"\n" +
"Decision 2:\n" +
"s0-EOF->:s3=>2\n" +
"s0-'x'->:s1=>1\n" +
"s0-'>'->:s2=>1\n" +
"\n" +
"Decision 3:\n" +
"s0-'>'->:s1=>2\n" +
"s0-ID->:s2=>1\n", // doesn't match tag; null

@ -28,6 +28,7 @@
|
|||
|
||||
package org.antlr.v4.test;
|
||||
|
||||
import org.antlr.v4.automata.ATNSerializer;
|
||||
import org.junit.Test;
|
||||
|
||||
/** test runtime parse errors */
|
||||
|
@ -36,7 +37,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' 'b' ;";
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aa", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aa", false);
|
||||
String expecting = "line 1:1 mismatched input 'a' expecting 'b'\n";
|
||||
String result = stderrDuringParse;
|
||||
assertEquals(expecting, result);
|
||||
|
@ -46,7 +47,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' 'b' ;";
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aab", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false);
|
||||
String expecting = "line 1:1 extraneous input 'a' expecting 'b'\n";
|
||||
String result = stderrDuringParse;
|
||||
assertEquals(expecting, result);
|
||||
|
@ -56,7 +57,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' ('b'|'c') ;";
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aab", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false);
|
||||
String expecting = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n";
|
||||
String result = stderrDuringParse;
|
||||
assertEquals(expecting, result);
|
||||
|
@ -66,7 +67,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' 'b' 'c' ;";
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ac", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false);
|
||||
String expecting = "line 1:1 missing 'b' at 'c'\n";
|
||||
String result = stderrDuringParse;
|
||||
assertEquals(expecting, result);
|
||||
|
@ -76,7 +77,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;";
|
||||
String result = execParser("T.g", grammar, "TParser", "TLexer", "a", "ac", false);
|
||||
String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false);
|
||||
String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<3>,1:1]\n";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
@ -85,7 +86,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' ('b'|'c') 'd' ;";
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ad", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false);
|
||||
String expecting = "line 1:1 missing {'b', 'c'} at 'd'\n";
|
||||
String result = stderrDuringParse;
|
||||
assertEquals(expecting, result);
|
||||
|
@ -95,7 +96,7 @@ public class TestParseErrors extends BaseTest {
|
|||
String grammar =
|
||||
"grammar T;\n" +
|
||||
"a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;";
|
||||
String result = execParser("T.g", grammar, "TParser", "TLexer", "a", "ad", false);
|
||||
String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false);
|
||||
String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<3>,1:1]\n";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
@ -107,7 +108,7 @@ public class TestParseErrors extends BaseTest {
|
|||
" | 'a' 'c'" +
|
||||
";\n" +
|
||||
"q : 'e' ;\n";
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ae", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ae", false);
|
||||
String expecting = "line 1:1 no viable alternative at input 'ae'\n";
|
||||
String result = stderrDuringParse;
|
||||
assertEquals(expecting, result);
|
||||
|
@ -121,7 +122,7 @@ public class TestParseErrors extends BaseTest {
|
|||
" ;\n" +
|
||||
"q : 'e' ;\n";
|
||||
System.out.println(grammar);
|
||||
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abe", false);
|
||||
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abe", false);
|
||||
String expecting = "line 1:2 no viable alternative at input 'abe'\n";
String result = stderrDuringParse;
assertEquals(expecting, result);
@ -134,7 +135,7 @@ public class TestParseErrors extends BaseTest {
" | 'a'+ 'c'" +
";\n" +
"q : 'e' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aaae", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aaae", false);
String expecting = "line 1:3 no viable alternative at input 'aaae'\n";
String result = stderrDuringParse;
assertEquals(expecting, result);
@ -144,7 +145,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' 'b'*;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aabc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false);
String expecting = "line 1:1 extraneous input 'a' expecting {<EOF>, 'b'}\n" +
"line 1:3 token recognition error at: 'c'\n";
String result = stderrDuringParse;
@ -156,7 +157,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' 'b'* 'c';";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aacabc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false);
String expecting =
"line 1:1 extraneous input 'a' expecting {'b', 'c'}\n";
String result = stderrDuringParse;
@ -167,7 +168,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' 'b'* 'c' ;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ababbc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false);
String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n";
String result = stderrDuringParse;
assertEquals(expecting, result);
@ -177,7 +178,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' 'b'* 'c' ;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abaaababc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false);
String expecting =
"line 1:2 extraneous input 'a' expecting {'b', 'c'}\n" +
"line 1:6 extraneous input 'a' expecting {'b', 'c'}\n";
@ -191,7 +192,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' ('b'|'z'{;})*;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aabc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false);
String expecting = "line 1:1 extraneous input 'a' expecting {<EOF>, 'b', 'z'}\n" +
"line 1:3 token recognition error at: 'c'\n";
String result = stderrDuringParse;
@ -203,7 +204,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' ('b'|'z'{;})* 'c';";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aacabc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false);
String expecting =
"line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n";
String result = stderrDuringParse;
@ -214,7 +215,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' ('b'|'z'{;})* 'c' ;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ababbc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false);
String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n";
String result = stderrDuringParse;
assertEquals(expecting, result);
@ -224,7 +225,7 @@ public class TestParseErrors extends BaseTest {
String grammar =
"grammar T;\n" +
"a : 'a' ('b'|'z'{;})* 'c' ;";
String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abaaababc", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false);
String expecting =
"line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n" +
"line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n";
@ -248,9 +249,32 @@ public class TestParseErrors extends BaseTest {
"@init\n" +
"{ System.out.println(getExpectedTokens().toString(tokenNames)); }\n" +
" : ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "start", "dog and software", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "dog and software", false);
String expecting = "{'hardware', 'software'}\n";
assertEquals(expecting, result);
}

/**
* This is a regression test for #45 "NullPointerException in ATNConfig.hashCode".
* https://github.com/antlr/antlr4/issues/45
*
* The original cause of this issue was an error in the tool's ATN state optimization,
* which is now detected early in {@link ATNSerializer} by ensuring that all
* serialized transitions point to states which were not removed.
*/
@Test
public void testInvalidATNStateRemoval() throws Exception {
String grammar =
"grammar T;\n" +
"start : ID ':' expr;\n" +
"expr : primary expr? {} | expr '->' ID;\n" +
"primary : ID;\n" +
"ID : [a-z]+;\n" +
"\n";
String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x:x", true);
String expecting = "";
assertEquals(expecting, result);
assertNull(this.stderrDuringParse);
}

}

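Note: the error-message expectations above are compared against stderrDuringParse, which the test harness fills by capturing System.err while the generated parser runs. A minimal, self-contained sketch of that capture pattern (a hypothetical helper for illustration, not the BaseTest code itself):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public final class StderrCapture {
    /** Runs the given action while System.err is redirected; returns whatever it printed. */
    public static String capture(Runnable action) {
        PrintStream saved = System.err;
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        System.setErr(new PrintStream(buf, true));
        try {
            action.run();
        }
        finally {
            System.setErr(saved); // always restore the real stream
        }
        return buf.toString();
    }
}

For example, capture(() -> System.err.println("line 1:2 no viable alternative at input 'abe'")) yields exactly the kind of string these tests assert against.
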
@ -11,7 +11,7 @@ public class TestParseTrees extends BaseTest {
"@after {System.out.println($r.ctx.toStringTree(this));}\n" +
" :r=a ;\n" +
"a : 'x' {System.out.println(getRuleInvocationStack());} ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "x", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false);
String expecting = "[a, s]\n(a x)\n";
assertEquals(expecting, result);
}
@ -25,7 +25,7 @@ public class TestParseTrees extends BaseTest {
" :r=a ;\n" +
"a : 'x' 'y'\n" +
" ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "xy", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xy", false);
String expecting = "(a x y)\n";
assertEquals(expecting, result);
}
@ -39,7 +39,7 @@ public class TestParseTrees extends BaseTest {
" :r=a ;\n" +
"a : 'x' | 'y'\n" +
" ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "y", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y", false);
String expecting = "(a y)\n";
assertEquals(expecting, result);
}
@ -53,7 +53,7 @@ public class TestParseTrees extends BaseTest {
" :r=a ;\n" +
"a : ('x' | 'y')* 'z'\n" +
" ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "xyyxyxz", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xyyxyxz", false);
String expecting = "(a x y y x y x z)\n";
assertEquals(expecting, result);
}
@ -68,7 +68,7 @@ public class TestParseTrees extends BaseTest {
"a : b 'x'\n" +
" ;\n" +
"b : 'y' ;\n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "yx", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "yx", false);
String expecting = "(a (b y) x)\n";
assertEquals(expecting, result);
}
@ -85,7 +85,7 @@ public class TestParseTrees extends BaseTest {
"a : 'x' 'y'\n" +
" ;\n" +
"Z : 'z'; \n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "xzy", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzy", false);
String expecting = "(a x z y)\n"; // ERRORs not shown. z is colored red in tree view
assertEquals(expecting, result);
}
@ -100,7 +100,7 @@ public class TestParseTrees extends BaseTest {
"a : 'x' | 'y'\n" +
" ;\n" +
"Z : 'z'; \n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "z", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "z", false);
String expecting = "(a z)\n";
assertEquals(expecting, result);
}
@ -115,7 +115,7 @@ public class TestParseTrees extends BaseTest {
"a : 'x' 'y'* '!'\n" +
" ;\n" +
"Z : 'z'; \n";
String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "xzyy!", false);
String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzyy!", false);
String expecting = "(a x z y y !)\n";
assertEquals(expecting, result);
}

|
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"abc 34", false);
assertEquals("", found);
assertEquals(null, stderrDuringParse);
@ -52,12 +52,12 @@ public class TestParserExec extends BaseTest {
@Test public void testBasic() throws Exception {
String grammar =
"grammar T;\n" +
"a : ID INT {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : ID INT {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"abc 34", false);
assertEquals("abc34\n", found);
}
@ -72,7 +72,7 @@ public class TestParserExec extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"34", false);
assertEquals("alt 2\n", found);
}
@ -80,11 +80,11 @@ public class TestParserExec extends BaseTest {
@Test public void testAPlus() throws Exception {
String grammar =
"grammar T;\n" +
"a : ID+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : ID+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"a b c", false);
assertEquals("abc\n", found);
}
@ -93,11 +93,11 @@ public class TestParserExec extends BaseTest {
@Test public void testAorAPlus() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|ID)+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : (ID|ID)+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"a b c", false);
assertEquals("abc\n", found);
}
@ -105,14 +105,14 @@ public class TestParserExec extends BaseTest {
@Test public void testAStar() throws Exception {
String grammar =
"grammar T;\n" +
"a : ID* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : ID* {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"", false);
assertEquals("\n", found);
found = execParser("T.g", grammar, "TParser", "TLexer", "a",
found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"a b c", false);
assertEquals("abc\n", found);
}
@ -121,14 +121,14 @@ public class TestParserExec extends BaseTest {
@Test public void testAorAStar() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|ID)* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : (ID|ID)* {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"", false);
assertEquals("\n", found);
found = execParser("T.g", grammar, "TParser", "TLexer", "a",
found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"a b c", false);
assertEquals("abc\n", found);
}
@ -136,12 +136,12 @@ public class TestParserExec extends BaseTest {
@Test public void testAorBPlus() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|INT{;})+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : (ID|INT{;})+ {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"a 34 c", false);
assertEquals("a34c\n", found);
}
@ -149,15 +149,15 @@ public class TestParserExec extends BaseTest {
@Test public void testAorBStar() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|INT{;})* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"a : (ID|INT{;})* {System.out.println(_input.getText(Interval.of(0,_input.index()-1)));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "a",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"", false);
assertEquals("\n", found);
found = execParser("T.g", grammar, "TParser", "TLexer", "a",
found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
"a 34 c", false);
assertEquals("a34c\n", found);
}
@ -175,19 +175,19 @@ public class TestParserExec extends BaseTest {
"a : 'a' s ('b' s)?;\n"
;

String found = execParser("T.g", grammar, "TParser", "TLexer", "s", "x", false);
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false);
assertEquals("", found);
assertNull(this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s", "axbx", false);
found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "axbx", false);
assertEquals("", found);
assertNull(this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s", "ax", false);
found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "ax", false);
assertEquals("", found);
assertNull(this.stderrDuringParse);

found = execParser("T.g", grammar, "TParser", "TLexer", "s", "aaxbx", false);
found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "aaxbx", false);
assertEquals("", found);
assertNull(this.stderrDuringParse);
}
@ -207,7 +207,7 @@ public class TestParserExec extends BaseTest {
"WS : (' ' | '\\t')+ -> skip;\n"
;

String found = execParser("T.g", grammar, "TParser", "TLexer", "stmt",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "stmt",
"if x if x a else b", true);
String expecting = "";
assertEquals(expecting, found);

@ -28,22 +28,48 @@

package org.antlr.v4.test;

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.*;
import org.antlr.v4.runtime.ANTLRFileStream;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.atn.ATNConfig;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.dfa.DFAState;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.ParseTreeListener;
import org.antlr.v4.runtime.tree.ParseTreeWalker;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;

import java.io.*;
import java.lang.reflect.*;
import java.net.*;
import java.util.*;
import java.util.logging.*;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;

public class TestPerformance extends BaseTest {
/** Parse all java files under this package within the JDK_SOURCE_ROOT. */
@ -52,8 +78,8 @@ public class TestPerformance extends BaseTest {
private static final boolean RECURSIVE = true;

/**
* True to use the Java grammar with expressions in the v4 left-recursive syntax (Java-LR.g). False to use
* the standard grammar (Java.g). In either case, the grammar is renamed in the temporary directory to Java.g
* True to use the Java grammar with expressions in the v4 left-recursive syntax (Java-LR.g4). False to use
* the standard grammar (Java.g4). In either case, the grammar is renamed in the temporary directory to Java.g4
* before compiling.
*/
private static final boolean USE_LR_GRAMMAR = true;
@ -395,8 +421,8 @@ public class TestPerformance extends BaseTest {
}

protected void compileJavaParser(boolean leftRecursive) throws IOException {
String grammarFileName = "Java.g";
String sourceName = leftRecursive ? "Java-LR.g" : "Java.g";
String grammarFileName = "Java.g4";
String sourceName = leftRecursive ? "Java-LR.g4" : "Java.g4";
String body = load(sourceName, null);
@SuppressWarnings({"ConstantConditions"})
List<String> extraOptions = new ArrayList<String>();
@ -407,7 +433,7 @@ public class TestPerformance extends BaseTest {
extraOptions.add("-atn");
}
String[] extraOptionsArray = extraOptions.toArray(new String[extraOptions.size()]);
boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", extraOptionsArray);
boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", true, extraOptionsArray);
assertTrue(success);
}

@ -513,7 +539,7 @@ public class TestPerformance extends BaseTest {
void parseFile(CharStream input);
}

private static class DescriptiveErrorListener implements ANTLRErrorListener<Token> {
private static class DescriptiveErrorListener extends BaseErrorListener<Token> {
public static DescriptiveErrorListener INSTANCE = new DescriptiveErrorListener();

@Override

@ -11,7 +11,7 @@ public class TestSemPredEvalLexer extends BaseTest {
"E2 : {true}? 'enum' ;\n" + // winner not E1 or ID
"ID : 'a'..'z'+ ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g", grammar, "L", "enum abc", true);
String found = execLexer("L.g4", grammar, "L", "enum abc", true);
String expecting =
"[@0,0:3='enum',<4>,1:0]\n" +
"[@1,5:7='abc',<5>,1:5]\n" +
@ -26,7 +26,7 @@ public class TestSemPredEvalLexer extends BaseTest {
"E2 : 'enum' {true}? ;\n" + // winner not E1 or ID
"ID : 'a'..'z'+ ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g", grammar, "L", "enum abc enum", true);
String found = execLexer("L.g4", grammar, "L", "enum abc enum", true);
String expecting =
"[@0,0:3='enum',<4>,1:0]\n" +
"[@1,5:7='abc',<5>,1:5]\n" +
@ -50,7 +50,7 @@ public class TestSemPredEvalLexer extends BaseTest {
"B : {int n=0;} ({n<=2}? DIGIT {n++})+ ;\n" +
"fragment DIGIT : '0'..'9' ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g", grammar, "L", "1234 56", true);
String found = execLexer("L.g4", grammar, "L", "1234 56", true);
String expecting =
"[@0,0:3='enum',<4>,1:0]\n" +
"[@1,5:7='abc',<5>,1:5]\n" +

@ -45,7 +45,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"3 4 x", false);
String expecting =
"alt 2\n" +
@ -70,7 +70,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x ; y", false);
String expecting = "";
assertEquals(expecting, found);
@ -95,7 +95,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x y 3", false);
String expecting =
"alt 2\n" +
@ -120,7 +120,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x y", false);
String expecting =
"alt 1\n" +
@ -147,13 +147,16 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x; y", true);
String expecting =
"alt 1\n" +
"alt 1\n";
assertEquals(expecting, found);
assertEquals("line 1:0 reportAmbiguity d=0: ambigAlts={1..2}:[(6,1,[],up=1), (1,1,[],up=1), (6,2,[],up=1), (1,2,[],up=1), (6,3,[],{1:0}?,up=1), (1,3,[],{1:0}?,up=1)],hasSemanticContext=true,conflictingAlts={1..3},dipsIntoOuterContext, input='x'\n",
assertEquals("line 1:0 reportAttemptingFullContext d=0, input='x'\n" +
"line 1:0 reportAmbiguity d=0: ambigAlts={1..2}, input='x'\n" +
"line 1:3 reportAttemptingFullContext d=0, input='y'\n" +
"line 1:3 reportAmbiguity d=0: ambigAlts={1..2}, input='y'\n",
this.stderrDuringParse);
}

@ -177,14 +180,17 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"34; x; y", true);
String expecting =
"alt 1\n" +
"alt 2\n" +
"alt 2\n";
assertEquals(expecting, found);
assertEquals("line 1:4 reportAmbiguity d=0: ambigAlts={2..3}:[(6,2,[],up=1), (10,2,[],up=1), (1,2,[],up=1), (6,3,[],up=1), (10,3,[],up=1), (1,3,[],up=1), (6,4,[],{1:0}?,up=1), (10,4,[],{1:0}?,up=1), (1,4,[],{1:0}?,up=1)],hasSemanticContext=true,conflictingAlts={2..4},dipsIntoOuterContext, input='x'\n",
assertEquals("line 1:4 reportAttemptingFullContext d=0, input='x'\n" +
"line 1:4 reportAmbiguity d=0: ambigAlts={2..3}, input='x'\n" +
"line 1:7 reportAttemptingFullContext d=0, input='y'\n" +
"line 1:7 reportAmbiguity d=0: ambigAlts={2..3}, input='y'\n",
this.stderrDuringParse);
}

@ -202,7 +208,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"y 3 x 4", false);
String expecting =
"alt 2\n" +
@ -223,7 +229,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

execParser("T.g", grammar, "TParser", "TLexer", "s",
execParser("T.g4", grammar, "TParser", "TLexer", "s",
"y 3 x 4", false);
String expecting = "line 1:0 no viable alternative at input 'y'\n";
String result = stderrDuringParse;
@ -241,7 +247,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x x y", false);
String expecting =
"alt 2\n" +
@ -266,7 +272,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x 4", false);
String expecting =
"alt 1\n";
@ -289,7 +295,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x x y", false);
String expecting =
"alt 1\n" +
@ -315,7 +321,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"x x y", false);
String expecting =
"i=1\n" +
@ -346,7 +352,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a b", false);
String expecting =
"alt 2\n" +
@ -376,7 +382,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a b", false);
String expecting =
"";
@ -397,7 +403,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a;", false);
String expecting =
"alt 2\n";
@ -417,7 +423,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a;", false);
String expecting =
"alt 2\n";
@ -441,7 +447,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a!", false);
String expecting =
"eval=true\n" + // now we are parsing
@ -467,7 +473,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a!", false);
String expecting =
"eval=true\n" +
@ -494,7 +500,7 @@ public class TestSemPredEvalParser extends BaseTest {
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s",
"a!", false);
String expecting =
"eval=true\n" +

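The two ambiguity tests above swap verbose ATN-configuration dumps for compact one-line diagnostics. A sketch of a formatter producing that message shape (an illustration of the format only; the real strings come from the runtime's diagnostic error listener):

final class DiagnosticFormat {
    // e.g. "line 1:0 reportAmbiguity d=0: ambigAlts={1..2}, input='x'\n"
    static String ambiguity(int line, int col, int decision, String ambigAlts, String input) {
        return "line " + line + ":" + col
            + " reportAmbiguity d=" + decision
            + ": ambigAlts=" + ambigAlts + ", input='" + input + "'\n";
    }
    // e.g. "line 1:0 reportAttemptingFullContext d=0, input='x'\n"
    static String attemptingFullContext(int line, int col, int decision, String input) {
        return "line " + line + ":" + col
            + " reportAttemptingFullContext d=" + decision
            + ", input='" + input + "'\n";
    }
}

DiagnosticFormat.ambiguity(1, 0, 0, "{1..2}", "x") reproduces the first expected line asserted against stderrDuringParse above.
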
@ -46,7 +46,7 @@ public class TestSets extends BaseTest {
"fragment A : '1' | '2';\n" +
"fragment B : '3' '4';\n" +
"C : A | B;\n";
String found = execParser("P.g", grammar, "PParser", "PLexer",
String found = execParser("P.g4", grammar, "PParser", "PLexer",
"a", "34", debug);
assertEquals("34\n", found);
}
@ -55,7 +55,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : t=('x'|'y') {System.out.println($t.text);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -64,7 +64,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "zz", debug);
assertEquals("z\n", found);
}
@ -73,7 +73,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : ~'x' 'z' {System.out.println(_input);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "zz", debug);
assertEquals("zz\n", found);
}
@ -82,7 +82,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : t=~'x' 'z' {System.out.println($t.text);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "zz", debug);
assertEquals("z\n", found);
}
@ -91,7 +91,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a @after {System.out.println(_input);} : 'a' | 'b' |'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "b", debug);
assertEquals("b\n", found);
}
@ -101,7 +101,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println($A.text);} ;\n" +
"A : ~'b' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -111,7 +111,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A? 'c' {System.out.println(_input);} ;\n" +
"A : 'b' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bc", debug);
assertEquals("bc\n", found);
}
@ -121,7 +121,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" +
"A : 'b'? 'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bc", debug);
assertEquals("bc\n", found);
}
@ -131,10 +131,10 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" +
"A : 'b'* 'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bbbbc", debug);
assertEquals("bbbbc\n", found);
found = execParser("T.g", grammar, "TParser", "TLexer",
found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "c", debug);
assertEquals("c\n", found);
}
@ -144,7 +144,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" +
"A : 'b'+ 'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bbbbc", debug);
assertEquals("bbbbc\n", found);
}
@ -153,7 +153,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : ('a'|'b')? 'c' {System.out.println(_input);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "ac", debug);
assertEquals("ac\n", found);
}
@ -162,7 +162,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : ('a'|'b')* 'c' {System.out.println(_input);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug);
assertEquals("abaac\n", found);
}
@ -171,7 +171,7 @@ public class TestSets extends BaseTest {
String grammar =
"grammar T;\n" +
"a : ('a'|'b')+ 'c' {System.out.println(_input);} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug);
assertEquals("abaac\n", found);
}
@ -181,7 +181,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" +
"A : ('a'|'b')? 'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "ac", debug);
assertEquals("ac\n", found);
}
@ -191,7 +191,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" +
"A : ('a'|'b')* 'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug);
assertEquals("abaac\n", found);
}
@ -201,7 +201,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" +
"A : ('a'|'b')+ 'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug);
assertEquals("abaac\n", found);
}
@ -211,7 +211,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println($A.text);} ;\n" +
"A : ~('b'|'c') ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -221,7 +221,7 @@ public class TestSets extends BaseTest {
"grammar T;\n" +
"a : A {System.out.println($A.text);} ;\n" +
"A : h=~('b'|'c') ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -232,7 +232,7 @@ public class TestSets extends BaseTest {
"a : A {System.out.println($A.text);} ;\n" +
"A : ~('a'|B) ;\n" +
"B : 'b' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -243,7 +243,7 @@ public class TestSets extends BaseTest {
"a : A {System.out.println($A.text);} ;\n" +
"A : ~('a'|B) ;\n" +
"B : 'b'|'c' ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -255,7 +255,7 @@ public class TestSets extends BaseTest {
"A : ('a'|B) ;\n" +
"fragment\n" +
"B : ~('a'|'c') ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -269,7 +269,7 @@ public class TestSets extends BaseTest {
"B : ~('a'|C) ;\n" +
"fragment\n" +
"C : 'c'|'d' ;\n ";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "x", debug);
assertEquals("x\n", found);
}
@ -280,7 +280,7 @@ public class TestSets extends BaseTest {
"a : (A {System.out.println($A.text);})+ ;\n" +
"A : [AaBb] ;\n" +
"WS : (' '|'\\n')+ {skip();} ;\n";
String found = execParser("T.g", grammar, "TParser", "TLexer",
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "A a B b", debug);
assertEquals("A\n" +
"a\n" +

@ -20,20 +20,21 @@ public class TestSymbolIssues extends BaseTest {
"\n" +
"ID : 'a'..'z'+ ID ;",
// YIELDS
"warning(51): A.g:2:10: illegal option opt\n" +
"error(60): A.g:7:1: redefinition of header action\n" +
"warning(51): A.g:2:10: illegal option opt\n" +
"error(19): A.g:11:0: rule a redefinition\n" +
"error(60): A.g:5:1: redefinition of members action\n" +
"error(47): A.g:9:37: rule b has no defined parameters\n" +
"error(24): A.g:9:43: reference to undefined rule: q\n" +
"error(46): A.g:10:31: missing parameter(s) on rule reference: a\n"
"warning(83): A.g4:2:10: illegal option opt\n" +
"warning(83): A.g4:2:21: illegal option k\n" +
"error(94): A.g4:7:1: redefinition of header action\n" +
"warning(51): A.g4:2:10: illegal option opt\n" +
"error(19): A.g4:11:0: rule a redefinition\n" +
"error(60): A.g4:5:1: redefinition of members action\n" +
"error(47): A.g4:9:37: rule b has no defined parameters\n" +
"error(24): A.g4:9:43: reference to undefined rule: q\n" +
"error(46): A.g4:10:31: missing parameter(s) on rule reference: a\n"
};

static String[] B = {
// INPUT
"parser grammar B;\n" +
"tokens { X='x'; Y; }\n" +
"tokens { ID; FOO; X='x'; Y; }\n" +
"\n" +
"a : s=ID b+=ID X=ID '.' ;\n" +
"\n" +
@ -41,16 +42,18 @@ public class TestSymbolIssues extends BaseTest {
"\n" +
"s : FOO ;",
// YIELDS
"error(26): B.g:2:9: can't assign string value to token name X in non-combined grammar\n" +
"error(36): B.g:4:4: label s conflicts with rule with same name\n" +
"error(36): B.g:4:9: label b conflicts with rule with same name\n" +
"error(37): B.g:4:15: label X conflicts with token with same name\n" +
"error(42): B.g:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n"
"error(59): B.g4:2:18: can't assign string value to token name X in non-combined grammar\n" +
"error(69): B.g4:4:4: label s conflicts with rule with same name\n" +
"error(69): B.g4:4:9: label b conflicts with rule with same name\n" +
"error(70): B.g4:4:15: label X conflicts with token with same name\n" +
"error(75): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n" +
"error(126): B.g4:4:20: cannot create implicit token for string literal '.' in non-combined grammar\n"
};

static String[] D = {
// INPUT
"parser grammar D;\n" +
"tokens{ID;}\n" +
"a[int j] \n" +
" : i=ID j=ID ;\n" +
"\n" +
@ -60,8 +63,8 @@ public class TestSymbolIssues extends BaseTest {
" : ID ;",

// YIELDS
"error(39): D.g:3:21: label j conflicts with rule a's return value or parameter with same name\n" +
"error(43): D.g:5:0: rule b's argument i conflicts a return value with same name\n"
"error(72): D.g4:4:21: label j conflicts with rule a's return value or parameter with same name\n" +
"error(76): D.g4:6:0: rule b's argument i conflicts a return value with same name\n"
};

static String[] E = {
@ -77,10 +80,10 @@ public class TestSymbolIssues extends BaseTest {
"a : A ;\n",

// YIELDS
"error(74): E.g:4:8: cannot redefine B; token name already defined\n" +
"error(74): E.g:5:4: cannot redefine C; token name already defined\n" +
"error(74): E.g:6:8: cannot redefine D; token name already defined\n" +
"error(73): E.g:7:8: cannot alias X='e'; string already assigned to E\n"
"error(108): E.g4:4:8: cannot redefine B; token name already defined\n" +
"error(108): E.g4:5:4: cannot redefine C; token name already defined\n" +
"error(108): E.g4:6:8: cannot redefine D; token name already defined\n" +
"error(107): E.g4:7:8: cannot alias X='e'; string already assigned to E\n"
};

@Test public void testA() { super.testErrors(A, false); }

@ -1,806 +0,0 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;

import org.antlr.v4.runtime.TokenRewriteStream;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;

public class TestTokenRewriteStream extends BaseTest {

/** Public default constructor used by TestRig */
public TestTokenRewriteStream() {
}

@Test public void testInsertBeforeIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
LexerInterpreter lexInterp = new LexerInterpreter(g, "abc");
TokenRewriteStream tokens = new TokenRewriteStream(lexInterp);
tokens.insertBefore(0, "0");
String result = tokens.toString();
String expecting = "0abc";
assertEquals(expecting, result);
}

@Test public void testInsertAfterLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.insertAfter(2, "x");
String result = tokens.toString();
String expecting = "abcx";
assertEquals(expecting, result);
}

@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "x");
tokens.insertAfter(1, "x");
String result = tokens.toString();
String expecting = "axbxc";
assertEquals(expecting, result);
}

@Test public void testReplaceIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, "x");
String result = tokens.toString();
String expecting = "xbc";
assertEquals(expecting, result);
}

@Test public void testReplaceLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, "x");
String result = tokens.toString();
String expecting = "abx";
assertEquals(expecting, result);
}

@Test public void testReplaceMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, "x");
String result = tokens.toString();
String expecting = "axc";
assertEquals(expecting, result);
}

@Test public void testToStringStartStop() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"MUL : '*';\n" +
"ASSIGN : '=';\n" +
"WS : ' '+;\n");
// Tokens: 0123456789
// Input: x = 3 * 0;
String input = "x = 3 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(4, 8, "0"); // replace 3 * 0 with 0

String result = tokens.toOriginalString();
String expecting = "x = 3 * 0;";
assertEquals(expecting, result);

result = tokens.toString();
expecting = "x = 0;";
assertEquals(expecting, result);

result = tokens.toString(0,9);
expecting = "x = 0;";
assertEquals(expecting, result);

result = tokens.toString(4,8);
expecting = "0";
assertEquals(expecting, result);
}

@Test public void testToStringStartStop2() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
String input = "x = 3 * 0 + 2 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();

String result = tokens.toOriginalString();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);

tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
result = tokens.toString();
expecting = "x = 0 + 2 * 0;";
assertEquals(expecting, result);

result = tokens.toString(0,17);
expecting = "x = 0 + 2 * 0;";
assertEquals(expecting, result);

result = tokens.toString(4,8);
expecting = "0";
assertEquals(expecting, result);

result = tokens.toString(0,8);
expecting = "x = 0";
assertEquals(expecting, result);

result = tokens.toString(12,16);
expecting = "2 * 0";
assertEquals(expecting, result);

tokens.insertAfter(17, "// comment");
result = tokens.toString(12,18);
expecting = "2 * 0;// comment";
assertEquals(expecting, result);

result = tokens.toString(0,8); // try again after insert at end
expecting = "x = 0";
assertEquals(expecting, result);
}


@Test public void test2ReplaceMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, "x");
tokens.replace(1, "y");
String result = tokens.toString();
String expecting = "ayc";
assertEquals(expecting, result);
}

@Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "_");
tokens.replace(1, "x");
tokens.replace(1, "y");
String result = tokens.toString();
String expecting = "_ayc";
assertEquals(expecting, result);
}

@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, "x");
tokens.delete(1);
String result = tokens.toString();
String expecting = "ac";
assertEquals(expecting, result);
}

@Test public void testInsertInPriorReplace() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, 2, "x");
tokens.insertBefore(1, "0");
Exception exc = null;
try {
tokens.toString();
}
catch (IllegalArgumentException iae) {
exc = iae;
}
String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">";
assertNotNull(exc);
assertEquals(expecting, exc.getMessage());
}

@Test public void testInsertThenReplaceSameIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "0");
tokens.replace(0, "x"); // supersedes insert at 0
|
||||
String result = tokens.toString();
|
||||
String expecting = "0xbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void test2InsertMiddleIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.insertBefore(1, "x");
|
||||
tokens.insertBefore(1, "y");
|
||||
String result = tokens.toString();
|
||||
String expecting = "ayxbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void test2InsertThenReplaceIndex0() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.insertBefore(0, "x");
|
||||
tokens.insertBefore(0, "y");
|
||||
tokens.replace(0, "z");
|
||||
String result = tokens.toString();
|
||||
String expecting = "yxzbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.replace(2, "x");
|
||||
tokens.insertBefore(2, "y");
|
||||
String result = tokens.toString();
|
||||
String expecting = "abyx";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testInsertThenReplaceLastIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.insertBefore(2, "y");
|
||||
tokens.replace(2, "x");
|
||||
String result = tokens.toString();
|
||||
String expecting = "abyx";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.replace(2, "x");
|
||||
tokens.insertAfter(2, "y");
|
||||
String result = tokens.toString();
|
||||
String expecting = "abxy";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.replace(2, 4, "x");
|
||||
tokens.insertBefore(2, "y");
|
||||
String result = tokens.toString();
|
||||
String expecting = "abyxba";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
|
||||
tokens.fill();
|
||||
tokens.replace(2, 4, "x");
|
||||
tokens.insertBefore(4, "y"); // no effect; within range of a replace
|
||||
Exception exc = null;
|
||||
try {
|
||||
tokens.toString();
|
||||
}
|
||||
catch (IllegalArgumentException iae) {
|
||||
exc = iae;
|
||||
}
|
||||
String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">";
|
||||
assertNotNull(exc);
|
||||
assertEquals(expecting, exc.getMessage());
|
||||
}
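
    // Boundary rules exercised by the surrounding edge-case tests: an insertBefore
    // at the left edge of a replaced range survives and lands before the replacement
    // text, an insertAfter at the right edge lands after it, and an insert strictly
    // inside a replaced range is rejected with an IllegalArgumentException when
    // toString() finally applies the ops.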

    @Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(2, 4, "x");
        tokens.insertAfter(4, "y");
        String result = tokens.toString();
        String expecting = "abxyba";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceAll() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(0, 6, "x");
        String result = tokens.toString();
        String expecting = "x";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceSubsetThenFetch() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(2, 4, "xyz");
        String result = tokens.toString(0,6);
        String expecting = "abxyzba";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceThenReplaceSuperset() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(2, 4, "xyz");
        tokens.replace(3, 5, "foo"); // overlaps, error
        Exception exc = null;
        try {
            tokens.toString();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(2, 4, "xyz");
        tokens.replace(1, 3, "foo"); // overlap, error
        Exception exc = null;
        try {
            tokens.toString();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(2, 2, "xyz");
        tokens.replace(0, 3, "foo");
        String result = tokens.toString();
        String expecting = "fooa";
        assertEquals(expecting, result);
    }

    // June 2, 2008 I rewrote core of rewrite engine; just adding lots more tests here

    @Test public void testCombineInserts() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.insertBefore(0, "x");
        tokens.insertBefore(0, "y");
        String result = tokens.toString();
        String expecting = "yxabc";
        assertEquals(expecting, result);
    }

    @Test public void testCombine3Inserts() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.insertBefore(1, "x");
        tokens.insertBefore(0, "y");
        tokens.insertBefore(1, "z");
        String result = tokens.toString();
        String expecting = "yazxbc";
        assertEquals(expecting, result);
    }

    @Test public void testCombineInsertOnLeftWithReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(0, 2, "foo");
        tokens.insertBefore(0, "z"); // combine with left edge of rewrite
        String result = tokens.toString();
        String expecting = "zfoo";
        assertEquals(expecting, result);
    }

    @Test public void testCombineInsertOnLeftWithDelete() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.delete(0, 2);
        tokens.insertBefore(0, "z"); // combine with left edge of rewrite
        String result = tokens.toString();
        String expecting = "z"; // make sure combo is not znull
        assertEquals(expecting, result);
    }

    @Test public void testDisjointInserts() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.insertBefore(1, "x");
        tokens.insertBefore(2, "y");
        tokens.insertBefore(0, "z");
        String result = tokens.toString();
        String expecting = "zaxbyc";
        assertEquals(expecting, result);
    }

    @Test public void testOverlappingReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(1, 2, "foo");
        tokens.replace(0, 3, "bar"); // wipes prior nested replace
        String result = tokens.toString();
        String expecting = "bar";
        assertEquals(expecting, result);
    }

    @Test public void testOverlappingReplace2() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(0, 3, "bar");
        tokens.replace(1, 2, "foo"); // cannot split earlier replace
        Exception exc = null;
        try {
            tokens.toString();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testOverlappingReplace3() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(1, 2, "foo");
        tokens.replace(0, 2, "bar"); // wipes prior nested replace
        String result = tokens.toString();
        String expecting = "barc";
        assertEquals(expecting, result);
    }

    @Test public void testOverlappingReplace4() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(1, 2, "foo");
        tokens.replace(1, 3, "bar"); // wipes prior nested replace
        String result = tokens.toString();
        String expecting = "abar";
        assertEquals(expecting, result);
    }

    @Test public void testDropIdenticalReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(1, 2, "foo");
        tokens.replace(1, 2, "foo"); // drop previous, identical
        String result = tokens.toString();
        String expecting = "afooc";
        assertEquals(expecting, result);
    }

    @Test public void testDropPrevCoveredInsert() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.insertBefore(1, "foo");
        tokens.replace(1, 2, "foo"); // kill prev insert
        String result = tokens.toString();
        String expecting = "afoofoo";
        assertEquals(expecting, result);
    }

    @Test public void testLeaveAloneDisjointInsert() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.insertBefore(1, "x");
        tokens.replace(2, 3, "foo");
        String result = tokens.toString();
        String expecting = "axbfoo";
        assertEquals(expecting, result);
    }

    @Test public void testLeaveAloneDisjointInsert2() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.replace(2, 3, "foo");
        tokens.insertBefore(1, "x");
        String result = tokens.toString();
        String expecting = "axbfoo";
        assertEquals(expecting, result);
    }

    @Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
        tokens.fill();
        tokens.insertBefore(2, "y");
        tokens.delete(2);
        String result = tokens.toString();
        String expecting = "aby";
        assertEquals(expecting, result);
    }
}
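
For reference, the v3-style rewrite idiom the suite above exercises looks like this in
application code (a minimal sketch built only from calls that appear in the tests; the
lexer variable stands in for any token source):

    TokenRewriteStream tokens = new TokenRewriteStream(lexer);
    tokens.fill();                         // buffer every token up front
    tokens.insertBefore(0, "x");           // ops are queued, not applied immediately
    tokens.replace(2, "y");                // later ops may combine with or drop earlier ones
    String rewritten = tokens.toString();  // ops are applied lazily, here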

@@ -0,0 +1,879 @@
/*
 [The "BSD license"]
 Copyright (c) 2011 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;

import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.TokenStreamRewriter;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;

public class TestTokenStreamRewriter extends BaseTest {

    /** Public default constructor used by TestRig */
    public TestTokenStreamRewriter() {
    }

    @Test public void testInsertBeforeIndex0() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        LexerInterpreter lexEngine = new LexerInterpreter(g, "abc");
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(0, "0");
        String result = tokens.getText();
        String expecting = "0abc";
        assertEquals(expecting, result);
    }

    @Test public void testInsertAfterLastIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertAfter(2, "x");
        String result = tokens.getText();
        String expecting = "abcx";
        assertEquals(expecting, result);
    }

    @Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(1, "x");
        tokens.insertAfter(1, "x");
        String result = tokens.getText();
        String expecting = "axbxc";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceIndex0() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(0, "x");
        String result = tokens.getText();
        String expecting = "xbc";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceLastIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, "x");
        String result = tokens.getText();
        String expecting = "abx";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceMiddleIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, "x");
        String result = tokens.getText();
        String expecting = "axc";
        assertEquals(expecting, result);
    }

    @Test public void testToStringStartStop() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "ID : 'a'..'z'+;\n" +
            "INT : '0'..'9'+;\n" +
            "SEMI : ';';\n" +
            "MUL : '*';\n" +
            "ASSIGN : '=';\n" +
            "WS : ' '+;\n");
        // Tokens: 0123456789
        // Input:  x = 3 * 0;
        String input = "x = 3 * 0;";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(4, 8, "0");
        stream.fill();
        // replace 3 * 0 with 0

        String result = tokens.getTokenStream().getText();
        String expecting = "x = 3 * 0;";
        assertEquals(expecting, result);

        result = tokens.getText();
        expecting = "x = 0;";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(0, 9));
        expecting = "x = 0;";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(4, 8));
        expecting = "0";
        assertEquals(expecting, result);
    }

    @Test public void testToStringStartStop2() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "ID : 'a'..'z'+;\n" +
            "INT : '0'..'9'+;\n" +
            "SEMI : ';';\n" +
            "ASSIGN : '=';\n" +
            "PLUS : '+';\n" +
            "MULT : '*';\n" +
            "WS : ' '+;\n");
        // Tokens: 012345678901234567
        // Input:  x = 3 * 0 + 2 * 0;
        String input = "x = 3 * 0 + 2 * 0;";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);

        String result = tokens.getTokenStream().getText();
        String expecting = "x = 3 * 0 + 2 * 0;";
        assertEquals(expecting, result);

        tokens.replace(4, 8, "0");
        stream.fill();
        // replace 3 * 0 with 0
        result = tokens.getText();
        expecting = "x = 0 + 2 * 0;";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(0, 17));
        expecting = "x = 0 + 2 * 0;";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(4, 8));
        expecting = "0";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(0, 8));
        expecting = "x = 0";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(12, 16));
        expecting = "2 * 0";
        assertEquals(expecting, result);

        tokens.insertAfter(17, "// comment");
        result = tokens.getText(Interval.of(12, 18));
        expecting = "2 * 0;// comment";
        assertEquals(expecting, result);

        result = tokens.getText(Interval.of(0, 8));
        stream.fill();
        // try again after insert at end
        expecting = "x = 0";
        assertEquals(expecting, result);
    }
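
    // The three getters exercised above define the rewriter contract:
    // tokens.getTokenStream().getText() always returns the original input,
    // tokens.getText() renders the input with all queued ops applied, and
    // tokens.getText(Interval.of(a, b)) renders only tokens a..b, ops applied.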

    @Test public void test2ReplaceMiddleIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, "x");
        tokens.replace(1, "y");
        String result = tokens.getText();
        String expecting = "ayc";
        assertEquals(expecting, result);
    }

    @Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(0, "_");
        tokens.replace(1, "x");
        tokens.replace(1, "y");
        String result = tokens.getText();
        String expecting = "_ayc";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, "x");
        tokens.delete(1);
        String result = tokens.getText();
        String expecting = "ac";
        assertEquals(expecting, result);
    }

    @Test public void testInsertInPriorReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(0, 2, "x");
        tokens.insertBefore(1, "0");
        Exception exc = null;
        try {
            tokens.getText();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testInsertThenReplaceSameIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(0, "0");
        tokens.replace(0, "x");
        stream.fill();
        // supercedes insert at 0
        String result = tokens.getText();
        String expecting = "0xbc";
        assertEquals(expecting, result);
    }

    @Test public void test2InsertMiddleIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(1, "x");
        tokens.insertBefore(1, "y");
        String result = tokens.getText();
        String expecting = "ayxbc";
        assertEquals(expecting, result);
    }

    @Test public void test2InsertThenReplaceIndex0() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(0, "x");
        tokens.insertBefore(0, "y");
        tokens.replace(0, "z");
        String result = tokens.getText();
        String expecting = "yxzbc";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, "x");
        tokens.insertBefore(2, "y");
        String result = tokens.getText();
        String expecting = "abyx";
        assertEquals(expecting, result);
    }

    @Test public void testInsertThenReplaceLastIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(2, "y");
        tokens.replace(2, "x");
        String result = tokens.getText();
        String expecting = "abyx";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, "x");
        tokens.insertAfter(2, "y");
        String result = tokens.getText();
        String expecting = "abxy";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 4, "x");
        tokens.insertBefore(2, "y");
        String result = tokens.getText();
        String expecting = "abyxba";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 4, "x");
        tokens.insertBefore(4, "y");
        stream.fill(); // no effect; within range of a replace
        Exception exc = null;
        try {
            tokens.getText();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 4, "x");
        tokens.insertAfter(4, "y");
        String result = tokens.getText();
        String expecting = "abxyba";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceAll() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(0, 6, "x");
        String result = tokens.getText();
        String expecting = "x";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceSubsetThenFetch() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 4, "xyz");
        String result = tokens.getText(Interval.of(0, 6));
        String expecting = "abxyzba";
        assertEquals(expecting, result);
    }

    @Test public void testReplaceThenReplaceSuperset() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 4, "xyz");
        tokens.replace(3, 5, "foo");
        stream.fill();
        // overlaps, error
        Exception exc = null;
        try {
            tokens.getText();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }
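
    // Overlap rules exercised by the replace tests around here: a later replace
    // that fully covers an earlier one wipes it, a partial overlap raises
    // IllegalArgumentException at render time, and an identical duplicate replace
    // is silently dropped.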

    @Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcccba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 4, "xyz");
        tokens.replace(1, 3, "foo");
        stream.fill();
        // overlap, error
        Exception exc = null;
        try {
            tokens.getText();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcba";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 2, "xyz");
        tokens.replace(0, 3, "foo");
        String result = tokens.getText();
        String expecting = "fooa";
        assertEquals(expecting, result);
    }

    @Test public void testCombineInserts() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(0, "x");
        tokens.insertBefore(0, "y");
        String result = tokens.getText();
        String expecting = "yxabc";
        assertEquals(expecting, result);
    }

    @Test public void testCombine3Inserts() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(1, "x");
        tokens.insertBefore(0, "y");
        tokens.insertBefore(1, "z");
        String result = tokens.getText();
        String expecting = "yazxbc";
        assertEquals(expecting, result);
    }

    @Test public void testCombineInsertOnLeftWithReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(0, 2, "foo");
        tokens.insertBefore(0, "z");
        stream.fill();
        // combine with left edge of rewrite
        String result = tokens.getText();
        String expecting = "zfoo";
        assertEquals(expecting, result);
    }

    @Test public void testCombineInsertOnLeftWithDelete() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.delete(0, 2);
        tokens.insertBefore(0, "z");
        stream.fill();
        // combine with left edge of rewrite
        String result = tokens.getText();
        String expecting = "z";
        stream.fill();
        // make sure combo is not znull
        assertEquals(expecting, result);
    }

    @Test public void testDisjointInserts() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(1, "x");
        tokens.insertBefore(2, "y");
        tokens.insertBefore(0, "z");
        String result = tokens.getText();
        String expecting = "zaxbyc";
        assertEquals(expecting, result);
    }

    @Test public void testOverlappingReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, 2, "foo");
        tokens.replace(0, 3, "bar");
        stream.fill();
        // wipes prior nested replace
        String result = tokens.getText();
        String expecting = "bar";
        assertEquals(expecting, result);
    }

    @Test public void testOverlappingReplace2() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(0, 3, "bar");
        tokens.replace(1, 2, "foo");
        stream.fill();
        // cannot split earlier replace
        Exception exc = null;
        try {
            tokens.getText();
        }
        catch (IllegalArgumentException iae) {
            exc = iae;
        }
        String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">";
        assertNotNull(exc);
        assertEquals(expecting, exc.getMessage());
    }

    @Test public void testOverlappingReplace3() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, 2, "foo");
        tokens.replace(0, 2, "bar");
        stream.fill();
        // wipes prior nested replace
        String result = tokens.getText();
        String expecting = "barc";
        assertEquals(expecting, result);
    }

    @Test public void testOverlappingReplace4() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, 2, "foo");
        tokens.replace(1, 3, "bar");
        stream.fill();
        // wipes prior nested replace
        String result = tokens.getText();
        String expecting = "abar";
        assertEquals(expecting, result);
    }

    @Test public void testDropIdenticalReplace() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(1, 2, "foo");
        tokens.replace(1, 2, "foo");
        stream.fill();
        // drop previous, identical
        String result = tokens.getText();
        String expecting = "afooc";
        assertEquals(expecting, result);
    }

    @Test public void testDropPrevCoveredInsert() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(1, "foo");
        tokens.replace(1, 2, "foo");
        stream.fill();
        // kill prev insert
        String result = tokens.getText();
        String expecting = "afoofoo";
        assertEquals(expecting, result);
    }

    @Test public void testLeaveAloneDisjointInsert() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(1, "x");
        tokens.replace(2, 3, "foo");
        String result = tokens.getText();
        String expecting = "axbfoo";
        assertEquals(expecting, result);
    }

    @Test public void testLeaveAloneDisjointInsert2() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abcc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.replace(2, 3, "foo");
        tokens.insertBefore(1, "x");
        String result = tokens.getText();
        String expecting = "axbfoo";
        assertEquals(expecting, result);
    }

    @Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
        LexerGrammar g = new LexerGrammar(
            "lexer grammar t;\n"+
            "A : 'a';\n" +
            "B : 'b';\n" +
            "C : 'c';\n");
        String input = "abc";
        LexerInterpreter lexEngine = new LexerInterpreter(g, input);
        CommonTokenStream stream = new CommonTokenStream(lexEngine);
        stream.fill();
        TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
        tokens.insertBefore(2, "y");
        tokens.delete(2);
        String result = tokens.getText();
        String expecting = "aby";
        assertEquals(expecting, result);
    }
}
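
Side by side, the two suites document the API shift in v4: TokenRewriteStream extends
the token stream itself, while TokenStreamRewriter wraps a CommonTokenStream and
renders text on demand. A minimal sketch of the v4 shape, again using only calls that
appear in the tests above (lexEngine stands in for any token source):

    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();                                        // buffer all tokens
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(4, 8, "0");                            // op over token indexes 4..8
    String original = tokens.getTokenStream().getText();  // untouched source text
    String rewritten = tokens.getText();                  // text with ops applied
    String slice = tokens.getText(Interval.of(4, 8));     // ops applied, tokens 4..8 only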

@@ -136,7 +136,7 @@ public class TestTokenTypeAssignment extends BaseTest {
 		"A : 'a' ;\n" +
 		"B : '}' ;\n"+
 		"WS : (' '|'\\n') {skip();} ;";
-	String found = execParser("P.g", grammar, "PParser", "PLexer",
+	String found = execParser("P.g4", grammar, "PParser", "PLexer",
 		    "a", "a}", false);
 	assertEquals("a}\n", found);
 }

@@ -151,7 +151,7 @@ public class TestTokenTypeAssignment extends BaseTest {
 		"A : 'a' ;\n" +
 		"B : '}' ;\n"+
 		"WS : (' '|'\\n') {skip();} ;";
-	String found = execParser("P.g", grammar, "PParser", "PLexer",
+	String found = execParser("P.g4", grammar, "PParser", "PLexer",
 		    "a", "a}", false);
 	assertEquals("a}\n", found);
 }
@@ -8,37 +8,37 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"grammar A;\n" +
 		"",
 		// YIELDS
-		"error(63): A.g::: grammar A has no rules\n",
+		"error(99): A.g4::: grammar A has no rules\n",

 		"A;",
-		"error(17): <string>:1:0: 'A' came as a complete surprise to me\n",
+		"error(50): <string>:1:0: 'A' came as a complete surprise to me\n",

 		"grammar ;",
-		"error(17): <string>:1:8: ';' came as a complete surprise to me while looking for an identifier\n",
+		"error(50): <string>:1:8: ';' came as a complete surprise to me while looking for an identifier\n",

 		"grammar A\n" +
 		"a : ID ;\n",
-		"error(17): <string>:2:0: missing SEMI at 'a'\n",
+		"error(50): <string>:2:0: missing SEMI at 'a'\n",

 		"grammar A;\n" +
 		"a : ID ;;\n"+
 		"b : B ;",
-		"error(17): A.g:2:8: ';' came as a complete surprise to me\n",
+		"error(50): A.g4:2:8: ';' came as a complete surprise to me\n",

 		"grammar A;;\n" +
 		"a : ID ;\n",
-		"error(17): A;.g:1:10: ';' came as a complete surprise to me\n",
+		"error(50): A;.g4:1:10: ';' came as a complete surprise to me\n",

 		"grammar A;\n" +
 		"a @init : ID ;\n",
-		"error(17): A.g:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n",
+		"error(50): A.g4:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n",

 		"grammar A;\n" +
 		"a ( A | B ) D ;\n" +
 		"b : B ;",
-		": A.g:2:3: '(' came as a complete surprise to me while matching rule preamble\n" +
-		": A.g:2:11: mismatched input ')' expecting SEMI while matching a rule\n" +
-		": A.g:2:15: mismatched input ';' expecting COLON while matching a lexer rule\n",
+		"error(50): A.g4:2:3: '(' came as a complete surprise to me while matching rule preamble\n" +
+		"error(50): A.g4:2:11: mismatched input ')' expecting SEMI while matching a rule\n" +
+		"error(50): A.g4:2:15: mismatched input ';' expecting COLON while matching a lexer rule\n",
 	};

 	@Test public void testA() { super.testErrors(A, true); }

@@ -48,7 +48,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"grammar A;\n" +
 		"a : : A ;\n" +
 		"b : B ;",
-		"error(17): A.g:2:4: ':' came as a complete surprise to me while matching alternative\n",
+		"error(50): A.g4:2:4: ':' came as a complete surprise to me while matching alternative\n",
 	};
 	super.testErrors(pair, true);
 }

@@ -58,7 +58,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"grammar A;\n" +
 		"a : A \n" +
 		"b : B ;",
-		"error(17): A.g:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n",
+		"error(50): A.g4:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n",
 	};
 	super.testErrors(pair, true);
 }

@@ -68,7 +68,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"lexer grammar A;\n" +
 		"A : 'a' \n" +
 		"B : 'b' ;",
-		"error(17): A.g:3:0: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n",
+		"error(50): A.g4:3:0: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n",
 	};
 	super.testErrors(pair, true);
 }

@@ -78,7 +78,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"grammar A;\n" +
 		"a : A \n" +
 		"b[int i] returns [int y] : B ;",
-		"error(17): A.g:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n"
+		"error(50): A.g4:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n"
 	};
 	super.testErrors(pair, true);
 }

@@ -90,7 +90,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"  catch [Exception e] {...}\n" +
 		"b : B ;\n",

-		"error(17): A.g:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n"
+		"error(50): A.g4:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n"
 	};
 	super.testErrors(pair, true);
 }

@@ -101,7 +101,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"a : A \n" +
 		"  catch [Exception e] {...}\n",

-		"error(17): A.g:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n"
+		"error(50): A.g4:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n"
 	};
 	super.testErrors(pair, true);
 }

@@ -112,7 +112,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"a @ options {k=1;} : A ;\n" +
 		"b : B ;",

-		"error(17): A.g:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n"
+		"error(50): A.g4:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n"
 	};
 	super.testErrors(pair, true);
 }

@@ -123,7 +123,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"a } : A ;\n" +
 		"b : B ;",

-		"error(17): A.g:2:2: '}' came as a complete surprise to me while matching rule preamble\n"
+		"error(50): A.g4:2:2: '}' came as a complete surprise to me while matching rule preamble\n"
 	};
 	super.testErrors(pair, true);
 }

@@ -135,8 +135,8 @@ public class TestToolSyntaxErrors extends BaseTest {
 		"mode foo;\n" +
 		"b : B ;",

-		": A.g:4:0: 'b' came as a complete surprise to me\n" +
-		": A.g:4:6: mismatched input ';' expecting COLON while matching a lexer rule\n"
+		"error(50): A.g4:4:0: 'b' came as a complete surprise to me\n" +
+		"error(50): A.g4:4:6: mismatched input ';' expecting COLON while matching a lexer rule\n"
 	};
 	super.testErrors(pair, true);
 }

@@ -87,12 +87,12 @@ public class TestTopologicalSort extends BaseTest {
 	@Test
 	public void testSimpleTokenDependence() throws Exception {
 		Graph g = new Graph();
-		g.addEdge("Java.g", "MyJava.tokens"); // Java feeds off manual token file
-		g.addEdge("Java.tokens", "Java.g");
-		g.addEdge("Def.g", "Java.tokens"); // walkers feed off generated tokens
-		g.addEdge("Ref.g", "Java.tokens");
+		g.addEdge("Java.g4", "MyJava.tokens"); // Java feeds off manual token file
+		g.addEdge("Java.tokens", "Java.g4");
+		g.addEdge("Def.g4", "Java.tokens"); // walkers feed off generated tokens
+		g.addEdge("Ref.g4", "Java.tokens");

-		String expecting = "[MyJava.tokens, Java.g, Java.tokens, Ref.g, Def.g]";
+		String expecting = "[MyJava.tokens, Java.g4, Java.tokens, Ref.g4, Def.g4]";
 		List nodes = g.sort();
 		String result = nodes.toString();
 		assertEquals(expecting, result);

@@ -101,12 +101,12 @@ public class TestTopologicalSort extends BaseTest {
 	@Test
 	public void testParserLexerCombo() throws Exception {
 		Graph g = new Graph();
-		g.addEdge("JavaLexer.tokens", "JavaLexer.g");
-		g.addEdge("JavaParser.g", "JavaLexer.tokens");
-		g.addEdge("Def.g", "JavaLexer.tokens");
-		g.addEdge("Ref.g", "JavaLexer.tokens");
+		g.addEdge("JavaLexer.tokens", "JavaLexer.g4");
+		g.addEdge("JavaParser.g4", "JavaLexer.tokens");
+		g.addEdge("Def.g4", "JavaLexer.tokens");
+		g.addEdge("Ref.g4", "JavaLexer.tokens");

-		String expecting = "[JavaLexer.g, JavaLexer.tokens, JavaParser.g, Ref.g, Def.g]";
+		String expecting = "[JavaLexer.g4, JavaLexer.tokens, JavaParser.g4, Ref.g4, Def.g4]";
 		List nodes = g.sort();
 		String result = nodes.toString();
 		assertEquals(expecting, result);
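
The two hunks above are part of the same .g to .g4 grammar-extension rename, and they
double as documentation of the build-ordering contract. A minimal sketch of the
dependency idiom, using only the Graph calls that appear in the tests (raw types kept
as in the original):

    Graph g = new Graph();
    g.addEdge("JavaParser.g4", "JavaLexer.tokens"); // addEdge(a, b): a depends on b
    g.addEdge("JavaLexer.tokens", "JavaLexer.g4");
    List nodes = g.sort(); // dependencies first: JavaLexer.g4, JavaLexer.tokens, JavaParser.g4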