forked from jasder/antlr

Merge branch 'master' into stack-graphs-integration

This commit is contained in: commit 08e3ddefc0

@@ -23,17 +23,17 @@
			<scope>compile</scope>
		</dependency>
		<dependency>
-			<groupId>org.abego</groupId>
-			<artifactId>treelayout.core</artifactId>
-			<version>1.0</version>
-			<scope>system</scope>
-			<systemPath>${project.basedir}/lib/org.abego.treelayout.core.jar</systemPath>
+			<groupId>org.abego.treelayout</groupId>
+			<artifactId>org.abego.treelayout.core</artifactId>
+			<version>1.0.1</version>
+			<scope>compile</scope>
		</dependency>
	</dependencies>

	<build>

		<sourceDirectory>src</sourceDirectory>
		<resources/>

		<plugins>
			<plugin>
@@ -165,7 +165,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
		// If already recovering, don't try to sync
		if ( errorRecoveryMode ) return;

-		SymbolStream<Token> tokens = recognizer.getInputStream();
+		TokenStream tokens = recognizer.getInputStream();
		int la = tokens.LA(1);

		// try cheaper subset first; might get lucky. seems to shave a wee bit off

@@ -201,7 +201,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
							   NoViableAltException e)
		throws RecognitionException
	{
-		SymbolStream<Token> tokens = recognizer.getInputStream();
+		TokenStream tokens = recognizer.getInputStream();
		String input;
		if (tokens instanceof TokenStream) {
			if ( e.startToken.getType()==Token.EOF ) input = "<EOF>";
@@ -41,10 +41,10 @@ public class NoViableAltException extends RecognitionException {
	 *  not be buffering tokens so get a reference to it. (At the
	 *  time the error occurred, of course the stream needs to keep a
	 *  buffer all of the tokens but later we might not have access to those.)
	 */
	public Token startToken;

-	public <Symbol extends Token> NoViableAltException(Parser recognizer) { // LL(1) error
+	public NoViableAltException(Parser recognizer) { // LL(1) error
		this(recognizer,recognizer.getInputStream(),
			 recognizer.getCurrentToken(),
			 recognizer.getCurrentToken(),

@@ -52,12 +52,12 @@ public class NoViableAltException extends RecognitionException {
			 recognizer._ctx);
	}

-	public <Symbol> NoViableAltException(Parser recognizer,
-										 SymbolStream<Symbol> input,
-										 Token startToken,
-										 Token offendingToken,
-										 ATNConfigSet deadEndConfigs,
-										 ParserRuleContext<?> ctx)
+	public NoViableAltException(Parser recognizer,
+								TokenStream input,
+								Token startToken,
+								Token offendingToken,
+								ATNConfigSet deadEndConfigs,
+								ParserRuleContext<?> ctx)
	{
		super(recognizer, input, ctx);
		this.deadEndConfigs = deadEndConfigs;
@@ -1,51 +0,0 @@
/*
 [The "BSD license"]
 Copyright (c) 2011 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.antlr.v4.runtime;

/** A stream of either tokens or tree nodes */
public interface SymbolStream<T> extends IntStream {
	/** Get the symbol at absolute index i; 0..n-1.
	 *  This is only valid if the underlying stream implementation buffers
	 *  all of the incoming objects.
	 *
	 *  @throws UnsupportedOperationException if the index {@code i} is outside
	 *  the marked region and the stream does not support accessing symbols by
	 *  index outside of marked regions.
	 */
	public T get(int i);

	/** Get symbol at current input pointer + {@code k} ahead where {@code k=1}
	 *  is next symbol. k<0 indicates objects in the past. So -1 is previous
	 *  Object and -2 is two Objects ago. {@code LT(0)} is undefined. For i>=n,
	 *  return an object representing EOF. Return {@code null} for {@code LT(0)}
	 *  and any index that results in an absolute index that is negative.
	 */
	T LT(int k);
}
@@ -32,7 +32,7 @@ package org.antlr.v4.runtime;
import org.antlr.v4.runtime.misc.Interval;

/** A stream of tokens accessing tokens from a TokenSource */
-public interface TokenStream extends SymbolStream<Token> {
+public interface TokenStream extends IntStream {
	/** Get Token at current input pointer + i ahead where i=1 is next Token.
	 *  i<0 indicates tokens in the past. So -1 is previous token and -2 is
	 *  two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken.

@@ -40,7 +40,6 @@ public interface TokenStream extends SymbolStream<Token> {
	 *  that is negative.
	 *  TODO (Sam): Throw exception for invalid k?
	 */
-	@Override
	public Token LT(int k);

	/** Get a token at an absolute index i; 0..n-1. This is really only

@@ -50,7 +49,6 @@ public interface TokenStream extends SymbolStream<Token> {
	 *  I believe DebugTokenStream can easily be altered to not use
	 *  this method, removing the dependency.
	 */
-	@Override
	public Token get(int i);

	/** Where is this stream pulling tokens from? This is not the name, but

@@ -67,6 +65,8 @@ public interface TokenStream extends SymbolStream<Token> {
	 */
	public String getText(Interval interval);

+	public String getText();

+	public String getText(RuleContext ctx);

	/** Because the user is not required to use a token with an index stored
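With SymbolStream deleted, `get(int)` and `LT(int)` now live directly on TokenStream, alongside the new whole-stream `getText()` overloads added above. A minimal sketch of driving the revised interface — `MyLexer` is a hypothetical generated lexer and the input literal is invented for illustration, not part of this commit:

```java
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.misc.Interval;

public class TokenStreamDemo {
	public static void main(String[] args) {
		// MyLexer is a hypothetical ANTLR-generated lexer.
		CommonTokenStream buffered = new CommonTokenStream(new MyLexer(new ANTLRInputStream("a b c")));
		buffered.fill();                      // buffer everything so get(i) is valid
		TokenStream tokens = buffered;
		Token first = tokens.get(0);          // absolute index, formerly on SymbolStream
		Token ahead = tokens.LT(1);           // lookahead, also folded into TokenStream
		String slice = tokens.getText(Interval.of(0, tokens.size() - 1));
		String all = tokens.getText();        // new no-arg overload from this diff
		System.out.println(first.getText() + " / " + ahead.getText() + " / " + slice + " / " + all);
	}
}
```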
@@ -0,0 +1,557 @@
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.Nullable;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Useful for rewriting out a buffered input token stream after doing some
 *  augmentation or other manipulations on it.
 *
 *  You can insert stuff, replace, and delete chunks. Note that the
 *  operations are done lazily--only if you convert the buffer to a
 *  String with getText(). This is very efficient because you are not moving
 *  data around all the time. As the buffer of tokens is converted to strings,
 *  the getText() method(s) scan the input token stream and check
 *  to see if there is an operation at the current index.
 *  If so, the operation is done and then normal String
 *  rendering continues on the buffer. This is like having multiple Turing
 *  machine instruction streams (programs) operating on a single input tape. :)
 *
 *  This rewriter makes no modifications to the token stream. It does not
 *  ask the stream to fill itself up nor does it advance the input cursor.
 *  The token stream index() will return the same value before and after
 *  any getText() call.
 *
 *  The rewriter only works on tokens that you have in the buffer and
 *  ignores the current input cursor. If you are buffering tokens on-demand,
 *  calling getText() halfway through the input will only do rewrites
 *  for those tokens in the first half of the file.
 *
 *  Since the operations are done lazily at getText-time, operations do not
 *  screw up the token index values. That is, an insert operation at token
 *  index i does not change the index values for tokens i+1..n-1.
 *
 *  Because operations never actually alter the buffer, you may always get
 *  the original token stream back without undoing anything. Since
 *  the instructions are queued up, you can easily simulate transactions and
 *  roll back any changes if there is an error just by removing instructions.
 *  For example,
 *
 *   CharStream input = new ANTLRFileStream("input");
 *   TLexer lex = new TLexer(input);
 *   CommonTokenStream tokens = new CommonTokenStream(lex);
 *   T parser = new T(tokens);
 *   TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
 *   parser.startRule();
 *
 *  Then in the rules, you can execute (assuming rewriter is visible):
 *   Token t,u;
 *   ...
 *   rewriter.insertAfter(t, "text to put after t");}
 *   rewriter.insertAfter(u, "text after u");}
 *   System.out.println(tokens.toString());
 *
 *  You can also have multiple "instruction streams" and get multiple
 *  rewrites from a single pass over the input. Just name the instruction
 *  streams and use that name again when printing the buffer. This could be
 *  useful for generating a C file and also its header file--all from the
 *  same buffer:
 *
 *   tokens.insertAfter("pass1", t, "text to put after t");}
 *   tokens.insertAfter("pass2", u, "text after u");}
 *   System.out.println(tokens.toString("pass1"));
 *   System.out.println(tokens.toString("pass2"));
 *
 *  If you don't use named rewrite streams, a "default" stream is used as
 *  the first example shows.
 */
public class TokenStreamRewriter {
	public static final String DEFAULT_PROGRAM_NAME = "default";
	public static final int PROGRAM_INIT_SIZE = 100;
	public static final int MIN_TOKEN_INDEX = 0;

	// Define the rewrite operation hierarchy

	public class RewriteOperation {
		/** What index into rewrites List are we? */
		protected int instructionIndex;
		/** Token buffer index. */
		protected int index;
		protected Object text;

		protected RewriteOperation(int index) {
			this.index = index;
		}

		protected RewriteOperation(int index, Object text) {
			this.index = index;
			this.text = text;
		}
		/** Execute the rewrite operation by possibly adding to the buffer.
		 *  Return the index of the next token to operate on.
		 */
		public int execute(StringBuilder buf) {
			return index;
		}

		@Override
		public String toString() {
			String opName = getClass().getName();
			int $index = opName.indexOf('$');
			opName = opName.substring($index+1, opName.length());
			return "<"+opName+"@"+tokens.get(index)+
				   ":\""+text+"\">";
		}
	}

	class InsertBeforeOp extends RewriteOperation {
		public InsertBeforeOp(int index, Object text) {
			super(index,text);
		}

		@Override
		public int execute(StringBuilder buf) {
			buf.append(text);
			if ( tokens.get(index).getType()!=Token.EOF ) {
				buf.append(tokens.get(index).getText());
			}
			return index+1;
		}
	}

	/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
	 *  instructions.
	 */
	class ReplaceOp extends RewriteOperation {
		protected int lastIndex;
		public ReplaceOp(int from, int to, Object text) {
			super(from,text);
			lastIndex = to;
		}
		@Override
		public int execute(StringBuilder buf) {
			if ( text!=null ) {
				buf.append(text);
			}
			return lastIndex+1;
		}
		@Override
		public String toString() {
			if ( text==null ) {
				return "<DeleteOp@"+tokens.get(index)+
					   ".."+tokens.get(lastIndex)+">";
			}
			return "<ReplaceOp@"+tokens.get(index)+
				   ".."+tokens.get(lastIndex)+":\""+text+"\">";
		}
	}

	/** Our source stream */
	protected final TokenStream tokens;

	/** You may have multiple, named streams of rewrite operations.
	 *  I'm calling these things "programs."
	 *  Maps String (name) -> rewrite (List)
	 */
	protected final Map<String, List<RewriteOperation>> programs;

	/** Map String (program name) -> Integer index */
	protected final Map<String, Integer> lastRewriteTokenIndexes;

	public TokenStreamRewriter(TokenStream tokens) {
		this.tokens = tokens;
		programs = new HashMap<String, List<RewriteOperation>>();
		programs.put(DEFAULT_PROGRAM_NAME,
					 new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE));
		lastRewriteTokenIndexes = new HashMap<String, Integer>();
	}

	public final TokenStream getTokenStream() {
		return tokens;
	}

	public void rollback(int instructionIndex) {
		rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
	}

	/** Rollback the instruction stream for a program so that
	 *  the indicated instruction (via instructionIndex) is no
	 *  longer in the stream. UNTESTED!
	 */
	public void rollback(String programName, int instructionIndex) {
		List<RewriteOperation> is = programs.get(programName);
		if ( is!=null ) {
			programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex));
		}
	}

	public void deleteProgram() {
		deleteProgram(DEFAULT_PROGRAM_NAME);
	}

	/** Reset the program so that no instructions exist */
	public void deleteProgram(String programName) {
		rollback(programName, MIN_TOKEN_INDEX);
	}

	public void insertAfter(Token t, Object text) {
		insertAfter(DEFAULT_PROGRAM_NAME, t, text);
	}

	public void insertAfter(int index, Object text) {
		insertAfter(DEFAULT_PROGRAM_NAME, index, text);
	}

	public void insertAfter(String programName, Token t, Object text) {
		insertAfter(programName,t.getTokenIndex(), text);
	}

	public void insertAfter(String programName, int index, Object text) {
		// to insert after, just insert before next index (even if past end)
		insertBefore(programName,index+1, text);
	}

	public void insertBefore(Token t, Object text) {
		insertBefore(DEFAULT_PROGRAM_NAME, t, text);
	}

	public void insertBefore(int index, Object text) {
		insertBefore(DEFAULT_PROGRAM_NAME, index, text);
	}

	public void insertBefore(String programName, Token t, Object text) {
		insertBefore(programName, t.getTokenIndex(), text);
	}

	public void insertBefore(String programName, int index, Object text) {
		RewriteOperation op = new InsertBeforeOp(index,text);
		List<RewriteOperation> rewrites = getProgram(programName);
		op.instructionIndex = rewrites.size();
		rewrites.add(op);
	}

	public void replace(int index, Object text) {
		replace(DEFAULT_PROGRAM_NAME, index, index, text);
	}

	public void replace(int from, int to, Object text) {
		replace(DEFAULT_PROGRAM_NAME, from, to, text);
	}

	public void replace(Token indexT, Object text) {
		replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
	}

	public void replace(Token from, Token to, Object text) {
		replace(DEFAULT_PROGRAM_NAME, from, to, text);
	}

	public void replace(String programName, int from, int to, @Nullable Object text) {
		if ( from > to || from<0 || to<0 || to >= tokens.size() ) {
			throw new IllegalArgumentException("replace: range invalid: "+from+".."+to+"(size="+tokens.size()+")");
		}
		RewriteOperation op = new ReplaceOp(from, to, text);
		List<RewriteOperation> rewrites = getProgram(programName);
		op.instructionIndex = rewrites.size();
		rewrites.add(op);
	}

	public void replace(String programName, Token from, Token to, @Nullable Object text) {
		replace(programName,
				from.getTokenIndex(),
				to.getTokenIndex(),
				text);
	}

	public void delete(int index) {
		delete(DEFAULT_PROGRAM_NAME, index, index);
	}

	public void delete(int from, int to) {
		delete(DEFAULT_PROGRAM_NAME, from, to);
	}

	public void delete(Token indexT) {
		delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
	}

	public void delete(Token from, Token to) {
		delete(DEFAULT_PROGRAM_NAME, from, to);
	}

	public void delete(String programName, int from, int to) {
		replace(programName,from,to,null);
	}

	public void delete(String programName, Token from, Token to) {
		replace(programName,from,to,null);
	}

	public int getLastRewriteTokenIndex() {
		return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
	}

	protected int getLastRewriteTokenIndex(String programName) {
		Integer I = lastRewriteTokenIndexes.get(programName);
		if ( I==null ) {
			return -1;
		}
		return I;
	}

	protected void setLastRewriteTokenIndex(String programName, int i) {
		lastRewriteTokenIndexes.put(programName, i);
	}

	protected List<RewriteOperation> getProgram(String name) {
		List<RewriteOperation> is = programs.get(name);
		if ( is==null ) {
			is = initializeProgram(name);
		}
		return is;
	}

	private List<RewriteOperation> initializeProgram(String name) {
		List<RewriteOperation> is = new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE);
		programs.put(name, is);
		return is;
	}

	/** Return the text from the original tokens altered per the
	 *  instructions given to this rewriter.
	 */
	public String getText() {
		return getText(DEFAULT_PROGRAM_NAME, Interval.of(0,tokens.size()-1));
	}

	/** Return the text associated with the tokens in the interval from the
	 *  original token stream but with the alterations given to this rewriter.
	 *  The interval refers to the indexes in the original token stream.
	 *  We do not alter the token stream in any way, so the indexes
	 *  and intervals are still consistent. Includes any operations done
	 *  to the first and last token in the interval. So, if you did an
	 *  insertBefore on the first token, you would get that insertion.
	 *  The same is true if you do an insertAfter the stop token.
	 */
	public String getText(Interval interval) {
		return getText(DEFAULT_PROGRAM_NAME, interval);
	}

	public String getText(String programName, Interval interval) {
		List<RewriteOperation> rewrites = programs.get(programName);
		int start = interval.a;
		int stop = interval.b;

		// ensure start/end are in range
		if ( stop>tokens.size()-1 ) stop = tokens.size()-1;
		if ( start<0 ) start = 0;

		if ( rewrites==null || rewrites.isEmpty() ) {
			return tokens.getText(interval); // no instructions to execute
		}
		StringBuilder buf = new StringBuilder();

		// First, optimize instruction stream
		Map<Integer, RewriteOperation> indexToOp = reduceToSingleOperationPerIndex(rewrites);

		// Walk buffer, executing instructions and emitting tokens
		int i = start;
		while ( i <= stop && i < tokens.size() ) {
			RewriteOperation op = indexToOp.get(i);
			indexToOp.remove(i); // remove so any left have index size-1
			Token t = tokens.get(i);
			if ( op==null ) {
				// no operation at that index, just dump token
				if ( t.getType()!=Token.EOF ) buf.append(t.getText());
				i++; // move to next token
			}
			else {
				i = op.execute(buf); // execute operation and skip
			}
		}

		// include stuff after end if it's last index in buffer
		// So, if they did an insertAfter(lastValidIndex, "foo"), include
		// foo if end==lastValidIndex.
		if ( stop==tokens.size()-1 ) {
			// Scan any remaining operations after last token
			// should be included (they will be inserts).
			for (RewriteOperation op : indexToOp.values()) {
				if ( op.index >= tokens.size()-1 ) buf.append(op.text);
			}
		}
		return buf.toString();
	}

	/** We need to combine operations and report invalid operations (like
	 *  overlapping replaces that are not completed nested). Inserts to
	 *  same index need to be combined etc... Here are the cases:
	 *
	 *  I.i.u I.j.v                           leave alone, nonoverlapping
	 *  I.i.u I.i.v                           combine: Iivu
	 *
	 *  R.i-j.u R.x-y.v | i-j in x-y          delete first R
	 *  R.i-j.u R.i-j.v                       delete first R
	 *  R.i-j.u R.x-y.v | x-y in i-j          ERROR
	 *  R.i-j.u R.x-y.v | boundaries overlap  ERROR
	 *
	 *  Delete special case of replace (text==null):
	 *  D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
	 *
	 *  I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before
	 *                                        we're not deleting i)
	 *  I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
	 *  R.x-y.v I.i.u   | i in x-y            ERROR
	 *  R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
	 *  R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
	 *
	 *  I.i.u = insert u before op @ index i
	 *  R.x-y.u = replace x-y indexed tokens with u
	 *
	 *  First we need to examine replaces. For any replace op:
	 *
	 *      1. wipe out any insertions before op within that range.
	 *      2. Drop any replace op before that is contained completely within
	 *         that range.
	 *      3. Throw exception upon boundary overlap with any previous replace.
	 *
	 *  Then we can deal with inserts:
	 *
	 *      1. for any inserts to same index, combine even if not adjacent.
	 *      2. for any prior replace with same left boundary, combine this
	 *         insert with replace and delete this replace.
	 *      3. throw exception if index in same range as previous replace
	 *
	 *  Don't actually delete; make op null in list. Easier to walk list.
	 *  Later we can throw as we add to index -> op map.
	 *
	 *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
	 *  inserted stuff would be before the replace range. But, if you
	 *  add tokens in front of a method body '{' and then delete the method
	 *  body, I think the stuff before the '{' you added should disappear too.
	 *
	 *  Return a map from token index to operation.
	 */
	protected Map<Integer, RewriteOperation> reduceToSingleOperationPerIndex(List<RewriteOperation> rewrites) {
		// System.out.println("rewrites="+rewrites);

		// WALK REPLACES
		for (int i = 0; i < rewrites.size(); i++) {
			RewriteOperation op = rewrites.get(i);
			if ( op==null ) continue;
			if ( !(op instanceof ReplaceOp) ) continue;
			ReplaceOp rop = (ReplaceOp)rewrites.get(i);
			// Wipe prior inserts within range
			List<? extends InsertBeforeOp> inserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
			for (InsertBeforeOp iop : inserts) {
				if ( iop.index == rop.index ) {
					// E.g., insert before 2, delete 2..2; update replace
					// text to include insert before, kill insert
					rewrites.set(iop.instructionIndex, null);
					rop.text = iop.text.toString() + (rop.text!=null?rop.text.toString():"");
				}
				else if ( iop.index > rop.index && iop.index <= rop.lastIndex ) {
					// delete insert as it's a no-op.
					rewrites.set(iop.instructionIndex, null);
				}
			}
			// Drop any prior replaces contained within
			List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
			for (ReplaceOp prevRop : prevReplaces) {
				if ( prevRop.index>=rop.index && prevRop.lastIndex <= rop.lastIndex ) {
					// delete replace as it's a no-op.
					rewrites.set(prevRop.instructionIndex, null);
					continue;
				}
				// throw exception unless disjoint or identical
				boolean disjoint =
					prevRop.lastIndex<rop.index || prevRop.index > rop.lastIndex;
				boolean same =
					prevRop.index==rop.index && prevRop.lastIndex==rop.lastIndex;
				// Delete special case of replace (text==null):
				// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
				if ( prevRop.text==null && rop.text==null && !disjoint ) {
					//System.out.println("overlapping deletes: "+prevRop+", "+rop);
					rewrites.set(prevRop.instructionIndex, null); // kill first delete
					rop.index = Math.min(prevRop.index, rop.index);
					rop.lastIndex = Math.max(prevRop.lastIndex, rop.lastIndex);
					System.out.println("new rop "+rop);
				}
				else if ( !disjoint && !same ) {
					throw new IllegalArgumentException("replace op boundaries of "+rop+
													   " overlap with previous "+prevRop);
				}
			}
		}

		// WALK INSERTS
		for (int i = 0; i < rewrites.size(); i++) {
			RewriteOperation op = rewrites.get(i);
			if ( op==null ) continue;
			if ( !(op instanceof InsertBeforeOp) ) continue;
			InsertBeforeOp iop = (InsertBeforeOp)rewrites.get(i);
			// combine current insert with prior if any at same index
			List<? extends InsertBeforeOp> prevInserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
			for (InsertBeforeOp prevIop : prevInserts) {
				if ( prevIop.index == iop.index ) { // combine objects
					// convert to strings...we're in process of toString'ing
					// whole token buffer so no lazy eval issue with any templates
					iop.text = catOpText(iop.text,prevIop.text);
					// delete redundant prior insert
					rewrites.set(prevIop.instructionIndex, null);
				}
			}
			// look for replaces where iop.index is in range; error
			List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
			for (ReplaceOp rop : prevReplaces) {
				if ( iop.index == rop.index ) {
					rop.text = catOpText(iop.text,rop.text);
					rewrites.set(i, null); // delete current insert
					continue;
				}
				if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
					throw new IllegalArgumentException("insert op "+iop+
													   " within boundaries of previous "+rop);
				}
			}
		}
		// System.out.println("rewrites after="+rewrites);
		Map<Integer, RewriteOperation> m = new HashMap<Integer, RewriteOperation>();
		for (int i = 0; i < rewrites.size(); i++) {
			RewriteOperation op = rewrites.get(i);
			if ( op==null ) continue; // ignore deleted ops
			if ( m.get(op.index)!=null ) {
				throw new Error("should only be one op per index");
			}
			m.put(op.index, op);
		}
		//System.out.println("index to op: "+m);
		return m;
	}

	protected String catOpText(Object a, Object b) {
		String x = "";
		String y = "";
		if ( a!=null ) x = a.toString();
		if ( b!=null ) y = b.toString();
		return x+y;
	}

	/** Get all operations before an index of a particular kind */
	protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind, int before) {
		List<T> ops = new ArrayList<T>();
		for (int i=0; i<before && i<rewrites.size(); i++) {
			RewriteOperation op = rewrites.get(i);
			if ( op==null ) continue; // ignore deleted
			if ( kind.isInstance(op) ) {
				ops.add((T)op);
			}
		}
		return ops;
	}

}
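The class comment above describes the lazy, instruction-queue design: operations are only queued, and the buffer is never touched until getText() runs them. A compact end-to-end sketch under that design — `MyLexer` and the input literal are hypothetical placeholders, not part of this commit:

```java
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.TokenStreamRewriter;

public class RewriteDemo {
	public static void main(String[] args) {
		// MyLexer is a hypothetical ANTLR-generated lexer for "int x;"-style input.
		CommonTokenStream tokens = new CommonTokenStream(new MyLexer(new ANTLRInputStream("int x;")));
		tokens.fill();
		TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);

		rewriter.insertBefore(0, "/* generated */ ");   // queues an InsertBeforeOp
		rewriter.replace(0, 0, "long");                 // queues a ReplaceOp over token 0;
		                                                // reduceToSingleOperationPerIndex folds the insert into it
		rewriter.insertAfter(tokens.size() - 1, "\n");  // past-the-end insert, emitted by the tail scan in getText

		System.out.println(rewriter.getText());  // instructions execute lazily, here
		System.out.println(tokens.getText());    // original buffer is untouched
	}
}
```

Because nothing is ever applied to the buffer, rollback() simply truncates the instruction list and the original text comes back with no undo machinery.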
@@ -98,6 +98,11 @@ public class UnbufferedTokenStream<T extends Token>
		return buf.toString();
	}

+	@Override
+	public String getText() {
+		return getText(Interval.of(0,index()));
+	}
+
	@Override
	public String getText(RuleContext ctx) {
		return getText(ctx.getSourceInterval());
@@ -34,8 +34,8 @@ import org.antlr.v4.runtime.NoViableAltException;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RuleContext;
-import org.antlr.v4.runtime.SymbolStream;
import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.dfa.DFAState;
import org.antlr.v4.runtime.misc.Interval;

@@ -286,7 +286,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
	public void reset() {
	}

-	public int adaptivePredict(@NotNull SymbolStream<? extends Symbol> input, int decision,
+	public int adaptivePredict(@NotNull TokenStream input, int decision,
							   @Nullable ParserRuleContext<?> outerContext)
	{
		predict_calls++;

@@ -312,7 +312,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
		}
	}

-	public int predictATN(@NotNull DFA dfa, @NotNull SymbolStream<? extends Symbol> input,
+	public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
						  @Nullable ParserRuleContext<?> outerContext)
	{
		if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;

@@ -349,7 +349,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
	}

	public int execDFA(@NotNull DFA dfa, @NotNull DFAState s0,
-					   @NotNull SymbolStream<? extends Symbol> input, int startIndex,
+					   @NotNull TokenStream input, int startIndex,
					   @Nullable ParserRuleContext<?> outerContext)
	{
		if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;

@@ -510,7 +510,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {

	 */
	public int execATN(@NotNull DFA dfa, @NotNull DFAState s0,
-					   @NotNull SymbolStream<? extends Symbol> input, int startIndex,
+					   @NotNull TokenStream input, int startIndex,
					   ParserRuleContext<?> outerContext)
	{
		if ( debug ) System.out.println("execATN decision "+dfa.decision+" exec LA(1)=="+ getLookaheadName(input));

@@ -644,7 +644,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
	public ATNConfigSet execATNWithFullContext(DFA dfa,
											   DFAState D, // how far we got before failing over
											   @NotNull ATNConfigSet s0,
-											   @NotNull SymbolStream<? extends Symbol> input, int startIndex,
+											   @NotNull TokenStream input, int startIndex,
											   ParserRuleContext<?> outerContext,
											   int nalts,
											   boolean greedy)

@@ -1324,7 +1324,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
		return String.valueOf(t);
	}

-	public String getLookaheadName(SymbolStream<? extends Symbol> input) {
+	public String getLookaheadName(TokenStream input) {
		return getTokenName(input.LA(1));
	}


@@ -1349,7 +1349,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
	}

	@NotNull
-	public NoViableAltException noViableAlt(@NotNull SymbolStream<? extends Symbol> input,
+	public NoViableAltException noViableAlt(@NotNull TokenStream input,
											@NotNull ParserRuleContext<?> outerContext,
											@NotNull ATNConfigSet configs,
											int startIndex)
@@ -74,6 +74,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;

public class Tool {
	public String VERSION = "4.0-"+new Date();

@@ -166,8 +167,7 @@ public class Tool {
	public ErrorManager errMgr = new ErrorManager(this);
	public LogManager logMgr = new LogManager();

-	List<ANTLRToolListener> listeners =
-		Collections.synchronizedList(new ArrayList<ANTLRToolListener>());
+	List<ANTLRToolListener> listeners = new CopyOnWriteArrayList<ANTLRToolListener>();

	/** Track separately so if someone adds a listener, it's the only one
	 *  instead of it and the default stderr listener.
@@ -31,6 +31,7 @@ package org.antlr.v4.semantics;

import org.antlr.v4.analysis.LeftRecursiveRuleTransformer;
import org.antlr.v4.parse.ANTLRParser;
+import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.*;
import org.antlr.v4.tool.ast.GrammarAST;


@@ -116,7 +117,7 @@ public class SemanticPipeline {
		}
		else {
			assignTokenTypes(g, collector.tokensDefs,
-							 collector.tokenIDRefs, collector.strings);
+							 collector.tokenIDRefs, collector.terminals);
		}

		// CHECK RULE REFS NOW (that we've defined rules in grammar)

@@ -163,7 +164,7 @@ public class SemanticPipeline {
	}

	void assignTokenTypes(Grammar g, List<GrammarAST> tokensDefs,
-						  List<GrammarAST> tokenIDs, Set<String> strings)
+						  List<GrammarAST> tokenIDs, List<GrammarAST> terminals)
	{
		//Grammar G = g.getOutermostGrammar(); // put in root, even if imported


@@ -174,6 +175,9 @@ public class SemanticPipeline {
				String lit = alias.getChild(1).getText();
				g.defineTokenAlias(name, lit);
			}
+			else {
+				g.defineTokenName(alias.getText());
+			}
		}

		// DEFINE TOKEN TYPES FOR X : 'x' ; RULES

@@ -187,10 +191,25 @@ public class SemanticPipeline {
		*/

		// DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
-		for (GrammarAST idAST : tokenIDs) { g.defineTokenName(idAST.getText()); }
+		for (GrammarAST idAST : tokenIDs) {
+			if (g.getTokenType(idAST.getText()) == Token.INVALID_TYPE) {
+				g.tool.errMgr.grammarError(ErrorType.IMPLICIT_TOKEN_DEFINITION, g.fileName, idAST.token, idAST.getText());
+			}
+
+			g.defineTokenName(idAST.getText());
+		}

+		// VERIFY TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
+		for (GrammarAST termAST : terminals) {
+			if (termAST.getType() != ANTLRParser.STRING_LITERAL) {
+				continue;
+			}
+
+			if (g.getTokenType(termAST.getText()) == Token.INVALID_TYPE) {
+				g.tool.errMgr.grammarError(ErrorType.IMPLICIT_STRING_DEFINITION, g.fileName, termAST.token, termAST.getText());
+			}
+		}

-		// DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
-		for (String s : strings) { g.defineStringLiteral(s); }
		g.tool.log("semantics", "tokens="+g.tokenNameToTypeMap);
		g.tool.log("semantics", "strings="+g.stringLiteralToTypeMap);
	}
@@ -131,7 +131,7 @@ public class ErrorManager {
			locationValid = true;
		}

-		messageFormatST.add("id", msg.errorType.ordinal());
+		messageFormatST.add("id", msg.errorType.code);
		messageFormatST.add("text", messageST);

		if (locationValid) reportST.add("location", locationST);
@@ -144,6 +144,9 @@ public enum ErrorType {
	RULE_WITH_TOO_FEW_ALT_LABELS(122, "rule <arg>: must label all alternatives or none", ErrorSeverity.ERROR),
	ALT_LABEL_REDEF(123, "rule alt label <arg> redefined in rule <arg2>, originally in <arg3>", ErrorSeverity.ERROR),
	ALT_LABEL_CONFLICTS_WITH_RULE(124, "rule alt label <arg> conflicts with rule <arg2>", ErrorSeverity.ERROR),
+	IMPLICIT_TOKEN_DEFINITION(125, "implicit definition of token <arg> in parser", ErrorSeverity.WARNING),
+	IMPLICIT_STRING_DEFINITION(126, "cannot create implicit token for string literal <arg> in non-combined grammar", ErrorSeverity.ERROR),

	/** Documentation comment is unterminated */
	//UNTERMINATED_DOC_COMMENT(, "", ErrorSeverity.ERROR),


@@ -185,16 +188,18 @@ public enum ErrorType {

	;

-	public String msg;
-	public int code; // unique, deterministic unchanging error code once we release
-	public ErrorSeverity severity;
-	public Boolean abortsAnalysis;
-	public Boolean abortsCodegen;
+	public final String msg;
+	public final int code; // unique, deterministic unchanging error code once we release
+	public final ErrorSeverity severity;
+	public final Boolean abortsAnalysis;
+	public final Boolean abortsCodegen;

	ErrorType(int code, String msg, ErrorSeverity severity) {
		this.code = code;
		this.msg = msg;
		this.severity = severity;
+		this.abortsAnalysis = false;
+		this.abortsCodegen = false;
	}

	// ErrorType(String msg, ErrorSeverity severity, boolean abortsAnalysis) {
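Since ErrorManager now emits `errorType.code` instead of `ordinal()`, the numeric ids baked into the expected test output below (error(63), warning(83), and so on) stay stable even when enum constants are reordered. A minimal sketch of dumping that table, assuming only the public fields visible in this diff:

```java
import org.antlr.v4.tool.ErrorType;

public class ListErrorCodes {
	public static void main(String[] args) {
		for (ErrorType e : ErrorType.values()) {
			// code is the released, unchanging id; ordinal() would shift on reorder
			System.out.printf("%s -> %s(%d): %s%n", e.name(), e.severity, e.code, e.msg);
		}
	}
}
```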
@@ -30,8 +30,14 @@
package org.antlr.v4.tool.interp;

import org.antlr.v4.Tool;
-import org.antlr.v4.runtime.*;
-import org.antlr.v4.runtime.atn.*;
+import org.antlr.v4.runtime.Parser;
+import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.atn.ATN;
+import org.antlr.v4.runtime.atn.ATNState;
+import org.antlr.v4.runtime.atn.DecisionState;
+import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

@@ -80,14 +86,14 @@ public class ParserInterpreter {
		atnSimulator = new ParserATNSimulator<Token>(new DummyParser(g, input), g.atn);
	}

-	public int predictATN(@NotNull DFA dfa, @NotNull SymbolStream<Token> input,
+	public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
						  @Nullable ParserRuleContext outerContext,
						  boolean useContext)
	{
		return atnSimulator.predictATN(dfa, input, outerContext);
	}

-	public int adaptivePredict(@NotNull SymbolStream<Token> input, int decision,
+	public int adaptivePredict(@NotNull TokenStream input, int decision,
							   @Nullable ParserRuleContext outerContext)
	{
		return atnSimulator.adaptivePredict(input, decision, outerContext);
@@ -54,6 +54,7 @@ import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.ANTLRMessage;
+import org.antlr.v4.tool.DefaultToolListener;
import org.antlr.v4.tool.DOTGenerator;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.GrammarSemanticsMessage;

@@ -383,7 +384,7 @@ public abstract class BaseTest {


	/** Return true if all is ok, no errors */
-	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, String... extraOptions) {
+	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean defaultListener, String... extraOptions) {
		boolean allIsWell = true;
		System.out.println("dir "+tmpdir);
		mkdir(tmpdir);

@@ -401,9 +402,12 @@ public abstract class BaseTest {
			ErrorQueue equeue = new ErrorQueue();
			Tool antlr = newTool(optionsA);
			antlr.addListener(equeue);
+			if (defaultListener) {
+				antlr.addListener(new DefaultToolListener(antlr));
+			}
			antlr.processGrammarsOnCommandLine();
-			if ( equeue.errors.size()>0 ) {
-				allIsWell = false;
+			allIsWell = equeue.errors.isEmpty();
+			if ( !defaultListener && !equeue.errors.isEmpty() ) {
				System.err.println("antlr reports errors from "+options);
				for (int i = 0; i < equeue.errors.size(); i++) {
					ANTLRMessage msg = equeue.errors.get(i);

@@ -413,6 +417,13 @@ public abstract class BaseTest {
				System.out.println(grammarStr);
				System.out.println("###");
			}
+			if ( !defaultListener && !equeue.warnings.isEmpty() ) {
+				System.err.println("antlr reports warnings from "+options);
+				for (int i = 0; i < equeue.warnings.size(); i++) {
+					ANTLRMessage msg = equeue.warnings.get(i);
+					System.err.println(msg);
+				}
+			}
		}
		catch (Exception e) {
			allIsWell = false;

@@ -478,10 +489,24 @@ public abstract class BaseTest {
												@Nullable String parserName,
												String lexerName,
												String... extraOptions)
	{
+		return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions);
+	}
+
+	/** Return true if all is well */
+	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
+													String grammarStr,
+													@Nullable String parserName,
+													String lexerName,
+													boolean defaultListener,
+													String... extraOptions)
+	{
		boolean allIsWell =
-			antlr(grammarFileName, grammarFileName, grammarStr, extraOptions);
-		boolean ok;
+			antlr(grammarFileName, grammarFileName, grammarStr, defaultListener, extraOptions);
+		if (!allIsWell) {
+			return false;
+		}

		List<String> files = new ArrayList<String>();
		if ( lexerName!=null ) {
			files.add(lexerName+".java");

@@ -499,8 +524,7 @@ public abstract class BaseTest {
				files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseParseListener.java");
			}
		}
-		ok = compile(files.toArray(new String[files.size()]));
-		if ( !ok ) { allIsWell = false; }
+		allIsWell = compile(files.toArray(new String[files.size()]));
		return allIsWell;
	}


@@ -1097,6 +1121,11 @@ public abstract class BaseTest {
			return null;
		}

+		@Override
+		public String getText() {
+			throw new UnsupportedOperationException("can't give strings");
+		}
+
		@Override
		public String getText(Interval interval) {
			throw new UnsupportedOperationException("can't give strings");
@@ -360,14 +360,14 @@ public class TestATNConstruction extends BaseTest {
	@Test public void testNestedAstar() throws Exception {
		Grammar g = new Grammar(
			"parser grammar P;\n"+
-			"a : (',' ID*)*;");
+			"a : (COMMA ID*)*;");
		String expecting =
			"RuleStart_a_0->StarLoopEntry_13\n" +
			"StarLoopEntry_13->StarBlockStart_11\n" +
			"StarLoopEntry_13->s14\n" +
			"StarBlockStart_11->s2\n" +
			"s14->RuleStop_a_1\n" +
-			"s2-','->StarLoopEntry_8\n" +
+			"s2-COMMA->StarLoopEntry_8\n" +
			"RuleStop_a_1-EOF->s16\n" +
			"StarLoopEntry_8->StarBlockStart_6\n" +
			"StarLoopEntry_8->s9\n" +
@@ -9,6 +9,7 @@ public class TestAttributeChecks extends BaseTest {
	String attributeTemplate =
		"parser grammar A;\n"+
		"@members {<members>}\n" +
+		"tokens{ID;}\n" +
		"a[int x] returns [int y]\n" +
		"@init {<init>}\n" +
		"    : id=ID ids+=ID lab=b[34] {\n" +

@@ -24,8 +25,8 @@ public class TestAttributeChecks extends BaseTest {
		"d : ;\n";

	String[] membersChecks = {
-		"$a",	"error(29): A.g4:2:11: unknown attribute reference a in $a\n",
-		"$a.y",	"error(29): A.g4:2:11: unknown attribute reference a in $a.y\n",
+		"$a",	"error(63): A.g4:2:11: unknown attribute reference a in $a\n",
+		"$a.y",	"error(63): A.g4:2:11: unknown attribute reference a in $a.y\n",
	};

	String[] initChecks = {

@@ -36,8 +37,8 @@ public class TestAttributeChecks extends BaseTest {
		"$lab.e",		"",
		"$ids",			"",

-		"$c",	"error(29): A.g4:4:8: unknown attribute reference c in $c\n",
-		"$a.q",	"error(31): A.g4:4:10: unknown attribute q for rule a in $a.q\n",
+		"$c",	"error(63): A.g4:5:8: unknown attribute reference c in $c\n",
+		"$a.q",	"error(65): A.g4:5:10: unknown attribute q for rule a in $a.q\n",
	};

	String[] inlineChecks = {

@@ -58,19 +59,19 @@ public class TestAttributeChecks extends BaseTest {
	};

	String[] bad_inlineChecks = {
-		"$lab",		"error(33): A.g4:6:4: missing attribute access on rule reference lab in $lab\n",
-		"$q",		"error(29): A.g4:6:4: unknown attribute reference q in $q\n",
-		"$q.y",		"error(29): A.g4:6:4: unknown attribute reference q in $q.y\n",
-		"$q = 3",	"error(29): A.g4:6:4: unknown attribute reference q in $q\n",
-		"$q = 3;",	"error(29): A.g4:6:4: unknown attribute reference q in $q = 3;\n",
-		"$q.y = 3;",	"error(29): A.g4:6:4: unknown attribute reference q in $q.y = 3;\n",
-		"$q = $blort;",	"error(29): A.g4:6:4: unknown attribute reference q in $q = $blort;\n" +
-						"error(29): A.g4:6:9: unknown attribute reference blort in $blort\n",
-		"$a.ick",		"error(31): A.g4:6:6: unknown attribute ick for rule a in $a.ick\n",
-		"$a.ick = 3;",	"error(31): A.g4:6:6: unknown attribute ick for rule a in $a.ick = 3;\n",
-		"$b.d",			"error(30): A.g4:6:6: cannot access rule d's parameter: $b.d\n",	// can't see rule ref's arg
-		"$d.text",		"error(29): A.g4:6:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref
-		"$lab.d",		"error(30): A.g4:6:8: cannot access rule d's parameter: $lab.d\n",
+		"$lab",		"error(67): A.g4:7:4: missing attribute access on rule reference lab in $lab\n",
+		"$q",		"error(63): A.g4:7:4: unknown attribute reference q in $q\n",
+		"$q.y",		"error(63): A.g4:7:4: unknown attribute reference q in $q.y\n",
+		"$q = 3",	"error(63): A.g4:7:4: unknown attribute reference q in $q\n",
+		"$q = 3;",	"error(63): A.g4:7:4: unknown attribute reference q in $q = 3;\n",
+		"$q.y = 3;",	"error(63): A.g4:7:4: unknown attribute reference q in $q.y = 3;\n",
+		"$q = $blort;",	"error(63): A.g4:7:4: unknown attribute reference q in $q = $blort;\n" +
+						"error(63): A.g4:7:9: unknown attribute reference blort in $blort\n",
+		"$a.ick",		"error(65): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n",
+		"$a.ick = 3;",	"error(65): A.g4:7:6: unknown attribute ick for rule a in $a.ick = 3;\n",
+		"$b.d",			"error(64): A.g4:7:6: cannot access rule d's parameter: $b.d\n",	// can't see rule ref's arg
+		"$d.text",		"error(63): A.g4:7:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref
+		"$lab.d",		"error(64): A.g4:7:8: cannot access rule d's parameter: $lab.d\n",
	};

	String[] finallyChecks = {

@@ -84,20 +85,20 @@ public class TestAttributeChecks extends BaseTest {
		"$id.text",	"",
		"$ids",		"",

-		"$lab",		"error(33): A.g4:9:14: missing attribute access on rule reference lab in $lab\n",
-		"$q",		"error(29): A.g4:9:14: unknown attribute reference q in $q\n",
-		"$q.y",		"error(29): A.g4:9:14: unknown attribute reference q in $q.y\n",
-		"$q = 3",	"error(29): A.g4:9:14: unknown attribute reference q in $q\n",
-		"$q = 3;",	"error(29): A.g4:9:14: unknown attribute reference q in $q = 3;\n",
-		"$q.y = 3;",	"error(29): A.g4:9:14: unknown attribute reference q in $q.y = 3;\n",
-		"$q = $blort;",	"error(29): A.g4:9:14: unknown attribute reference q in $q = $blort;\n" +
-						"error(29): A.g4:9:19: unknown attribute reference blort in $blort\n",
-		"$a.ick",		"error(31): A.g4:9:16: unknown attribute ick for rule a in $a.ick\n",
-		"$a.ick = 3;",	"error(31): A.g4:9:16: unknown attribute ick for rule a in $a.ick = 3;\n",
-		"$b.e",			"error(29): A.g4:9:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts
-		"$b.d",			"error(29): A.g4:9:14: unknown attribute reference b in $b.d\n",
-		"$c.text",		"error(29): A.g4:9:14: unknown attribute reference c in $c.text\n",
-		"$lab.d",		"error(30): A.g4:9:18: cannot access rule d's parameter: $lab.d\n",
+		"$lab",		"error(67): A.g4:10:14: missing attribute access on rule reference lab in $lab\n",
+		"$q",		"error(63): A.g4:10:14: unknown attribute reference q in $q\n",
+		"$q.y",		"error(63): A.g4:10:14: unknown attribute reference q in $q.y\n",
+		"$q = 3",	"error(63): A.g4:10:14: unknown attribute reference q in $q\n",
+		"$q = 3;",	"error(63): A.g4:10:14: unknown attribute reference q in $q = 3;\n",
+		"$q.y = 3;",	"error(63): A.g4:10:14: unknown attribute reference q in $q.y = 3;\n",
+		"$q = $blort;",	"error(63): A.g4:10:14: unknown attribute reference q in $q = $blort;\n" +
+						"error(63): A.g4:10:19: unknown attribute reference blort in $blort\n",
+		"$a.ick",		"error(65): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n",
+		"$a.ick = 3;",	"error(65): A.g4:10:16: unknown attribute ick for rule a in $a.ick = 3;\n",
+		"$b.e",			"error(63): A.g4:10:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts
+		"$b.d",			"error(63): A.g4:10:14: unknown attribute reference b in $b.d\n",
+		"$c.text",		"error(63): A.g4:10:14: unknown attribute reference c in $c.text\n",
+		"$lab.d",		"error(64): A.g4:10:18: cannot access rule d's parameter: $lab.d\n",
	};

	String[] dynMembersChecks = {

@@ -200,6 +201,7 @@ public class TestAttributeChecks extends BaseTest {
	@Test public void testTokenRef() throws RecognitionException {
		String grammar =
			"parser grammar S;\n" +
+			"tokens{ID;}\n" +
			"a : x=ID {Token t = $x; t = $ID;} ;\n";
		String expected =
			"";
@@ -37,6 +37,7 @@ public class TestBasicSemanticErrors extends BaseTest {
		"parser grammar U;\n" +
		"options { foo=bar; k=\"3\";}\n" +
		"tokens {\n" +
+		" ID;\n" +
		" f='fkj';\n" +
		" S = 'a';\n" +
		"}\n" +

@@ -50,18 +51,18 @@ public class TestBasicSemanticErrors extends BaseTest {
		"b : ( options { ick=bar; greedy=true; } : ID )+ ;\n" +
		"c : ID<blue> ID<x=y> ;",
		// YIELDS
-		"warning(48): U.g4:2:10: illegal option foo\n" +
-		"warning(48): U.g4:2:19: illegal option k\n" +
-		"error(26): U.g4:4:8: token names must start with an uppercase letter: f\n" +
-		"error(25): U.g4:4:8: can't assign string value to token name f in non-combined grammar\n" +
-		"error(25): U.g4:5:8: can't assign string value to token name S in non-combined grammar\n" +
-		"warning(48): U.g4:8:10: illegal option x\n" +
-		"error(20): U.g4:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
-		"error(20): U.g4:7:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
-		"warning(48): U.g4:11:10: illegal option blech\n" +
-		"warning(48): U.g4:11:21: illegal option greedy\n" +
-		"warning(48): U.g4:14:16: illegal option ick\n" +
-		"warning(48): U.g4:15:16: illegal option x\n",
+		"warning(83): U.g4:2:10: illegal option foo\n" +
+		"warning(83): U.g4:2:19: illegal option k\n" +
+		"error(60): U.g4:5:8: token names must start with an uppercase letter: f\n" +
+		"error(59): U.g4:5:8: can't assign string value to token name f in non-combined grammar\n" +
+		"error(59): U.g4:6:8: can't assign string value to token name S in non-combined grammar\n" +
+		"warning(83): U.g4:9:10: illegal option x\n" +
+		"error(54): U.g4:9:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
+		"error(54): U.g4:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" +
+		"warning(83): U.g4:12:10: illegal option blech\n" +
+		"warning(83): U.g4:12:21: illegal option greedy\n" +
+		"warning(83): U.g4:15:16: illegal option ick\n" +
+		"warning(83): U.g4:16:16: illegal option x\n",
	};

	@Test public void testU() { super.testErrors(U, false); }
@@ -501,29 +501,34 @@ public class TestCompositeGrammars extends BaseTest {
		ErrorQueue equeue = new ErrorQueue();
		String slave =
			"parser grammar T;\n" +
+			"tokens{T;}\n" +
			"x : T ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g4", slave);
		slave =
			"parser grammar S;\n" +
			"import T;\n" +
+			"tokens{S;}\n" +
			"y : S ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g4", slave);

		slave =
			"parser grammar C;\n" +
+			"tokens{C;}\n" +
			"i : C ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "C.g4", slave);
		slave =
			"parser grammar B;\n" +
+			"tokens{B;}\n" +
			"j : B ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "B.g4", slave);
		slave =
			"parser grammar A;\n" +
			"import B,C;\n" +
+			"tokens{A;}\n" +
			"k : A ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "A.g4", slave);

@@ -531,12 +536,13 @@ public class TestCompositeGrammars extends BaseTest {
		String master =
			"grammar M;\n" +
			"import S,A;\n" +
+			"tokens{M;}\n" +
			"a : M ;\n" ;
		writeFile(tmpdir, "M.g4", master);
		Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);

-		assertEquals(equeue.errors.toString(), "[]");
-		assertEquals(equeue.warnings.toString(), "[]");
+		assertEquals("[]", equeue.errors.toString());
+		assertEquals("[]", equeue.warnings.toString());
		String expectedTokenIDToTypeMap = "{EOF=-1, M=3, S=4, T=5, A=6, B=7, C=8}";
		String expectedStringLiteralToTypeMap = "{}";
		String expectedTypeToTokenList = "[M, S, T, A, B, C]";

@@ -653,7 +659,7 @@ public class TestCompositeGrammars extends BaseTest {
			"s : a ;\n" +
			"B : 'b' ;" + // defines B from inherited token space
			"WS : (' '|'\\n') {skip();} ;\n" ;
-		boolean ok = antlr("M.g4", "M.g4", master);
+		boolean ok = antlr("M.g4", "M.g4", master, false);
		boolean expecting = true; // should be ok
		assertEquals(expecting, ok);
	}
@@ -433,7 +433,7 @@ public class TestPerformance extends BaseTest {
			extraOptions.add("-atn");
		}
		String[] extraOptionsArray = extraOptions.toArray(new String[extraOptions.size()]);
-		boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", extraOptionsArray);
+		boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", true, extraOptionsArray);
		assertTrue(success);
	}

@@ -20,9 +20,9 @@ public class TestSymbolIssues extends BaseTest {
"\n" +
"ID : 'a'..'z'+ ID ;",
// YIELDS
"warning(48): A.g4:2:10: illegal option opt\n" +
"warning(48): A.g4:2:21: illegal option k\n" +
"error(59): A.g4:7:1: redefinition of header action\n" +
"warning(83): A.g4:2:10: illegal option opt\n" +
"warning(83): A.g4:2:21: illegal option k\n" +
"error(94): A.g4:7:1: redefinition of header action\n" +
"warning(51): A.g4:2:10: illegal option opt\n" +
"error(19): A.g4:11:0: rule a redefinition\n" +
"error(60): A.g4:5:1: redefinition of members action\n" +

@@ -34,7 +34,7 @@ public class TestSymbolIssues extends BaseTest {
static String[] B = {
// INPUT
"parser grammar B;\n" +
"tokens { X='x'; Y; }\n" +
"tokens { ID; FOO; X='x'; Y; }\n" +
"\n" +
"a : s=ID b+=ID X=ID '.' ;\n" +
"\n" +

@@ -42,16 +42,18 @@ public class TestSymbolIssues extends BaseTest {
"\n" +
"s : FOO ;",
// YIELDS
"error(25): B.g4:2:9: can't assign string value to token name X in non-combined grammar\n" +
"error(35): B.g4:4:4: label s conflicts with rule with same name\n" +
"error(35): B.g4:4:9: label b conflicts with rule with same name\n" +
"error(36): B.g4:4:15: label X conflicts with token with same name\n" +
"error(40): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n"
"error(59): B.g4:2:18: can't assign string value to token name X in non-combined grammar\n" +
"error(69): B.g4:4:4: label s conflicts with rule with same name\n" +
"error(69): B.g4:4:9: label b conflicts with rule with same name\n" +
"error(70): B.g4:4:15: label X conflicts with token with same name\n" +
"error(75): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n" +
"error(126): B.g4:4:20: cannot create implicit token for string literal '.' in non-combined grammar\n"
};

static String[] D = {
// INPUT
"parser grammar D;\n" +
"tokens{ID;}\n" +
"a[int j] \n" +
" : i=ID j=ID ;\n" +
"\n" +

@@ -61,8 +63,8 @@ public class TestSymbolIssues extends BaseTest {
" : ID ;",

// YIELDS
"error(37): D.g4:3:21: label j conflicts with rule a's return value or parameter with same name\n" +
"error(41): D.g4:5:0: rule b's argument i conflicts a return value with same name\n"
"error(72): D.g4:4:21: label j conflicts with rule a's return value or parameter with same name\n" +
"error(76): D.g4:6:0: rule b's argument i conflicts a return value with same name\n"
};

static String[] E = {

@@ -78,10 +80,10 @@ public class TestSymbolIssues extends BaseTest {
"a : A ;\n",

// YIELDS
"error(73): E.g4:4:8: cannot redefine B; token name already defined\n" +
"error(73): E.g4:5:4: cannot redefine C; token name already defined\n" +
"error(73): E.g4:6:8: cannot redefine D; token name already defined\n" +
"error(72): E.g4:7:8: cannot alias X='e'; string already assigned to E\n"
"error(108): E.g4:4:8: cannot redefine B; token name already defined\n" +
"error(108): E.g4:5:4: cannot redefine C; token name already defined\n" +
"error(108): E.g4:6:8: cannot redefine D; token name already defined\n" +
"error(107): E.g4:7:8: cannot alias X='e'; string already assigned to E\n"
};

@Test public void testA() { super.testErrors(A, false); }
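Every TestSymbolIssues edit above follows one pattern: each static String[] fixture pairs an INPUT grammar with a YIELDS block, and super.testErrors runs the tool on the input and compares the collected messages against the YIELDS strings verbatim. That is why renumbering the error codes (for example error(25) becoming error(59)) touches every expected string. A minimal sketch of the fixture shape, with a hypothetical grammar F for illustration:

static String[] F = {
// INPUT (hypothetical)
"parser grammar F;\n" +
"x : Y ;",
// YIELDS: compared verbatim against the tool's collected output,
// so any ErrorType renumbering must be mirrored here.
"" // expected messages, one per line, in "error(N): file:line:col: msg\n" form
};
@Test public void testF() { super.testErrors(F, false); }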
@@ -1,807 +0,0 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;

import org.antlr.v4.runtime.TokenRewriteStream;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;

public class TestTokenRewriteStream extends BaseTest {

/** Public default constructor used by TestRig */
public TestTokenRewriteStream() {
}

@Test public void testInsertBeforeIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
LexerInterpreter lexInterp = new LexerInterpreter(g, "abc");
TokenRewriteStream tokens = new TokenRewriteStream(lexInterp);
tokens.insertBefore(0, "0");
String result = tokens.toString();
String expecting = "0abc";
assertEquals(expecting, result);
}

@Test public void testInsertAfterLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.insertAfter(2, "x");
String result = tokens.toString();
String expecting = "abcx";
assertEquals(expecting, result);
}

@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "x");
tokens.insertAfter(1, "x");
String result = tokens.toString();
String expecting = "axbxc";
assertEquals(expecting, result);
}

@Test public void testReplaceIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, "x");
String result = tokens.toString();
String expecting = "xbc";
assertEquals(expecting, result);
}

@Test public void testReplaceLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, "x");
String result = tokens.toString();
String expecting = "abx";
assertEquals(expecting, result);
}

@Test public void testReplaceMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, "x");
String result = tokens.toString();
String expecting = "axc";
assertEquals(expecting, result);
}

@Test public void testToStringStartStop() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"MUL : '*';\n" +
"ASSIGN : '=';\n" +
"WS : ' '+;\n");
// Tokens: 0123456789
// Input:  x = 3 * 0;
String input = "x = 3 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(4, 8, "0"); // replace 3 * 0 with 0

String result = tokens.toOriginalString();
String expecting = "x = 3 * 0;";
assertEquals(expecting, result);

result = tokens.toString();
expecting = "x = 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 9));
expecting = "x = 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(4, 8));
expecting = "0";
assertEquals(expecting, result);
}

@Test public void testToStringStartStop2() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input:  x = 3 * 0 + 2 * 0;
String input = "x = 3 * 0 + 2 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();

String result = tokens.toOriginalString();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);

tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
result = tokens.toString();
expecting = "x = 0 + 2 * 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 17));
expecting = "x = 0 + 2 * 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(4, 8));
expecting = "0";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 8));
expecting = "x = 0";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(12, 16));
expecting = "2 * 0";
assertEquals(expecting, result);

tokens.insertAfter(17, "// comment");
result = tokens.getText(Interval.of(12, 18));
expecting = "2 * 0;// comment";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 8)); // try again after insert at end
expecting = "x = 0";
assertEquals(expecting, result);
}


@Test public void test2ReplaceMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, "x");
tokens.replace(1, "y");
String result = tokens.toString();
String expecting = "ayc";
assertEquals(expecting, result);
}

@Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "_");
tokens.replace(1, "x");
tokens.replace(1, "y");
String result = tokens.toString();
String expecting = "_ayc";
assertEquals(expecting, result);
}

@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, "x");
tokens.delete(1);
String result = tokens.toString();
String expecting = "ac";
assertEquals(expecting, result);
}

@Test public void testInsertInPriorReplace() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, 2, "x");
tokens.insertBefore(1, "0");
Exception exc = null;
try {
tokens.toString();
}
catch (IllegalArgumentException iae) {
exc = iae;
}
String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">";
assertNotNull(exc);
assertEquals(expecting, exc.getMessage());
}

@Test public void testInsertThenReplaceSameIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "0");
tokens.replace(0, "x"); // supercedes insert at 0
String result = tokens.toString();
String expecting = "0xbc";
assertEquals(expecting, result);
}

@Test public void test2InsertMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "x");
tokens.insertBefore(1, "y");
String result = tokens.toString();
String expecting = "ayxbc";
assertEquals(expecting, result);
}

@Test public void test2InsertThenReplaceIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "x");
tokens.insertBefore(0, "y");
tokens.replace(0, "z");
String result = tokens.toString();
String expecting = "yxzbc";
assertEquals(expecting, result);
}

@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, "x");
tokens.insertBefore(2, "y");
String result = tokens.toString();
String expecting = "abyx";
assertEquals(expecting, result);
}

@Test public void testInsertThenReplaceLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(2, "y");
tokens.replace(2, "x");
String result = tokens.toString();
String expecting = "abyx";
assertEquals(expecting, result);
}

@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, "x");
tokens.insertAfter(2, "y");
String result = tokens.toString();
String expecting = "abxy";
assertEquals(expecting, result);
}

@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 4, "x");
tokens.insertBefore(2, "y");
String result = tokens.toString();
String expecting = "abyxba";
assertEquals(expecting, result);
}

@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 4, "x");
tokens.insertBefore(4, "y"); // no effect; within range of a replace
Exception exc = null;
try {
tokens.toString();
}
catch (IllegalArgumentException iae) {
exc = iae;
}
String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">";
assertNotNull(exc);
assertEquals(expecting, exc.getMessage());
}

@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 4, "x");
tokens.insertAfter(4, "y");
String result = tokens.toString();
String expecting = "abxyba";
assertEquals(expecting, result);
}

@Test public void testReplaceAll() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, 6, "x");
String result = tokens.toString();
String expecting = "x";
assertEquals(expecting, result);
}

@Test public void testReplaceSubsetThenFetch() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 4, "xyz");
String result = tokens.getText(Interval.of(0, 6));
String expecting = "abxyzba";
assertEquals(expecting, result);
}

@Test public void testReplaceThenReplaceSuperset() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 4, "xyz");
tokens.replace(3, 5, "foo"); // overlaps, error
Exception exc = null;
try {
tokens.toString();
}
catch (IllegalArgumentException iae) {
exc = iae;
}
String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
assertNotNull(exc);
assertEquals(expecting, exc.getMessage());
}

@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 4, "xyz");
tokens.replace(1, 3, "foo"); // overlap, error
Exception exc = null;
try {
tokens.toString();
}
catch (IllegalArgumentException iae) {
exc = iae;
}
String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
assertNotNull(exc);
assertEquals(expecting, exc.getMessage());
}

@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 2, "xyz");
tokens.replace(0, 3, "foo");
String result = tokens.toString();
String expecting = "fooa";
assertEquals(expecting, result);
}

// June 2, 2008 I rewrote core of rewrite engine; just adding lots more tests here

@Test public void testCombineInserts() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "x");
tokens.insertBefore(0, "y");
String result = tokens.toString();
String expecting = "yxabc";
assertEquals(expecting, result);
}

@Test public void testCombine3Inserts() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "x");
tokens.insertBefore(0, "y");
tokens.insertBefore(1, "z");
String result = tokens.toString();
String expecting = "yazxbc";
assertEquals(expecting, result);
}

@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, 2, "foo");
tokens.insertBefore(0, "z"); // combine with left edge of rewrite
String result = tokens.toString();
String expecting = "zfoo";
assertEquals(expecting, result);
}

@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.delete(0, 2);
tokens.insertBefore(0, "z"); // combine with left edge of rewrite
String result = tokens.toString();
String expecting = "z"; // make sure combo is not znull
assertEquals(expecting, result);
}

@Test public void testDisjointInserts() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "x");
tokens.insertBefore(2, "y");
tokens.insertBefore(0, "z");
String result = tokens.toString();
String expecting = "zaxbyc";
assertEquals(expecting, result);
}

@Test public void testOverlappingReplace() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, 2, "foo");
tokens.replace(0, 3, "bar"); // wipes prior nested replace
String result = tokens.toString();
String expecting = "bar";
assertEquals(expecting, result);
}

@Test public void testOverlappingReplace2() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(0, 3, "bar");
tokens.replace(1, 2, "foo"); // cannot split earlier replace
Exception exc = null;
try {
tokens.toString();
}
catch (IllegalArgumentException iae) {
exc = iae;
}
String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">";
assertNotNull(exc);
assertEquals(expecting, exc.getMessage());
}

@Test public void testOverlappingReplace3() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, 2, "foo");
tokens.replace(0, 2, "bar"); // wipes prior nested replace
String result = tokens.toString();
String expecting = "barc";
assertEquals(expecting, result);
}

@Test public void testOverlappingReplace4() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, 2, "foo");
tokens.replace(1, 3, "bar"); // wipes prior nested replace
String result = tokens.toString();
String expecting = "abar";
assertEquals(expecting, result);
}

@Test public void testDropIdenticalReplace() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(1, 2, "foo");
tokens.replace(1, 2, "foo"); // drop previous, identical
String result = tokens.toString();
String expecting = "afooc";
assertEquals(expecting, result);
}

@Test public void testDropPrevCoveredInsert() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "foo");
tokens.replace(1, 2, "foo"); // kill prev insert
String result = tokens.toString();
String expecting = "afoofoo";
assertEquals(expecting, result);
}

@Test public void testLeaveAloneDisjointInsert() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(1, "x");
tokens.replace(2, 3, "foo");
String result = tokens.toString();
String expecting = "axbfoo";
assertEquals(expecting, result);
}

@Test public void testLeaveAloneDisjointInsert2() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.replace(2, 3, "foo");
tokens.insertBefore(1, "x");
String result = tokens.toString();
String expecting = "axbfoo";
assertEquals(expecting, result);
}

@Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(2, "y");
tokens.delete(2);
String result = tokens.toString();
String expecting = "aby";
assertEquals(expecting, result);
}

}
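The deletion above and the addition below are the same suite ported across an API move: TokenRewriteStream was itself the token stream and rendered its edits via toString(), while the replacement TokenStreamRewriter is a thin layer over a separately built CommonTokenStream and renders via getText(). The mechanical translation, distilled from the two files:

// Old style (removed file): the rewriter is the stream.
TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
tokens.fill();
tokens.insertBefore(0, "0");
String out = tokens.toString(); // "0abc"

// New style (added file): the rewriter wraps a filled CommonTokenStream.
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter rewriter = new TokenStreamRewriter(stream);
rewriter.insertBefore(0, "0");
String out2 = rewriter.getText(); // "0abc"
// toOriginalString() correspondingly becomes rewriter.getTokenStream().getText().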
@@ -0,0 +1,879 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;

import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.TokenStreamRewriter;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;

public class TestTokenStreamRewriter extends BaseTest {

/** Public default constructor used by TestRig */
public TestTokenStreamRewriter() {
}

@Test public void testInsertBeforeIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
LexerInterpreter lexEngine = new LexerInterpreter(g, "abc");
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.insertBefore(0, "0");
String result = tokens.getText();
String expecting = "0abc";
assertEquals(expecting, result);
}

@Test public void testInsertAfterLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.insertAfter(2, "x");
String result = tokens.getText();
String expecting = "abcx";
assertEquals(expecting, result);
}

@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.insertBefore(1, "x");
tokens.insertAfter(1, "x");
String result = tokens.getText();
String expecting = "axbxc";
assertEquals(expecting, result);
}

@Test public void testReplaceIndex0() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.replace(0, "x");
String result = tokens.getText();
String expecting = "xbc";
assertEquals(expecting, result);
}

@Test public void testReplaceLastIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.replace(2, "x");
String result = tokens.getText();
String expecting = "abx";
assertEquals(expecting, result);
}

@Test public void testReplaceMiddleIndex() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.replace(1, "x");
String result = tokens.getText();
String expecting = "axc";
assertEquals(expecting, result);
}

@Test public void testToStringStartStop() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"MUL : '*';\n" +
"ASSIGN : '=';\n" +
"WS : ' '+;\n");
// Tokens: 0123456789
// Input:  x = 3 * 0;
String input = "x = 3 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.replace(4, 8, "0");
stream.fill();
// replace 3 * 0 with 0

String result = tokens.getTokenStream().getText();
String expecting = "x = 3 * 0;";
assertEquals(expecting, result);

result = tokens.getText();
expecting = "x = 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 9));
expecting = "x = 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(4, 8));
expecting = "0";
assertEquals(expecting, result);
}

@Test public void testToStringStartStop2() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input:  x = 3 * 0 + 2 * 0;
String input = "x = 3 * 0 + 2 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);

String result = tokens.getTokenStream().getText();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);

tokens.replace(4, 8, "0");
stream.fill();
// replace 3 * 0 with 0
result = tokens.getText();
expecting = "x = 0 + 2 * 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 17));
expecting = "x = 0 + 2 * 0;";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(4, 8));
expecting = "0";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 8));
expecting = "x = 0";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(12, 16));
expecting = "2 * 0";
assertEquals(expecting, result);

tokens.insertAfter(17, "// comment");
result = tokens.getText(Interval.of(12, 18));
expecting = "2 * 0;// comment";
assertEquals(expecting, result);

result = tokens.getText(Interval.of(0, 8));
stream.fill();
// try again after insert at end
expecting = "x = 0";
assertEquals(expecting, result);
}
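The two toString tests above carry most of the new API's semantics: getTokenStream().getText() always renders the unmodified token text, while getText() and getText(Interval) apply the pending rewrite operations, and Interval bounds are token indexes rather than character offsets. Condensed from the test above (variable renamed to rewriter for clarity):

rewriter.replace(4, 8, "0"); // tokens 4..8 spell "3 * 0"
rewriter.getTokenStream().getText(); // "x = 3 * 0 + 2 * 0;" -- original
rewriter.getText(); // "x = 0 + 2 * 0;" -- rewritten
rewriter.getText(Interval.of(4, 8)); // "0"
rewriter.getText(Interval.of(12, 16)); // "2 * 0"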
|
||||
|
||||
|
||||
@Test public void test2ReplaceMiddleIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(1, "x");
|
||||
tokens.replace(1, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "ayc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(0, "_");
|
||||
tokens.replace(1, "x");
|
||||
tokens.replace(1, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "_ayc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(1, "x");
|
||||
tokens.delete(1);
|
||||
String result = tokens.getText();
|
||||
String expecting = "ac";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testInsertInPriorReplace() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(0, 2, "x");
|
||||
tokens.insertBefore(1, "0");
|
||||
Exception exc = null;
|
||||
try {
|
||||
tokens.getText();
|
||||
}
|
||||
catch (IllegalArgumentException iae) {
|
||||
exc = iae;
|
||||
}
|
||||
String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">";
|
||||
assertNotNull(exc);
|
||||
assertEquals(expecting, exc.getMessage());
|
||||
}
|
||||
|
||||
@Test public void testInsertThenReplaceSameIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(0, "0");
|
||||
tokens.replace(0, "x");
|
||||
stream.fill();
|
||||
// supercedes insert at 0
|
||||
String result = tokens.getText();
|
||||
String expecting = "0xbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void test2InsertMiddleIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(1, "x");
|
||||
tokens.insertBefore(1, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "ayxbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void test2InsertThenReplaceIndex0() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(0, "x");
|
||||
tokens.insertBefore(0, "y");
|
||||
tokens.replace(0, "z");
|
||||
String result = tokens.getText();
|
||||
String expecting = "yxzbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, "x");
|
||||
tokens.insertBefore(2, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "abyx";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testInsertThenReplaceLastIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(2, "y");
|
||||
tokens.replace(2, "x");
|
||||
String result = tokens.getText();
|
||||
String expecting = "abyx";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, "x");
|
||||
tokens.insertAfter(2, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "abxy";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 4, "x");
|
||||
tokens.insertBefore(2, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "abyxba";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 4, "x");
|
||||
tokens.insertBefore(4, "y");
|
||||
stream.fill(); // no effect; within range of a replace
|
||||
Exception exc = null;
|
||||
try {
|
||||
tokens.getText();
|
||||
}
|
||||
catch (IllegalArgumentException iae) {
|
||||
exc = iae;
|
||||
}
|
||||
String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">";
|
||||
assertNotNull(exc);
|
||||
assertEquals(expecting, exc.getMessage());
|
||||
}
|
||||
|
||||
@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 4, "x");
|
||||
tokens.insertAfter(4, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "abxyba";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceAll() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(0, 6, "x");
|
||||
String result = tokens.getText();
|
||||
String expecting = "x";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceSubsetThenFetch() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 4, "xyz");
|
||||
String result = tokens.getText(Interval.of(0, 6));
|
||||
String expecting = "abxyzba";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenReplaceSuperset() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 4, "xyz");
|
||||
tokens.replace(3, 5, "foo");
|
||||
stream.fill();
|
||||
// overlaps, error
|
||||
Exception exc = null;
|
||||
try {
|
||||
tokens.getText();
|
||||
}
|
||||
catch (IllegalArgumentException iae) {
|
||||
exc = iae;
|
||||
}
|
||||
String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
|
||||
assertNotNull(exc);
|
||||
assertEquals(expecting, exc.getMessage());
|
||||
}
|
||||
|
||||
@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcccba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 4, "xyz");
|
||||
tokens.replace(1, 3, "foo");
|
||||
stream.fill();
|
||||
// overlap, error
|
||||
Exception exc = null;
|
||||
try {
|
||||
tokens.getText();
|
||||
}
|
||||
catch (IllegalArgumentException iae) {
|
||||
exc = iae;
|
||||
}
|
||||
String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">";
|
||||
assertNotNull(exc);
|
||||
assertEquals(expecting, exc.getMessage());
|
||||
}
|
||||
|
||||
@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abcba";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.replace(2, 2, "xyz");
|
||||
tokens.replace(0, 3, "foo");
|
||||
String result = tokens.getText();
|
||||
String expecting = "fooa";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testCombineInserts() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(0, "x");
|
||||
tokens.insertBefore(0, "y");
|
||||
String result = tokens.getText();
|
||||
String expecting = "yxabc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testCombine3Inserts() throws Exception {
|
||||
LexerGrammar g = new LexerGrammar(
|
||||
"lexer grammar t;\n"+
|
||||
"A : 'a';\n" +
|
||||
"B : 'b';\n" +
|
||||
"C : 'c';\n");
|
||||
String input = "abc";
|
||||
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
|
||||
CommonTokenStream stream = new CommonTokenStream(lexEngine);
|
||||
stream.fill();
|
||||
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
|
||||
tokens.insertBefore(1, "x");
|
||||
tokens.insertBefore(0, "y");
|
||||
tokens.insertBefore(1, "z");
|
||||
String result = tokens.getText();
|
||||
String expecting = "yazxbc";
|
||||
assertEquals(expecting, result);
|
||||
}
|
||||
|
||||
@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(0, 2, "foo");
    tokens.insertBefore(0, "z");
    stream.fill();
    // combine with left edge of rewrite
    String result = tokens.getText();
    String expecting = "zfoo";
    assertEquals(expecting, result);
}

@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.delete(0, 2);
    tokens.insertBefore(0, "z");
    stream.fill();
    // combine with left edge of rewrite
    String result = tokens.getText();
    String expecting = "z";
    stream.fill();
    // make sure combo is not znull
    assertEquals(expecting, result);
}

@Test public void testDisjointInserts() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "x");
    tokens.insertBefore(2, "y");
    tokens.insertBefore(0, "z");
    String result = tokens.getText();
    String expecting = "zaxbyc";
    assertEquals(expecting, result);
}

@Test public void testOverlappingReplace() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(0, 3, "bar");
    stream.fill();
    // wipes prior nested replace
    String result = tokens.getText();
    String expecting = "bar";
    assertEquals(expecting, result);
}

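// The overlapping-replace tests pin down an asymmetry: a later replace that
// fully covers an earlier one silently discards the nested op, while a later
// replace that would split an earlier one raises IllegalArgumentException.
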
@Test public void testOverlappingReplace2() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(0, 3, "bar");
    tokens.replace(1, 2, "foo");
    stream.fill();
    // cannot split earlier replace
    Exception exc = null;
    try {
        tokens.getText();
    }
    catch (IllegalArgumentException iae) {
        exc = iae;
    }
    String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">";
    assertNotNull(exc);
    assertEquals(expecting, exc.getMessage());
}

@Test public void testOverlappingReplace3() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(0, 2, "bar");
    stream.fill();
    // wipes prior nested replace
    String result = tokens.getText();
    String expecting = "barc";
    assertEquals(expecting, result);
}

@Test public void testOverlappingReplace4() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(1, 3, "bar");
    stream.fill();
    // wipes prior nested replace
    String result = tokens.getText();
    String expecting = "abar";
    assertEquals(expecting, result);
}

@Test public void testDropIdenticalReplace() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(1, 2, "foo");
    tokens.replace(1, 2, "foo");
    stream.fill();
    // drop previous, identical
    String result = tokens.getText();
    String expecting = "afooc";
    assertEquals(expecting, result);
}

@Test public void testDropPrevCoveredInsert() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "foo");
    tokens.replace(1, 2, "foo");
    stream.fill();
    // kill prev insert
    String result = tokens.getText();
    String expecting = "afoofoo";
    assertEquals(expecting, result);
}

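// As testDropPrevCoveredInsert shows, an insertBefore sitting on the left edge
// of a later covering replace is dropped as an op, but its text survives in
// the rendered output ("foo" + "foo" -> "afoofoo"), not lost.
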
@Test public void testLeaveAloneDisjointInsert() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(1, "x");
    tokens.replace(2, 3, "foo");
    String result = tokens.getText();
    String expecting = "axbfoo";
    assertEquals(expecting, result);
}

@Test public void testLeaveAloneDisjointInsert2() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abcc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.replace(2, 3, "foo");
    tokens.insertBefore(1, "x");
    String result = tokens.getText();
    String expecting = "axbfoo";
    assertEquals(expecting, result);
}

@Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
    LexerGrammar g = new LexerGrammar(
        "lexer grammar t;\n"+
        "A : 'a';\n" +
        "B : 'b';\n" +
        "C : 'c';\n");
    String input = "abc";
    LexerInterpreter lexEngine = new LexerInterpreter(g, input);
    CommonTokenStream stream = new CommonTokenStream(lexEngine);
    stream.fill();
    TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
    tokens.insertBefore(2, "y");
    tokens.delete(2);
    String result = tokens.getText();
    String expecting = "aby";
    assertEquals(expecting, result);
}

}
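Taken together, the tests above specify TokenStreamRewriter's lazy
rewrite-program semantics: ops are queued against a fully buffered
CommonTokenStream and only resolved when getText() renders the result. A
minimal usage sketch follows, reusing the same classes the tests exercise;
the grammar, input, and inserted strings are illustrative only, not part of
the test suite.

// Minimal sketch (illustrative grammar/input), assuming the test classpath.
LexerGrammar g = new LexerGrammar(
    "lexer grammar t;\n" +
    "A : 'a';\n" +
    "B : 'b';\n" +
    "C : 'c';\n");
LexerInterpreter lexEngine = new LexerInterpreter(g, "abc");
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();                              // buffer every token up front
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
tokens.insertBefore(0, "<start>");          // queue an insert at token 0
tokens.replace(1, 2, "XY");                 // queue a replace over tokens 1..2
// Nothing has changed yet; ops execute only at render time.
String rewritten = tokens.getText();        // "<start>aXY"
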
@@ -8,37 +8,37 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"grammar A;\n" +
 	"",
 	// YIELDS
-	"error(64): A.g4::: grammar A has no rules\n",
+	"error(99): A.g4::: grammar A has no rules\n",

 	"A;",
-	"error(16): <string>:1:0: 'A' came as a complete surprise to me\n",
+	"error(50): <string>:1:0: 'A' came as a complete surprise to me\n",

 	"grammar ;",
-	"error(16): <string>:1:8: ';' came as a complete surprise to me while looking for an identifier\n",
+	"error(50): <string>:1:8: ';' came as a complete surprise to me while looking for an identifier\n",

 	"grammar A\n" +
 	"a : ID ;\n",
-	"error(16): <string>:2:0: missing SEMI at 'a'\n",
+	"error(50): <string>:2:0: missing SEMI at 'a'\n",

 	"grammar A;\n" +
 	"a : ID ;;\n"+
 	"b : B ;",
-	"error(16): A.g4:2:8: ';' came as a complete surprise to me\n",
+	"error(50): A.g4:2:8: ';' came as a complete surprise to me\n",

 	"grammar A;;\n" +
 	"a : ID ;\n",
-	"error(16): A;.g4:1:10: ';' came as a complete surprise to me\n",
+	"error(50): A;.g4:1:10: ';' came as a complete surprise to me\n",

 	"grammar A;\n" +
 	"a @init : ID ;\n",
-	"error(16): A.g4:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n",
+	"error(50): A.g4:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n",

 	"grammar A;\n" +
 	"a ( A | B ) D ;\n" +
 	"b : B ;",
-	"error(16): A.g4:2:3: '(' came as a complete surprise to me while matching rule preamble\n" +
-	"error(16): A.g4:2:11: mismatched input ')' expecting SEMI while matching a rule\n" +
-	"error(16): A.g4:2:15: mismatched input ';' expecting COLON while matching a lexer rule\n",
+	"error(50): A.g4:2:3: '(' came as a complete surprise to me while matching rule preamble\n" +
+	"error(50): A.g4:2:11: mismatched input ')' expecting SEMI while matching a rule\n" +
+	"error(50): A.g4:2:15: mismatched input ';' expecting COLON while matching a lexer rule\n",
 };

 @Test public void testA() { super.testErrors(A, true); }

@@ -48,7 +48,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"grammar A;\n" +
 	"a : : A ;\n" +
 	"b : B ;",
-	"error(16): A.g4:2:4: ':' came as a complete surprise to me while matching alternative\n",
+	"error(50): A.g4:2:4: ':' came as a complete surprise to me while matching alternative\n",
 };
 super.testErrors(pair, true);
 }

@@ -58,7 +58,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"grammar A;\n" +
 	"a : A \n" +
 	"b : B ;",
-	"error(16): A.g4:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n",
+	"error(50): A.g4:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n",
 };
 super.testErrors(pair, true);
 }

@@ -68,7 +68,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"lexer grammar A;\n" +
 	"A : 'a' \n" +
 	"B : 'b' ;",
-	"error(16): A.g4:3:0: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n",
+	"error(50): A.g4:3:0: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n",
 };
 super.testErrors(pair, true);
 }

@@ -78,7 +78,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"grammar A;\n" +
 	"a : A \n" +
 	"b[int i] returns [int y] : B ;",
-	"error(16): A.g4:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n"
+	"error(50): A.g4:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n"
 };
 super.testErrors(pair, true);
 }

@@ -90,7 +90,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	" catch [Exception e] {...}\n" +
 	"b : B ;\n",

-	"error(16): A.g4:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n"
+	"error(50): A.g4:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n"
 };
 super.testErrors(pair, true);
 }

@@ -101,7 +101,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"a : A \n" +
 	" catch [Exception e] {...}\n",

-	"error(16): A.g4:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n"
+	"error(50): A.g4:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n"
 };
 super.testErrors(pair, true);
 }

@@ -112,7 +112,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"a @ options {k=1;} : A ;\n" +
 	"b : B ;",

-	"error(16): A.g4:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n"
+	"error(50): A.g4:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n"
 };
 super.testErrors(pair, true);
 }

@@ -123,7 +123,7 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"a } : A ;\n" +
 	"b : B ;",

-	"error(16): A.g4:2:2: '}' came as a complete surprise to me while matching rule preamble\n"
+	"error(50): A.g4:2:2: '}' came as a complete surprise to me while matching rule preamble\n"
 };
 super.testErrors(pair, true);
 }

@@ -135,8 +135,8 @@ public class TestToolSyntaxErrors extends BaseTest {
 	"mode foo;\n" +
 	"b : B ;",

-	"error(16): A.g4:4:0: 'b' came as a complete surprise to me\n" +
-	"error(16): A.g4:4:6: mismatched input ';' expecting COLON while matching a lexer rule\n"
+	"error(50): A.g4:4:0: 'b' came as a complete surprise to me\n" +
+	"error(50): A.g4:4:6: mismatched input ';' expecting COLON while matching a lexer rule\n"
 };
 super.testErrors(pair, true);
 }