added a LexerInterpreter and copied in unit tests

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 8867]
This commit is contained in:
parrt 2011-07-16 12:57:25 -08:00
parent 9fb93fd090
commit 7a109d29d5
16 changed files with 1746 additions and 54 deletions

View File

@ -40,7 +40,7 @@ import java.util.*;
*
* TODO: rename since lexer not under. or reorg parser/treeparser; treeparser under parser?
*/
public abstract class BaseRecognizer extends Recognizer<ParserInterpreter> {
public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
public static final int MEMO_RULE_FAILED = -2;
public static final int MEMO_RULE_UNKNOWN = -1;

View File

@ -28,7 +28,7 @@
*/
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.atn.LexerInterpreter;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.misc.QStack;
import java.util.EmptyStackException;
@ -38,7 +38,7 @@ import java.util.EmptyStackException;
* uses simplified match() and error recovery mechanisms in the interest
* of speed.
*/
public abstract class Lexer extends Recognizer<LexerInterpreter>
public abstract class Lexer extends Recognizer<LexerATNSimulator>
implements TokenSource
{
public static final int DEFAULT_MODE = 0;

View File

@ -34,7 +34,7 @@ import org.antlr.v4.runtime.misc.*;
import java.util.*;
public abstract class ATNInterpreter {
public abstract class ATNSimulator {
/** Must distinguish between missing edge and edge we know leads nowhere */
public static DFAState ERROR;
public ATN atn;
@ -47,7 +47,7 @@ public abstract class ATNInterpreter {
ERROR.stateNumber = Integer.MAX_VALUE;
}
public ATNInterpreter(ATN atn) {
public ATNSimulator(ATN atn) {
this.atn = atn;
}

View File

@ -34,7 +34,7 @@ import org.antlr.v4.runtime.dfa.*;
import org.antlr.v4.runtime.misc.OrderedHashSet;
/** "dup" of ParserInterpreter */
public class LexerInterpreter extends ATNInterpreter {
public class LexerATNSimulator extends ATNSimulator {
public static boolean debug = false;
public static boolean dfa_debug = false;
public static final int NUM_EDGES = 255;
@ -47,11 +47,11 @@ public class LexerInterpreter extends ATNInterpreter {
public static int ATN_failover = 0;
public static int match_calls = 0;
public LexerInterpreter(ATN atn) {
public LexerATNSimulator(ATN atn) {
this(null, atn);
}
public LexerInterpreter(Lexer recog, ATN atn) {
public LexerATNSimulator(Lexer recog, ATN atn) {
super(atn);
dfa = new DFA[atn.modeToStartState.size()];
for (int i=0; i<atn.modeToStartState.size(); i++) {

View File

@ -30,10 +30,10 @@ class TestJava {
System.out.println("Total time " + (stop - start) + "ms.");
System.out.println("finished parsing OK");
System.out.println(LexerInterpreter.ATN_failover+" lexer failovers");
System.out.println(LexerInterpreter.match_calls+" lexer match calls");
System.out.println(ParserInterpreter.ATN_failover+" parser failovers");
System.out.println(ParserInterpreter.predict_calls +" parser predict calls");
System.out.println(LexerATNSimulator.ATN_failover+" lexer failovers");
System.out.println(LexerATNSimulator.match_calls+" lexer match calls");
System.out.println(ParserATNSimulator.ATN_failover+" parser failovers");
System.out.println(ParserATNSimulator.predict_calls +" parser predict calls");
if ( profile ) {
System.out.println("num decisions "+profiler.numDecisions);
}

View File

@ -30,10 +30,10 @@ class TestYang {
System.out.println("Total time " + (stop - start) + "ms.");
System.out.println("finished parsing OK");
System.out.println(LexerInterpreter.ATN_failover+" lexer failovers");
System.out.println(LexerInterpreter.match_calls+" lexer match calls");
System.out.println(ParserInterpreter.ATN_failover+" parser failovers");
System.out.println(ParserInterpreter.predict_calls +" parser predict calls");
System.out.println(LexerATNSimulator.ATN_failover+" lexer failovers");
System.out.println(LexerATNSimulator.match_calls+" lexer match calls");
System.out.println(ParserATNSimulator.ATN_failover+" parser failovers");
System.out.println(ParserATNSimulator.predict_calls +" parser predict calls");
if ( profile ) {
System.out.println("num decisions "+profiler.numDecisions);
}

View File

@ -87,7 +87,7 @@ case <index> : <actions.(index)> break;}; separator="\n">
ctor(p) ::= <<
public <p.name>(TokenStream input) {
super(input);
_interp = new ParserInterpreter(this,_ATN);
_interp = new ParserATNSimulator(this,_ATN);
}
>>
@ -529,7 +529,7 @@ public class <lexer.name> extends Lexer {
public <lexer.name>(CharStream input) {
super(input);
_interp = new LexerInterpreter(this,_ATN);
_interp = new LexerATNSimulator(this,_ATN);
}
public String getGrammarFileName() { return "<lexerFile.fileName>"; }
@ -552,7 +552,7 @@ SerializedATN(model) ::= <<
public static final String _serializedATN =
"<model.serialized; wrap={"+<\n>"}, anchor>";
public static final ATN _ATN =
ATNInterpreter.deserialize(_serializedATN.toCharArray());
ATNSimulator.deserialize(_serializedATN.toCharArray());
static {
org.antlr.v4.tool.DOTGenerator dot = new org.antlr.v4.tool.DOTGenerator(null);
//System.out.println(dot.getDOT(_ATN.decisionToATNState.get(0)));

View File

@ -186,57 +186,57 @@ public class ATNSerializer {
public String decode(char[] data) {
StringBuilder buf = new StringBuilder();
int p = 0;
int grammarType = ATNInterpreter.toInt(data[p++]);
int maxType = ATNInterpreter.toInt(data[p++]);
int grammarType = ATNSimulator.toInt(data[p++]);
int maxType = ATNSimulator.toInt(data[p++]);
buf.append("max type "+maxType+"\n");
int nstates = ATNInterpreter.toInt(data[p++]);
int nstates = ATNSimulator.toInt(data[p++]);
for (int i=1; i<=nstates; i++) {
int stype = ATNInterpreter.toInt(data[p++]);
int ruleIndex = ATNInterpreter.toInt(data[p++]);
int stype = ATNSimulator.toInt(data[p++]);
int ruleIndex = ATNSimulator.toInt(data[p++]);
if ( stype==0 ) continue; // ignore bad type of states
buf.append((i - 1) + ":" +
ATNState.serializationNames[stype] + " "+
ruleIndex + "\n");
}
int nrules = ATNInterpreter.toInt(data[p++]);
int nrules = ATNSimulator.toInt(data[p++]);
for (int i=1; i<=nrules; i++) {
int s = ATNInterpreter.toInt(data[p++]);
int arg1 = ATNInterpreter.toInt(data[p++]);
int arg2 = ATNInterpreter.toInt(data[p++]);
int s = ATNSimulator.toInt(data[p++]);
int arg1 = ATNSimulator.toInt(data[p++]);
int arg2 = ATNSimulator.toInt(data[p++]);
buf.append("rule "+i+":"+s+" "+arg1+","+arg2+'\n');
}
int nmodes = ATNInterpreter.toInt(data[p++]);
int nmodes = ATNSimulator.toInt(data[p++]);
for (int i=0; i<nmodes; i++) {
int s = ATNInterpreter.toInt(data[p++]);
int s = ATNSimulator.toInt(data[p++]);
buf.append("mode "+i+":"+s+'\n');
}
int nsets = ATNInterpreter.toInt(data[p++]);
int nsets = ATNSimulator.toInt(data[p++]);
for (int i=1; i<=nsets; i++) {
int nintervals = ATNInterpreter.toInt(data[p++]);
int nintervals = ATNSimulator.toInt(data[p++]);
buf.append((i-1)+":");
for (int j=1; j<=nintervals; j++) {
if ( j>1 ) buf.append(", ");
buf.append(getTokenName(ATNInterpreter.toInt(data[p]))+".."+getTokenName(ATNInterpreter.toInt(data[p+1])));
buf.append(getTokenName(ATNSimulator.toInt(data[p]))+".."+getTokenName(ATNSimulator.toInt(data[p + 1])));
p += 2;
}
buf.append("\n");
}
int nedges = ATNInterpreter.toInt(data[p++]);
int nedges = ATNSimulator.toInt(data[p++]);
for (int i=1; i<=nedges; i++) {
int src = ATNInterpreter.toInt(data[p]);
int trg = ATNInterpreter.toInt(data[p+1]);
int ttype = ATNInterpreter.toInt(data[p+2]);
int arg1 = ATNInterpreter.toInt(data[p+3]);
int arg2 = ATNInterpreter.toInt(data[p+4]);
int src = ATNSimulator.toInt(data[p]);
int trg = ATNSimulator.toInt(data[p + 1]);
int ttype = ATNSimulator.toInt(data[p + 2]);
int arg1 = ATNSimulator.toInt(data[p + 3]);
int arg2 = ATNSimulator.toInt(data[p + 4]);
buf.append(src+"->"+trg+
" "+Transition.serializationNames[ttype]+
" "+arg1+","+arg2+
"\n");
p += 5;
}
int ndecisions = ATNInterpreter.toInt(data[p++]);
int ndecisions = ATNSimulator.toInt(data[p++]);
for (int i=1; i<=ndecisions; i++) {
int s = ATNInterpreter.toInt(data[p++]);
int s = ATNSimulator.toInt(data[p++]);
buf.append((i-1)+":"+s+"\n");
}
return buf.toString();

View File

@ -0,0 +1,103 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.tool.interp;
import org.antlr.v4.Tool;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.tool.LexerGrammar;
/**
 * Interprets a lexer grammar directly via the ATN simulator, without
 * generating lexer code.  Implements {@link TokenSource} so the resulting
 * tokens can feed any token stream (used by the unit tests).
 */
public class LexerInterpreter implements TokenSource {
	/** The lexer grammar being interpreted; also supplies the source name. */
	protected LexerGrammar g;
	/** ATN-based simulator that performs the actual matching. */
	protected LexerATNSimulator interp;
	/** Character stream to tokenize; must be set via setInput() before nextToken(). */
	protected CharStream input;

	public LexerInterpreter(LexerGrammar g) {
		// FIX: the field was never assigned, so getSourceName() threw NPE.
		this.g = g;
		Tool antlr = new Tool();
		antlr.process(g); // builds g.atn as a side effect
		interp = new LexerATNSimulator(g.atn);
	}

	public void setInput(String inputString) {
		input = new ANTLRStringStream(inputString);
	}

	public void setInput(CharStream input) {
		this.input = input;
	}

	public String getSourceName() { return g.name; }

	/**
	 * Match one token starting at the current input position and return it
	 * on the default channel.
	 */
	public Token nextToken() {
		// FIX: capture position info BEFORE matching; the original read
		// line/charPositionInLine after match() had consumed the token, so the
		// "tokenStart" variables actually held the token END position.
		int tokenStartLine = input.getLine();
		int tokenStartCharPositionInLine = input.getCharPositionInLine();
		int start = input.index();
		int ttype = interp.match(input, Lexer.DEFAULT_MODE);
		int stop = input.index()-1;
		// NOTE(review): no explicit EOF handling here; presumably match()
		// yields Token.EOF at end of input -- confirm against LexerATNSimulator.
		Token t = new CommonToken(input, ttype, Token.DEFAULT_CHANNEL, start, stop);
		t.setLine(tokenStartLine);
		t.setCharPositionInLine(tokenStartCharPositionInLine);
		return t;
	}
}

View File

@ -139,7 +139,7 @@ public abstract class BaseTest {
return expectingTokenTypes;
}
public List<Integer> getTokenTypes(String input, LexerInterpreter lexerATN) {
public List<Integer> getTokenTypes(String input, LexerATNSimulator lexerATN) {
ANTLRStringStream in = new ANTLRStringStream(input);
List<Integer> tokenTypes = new ArrayList<Integer>();
int ttype = 0;
@ -155,7 +155,7 @@ public abstract class BaseTest {
CharStream input,
boolean adaptive)
{
LexerInterpreter interp = new LexerInterpreter(atn);
LexerATNSimulator interp = new LexerATNSimulator(atn);
List<String> tokenTypes = new ArrayList<String>();
int ttype;
do {

View File

@ -117,7 +117,7 @@ public class TestATNDeserialization extends BaseTest {
ATN atn = createATN(g);
char[] data = Utils.toCharArray(ATNSerializer.getSerialized(g, atn));
String atnData = ATNSerializer.getDecoded(g, atn);
ATN atn2 = ParserInterpreter.deserialize(data);
ATN atn2 = ParserATNSimulator.deserialize(data);
String atn2Data = ATNSerializer.getDecoded(g, atn2);
assertEquals(atnData, atn2Data);

View File

@ -261,7 +261,7 @@ public class TestATNInterpreter extends BaseTest {
int expected)
{
ATN lexatn = createATN(lg);
LexerInterpreter lexInterp = new LexerInterpreter(lexatn);
LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn);
List<Integer> types = getTokenTypes(inputString, lexInterp);
System.out.println(types);
@ -272,7 +272,7 @@ public class TestATNInterpreter extends BaseTest {
ParserATNFactory f = new ParserATNFactory(g);
ATN atn = f.createATN();
ParserInterpreter interp = new ParserInterpreter(atn);
ParserATNSimulator interp = new ParserATNSimulator(atn);
TokenStream input = new IntTokenStream(types);
ATNState startState = atn.ruleToStartState[g.getRule("a").index];
if ( startState.transition(0).target instanceof BlockStartState ) {

View File

@ -260,7 +260,7 @@ public class TestATNParserPrediction extends BaseTest {
"e : A | ;\n"); // TODO: try with three alts
ATN lexatn = createATN(lg);
LexerInterpreter lexInterp = new LexerInterpreter(lexatn);
LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn);
semanticProcess(lg);
g.importVocab(lg);
@ -279,7 +279,7 @@ public class TestATNParserPrediction extends BaseTest {
RuleContext a_e_ctx = new RuleContext(a_ctx, a_e_invoke.stateNumber, bStart.stateNumber);
RuleContext b_e_ctx = new RuleContext(b_ctx, b_e_invoke.stateNumber, bStart.stateNumber);
ParserInterpreter interp = new ParserInterpreter(atn);
ParserATNSimulator interp = new ParserATNSimulator(atn);
interp.setContextSensitive(true);
List<Integer> types = getTokenTypes("ab", lexInterp);
System.out.println(types);
@ -479,7 +479,7 @@ public class TestATNParserPrediction extends BaseTest {
{
Tool.internalOption_ShowATNConfigsInDFA = true;
ATN lexatn = createATN(lg);
LexerInterpreter lexInterp = new LexerInterpreter(lexatn);
LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn);
List<Integer> types = getTokenTypes(inputString, lexInterp);
System.out.println(types);
@ -498,7 +498,7 @@ public class TestATNParserPrediction extends BaseTest {
if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index]));
// Check ATN prediction
ParserInterpreter interp = new ParserInterpreter(atn);
ParserATNSimulator interp = new ParserATNSimulator(atn);
TokenStream input = new IntTokenStream(types);
ATNState startState = atn.decisionToState.get(decision);
DFA dfa = new DFA(startState);
@ -523,7 +523,7 @@ public class TestATNParserPrediction extends BaseTest {
{
Tool.internalOption_ShowATNConfigsInDFA = true;
ATN lexatn = createATN(lg);
LexerInterpreter lexInterp = new LexerInterpreter(lexatn);
LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn);
semanticProcess(lg);
g.importVocab(lg);
@ -537,7 +537,7 @@ public class TestATNParserPrediction extends BaseTest {
// System.out.println(dot.getDOT(atn.ruleToStartState.get(g.getRule("b"))));
// System.out.println(dot.getDOT(atn.ruleToStartState.get(g.getRule("e"))));
ParserInterpreter interp = new ParserInterpreter(atn);
ParserATNSimulator interp = new ParserATNSimulator(atn);
List<Integer> types = getTokenTypes(inputString, lexInterp);
System.out.println(types);
TokenStream input = new IntTokenStream(types);
@ -559,7 +559,7 @@ public class TestATNParserPrediction extends BaseTest {
{
// Tool.internalOption_ShowATNConfigsInDFA = true;
ATN lexatn = createATN(lg);
LexerInterpreter lexInterp = new LexerInterpreter(lexatn);
LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn);
semanticProcess(lg);
g.importVocab(lg);
@ -568,7 +568,7 @@ public class TestATNParserPrediction extends BaseTest {
ParserATNFactory f = new ParserATNFactory(g);
ATN atn = f.createATN();
ParserInterpreter interp = new ParserInterpreter(atn);
ParserATNSimulator interp = new ParserATNSimulator(atn);
for (int i=0; i<inputString.length; i++) {
// Check DFA
List<Integer> types = getTokenTypes(inputString[i], lexInterp);

View File

@ -0,0 +1,228 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.v4.runtime.*;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;
/**
 * Unit tests for token-stream buffering: lookahead (LT with positive k),
 * lookback (LT with negative k), full-buffer rendering, and off-channel
 * token skipping.  Lexing is done by interpreting a lexer grammar
 * (LexerInterpreter) rather than generating lexer code.
 */
public class TestCommonTokenStream extends BaseTest {
// LT(1) on a fresh buffered stream must return the first token's text.
@Test public void testFirstToken() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
String result = tokens.LT(1).getText();
String expecting = "x";
assertEquals(expecting, result);
}
// LT(2) sees the whitespace token: BufferedTokenStream does NOT filter
// off-channel/hidden tokens (unlike CommonTokenStream).
@Test public void test2ndToken() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
String result = tokens.LT(2).getText();
String expecting = " ";
assertEquals(expecting, result);
}
// Walking LT(i) to EOF (and past it) must buffer every token, so
// toString() reproduces the entire input text.
@Test public void testCompleteBuffer() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
int i = 1;
Token t = tokens.LT(i);
while ( t.getType()!=Token.EOF ) {
i++;
t = tokens.LT(i);
}
// Lookahead beyond EOF must be harmless (no exception, no extra tokens).
tokens.LT(i++); // push it past end
tokens.LT(i++);
String result = tokens.toString();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);
}
// Same as above but driving the buffer with consume() instead of LT(i);
// consuming at/past EOF must also be harmless.
@Test public void testCompleteBufferAfterConsuming() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
Token t = tokens.LT(1);
while ( t.getType()!=Token.EOF ) {
tokens.consume();
t = tokens.LT(1);
}
tokens.consume();
tokens.LT(1); // push it past end
tokens.consume();
tokens.LT(1);
String result = tokens.toString();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);
}
// Negative-k lookback after consuming: LT(-1) is the most recently
// consumed token; hidden WS tokens count in the offsets here too.
@Test public void testLookback() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
tokens.consume(); // get x into buffer
Token t = tokens.LT(-1);
assertEquals("x", t.getText());
tokens.consume();
tokens.consume(); // consume '='
t = tokens.LT(-3);
assertEquals("x", t.getText());
t = tokens.LT(-2);
assertEquals(" ", t.getText());
t = tokens.LT(-1);
assertEquals("=", t.getText());
}
// CommonTokenStream (unlike BufferedTokenStream above) must skip
// HIDDEN-channel tokens in both forward and backward lookahead.
// A hand-rolled TokenSource supplies a fixed token sequence so channel
// assignments are fully controlled by the test.
@Test public void testOffChannel() throws Exception {
TokenSource lexer = // simulate input " x =34 ;\n"
new TokenSource() {
int i = 0;
Token[] tokens = {
new CommonToken(1," "),
new CommonToken(1,"x"),
new CommonToken(1," "),
new CommonToken(1,"="),
new CommonToken(1,"34"),
new CommonToken(1," "),
new CommonToken(1," "),
new CommonToken(1,";"),
new CommonToken(1,"\n"),
new CommonToken(Token.EOF,"")
};
// instance initializer: mark all whitespace tokens as hidden
{
tokens[0].setChannel(Lexer.HIDDEN);
tokens[2].setChannel(Lexer.HIDDEN);
tokens[5].setChannel(Lexer.HIDDEN);
tokens[6].setChannel(Lexer.HIDDEN);
tokens[8].setChannel(Lexer.HIDDEN);
}
public Token nextToken() {
return tokens[i++];
}
public String getSourceName() { return "test"; }
};
CommonTokenStream tokens = new CommonTokenStream(lexer);
assertEquals("x", tokens.LT(1).getText()); // must skip first off channel token
tokens.consume();
assertEquals("=", tokens.LT(1).getText());
assertEquals("x", tokens.LT(-1).getText());
tokens.consume();
assertEquals("34", tokens.LT(1).getText());
assertEquals("=", tokens.LT(-1).getText());
tokens.consume();
assertEquals(";", tokens.LT(1).getText());
assertEquals("34", tokens.LT(-1).getText());
tokens.consume();
assertEquals(Token.EOF, tokens.LA(1));
// Lookback also skips hidden tokens: -1..-4 are ;, 34, =, x.
assertEquals(";", tokens.LT(-1).getText());
assertEquals("34", tokens.LT(-2).getText());
assertEquals("=", tokens.LT(-3).getText());
assertEquals("x", tokens.LT(-4).getText());
}
}

View File

@ -0,0 +1,970 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
public class TestCompositeGrammars extends BaseTest {
protected boolean debug = false;
/*
@Test
public void testWildcardStillWorks() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String grammar =
"parser grammar S;\n" +
"a : B . C ;\n"; // not qualified ID
Grammar g = new Grammar(grammar);
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
}
@Test public void testDelegatorInvokesDelegateRule() throws Exception {
String slave =
"parser grammar S;\n" +
"a : B {System.out.println(\"S.a\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : a ;\n" +
"B : 'b' ;" + // defines B from inherited token space
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"s", "b", debug);
assertEquals("S.a\n", found);
}
@Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
// must generate something like:
// public int a(int x) throws RecognitionException { return gS.a(x); }
// in M.
String slave =
"parser grammar S;\n" +
"a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : label=a[3] {System.out.println($label.y);} ;\n" +
"B : 'b' ;" + // defines B from inherited token space
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"s", "b", debug);
assertEquals("S.a1000\n", found);
}
@Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
// must generate something like:
// public int a(int x) throws RecognitionException { return gS.a(x); }
// in M.
String slave =
"parser grammar S;\n" +
"a : B {System.out.print(\"S.a\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : a {System.out.println($a.text);} ;\n" +
"B : 'b' ;" + // defines B from inherited token space
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"s", "b", debug);
assertEquals("S.ab\n", found);
}
@Test public void testDelegatorAccessesDelegateMembers() throws Exception {
String slave =
"parser grammar S;\n" +
"@members {\n" +
" public void foo() {System.out.println(\"foo\");}\n" +
"}\n" +
"a : B ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" + // uses no rules from the import
"import S;\n" +
"s : 'b' {gS.foo();} ;\n" + // gS is import pointer
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"s", "b", debug);
assertEquals("foo\n", found);
}
@Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception {
String slave =
"parser grammar S;\n" +
"a : b {System.out.println(\"S.a\");} ;\n" +
"b : B ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String slave2 =
"parser grammar T;\n" +
"a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a
writeFile(tmpdir, "T.g", slave2);
String master =
"grammar M;\n" +
"import S,T;\n" +
"s : a ;\n" +
"B : 'b' ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"s", "b", debug);
assertEquals("S.a\n", found);
}
@Test public void testDelegatesSeeSameTokenType() throws Exception {
String slave =
"parser grammar S;\n" + // A, B, C token type order
"tokens { A; B; C; }\n" +
"x : A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String slave2 =
"parser grammar T;\n" +
"tokens { C; B; A; }\n" + // reverse order
"y : A {System.out.println(\"T.y\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "T.g", slave2);
// The lexer will create rules to match letters a, b, c.
// The associated token types A, B, C must have the same value
// and all import'd parsers. Since ANTLR regenerates all imports
// for use with the delegator M, it can generate the same token type
// mapping in each parser:
// public static final int C=6;
// public static final int EOF=-1;
// public static final int B=5;
// public static final int WS=7;
// public static final int A=4;
String master =
"grammar M;\n" +
"import S,T;\n" +
"s : x y ;\n" + // matches AA, which should be "aa"
"B : 'b' ;\n" + // another order: B, A, C
"A : 'a' ;\n" +
"C : 'c' ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"s", "aa", debug);
assertEquals("S.x\n" +
"T.y\n", found);
}
@Test public void testDelegatesSeeSameTokenType2() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" + // A, B, C token type order
"tokens { A; B; C; }\n" +
"x : A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String slave2 =
"parser grammar T;\n" +
"tokens { C; B; A; }\n" + // reverse order
"y : A {System.out.println(\"T.y\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "T.g", slave2);
String master =
"grammar M;\n" +
"import S,T;\n" +
"s : x y ;\n" + // matches AA, which should be "aa"
"B : 'b' ;\n" + // another order: B, A, C
"A : 'a' ;\n" +
"C : 'c' ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]";
String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[A, B, C, WS]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
}
@Test public void testCombinedImportsCombined() throws Exception {
// for now, we don't allow combined to import combined
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"grammar S;\n" + // A, B, C token type order
"tokens { A; B; C; }\n" +
"x : 'x' INT {System.out.println(\"S.x\");} ;\n" +
"INT : '0'..'9'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : x INT ;\n";
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
String expectedError = "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M cannot import combined grammar S";
assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
}
@Test public void testSameStringTwoNames() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" +
"tokens { A='a'; }\n" +
"x : A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String slave2 =
"parser grammar T;\n" +
"tokens { X='a'; }\n" +
"y : X {System.out.println(\"T.y\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "T.g", slave2);
String master =
"grammar M;\n" +
"import S,T;\n" +
"s : x y ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
String expectedTokenIDToTypeMap = "[A=4, WS=5, X=6]";
String expectedStringLiteralToTypeMap = "{'a'=4}";
String expectedTypeToTokenList = "[A, WS, X]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
Object expectedArg = "X='a'";
Object expectedArg2 = "A";
int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT;
GrammarSemanticsMessage expectedMessage =
new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
checkGrammarSemanticsError(equeue, expectedMessage);
assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
String expectedError =
"error(158): T.g:2:10: cannot alias X='a'; string already assigned to A";
assertEquals(expectedError, equeue.errors.get(0).toString());
}
@Test public void testSameNameTwoStrings() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" +
"tokens { A='a'; }\n" +
"x : A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String slave2 =
"parser grammar T;\n" +
"tokens { A='x'; }\n" +
"y : A {System.out.println(\"T.y\");} ;\n";
writeFile(tmpdir, "T.g", slave2);
String master =
"grammar M;\n" +
"import S,T;\n" +
"s : x y ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]";
String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}";
String expectedTypeToTokenList = "[A, WS, T__6]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap));
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
Object expectedArg = "A='x'";
Object expectedArg2 = "'a'";
int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT;
GrammarSemanticsMessage expectedMessage =
new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
checkGrammarSemanticsError(equeue, expectedMessage);
assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
String expectedError =
"error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'";
assertEquals(expectedError, equeue.errors.get(0).toString());
}
@Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" +
"options {tokenVocab=whatever;}\n" +
"tokens { A='a'; }\n" +
"x : A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : x ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
Object expectedArg = "S";
int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE;
GrammarSemanticsMessage expectedMessage =
new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
checkGrammarSemanticsWarning(equeue, expectedMessage);
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size());
String expectedError =
"warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S";
assertEquals(expectedError, equeue.warnings.get(0).toString());
}
@Test public void testImportedTokenVocabWorksInRoot() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" +
"tokens { A='a'; }\n" +
"x : A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String tokens =
"A=99\n";
writeFile(tmpdir, "Test.tokens", tokens);
String master =
"grammar M;\n" +
"options {tokenVocab=Test;}\n" +
"import S;\n" +
"s : x ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
String expectedTokenIDToTypeMap = "[A=99, WS=101]";
String expectedStringLiteralToTypeMap = "{'a'=100}";
String expectedTypeToTokenList = "[A, 'a', WS]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
}
@Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" +
"options {toke\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : x ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
// whole bunch of errors from bad S.g file
assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size());
}
@Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar S;\n" +
": A {System.out.println(\"S.x\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"s : x ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
// whole bunch of errors from bad S.g file
assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size());
}
@Test public void testDelegatorRuleOverridesDelegate() throws Exception {
String slave =
"parser grammar S;\n" +
"a : b {System.out.println(\"S.a\");} ;\n" +
"b : B ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"b : 'b'|'c' ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"a", "c", debug);
assertEquals("S.a\n", found);
}
@Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
String slave =
"parser grammar JavaDecl;\n" +
"type : 'int' ;\n" +
"decl : type ID ';'\n" +
" | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" +
" ;\n" +
"init : '=' INT ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "JavaDecl.g", slave);
String master =
"grammar Java;\n" +
"import JavaDecl;\n" +
"prog : decl ;\n" +
"type : 'int' | 'float' ;\n" +
"\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
// for float to work in decl, type must be overridden
String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
"prog", "float x = 3;", debug);
assertEquals("JavaDecl: floatx=3;\n", found);
}
@Test public void testDelegatorRuleOverridesDelegates() throws Exception {
String slave =
"parser grammar S;\n" +
"a : b {System.out.println(\"S.a\");} ;\n" +
"b : B ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String slave2 =
"parser grammar T;\n" +
"tokens { A='x'; }\n" +
"b : B {System.out.println(\"T.b\");} ;\n";
writeFile(tmpdir, "T.g", slave2);
String master =
"grammar M;\n" +
"import S, T;\n" +
"b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"a", "c", debug);
assertEquals("M.b\n" +
"S.a\n", found);
}
// LEXER INHERITANCE
@Test public void testLexerDelegatorInvokesDelegateRule() throws Exception {
String slave =
"lexer grammar S;\n" +
"A : 'a' {System.out.println(\"S.A\");} ;\n" +
"C : 'c' ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"lexer grammar M;\n" +
"import S;\n" +
"B : 'b' ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execLexer("M.g", master, "M", "abc", debug);
assertEquals("S.A\nabc\n", found);
}
@Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception {
String slave =
"lexer grammar S;\n" +
"A : 'a' {System.out.println(\"S.A\");} ;\n" +
"B : 'b' {System.out.println(\"S.B\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"lexer grammar M;\n" +
"import S;\n" +
"A : 'a' B {System.out.println(\"M.A\");} ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execLexer("M.g", master, "M", "ab", debug);
assertEquals("S.B\n" +
"M.A\n" +
"ab\n", found);
}
@Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception {
// M.Tokens has nothing to predict tokens from S. Should
// not include S.Tokens alt in this case?
String slave =
"lexer grammar S;\n" +
"A : 'a' {System.out.println(\"S.A\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"lexer grammar M;\n" +
"import S;\n" +
"A : 'a' {System.out.println(\"M.A\");} ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
writeFile(tmpdir, "/M.g", master);
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
composite.assignTokenTypes();
composite.defineGrammarSymbols();
composite.createNFAs();
g.createLookaheadDFAs(false);
// predict only alts from M not S
String expectingDFA =
".s0-'a'->.s1\n" +
".s0-{'\\n', ' '}->:s3=>2\n" +
".s1-<EOT>->:s2=>1\n";
org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1);
FASerializer serializer = new FASerializer(g);
String result = serializer.serialize(dfa.startState);
assertEquals(expectingDFA, result);
// must not be a "unreachable alt: Tokens" error
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
}
@Test public void testInvalidImportMechanism() throws Exception {
// M.Tokens has nothing to predict tokens from S. Should
// not include S.Tokens alt in this case?
String slave =
"lexer grammar S;\n" +
"A : 'a' {System.out.println(\"S.A\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"tree grammar M;\n" +
"import S;\n" +
"a : A ;";
writeFile(tmpdir, "/M.g", master);
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
String expectedError =
"error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S";
assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
}
@Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception {
// if this compiles, it means that synpred1_S is defined in S.java
// but not MParser.java. MParser has its own synpred1_M which must
// be separate to compile.
String slave =
"parser grammar S;\n" +
"a : 'a' {System.out.println(\"S.a1\");}\n" +
" | 'a' {System.out.println(\"S.a2\");}\n" +
" ;\n" +
"b : 'x' | 'y' {;} ;\n"; // preds generated but not need in DFA here
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"options {backtrack=true;}\n" +
"import S;\n" +
"start : a b ;\n" +
"nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"start", "ax", debug);
assertEquals("S.a1\n", found);
}
@Test public void testKeywordVSIDGivesNoWarning() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"lexer grammar S;\n" +
"A : 'abc' {System.out.println(\"S.A\");} ;\n" +
"ID : 'a'..'z'+ ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"a : A {System.out.println(\"M.a\");} ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
String found = execParser("M.g", master, "MParser", "MLexer",
"a", "abc", debug);
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());
assertEquals("S.A\nM.a\n", found);
}
@Test public void testWarningForUndefinedToken() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"lexer grammar S;\n" +
"A : 'abc' {System.out.println(\"S.A\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"a : ABC A {System.out.println(\"M.a\");} ;\n" +
"WS : (' '|'\\n') {skip();} ;\n" ;
// A is defined in S but M should still see it and not give warning.
// only problem is ABC.
rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug);
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size());
String expectedError =
"warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+ File.separator+"M.g:3:5: no lexer rule corresponding to token: ABC";
assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+",""));
}
// Make sure that M can import S that imports T.
@Test public void test3LevelImport() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar T;\n" +
"a : T ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "T.g", slave);
String slave2 =
"parser grammar S;\n" + // A, B, C token type order
"import T;\n" +
"a : S ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave2);
String master =
"grammar M;\n" +
"import S;\n" +
"a : M ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
g.composite.defineGrammarSymbols();
String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[M, S, T]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
boolean ok =
rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
boolean expecting = true; // should be ok
assertEquals(expecting, ok);
}
@Test public void testBigTreeOfImports() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar T;\n" +
"x : T ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "T.g", slave);
slave =
"parser grammar S;\n" +
"import T;\n" +
"y : S ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
slave =
"parser grammar C;\n" +
"i : C ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "C.g", slave);
slave =
"parser grammar B;\n" +
"j : B ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "B.g", slave);
slave =
"parser grammar A;\n" +
"import B,C;\n" +
"k : A ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "A.g", slave);
String master =
"grammar M;\n" +
"import S,A;\n" +
"a : M ;\n" ;
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
g.composite.defineGrammarSymbols();
String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, M=7, S=8, T=9]";
String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[A, B, C, M, S, T]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
boolean ok =
rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
boolean expecting = true; // should be ok
assertEquals(expecting, ok);
}
@Test public void testRulesVisibleThroughMultilevelImport() throws Exception {
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String slave =
"parser grammar T;\n" +
"x : T ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "T.g", slave);
String slave2 =
"parser grammar S;\n" + // A, B, C token type order
"import T;\n" +
"a : S ;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave2);
String master =
"grammar M;\n" +
"import S;\n" +
"a : M x ;\n" ; // x MUST BE VISIBLE TO M
writeFile(tmpdir, "M.g", master);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
g.composite.defineGrammarSymbols();
String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[M, S, T]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
}
@Test public void testNestedComposite() throws Exception {
// Wasn't compiling. http://www.antlr.org/jira/browse/ANTLR-438
org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
ErrorManager.setErrorListener(equeue);
String gstr =
"lexer grammar L;\n" +
"T1: '1';\n" +
"T2: '2';\n" +
"T3: '3';\n" +
"T4: '4';\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "L.g", gstr);
gstr =
"parser grammar G1;\n" +
"s: a | b;\n" +
"a: T1;\n" +
"b: T2;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "G1.g", gstr);
gstr =
"parser grammar G2;\n" +
"import G1;\n" +
"a: T3;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "G2.g", gstr);
String G3str =
"grammar G3;\n" +
"import G2;\n" +
"b: T4;\n" ;
mkdir(tmpdir);
writeFile(tmpdir, "G3.g", G3str);
Tool antlr = newTool(new String[] {"-lib", tmpdir});
CompositeGrammar composite = new CompositeGrammar();
Grammar g = new Grammar(antlr,tmpdir+"/G3.g",composite);
composite.setDelegationRoot(g);
g.parseAndBuildAST();
g.composite.assignTokenTypes();
g.composite.defineGrammarSymbols();
String expectedTokenIDToTypeMap = "[T1=4, T2=5, T3=6, T4=7]";
String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[T1, T2, T3, T4]";
assertEquals(expectedTokenIDToTypeMap,
realElements(g.composite.tokenIDToTypeMap).toString());
assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
assertEquals(expectedTypeToTokenList,
realElements(g.composite.typeToTokenList).toString());
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
boolean ok =
rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null, false);
boolean expecting = true; // should be ok
assertEquals(expecting, ok);
}
@Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception {
String slave =
"parser grammar S;\n" +
"a : B {System.out.print(\"S.a\");} ;\n";
mkdir(tmpdir);
writeFile(tmpdir, "S.g", slave);
String master =
"grammar M;\n" +
"import S;\n" +
"@header{package mypackage;}\n" +
"@lexer::header{package mypackage;}\n" +
"s : a ;\n" +
"B : 'b' ;" + // defines B from inherited token space
"WS : (' '|'\\n') {skip();} ;\n" ;
boolean ok = antlr("M.g", "M.g", master, debug);
boolean expecting = true; // should be ok
assertEquals(expecting, ok);
}
*/
}

View File

@ -0,0 +1,391 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.junit.Test;
import java.util.*;
public class TestIntervalSet extends BaseTest {
/** Public no-arg constructor used by TestRig to instantiate this test class reflectively. */
public TestIntervalSet() {
}
/** A one-element set renders as the bare value with no braces. */
@Test
public void testSingleElement() throws Exception {
    IntervalSet s = IntervalSet.of(99);
    String expecting = "99";
    // expected value first, actual second (JUnit convention; args were swapped)
    assertEquals(expecting, s.toString());
}
/** Disjoint single values render as a brace-enclosed list of code points. */
@Test public void testIsolatedElements() throws Exception {
    IntervalSet s = new IntervalSet();
    s.add(1);
    s.add('z');
    s.add('\uFFF0');
    String expecting = "{1, 122, 65520}";
    assertEquals(expecting, s.toString()); // expected first (args were swapped)
}
/** Single values and ranges coexist; toString shows ranges as lo..hi. */
@Test public void testMixedRangesAndElements() throws Exception {
    IntervalSet s = new IntervalSet();
    s.add(1);
    s.add('a','z');
    s.add('0','9');
    String expecting = "{1, 48..57, 97..122}";
    assertEquals(expecting, s.toString()); // expected first (args were swapped)
}
/** Intersection of a range with a contained sub-range is the sub-range. */
@Test public void testSimpleAnd() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(13,15);
    String expecting = "13..15";
    String result = (s.and(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Intersecting a range with a single contained element yields that element. */
@Test public void testRangeAndIsolatedElement() throws Exception {
    IntervalSet s = IntervalSet.of('a','z');
    IntervalSet s2 = IntervalSet.of('d');
    String expecting = "100"; // 'd' == code point 100
    String result = (s.and(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Two disjoint ranges intersect to the empty set. */
@Test public void testEmptyIntersection() throws Exception {
    IntervalSet s = IntervalSet.of('a','z');
    IntervalSet s2 = IntervalSet.of('0','9');
    String expecting = "{}";
    String result = (s.and(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Two distinct single elements intersect to the empty set. */
@Test public void testEmptyIntersectionSingleElements() throws Exception {
    IntervalSet s = IntervalSet.of('a');
    IntervalSet s2 = IntervalSet.of('d');
    String expecting = "{}";
    String result = (s.and(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Complement of one element within a fragmented vocabulary punches a hole at that value. */
@Test public void testNotSingleElement() throws Exception {
    IntervalSet vocabulary = IntervalSet.of(1,1000);
    vocabulary.add(2000,3000);
    IntervalSet s = IntervalSet.of(50,50);
    String expecting = "{1..49, 51..1000, 2000..3000}";
    String result = (s.complement(vocabulary)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Complement of a multi-interval set within a vocabulary removes each interval. */
@Test public void testNotSet() throws Exception {
    IntervalSet vocabulary = IntervalSet.of(1,1000);
    IntervalSet s = IntervalSet.of(50,60);
    s.add(5);
    s.add(250,300);
    String expecting = "{1..4, 6..49, 61..249, 301..1000}";
    String result = (s.complement(vocabulary)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Complement of the whole vocabulary is empty. */
@Test public void testNotEqualSet() throws Exception {
    IntervalSet vocabulary = IntervalSet.of(1,1000);
    IntervalSet s = IntervalSet.of(1,1000);
    String expecting = "{}";
    String result = (s.complement(vocabulary)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Complement of a vocabulary-edge element leaves only the other edge. */
@Test public void testNotSetEdgeElement() throws Exception {
    IntervalSet vocabulary = IntervalSet.of(1,2);
    IntervalSet s = IntervalSet.of(1);
    String expecting = "2";
    String result = (s.complement(vocabulary)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Complement within a fragmented vocabulary; values outside the vocabulary are ignored. */
@Test public void testNotSetFragmentedVocabulary() throws Exception {
    IntervalSet vocabulary = IntervalSet.of(1,255);
    vocabulary.add(1000,2000);
    vocabulary.add(9999);
    IntervalSet s = IntervalSet.of(50,60);
    s.add(3);
    s.add(250,300);
    s.add(10000); // this is outside range of vocab and should be ignored
    String expecting = "{1..2, 4..49, 61..249, 1000..2000, 9999}";
    String result = (s.complement(vocabulary)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Subtracting an interior sub-range splits the containing range in two. */
@Test public void testSubtractOfCompletelyContainedRange() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(12,15);
    String expecting = "{10..11, 16..20}";
    String result = (s.subtract(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Subtracting a range overlapping (or touching) the left edge trims the left side. */
@Test public void testSubtractOfOverlappingRangeFromLeft() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(5,11);
    String expecting = "12..20";
    String result = (s.subtract(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
    IntervalSet s3 = IntervalSet.of(5,10); // touches left edge exactly
    expecting = "11..20";
    result = (s.subtract(s3)).toString();
    assertEquals(expecting, result);
}
/** Subtracting a range overlapping (or touching) the right edge trims the right side. */
@Test public void testSubtractOfOverlappingRangeFromRight() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(15,25);
    String expecting = "10..14";
    String result = (s.subtract(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
    IntervalSet s3 = IntervalSet.of(20,25); // touches right edge exactly
    expecting = "10..19";
    result = (s.subtract(s3)).toString();
    assertEquals(expecting, result);
}
/** Subtracting a superset range yields the empty set. */
@Test public void testSubtractOfCompletelyCoveredRange() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(1,25);
    String expecting = "{}";
    String result = (s.subtract(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Subtracting one range that spans several intervals removes/trims each affected interval. */
@Test public void testSubtractOfRangeSpanningMultipleRanges() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    s.add(30,40);
    s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60
    IntervalSet s2 = IntervalSet.of(5,55); // covers one and touches 2nd range
    String expecting = "56..60";
    String result = (s.subtract(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
    IntervalSet s3 = IntervalSet.of(15,55); // touches both
    expecting = "{10..14, 56..60}";
    result = (s.subtract(s3)).toString();
    assertEquals(expecting, result);
}
/** The following was broken:
    {0..113, 115..65534}-{0..115, 117..65534}=116..65534
 */
@Test public void testSubtractOfWackyRange() throws Exception {
    IntervalSet s = IntervalSet.of(0,113);
    s.add(115,200);
    IntervalSet s2 = IntervalSet.of(0,115);
    s2.add(117,200);
    String expecting = "116";
    String result = (s.subtract(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** equals() holds for identical single-interval content and fails otherwise. */
@Test public void testSimpleEquals() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(10,20);
    // assert booleans directly instead of deprecated new Boolean(...) boxing
    assertTrue(s.equals(s2));
    IntervalSet s3 = IntervalSet.of(15,55);
    assertTrue(!s.equals(s3));
}
/** equals() holds for identical multi-interval content and fails when an interval is missing. */
@Test public void testEquals() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    s.add(2);
    s.add(499,501);
    IntervalSet s2 = IntervalSet.of(10,20);
    s2.add(2);
    s2.add(499,501);
    // assert booleans directly instead of deprecated new Boolean(...) boxing
    assertTrue(s.equals(s2)); // same three intervals -> equal
    IntervalSet s3 = IntervalSet.of(10,20);
    s3.add(2);
    assertTrue(!s.equals(s3)); // s3 lacks 499..501 -> not equal
}
/** Subtracting a covering multi-interval set from a single element leaves nothing. */
@Test public void testSingleElementMinusDisjointSet() throws Exception {
    IntervalSet s = IntervalSet.of(15,15);
    IntervalSet s2 = IntervalSet.of(1,5);
    s2.add(10,20);
    String expecting = "{}"; // 15 - {1..5, 10..20} = {}
    String result = s.subtract(s2).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** member() is true exactly for values inside one of the set's intervals. */
@Test public void testMembership() throws Exception {
    IntervalSet set = IntervalSet.of(15,15);
    set.add(50,60);
    // members: the isolated value and both edges plus the interior of 50..60
    assertTrue(set.member(15));
    assertTrue(set.member(50));
    assertTrue(set.member(55));
    assertTrue(set.member(60));
    // non-members: below, between, and above the intervals
    assertTrue(!set.member(0));
    assertTrue(!set.member(20));
    assertTrue(!set.member(100));
}
// {2,15,18} & 10..20
/** Intersecting a range with scattered elements keeps only the contained ones. */
@Test public void testIntersectionWithTwoContainedElements() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(2,2);
    s2.add(15);
    s2.add(18);
    String expecting = "{15, 18}";
    String result = (s.and(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Intersection is symmetric: same result with operands reversed. */
@Test public void testIntersectionWithTwoContainedElementsReversed() throws Exception {
    IntervalSet s = IntervalSet.of(10,20);
    IntervalSet s2 = IntervalSet.of(2,2);
    s2.add(15);
    s2.add(18);
    String expecting = "{15, 18}";
    String result = (s2.and(s)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
/** Complement of {100,101} within 100..102 is the single value 102. */
@Test public void testComplement() throws Exception {
    IntervalSet s = IntervalSet.of(100,100);
    s.add(101,101);
    IntervalSet s2 = IntervalSet.of(100,102);
    String expecting = "102";
    String result = (s.complement(s2)).toString();
    assertEquals(expecting, result); // expected first (args were swapped)
}
@Test public void testComplement2() throws Exception {
IntervalSet s = IntervalSet.of(100,101);
IntervalSet s2 = IntervalSet.of(100,102);
String expecting = "102";
String result = (s.complement(s2)).toString();
assertEquals(result, expecting);
}
@Test public void testComplement3() throws Exception {
IntervalSet s = IntervalSet.of(1,96);
s.add(99, Lexer.MAX_CHAR_VALUE);
String expecting = "97..98";
String result = (s.complement(1, Lexer.MAX_CHAR_VALUE)).toString();
assertEquals(result, expecting);
}
@Test public void testMergeOfRangesAndSingleValues() throws Exception {
// {0..41, 42, 43..65534}
IntervalSet s = IntervalSet.of(0,41);
s.add(42);
s.add(43,65534);
String expecting = "0..65534";
String result = s.toString();
assertEquals(result, expecting);
}
@Test public void testMergeOfRangesAndSingleValuesReverse() throws Exception {
IntervalSet s = IntervalSet.of(43,65534);
s.add(42);
s.add(0,41);
String expecting = "0..65534";
String result = s.toString();
assertEquals(result, expecting);
}
@Test public void testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception {
// 42, 10, {0..9, 11..41, 43..65534}
IntervalSet s = IntervalSet.of(42);
s.add(10);
s.add(0,9);
s.add(43,65534);
s.add(11,41);
String expecting = "0..65534";
String result = s.toString();
assertEquals(result, expecting);
}
@Test public void testMergeWithDoubleOverlap() throws Exception {
IntervalSet s = IntervalSet.of(1,10);
s.add(20,30);
s.add(5,25); // overlaps two!
String expecting = "1..30";
String result = s.toString();
assertEquals(result, expecting);
}
@Test public void testSize() throws Exception {
IntervalSet s = IntervalSet.of(20,30);
s.add(50,55);
s.add(5,19);
String expecting = "32";
String result = String.valueOf(s.size());
assertEquals(result, expecting);
}
@Test public void testToList() throws Exception {
IntervalSet s = IntervalSet.of(20,25);
s.add(50,55);
s.add(5,5);
String expecting = "[5, 20, 21, 22, 23, 24, 25, 50, 51, 52, 53, 54, 55]";
List foo = new ArrayList();
String result = String.valueOf(s.toList());
assertEquals(result, expecting);
}
/** The following was broken:
{'\u0000'..'s', 'u'..'\uFFFE'} & {'\u0000'..'q', 's'..'\uFFFE'}=
{'\u0000'..'q', 's'}!!!! broken...
'q' is 113 ascii
'u' is 117
*/
@Test public void testNotRIntersectionNotT() throws Exception {
IntervalSet s = IntervalSet.of(0,'s');
s.add('u',200);
IntervalSet s2 = IntervalSet.of(0,'q');
s2.add('s',200);
String expecting = "{0..113, 115, 117..200}";
String result = (s.and(s2)).toString();
assertEquals(result, expecting);
}
}