work in progress

This commit is contained in:
ericvergnaud 2014-10-14 00:17:03 +08:00
parent 7841db1eeb
commit ea20af25ab
106 changed files with 5356 additions and 0 deletions

View File

@ -0,0 +1,35 @@
package org.antlr.v4.testgen;
import java.io.File;
import org.stringtemplate.v4.STGroup;
/**
 * A lexer test whose master grammar imports one or more slave grammars.
 * Each slave name given to the constructor becomes a {@link Grammar} that is
 * loaded and rendered alongside the master grammar.
 */
public class CompositeLexerTestMethod extends LexerTestMethod {

	public Grammar[] slaveGrammars;

	public CompositeLexerTestMethod(String name, String grammarName,
			String input, String expectedOutput,
			String expectedErrors, String ... slaves) {
		super(name, grammarName, input, expectedOutput, expectedErrors, null);
		// one Grammar per slave; the file name is prefixed with the test name
		int count = slaves.length;
		this.slaveGrammars = new Grammar[count];
		int idx = 0;
		while(idx < count) {
			this.slaveGrammars[idx] = new Grammar(name + "_" + slaves[idx], slaves[idx]);
			idx++;
		}
	}

	@Override
	public void loadGrammars(File grammarDir, String testFileName) throws Exception {
		// load the slaves first, then let the superclass load the master grammar
		for(Grammar slave : slaveGrammars) {
			slave.load(new File(grammarDir, testFileName));
		}
		super.loadGrammars(grammarDir, testFileName);
	}

	@Override
	public void generateGrammars(STGroup group) {
		// render the slaves first, then the master grammar
		for(Grammar slave : slaveGrammars) {
			slave.generate(group);
		}
		super.generateGrammars(group);
	}
}

View File

@ -0,0 +1,35 @@
package org.antlr.v4.testgen;
import java.io.File;
import org.stringtemplate.v4.STGroup;
/**
 * A parser test whose master grammar imports one or more slave grammars.
 * Each slave name given to the constructor becomes a {@link Grammar} that is
 * loaded and rendered alongside the master grammar.
 */
public class CompositeParserTestMethod extends ParserTestMethod {

	public Grammar[] slaveGrammars;

	public CompositeParserTestMethod(String name, String grammarName,
			String startRule, String input, String expectedOutput,
			String expectedErrors, String ... slaves) {
		super(name, grammarName, startRule, input, expectedOutput, expectedErrors, null);
		// one Grammar per slave; the file name is prefixed with the test name
		int count = slaves.length;
		this.slaveGrammars = new Grammar[count];
		int idx = 0;
		while(idx < count) {
			this.slaveGrammars[idx] = new Grammar(name + "_" + slaves[idx], slaves[idx]);
			idx++;
		}
	}

	@Override
	public void loadGrammars(File grammarDir, String testFileName) throws Exception {
		// load the slaves first, then let the superclass load the master grammar
		for(Grammar slave : slaveGrammars) {
			slave.load(new File(grammarDir, testFileName));
		}
		super.loadGrammars(grammarDir, testFileName);
	}

	@Override
	public void generateGrammars(STGroup group) {
		// render the slaves first, then the master grammar
		for(Grammar slave : slaveGrammars) {
			slave.generate(group);
		}
		super.generateGrammars(group);
	}
}

View File

@ -0,0 +1,540 @@
package org.antlr.v4.testgen;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
import org.stringtemplate.v4.STGroupFile;
/**
 * Generates cross-target runtime test files from StringTemplate templates.
 * For every configured target (other than the "Source" entry, which holds the
 * test templates themselves), renders each {@link TestFile} through the
 * target's "&lt;target&gt;.test.stg" template group and writes the resulting
 * Test&lt;name&gt;.java file into the target's output directory.
 */
public class Generator {

	/**
	 * Builds the target configurations and runs a Generator for each one.
	 */
	public static void main(String[] args) throws Exception {
		Map<String, File> configs = readConfigs();
		File source = configs.get("Source");
		for(Map.Entry<String, File> item : configs.entrySet()) {
			// "Source" is where templates come from, not a target to generate for
			if("Source".equals(item.getKey()))
				continue;
			Generator gen = new Generator(item.getKey(), source, item.getValue());
			gen.generateTests();
		}
	}

	/** Maps configuration names to directories: template source plus one entry per target. */
	private static Map<String, File> readConfigs() throws Exception {
		Map<String, File> configs = new HashMap<String, File>();
		configs.put("Source", readGrammarDir()); // source of test templates
		configs.put("Firefox", readFirefoxDir()); // generated Firefox tests
		return configs;
	}

	private static File readFirefoxDir() {
		// TODO read from env variable
		return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/js/test/firefox");
	}

	/** Returns the "grammars" directory that sits next to this class on the classpath. */
	private static File readGrammarDir() throws Exception {
		File parent = readThisDir();
		return new File(parent, "grammars");
	}

	/** Locates the classpath directory containing this class's package. */
	private static File readThisDir() throws Exception {
		String className = Generator.class.getName().replace(".", "/");
		className = className.substring(0, className.lastIndexOf("/") + 1);
		URL url = ClassLoader.getSystemResource(className);
		return new File(url.toURI());
	}

	/**
	 * Escapes a string for safe embedding inside a Java string literal:
	 * backslashes, newlines and double quotes are escaped. Null-safe.
	 */
	public static String escape(String s) {
		return s==null ? null : s.replace("\\","\\\\").replace("\n", "\\n").replace("\"","\\\"");
	}

	String target;	// target name, e.g. "Firefox"
	File input;		// directory holding the grammar templates
	File output;	// directory receiving the generated test files
	STGroup group;	// the target's test template group

	public Generator(String target, File input, File output) {
		this.target = target;
		this.input = input;
		this.output = output;
	}

	/** Renders every test file for this target and writes it to the output directory. */
	private void generateTests() throws Exception {
		this.group = readTemplates();
		Collection<TestFile> tests = buildTests();
		for(TestFile test : tests) {
			String code = generateTestCode(test);
			writeTestFile(test, code);
		}
	}

	/**
	 * Loads "&lt;target&gt;.test.stg" from the output directory.
	 * @throws FileNotFoundException if the directory or template file is missing
	 */
	private STGroup readTemplates() throws Exception {
		if(!output.exists())
			throw new FileNotFoundException(output.getAbsolutePath());
		String name = target + ".test.stg";
		File file = new File(output, name);
		if(!file.exists())
			throw new FileNotFoundException(file.getAbsolutePath());
		return new STGroupFile(file.getAbsolutePath());
	}

	/** Renders the given test file through the target's "TestFile" template. */
	private String generateTestCode(TestFile test) throws Exception {
		test.generateUnitTests(group);
		ST template = group.getInstanceOf("TestFile");
		template.add("file", test);
		return template.render();
	}

	/** Writes the rendered code to Test&lt;name&gt;.java in the output directory. */
	private void writeTestFile(TestFile test, String code) throws Exception {
		File file = new File(output, "Test" + test.getName() + ".java");
		OutputStream stream = new FileOutputStream(file);
		try {
			// encode explicitly as UTF-8; the no-arg getBytes() uses the
			// platform default charset and would produce machine-dependent files
			stream.write(code.getBytes("UTF-8"));
		} finally {
			stream.close();
		}
	}

	/** Assembles all test files to generate for this target. */
	private Collection<TestFile> buildTests() throws Exception {
		List<TestFile> list = new ArrayList<TestFile>();
		list.add(buildLexerExec());
		list.add(buildParserExec());
		list.add(buildCompositeLexers());
		list.add(buildCompositeParsers());
		list.add(buildFullContextParsing());
		return list;
	}

	private TestFile buildFullContextParsing() throws Exception {
		TestFile file = new TestFile("FullContextParsing");
		file.addParserTest(input, "AmbigYieldsCtxSensitiveDFA", "T", "s", "abc",
			"Decision 0:\n" +
			"s0-ID->:s1^=>1\n",
			"line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", null);
		file.addParserTest(input, "CtxSensitiveDFA", "T", "s", "$ 34 abc",
			"Decision 1:\n" +
			"s0-INT->s1\n" +
			"s1-ID->:s2^=>1\n",
			"line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" +
			"line 1:2 reportContextSensitivity d=1 (e), input='34'\n", 1);
		file.addParserTest(input, "CtxSensitiveDFA", "T", "s", "@ 34 abc",
			"Decision 1:\n" +
			"s0-INT->s1\n" +
			"s1-ID->:s2^=>1\n",
			"line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" +
			"line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", 2);
		file.addParserTest(input, "CtxSensitiveDFATwoDiffInput", "T", "s",
			"$ 34 abc @ 34 abc",
			"Decision 2:\n" +
			"s0-INT->s1\n" +
			"s1-ID->:s2^=>1\n",
			"line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" +
			"line 1:2 reportContextSensitivity d=2 (e), input='34'\n" +
			"line 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\n" +
			"line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", null);
		file.addParserTest(input, "SLLSeesEOFInLLGrammar", "T", "s",
			"34 abc",
			"Decision 0:\n" +
			"s0-INT->s1\n" +
			"s1-ID->:s2^=>1\n",
			"line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" +
			"line 1:0 reportContextSensitivity d=0 (e), input='34'\n", null);
		file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s",
			"{ if x then return }",
			"Decision 1:\n" +
			"s0-'}'->:s1=>2\n", null, 1);
		file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s",
			"{ if x then return else foo }",
			"Decision 1:\n" +
			"s0-'else'->:s1^=>1\n",
			"line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", 2);
		file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s",
			"{ if x then if y then return else foo }",
			"Decision 1:\n" +
			"s0-'else'->:s1^=>1\n" +
			"s0-'}'->:s2=>2\n",
			"line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 3);
		// should not be ambiguous because the second 'else bar' clearly
		// indicates that the first else should match to the innermost if.
		// LL_EXACT_AMBIG_DETECTION makes us keep going to resolve
		file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s",
			"{ if x then if y then return else foo else bar }",
			"Decision 1:\n" +
			"s0-'else'->:s1^=>1\n",
			"line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" +
			"line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", 4);
		file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s",
			"{ if x then return else foo\n" +
			"if x then if y then return else foo }",
			"Decision 1:\n" +
			"s0-'else'->:s1^=>1\n" +
			"s0-'}'->:s2=>2\n",
			"line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" +
			"line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 5);
		file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s",
			"{ if x then return else foo\n" +
			"if x then if y then return else foo }",
			"Decision 1:\n" +
			"s0-'else'->:s1^=>1\n" +
			"s0-'}'->:s2=>2\n",
			"line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" +
			"line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" +
			"line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 6);
		file.addParserTest(input, "LoopsSimulateTailRecursion", "T", "prog",
			"a(i)<-x",
			"pass: a(i)<-x\n",
			"line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" +
			"line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n", null);
		file.addParserTest(input, "AmbiguityNoLoop", "T", "prog",
			"a@",
			"alt 1\n",
			"line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" +
			"line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" +
			"line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" +
			"line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n", null);
		file.addParserTest(input, "ExprAmbiguity", "T", "s",
			"a+b",
			"(expr a + (expr b))\n",
			"line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" +
			"line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n", 1);
		file.addParserTest(input, "ExprAmbiguity", "T", "s",
			"a+b*c",
			"(expr a + (expr b * (expr c)))\n",
			"line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" +
			"line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" +
			"line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" +
			"line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n", 2);
		return file;
	}

	private TestFile buildCompositeLexers() throws Exception {
		TestFile file = new TestFile("CompositeLexers");
		file.addCompositeLexerTest(input, "LexerDelegatorInvokesDelegateRule", "M", "abc",
			"S.A\n" +
			"[@0,0:0='a',<3>,1:0]\n" +
			"[@1,1:1='b',<1>,1:1]\n" +
			"[@2,2:2='c',<4>,1:2]\n" +
			"[@3,3:2='<EOF>',<-1>,1:3]\n", null, "S");
		file.addCompositeLexerTest(input, "LexerDelegatorRuleOverridesDelegate", "M", "ab",
			"M.A\n" +
			"[@0,0:1='ab',<1>,1:0]\n" +
			"[@1,2:1='<EOF>',<-1>,1:2]\n", null, "S");
		return file;
	}

	private TestFile buildLexerExec() throws Exception {
		TestFile file = new TestFile("LexerExec");
		file.addLexerTest(input, "QuoteTranslation", "L", "\"",
			"[@0,0:0='\"',<1>,1:0]\n" +
			"[@1,1:0='<EOF>',<-1>,1:1]\n", null);
		file.addLexerTest(input, "RefToRuleDoesNotSetTokenNorEmitAnother", "L", "34 -21 3",
			"[@0,0:1='34',<2>,1:0]\n" +
			"[@1,3:5='-21',<1>,1:3]\n" +
			"[@2,7:7='3',<2>,1:7]\n" +
			"[@3,8:7='<EOF>',<-1>,1:8]\n", null);
		file.addLexerTest(input, "Slashes", "L", "\\ / \\/ /\\",
			"[@0,0:0='\\',<1>,1:0]\n" +
			"[@1,2:2='/',<2>,1:2]\n" +
			"[@2,4:5='\\/',<3>,1:4]\n" +
			"[@3,7:8='/\\',<4>,1:7]\n" +
			"[@4,9:8='<EOF>',<-1>,1:9]\n", null);
		file.addLexerTest(input, "Parentheses", "L", "-.-.-!",
			"[@0,0:4='-.-.-',<1>,1:0]\n" +
			"[@1,5:5='!',<3>,1:5]\n" +
			"[@2,6:5='<EOF>',<-1>,1:6]\n", null);
		file.addLexerTest(input, "NonGreedyTermination", "L", "\"hi\"\"mom\"",
			"[@0,0:3='\"hi\"',<1>,1:0]\n" +
			"[@1,4:8='\"mom\"',<1>,1:4]\n" +
			"[@2,9:8='<EOF>',<-1>,1:9]\n", null, 1);
		file.addLexerTest(input, "NonGreedyTermination", "L", "\"\"\"mom\"",
			"[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" +
			"[@1,7:6='<EOF>',<-1>,1:7]\n", null, 2);
		file.addLexerTest(input, "GreedyOptional", "L", "//blah\n//blah\n",
			"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
			"[@1,14:13='<EOF>',<-1>,3:14]\n", null);
		file.addLexerTest(input, "NonGreedyOptional", "L", "//blah\n//blah\n",
			"[@0,0:6='//blah\\n',<1>,1:0]\n" +
			"[@1,7:13='//blah\\n',<1>,2:0]\n" +
			"[@2,14:13='<EOF>',<-1>,3:7]\n", null);
		file.addLexerTest(input, "GreedyClosure", "L", "//blah\n//blah\n",
			"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
			"[@1,14:13='<EOF>',<-1>,3:14]\n", null);
		file.addLexerTest(input, "NonGreedyClosure", "L", "//blah\n//blah\n",
			"[@0,0:6='//blah\\n',<1>,1:0]\n" +
			"[@1,7:13='//blah\\n',<1>,2:0]\n" +
			"[@2,14:13='<EOF>',<-1>,3:7]\n", null);
		file.addLexerTest(input, "GreedyPositiveClosure", "L", "//blah\n//blah\n",
			"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
			"[@1,14:13='<EOF>',<-1>,3:14]\n", null);
		file.addLexerTest(input, "NonGreedyPositiveClosure", "L", "//blah\n//blah\n",
			"[@0,0:6='//blah\\n',<1>,1:0]\n" +
			"[@1,7:13='//blah\\n',<1>,2:0]\n" +
			"[@2,14:13='<EOF>',<-1>,3:7]\n", null);
		file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardStar", "L",
			"/* ick */\n" +
			"/* /* */\n" +
			"/* /*nested*/ */\n",
			"[@0,0:8='/* ick */',<1>,1:0]\n" +
			"[@1,9:9='\\n',<2>,1:9]\n" +
			"[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" +
			"[@3,35:35='\\n',<2>,3:16]\n" +
			"[@4,36:35='<EOF>',<-1>,4:17]\n", null, 1);
		file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardStar", "L",
			"/* ick */x\n" +
			"/* /* */x\n" +
			"/* /*nested*/ */x\n",
			"[@0,0:8='/* ick */',<1>,1:0]\n" +
			"[@1,10:10='\\n',<2>,1:10]\n" +
			"[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" +
			"[@3,38:38='\\n',<2>,3:17]\n" +
			"[@4,39:38='<EOF>',<-1>,4:18]\n",
			"line 1:9 token recognition error at: 'x'\n" +
			"line 3:16 token recognition error at: 'x'\n", 2);
		file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardPlus", "L",
			"/* ick */\n" +
			"/* /* */\n" +
			"/* /*nested*/ */\n",
			"[@0,0:8='/* ick */',<1>,1:0]\n" +
			"[@1,9:9='\\n',<2>,1:9]\n" +
			"[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" +
			"[@3,35:35='\\n',<2>,3:16]\n" +
			"[@4,36:35='<EOF>',<-1>,4:17]\n", null, 1);
		file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardPlus", "L",
			"/* ick */x\n" +
			"/* /* */x\n" +
			"/* /*nested*/ */x\n",
			"[@0,0:8='/* ick */',<1>,1:0]\n" +
			"[@1,10:10='\\n',<2>,1:10]\n" +
			"[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" +
			"[@3,38:38='\\n',<2>,3:17]\n" +
			"[@4,39:38='<EOF>',<-1>,4:18]\n",
			"line 1:9 token recognition error at: 'x'\n" +
			"line 3:16 token recognition error at: 'x'\n", 2);
		file.addLexerTest(input, "ActionPlacement", "L", "ab",
			"stuff0: \n" +
			"stuff1: a\n" +
			"stuff2: ab\n" +
			"ab\n" +
			"[@0,0:1='ab',<1>,1:0]\n" +
			"[@1,2:1='<EOF>',<-1>,1:2]\n", null);
		file.addLexerTest(input, "GreedyConfigs", "L", "ab",
			"ab\n" +
			"[@0,0:1='ab',<1>,1:0]\n" +
			"[@1,2:1='<EOF>',<-1>,1:2]\n", null);
		file.addLexerTest(input, "NonGreedyConfigs", "L", "qb",
			"a\n" +
			"b\n" +
			"[@0,0:0='a',<1>,1:0]\n" +
			"[@1,1:1='b',<3>,1:1]\n" +
			"[@2,2:1='<EOF>',<-1>,1:2]\n", null);
		file.addLexerTest(input, "KeywordID", "L", "end eend ending a",
			"[@0,0:2='end',<1>,1:0]\n" +
			"[@1,3:3=' ',<3>,1:3]\n" +
			"[@2,4:7='eend',<2>,1:4]\n" +
			"[@3,8:8=' ',<3>,1:8]\n" +
			"[@4,9:14='ending',<2>,1:9]\n" +
			"[@5,15:15=' ',<3>,1:15]\n" +
			"[@6,16:16='a',<2>,1:16]\n" +
			"[@7,17:16='<EOF>',<-1>,1:17]\n", null);
		file.addLexerTest(input, "HexVsID", "L", "x 0 1 a.b a.l",
			"[@0,0:0='x',<5>,1:0]\n" +
			"[@1,1:1=' ',<6>,1:1]\n" +
			"[@2,2:2='0',<2>,1:2]\n" +
			"[@3,3:3=' ',<6>,1:3]\n" +
			"[@4,4:4='1',<2>,1:4]\n" +
			"[@5,5:5=' ',<6>,1:5]\n" +
			"[@6,6:6='a',<5>,1:6]\n" +
			"[@7,7:7='.',<4>,1:7]\n" +
			"[@8,8:8='b',<5>,1:8]\n" +
			"[@9,9:9=' ',<6>,1:9]\n" +
			"[@10,10:10='a',<5>,1:10]\n" +
			"[@11,11:11='.',<4>,1:11]\n" +
			"[@12,12:12='l',<5>,1:12]\n" +
			"[@13,13:12='<EOF>',<-1>,1:13]\n",null);
		file.addLexerTest(input, "EOFByItself", "L", "",
			"[@0,0:-1='<EOF>',<1>,1:0]\n" +
			"[@1,0:-1='<EOF>',<-1>,1:0]\n", null);
		file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "",
			"[@0,0:-1='<EOF>',<-1>,1:0]\n", null, 1);
		file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "a",
			"[@0,0:0='a',<1>,1:0]\n" +
			"[@1,1:0='<EOF>',<-1>,1:1]\n", null, 2);
		file.addLexerTest(input, "CharSet", "L", "34\r\n 34",
			"I\n" +
			"I\n" +
			"[@0,0:1='34',<1>,1:0]\n" +
			"[@1,5:6='34',<1>,2:1]\n" +
			"[@2,7:6='<EOF>',<-1>,2:3]\n", null);
		file.addLexerTest(input, "CharSetPlus", "L", "34\r\n 34",
			"I\n" +
			"I\n" +
			"[@0,0:1='34',<1>,1:0]\n" +
			"[@1,5:6='34',<1>,2:1]\n" +
			"[@2,7:6='<EOF>',<-1>,2:3]\n", null);
		file.addLexerTest(input, "CharSetNot", "L", "xaf",
			"I\n" +
			"[@0,0:2='xaf',<1>,1:0]\n" +
			"[@1,3:2='<EOF>',<-1>,1:3]\n", null);
		file.addLexerTest(input, "CharSetInSet", "L", "a x",
			"I\n" +
			"I\n" +
			"[@0,0:0='a',<1>,1:0]\n" +
			"[@1,2:2='x',<1>,1:2]\n" +
			"[@2,3:2='<EOF>',<-1>,1:3]\n", null);
		file.addLexerTest(input, "CharSetRange", "L", "34\r 34 a2 abc \n ",
			"I\n" +
			"I\n" +
			"ID\n" +
			"ID\n" +
			"[@0,0:1='34',<1>,1:0]\n" +
			"[@1,4:5='34',<1>,1:4]\n" +
			"[@2,7:8='a2',<2>,1:7]\n" +
			"[@3,10:12='abc',<2>,1:10]\n" +
			"[@4,18:17='<EOF>',<-1>,2:3]\n", null);
		file.addLexerTest(input, "CharSetWithMissingEndRange", "L", "00\r\n",
			"I\n" +
			"[@0,0:1='00',<1>,1:0]\n" +
			"[@1,4:3='<EOF>',<-1>,2:0]\n", null);
		file.addLexerTest(input, "CharSetWithMissingEscapeChar", "L", "34 ",
			"I\n" +
			"[@0,0:1='34',<1>,1:0]\n" +
			"[@1,3:2='<EOF>',<-1>,1:3]\n", null);
		file.addLexerTest(input, "CharSetWithEscapedChar", "L", "- ] ",
			"DASHBRACK\n" +
			"DASHBRACK\n" +
			"[@0,0:0='-',<1>,1:0]\n" +
			"[@1,2:2=']',<1>,1:2]\n" +
			"[@2,4:3='<EOF>',<-1>,1:4]\n", null);
		file.addLexerTest(input, "CharSetWithReversedRange", "L", "9",
			"A\n" +
			"[@0,0:0='9',<1>,1:0]\n" +
			"[@1,1:0='<EOF>',<-1>,1:1]\n", null);
		file.addLexerTest(input, "CharSetWithQuote", "L", "b\"a",
			"A\n" +
			"[@0,0:2='b\"a',<1>,1:0]\n" +
			"[@1,3:2='<EOF>',<-1>,1:3]\n", null, 1);
		file.addLexerTest(input, "CharSetWithQuote", "L", "b\"\\a",
			"A\n" +
			"[@0,0:3='b\"\\a',<1>,1:0]\n" +
			"[@1,4:3='<EOF>',<-1>,1:4]\n", null, 2);
		// token type constants used by the PositionAdjustingLexer expectations
		final int TOKENS = 4;
		final int LABEL = 5;
		final int IDENTIFIER = 6;
		file.addLexerTest(input, "PositionAdjustingLexer", "L",
			"tokens\n" +
			"tokens {\n" +
			"notLabel\n" +
			"label1 =\n" +
			"label2 +=\n" +
			"notLabel\n",
			"[@0,0:5='tokens',<" + IDENTIFIER + ">,1:0]\n" +
			"[@1,7:12='tokens',<" + TOKENS + ">,2:0]\n" +
			"[@2,14:14='{',<3>,2:7]\n" +
			"[@3,16:23='notLabel',<" + IDENTIFIER + ">,3:0]\n" +
			"[@4,25:30='label1',<" + LABEL + ">,4:0]\n" +
			"[@5,32:32='=',<1>,4:7]\n" +
			"[@6,34:39='label2',<" + LABEL + ">,5:0]\n" +
			"[@7,41:42='+=',<2>,5:7]\n" +
			"[@8,44:51='notLabel',<" + IDENTIFIER + ">,6:0]\n" +
			"[@9,53:52='<EOF>',<-1>,7:0]\n", null);
		file.addLexerTest(input, "LargeLexer", "L", "KW400",
			"[@0,0:4='KW400',<402>,1:0]\n" +
			"[@1,5:4='<EOF>',<-1>,1:5]\n", null);
		return file;
	}

	private TestFile buildCompositeParsers() throws Exception {
		TestFile file = new TestFile("CompositeParsers");
		file.importErrorQueue = true;
		file.importGrammar = true;
		file.addCompositeParserTest(input, "DelegatorInvokesDelegateRule", "M", "s", "b", "S.a\n", null, "S");
		file.addCompositeParserTest(input, "BringInLiteralsFromDelegate", "M", "s", "=a", "S.a\n", null, "S");
		file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithArgs", "M", "s", "a", "S.a1000\n", null, "S");
		file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithReturnStruct", "M", "s", "b", "S.ab\n", null, "S");
		file.addCompositeParserTest(input, "DelegatorAccessesDelegateMembers", "M", "s", "b", "foo\n", null, "S");
		file.addCompositeParserTest(input, "DelegatorInvokesFirstVersionOfDelegateRule", "M", "s", "b", "S.a\n", null, "S", "T");
		CompositeParserTestMethod ct = file.addCompositeParserTest(input, "DelegatesSeeSameTokenType", "M", "s", "aa", "S.x\nT.y\n", null, "S", "T");
		ct.afterGrammar = "writeFile(tmpdir, \"M.g4\", grammar);\n" +
			"ErrorQueue equeue = new ErrorQueue();\n" +
			"Grammar g = new Grammar(tmpdir+\"/M.g4\", grammar, equeue);\n" +
			"String expectedTokenIDToTypeMap = \"{EOF=-1, B=1, A=2, C=3, WS=4}\";\n" +
			"String expectedStringLiteralToTypeMap = \"{'a'=2, 'b'=1, 'c'=3}\";\n" +
			"String expectedTypeToTokenList = \"[B, A, C, WS]\";\n" +
			"assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString());\n" +
			"assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString());\n" +
			"assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString());\n" +
			"assertEquals(\"unexpected errors: \"+equeue, 0, equeue.errors.size());\n";
		ct = file.addCompositeParserTest(input, "CombinedImportsCombined", "M", "s", "x 34 9", "S.x\n", null, "S");
		ct.afterGrammar = "writeFile(tmpdir, \"M.g4\", grammar);\n" +
			"ErrorQueue equeue = new ErrorQueue();\n" +
			"new Grammar(tmpdir+\"/M.g4\", grammar, equeue);\n" +
			"assertEquals(\"unexpected errors: \" + equeue, 0, equeue.errors.size());\n";
		file.addCompositeParserTest(input, "DelegatorRuleOverridesDelegate", "M", "a", "c", "S.a\n", null, "S");
		file.addCompositeParserTest(input, "DelegatorRuleOverridesLookaheadInDelegate", "M", "prog", "float x = 3;", "Decl: floatx=3;\n", null, "S");
		file.addCompositeParserTest(input, "DelegatorRuleOverridesDelegates", "M", "a", "c", "M.b\nS.a\n", null, "S", "T");
		file.addCompositeParserTest(input, "KeywordVSIDOrder", "M", "a", "abc",
			"M.A\n" +
			"M.a: [@0,0:2='abc',<1>,1:0]\n", null, "S");
		file.addCompositeParserTest(input, "ImportedRuleWithAction", "M", "s", "b", "", null, "S");
		file.addCompositeParserTest(input, "ImportedGrammarWithEmptyOptions", "M", "s", "b", "", null, "S");
		file.addCompositeParserTest(input, "ImportLexerWithOnlyFragmentRules", "M", "program", "test test", "", null, "S");
		return file;
	}

	private TestFile buildParserExec() throws Exception {
		TestFile file = new TestFile("ParserExec");
		file.addParserTest(input, "Labels", "T", "a", "abc 34", "", null);
		file.addParserTest(input, "ListLabelsOnSet", "T", "a", "abc 34", "", null);
		file.addParserTest(input, "AorB", "T", "a", "34", "alt 2\n", null);
		file.addParserTest(input, "Basic", "T", "a", "abc 34", "abc34\n", null);
		file.addParserTest(input, "APlus", "T", "a", "a b c", "abc\n", null);
		file.addParserTest(input, "AorAPlus", "T", "a", "a b c", "abc\n", null);
		file.addParserTest(input, "IfIfElseGreedyBinding1", "T", "start",
			"if y if y x else x", "if y x else x\nif y if y x else x\n", null);
		file.addParserTest(input, "IfIfElseGreedyBinding2", "T", "start",
			"if y if y x else x", "if y x else x\nif y if y x else x\n", null);
		file.addParserTest(input, "IfIfElseNonGreedyBinding1", "T", "start",
			"if y if y x else x", "if y x\nif y if y x else x\n", null);
		file.addParserTest(input, "IfIfElseNonGreedyBinding2", "T", "start",
			"if y if y x else x", "if y x\nif y if y x else x\n", null);
		file.addParserTest(input, "AStar", "T", "a", "", "\n", null, 1);
		file.addParserTest(input, "AStar", "T", "a", "a b c", "abc\n", null, 2);
		file.addParserTest(input, "LL1OptionalBlock", "T", "a", "", "\n", null, 1);
		file.addParserTest(input, "LL1OptionalBlock", "T", "a", "a", "a\n", null, 2);
		file.addParserTest(input, "AorAStar", "T", "a", "", "\n", null, 1);
		file.addParserTest(input, "AorAStar", "T", "a", "a b c", "abc\n", null, 2);
		file.addParserTest(input, "AorBPlus", "T", "a", "a 34 c", "a34c\n", null);
		file.addParserTest(input, "AorBStar", "T", "a", "", "\n", null, 1);
		file.addParserTest(input, "AorBStar", "T", "a", "a 34 c", "a34c\n", null, 2);
		file.addParserTest(input, "Optional", "T", "stat", "x", "", null, 1);
		file.addParserTest(input, "Optional", "T", "stat", "if x", "", null, 2);
		file.addParserTest(input, "Optional", "T", "stat", "if x else x", "", null, 3);
		file.addParserTest(input, "Optional", "T", "stat", "if if x else x", "", null, 4);
		file.addParserTest(input, "PredicatedIfIfElse", "T", "s", "if x if x a else b", "", null);
		/* file.addTest(input, "StartRuleWithoutEOF", "T", "s", "abc 34",
			"Decision 0:\n" + "s0-ID->s1\n" + "s1-INT->s2\n" + "s2-EOF->:s3=>1\n", null); */
		file.addParserTest(input, "LabelAliasingAcrossLabeledAlternatives", "T", "start", "xy", "x\ny\n", null);
		file.addParserTest(input, "PredictionIssue334", "T", "file_", "a", "(file_ (item a) <EOF>)\n", null);
		file.addParserTest(input, "ListLabelForClosureContext", "T", "expression", "a", "", null);
		return file;
	}
}

View File

@ -0,0 +1,51 @@
package org.antlr.v4.testgen;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
/**
 * Holds one grammar used by a generated test: the name of the template file
 * it is loaded from, the grammar name injected into the template, and the
 * rendered, escaped grammar lines.
 */
public class Grammar {

	public String fileName;      // base name of the "<fileName>.st" template file
	public String grammarName;   // value bound to the template's "grammarName" attribute
	public String[] lines;       // rendered grammar, one escaped line per entry
	public ST template;          // loaded but not-yet-rendered template

	public Grammar(String fileName, String grammarName) {
		this.fileName = fileName;
		this.grammarName = grammarName;
	}

	/** Loads this grammar's template from the given directory. */
	public void load(File grammarDir) throws Exception {
		template = loadGrammar(grammarDir, fileName);
	}

	/**
	 * Reads "<grammarFileName>.st" from grammarDir and wraps its content in an ST.
	 */
	protected ST loadGrammar(File grammarDir, String grammarFileName) throws Exception {
		File file = new File(grammarDir, grammarFileName + ".st");
		InputStream input = new FileInputStream(file);
		try {
			byte[] data = new byte[(int)file.length()];
			int next = 0;
			// fill the buffer completely; the original available()>0 loop was
			// unreliable because available() is not an end-of-stream indicator
			while(next < data.length) {
				int read = input.read(data, next, data.length - next);
				if(read < 0)
					break; // premature EOF: file shrank after length() was sampled
				next += read;
			}
			// decode explicitly as UTF-8 instead of the platform default charset
			String s = new String(data, 0, next, "UTF-8");
			return new ST(s);
		} finally {
			input.close();
		}
	}

	/** Renders the template against the given group and escapes each output line. */
	public void generate(STGroup group) {
		template.add("grammarName", grammarName);
		template.groupThatCreatedThisInstance = group; // so templates get interpreted
		lines = template.render().split("\n");
		for(int i=0;i<lines.length;i++)
			lines[i] = Generator.escape(lines[i]);
	}
}

View File

@ -0,0 +1,15 @@
package org.antlr.v4.testgen;
/**
 * A lexer test. In addition to the escaped fields kept by {@link TestMethod},
 * keeps the expected output split into individual escaped lines for templates
 * that emit one assertion per output line.
 */
public class LexerTestMethod extends TestMethod {

	/** Expected output split on '\n', each line escaped via Generator.escape. */
	public String[] outputLines;

	public LexerTestMethod(String name, String grammarName, String input,
			String expectedOutput, String expectedErrors, Integer index) {
		super(name, grammarName, input, expectedOutput, expectedErrors, index);
		// guard against null so a test without expected output does not NPE
		// (the superclass already treats expectedOutput as nullable)
		outputLines = expectedOutput == null ? new String[0] : expectedOutput.split("\n");
		for(int i = 0; i < outputLines.length; i++)
			outputLines[i] = Generator.escape(outputLines[i]);
	}
}

View File

@ -0,0 +1,13 @@
package org.antlr.v4.testgen;
/**
 * A parser test. In addition to the data captured by {@link TestMethod},
 * records the parser rule at which the test starts.
 */
public class ParserTestMethod extends TestMethod {

	/** Name of the parser rule used as the entry point for this test. */
	public String startRule;

	public ParserTestMethod(String name, String grammarName, String startRule,
			String input, String expectedOutput, String expectedErrors, Integer index) {
		super(name, grammarName, input, expectedOutput, expectedErrors, index);
		this.startRule = startRule;
	}
}

View File

@ -0,0 +1,82 @@
package org.antlr.v4.testgen;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
/**
 * One generated test file: a named collection of unit-test methods plus the
 * rendered test bodies produced by {@link #generateUnitTests(STGroup)}.
 */
public class TestFile {

	List<TestMethod> unitTests = new ArrayList<TestMethod>();
	public String name;
	public List<String> tests = new ArrayList<String>();
	public boolean importErrorQueue = false;
	public boolean importGrammar = false;

	public TestFile(String name) {
		this.name = name;
	}

	public String getName() {
		return name;
	}

	/** Loads the test method's grammars and registers it with this file. */
	private <T extends TestMethod> T register(File grammarDir, T tm) throws Exception {
		tm.loadGrammars(grammarDir, this.name);
		unitTests.add(tm);
		return tm;
	}

	/**
	 * Convenience overload without an index. Now returns the created test for
	 * consistency with the indexed overload (source-compatible: previous
	 * callers simply ignored the void result).
	 */
	public ParserTestMethod addParserTest(File grammarDir, String name, String grammarName, String methodName,
			String input, String expectedOutput, String expectedErrors) throws Exception {
		return addParserTest(grammarDir, name, grammarName, methodName, input, expectedOutput, expectedErrors, null);
	}

	public ParserTestMethod addParserTest(File grammarDir, String name, String grammarName, String methodName,
			String input, String expectedOutput, String expectedErrors, Integer index) throws Exception {
		return register(grammarDir,
			new ParserTestMethod(name, grammarName, methodName, input, expectedOutput, expectedErrors, index));
	}

	public CompositeParserTestMethod addCompositeParserTest(File grammarDir, String name, String grammarName, String methodName,
			String input, String expectedOutput, String expectedErrors, String ... slaves) throws Exception {
		return register(grammarDir,
			new CompositeParserTestMethod(name, grammarName, methodName, input, expectedOutput, expectedErrors, slaves));
	}

	public LexerTestMethod addLexerTest(File grammarDir, String name, String grammarName,
			String input, String expectedOutput, String expectedErrors) throws Exception {
		return addLexerTest(grammarDir, name, grammarName, input, expectedOutput, expectedErrors, null);
	}

	public LexerTestMethod addLexerTest(File grammarDir, String name, String grammarName,
			String input, String expectedOutput, String expectedErrors, Integer index) throws Exception {
		return register(grammarDir,
			new LexerTestMethod(name, grammarName, input, expectedOutput, expectedErrors, index));
	}

	public CompositeLexerTestMethod addCompositeLexerTest(File grammarDir, String name, String grammarName,
			String input, String expectedOutput, String expectedErrors, String ... slaves) throws Exception {
		return register(grammarDir,
			new CompositeLexerTestMethod(name, grammarName, input, expectedOutput, expectedErrors, slaves));
	}

	/**
	 * Renders every registered test method through the template named after the
	 * method's concrete class and collects the results in {@link #tests}.
	 */
	public void generateUnitTests(STGroup group) {
		for(TestMethod tm : unitTests) {
			tm.generateGrammars(group);
			// template name matches the TestMethod subclass simple name
			String name = tm.getClass().getSimpleName();
			ST template = group.getInstanceOf(name);
			template.add("test", tm);
			tests.add(template.render());
		}
	}
}

View File

@ -0,0 +1,33 @@
package org.antlr.v4.testgen;
import java.io.File;
import org.stringtemplate.v4.STGroup;
/**
 * Base class for generated unit-test methods. Captures the test name, its
 * grammar, the test input, and the expected output/errors — all escaped for
 * embedding into StringTemplate-generated source code.
 */
public abstract class TestMethod {

	public String name;
	public Grammar grammar;
	public String afterGrammar;
	public String input;
	public String expectedOutput;
	public String expectedErrors;

	protected TestMethod(String name, String grammarName, String input,
			String expectedOutput, String expectedErrors, Integer index) {
		// indexed variants of the same test are suffixed with "_<index>";
		// the grammar keeps the un-suffixed base name
		this.name = index == null ? name : name + "_" + index;
		this.grammar = new Grammar(name, grammarName);
		this.input = Generator.escape(input);
		this.expectedOutput = Generator.escape(expectedOutput);
		this.expectedErrors = Generator.escape(expectedErrors);
	}

	/** Loads this test's grammar template from grammarDir/testFileName. */
	public void loadGrammars(File grammarDir, String testFileName) throws Exception {
		grammar.load(new File(grammarDir, testFileName));
	}

	/** Renders this test's grammar against the target's template group. */
	public void generateGrammars(STGroup group) {
		grammar.generate(group);
	}
}

View File

@ -0,0 +1,4 @@
lexer grammar M;
import S;
B : 'b';
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar S;
A : 'a' {<write("\"S.a\"")>};
C : 'c' ;

View File

@ -0,0 +1,4 @@
lexer grammar M;
import S;
A : 'a' B {<write("\"M.A\"")>};
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar S;
A : 'a' {<write("\"S.A\"")>};
B : 'b' {<write("\"S.B\"")>};

View File

@ -0,0 +1,4 @@
grammar M;
import S;
s : a ;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,2 @@
parser grammar S;
a : '=' 'a' {<write("\"S.a\"")>};

View File

@ -0,0 +1,3 @@
grammar M;
import S;
s : x INT;

View File

@ -0,0 +1,5 @@
parser grammar S;
tokens { A, B, C }
x : 'x' INT {<writeln("\"S.x\"")>};
INT : '0'..'9'+ ;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,17 @@
// The lexer will create rules to match letters a, b, c.
// The associated token types A, B, C must have the same value
// in the delegator and all import'd parsers. Since ANTLR regenerates all imports
// for use with the delegator M, it can generate the same token type
// mapping in each parser:
// public static final int C=6;
// public static final int EOF=-1;
// public static final int B=5;
// public static final int WS=7;
// public static final int A=4;
grammar M;
import S,T;
s : x y ; // matches AA, which should be 'aa'
B : 'b' ; // another order: B, A, C
A : 'a' ;
C : 'c' ;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,3 @@
parser grammar S;
tokens { A, B, C }
x : A {<write("\"S.x\"")>};

View File

@ -0,0 +1,3 @@
parser grammar S;
tokens { C, B, A } // reverse order
y : A {<write("\"T.y\"")>};

View File

@ -0,0 +1,4 @@
grammar M; // uses no rules from the import
import S;
s : 'b'{<invoke_foo()>}; // gS is import pointer
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,5 @@
parser grammar S;
@members {
<declare_foo()>
}
a : B;

View File

@ -0,0 +1,5 @@
grammar M;
import S;
s : a ;
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,5 @@
grammar M;
import S;
s : label=a[3] {<writeln("$label.y")>} ;
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,2 @@
parser grammar S;
a[int x] returns [int y] : B {<write("\"S.a\"")>;$y=1000;};

View File

@ -0,0 +1,5 @@
grammar M;
import S;
s : a {<write("$a.text")>} ;
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,2 @@
parser grammar S;
A : B {<write("\"S.a\"")>};

View File

@ -0,0 +1,2 @@
parser grammar S;
a : B {<writeln("\"S.a\"")>};

View File

@ -0,0 +1,5 @@
grammar M;
import S,T;
s : a ;
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,3 @@
parser grammar S;
a : B {<writeln("\"S.a\"")>};
b : B;

View File

@ -0,0 +1,2 @@
parser grammar T;
a : B {<writeln("\"T.a\"")>};

View File

@ -0,0 +1,4 @@
grammar M;
import S;
b : 'b'|'c';
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,3 @@
parser grammar S;
a : b {<write("\"S.a\"")>};
b : B ;

View File

@ -0,0 +1,4 @@
grammar M;
import S, T;
b : 'b'|'c' {<write("\"M.b\"")>}|B|A;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,4 @@
parser grammar S;
a : b {<write("\"S.a\"")>};
b : 'b' ;

View File

@ -0,0 +1,3 @@
parser grammar S;
tokens { A }
b : 'b' {<write("\"T.b\"")>};

View File

@ -0,0 +1,7 @@
grammar M;
import S;
prog : decl ;
type_ : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+ ;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,7 @@
parser grammar S;
type_ : 'int' ;
decl : type_ ID ';'
| type_ ID init ';' {
<write("\"decl: \" + $text")>
};
init : '=' INT;

View File

@ -0,0 +1,4 @@
grammar M;
import S;
program : 'test' 'test';
WS : (UNICODE_CLASS_Zs)+ -> skip;

View File

@ -0,0 +1,6 @@
lexer grammar S;
fragment
UNICODE_CLASS_Zs : '\u0020' | '\u00A0' | '\u1680' | '\u180E'
| '\u2000'..'\u200A'
| '\u202F' | '\u205F' | '\u3000'
;

View File

@ -0,0 +1,5 @@
grammar M;
import S;
s : a;
B : 'b';
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,3 @@
parser grammar S;
options {}
a : B;

View File

@ -0,0 +1,5 @@
grammar M;
import S;
s : a;
B : 'b';
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,2 @@
parser grammar S;
a @after {} : B;

View File

@ -0,0 +1,5 @@
grammar M;
import S;
a : A {<writeln("\"M.a: \"+$A")>};
A : 'abc' {<writeln("\"M.A\"")>};
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,2 @@
lexer grammar S;
ID : 'a'..'z'+;

View File

@ -0,0 +1,5 @@
grammar <grammarName>;
s @after {<dumpDFA()>}
: ID | ID {} ;
ID : 'a'..'z'+;
WS : (' '|'\t'|'\n')+ -> skip ;

View File

@ -0,0 +1,12 @@
grammar <grammarName>;
prog
@init {<LL_EXACT_AMBIG_DETECTION()>}
: expr expr {<writeln("\"alt 1\"")>}
| expr
;
expr: '@'
| ID '@'
| ID
;
ID : [a-z]+ ;
WS : [ \r\n\t]+ -> skip ;

View File

@ -0,0 +1,9 @@
grammar <grammarName>;
s @after {<dumpDFA()>}
: '$' a | '@' b ;
a : e ID ;
b : e INT ID ;
e : INT | ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+ ;
WS : (' '|'\t'|'\n')+ -> skip ;

View File

@ -0,0 +1,9 @@
grammar <grammarName>;
s @after {<dumpDFA()>}
: ('$' a | '@' b)+ ;
a : e ID ;
b : e INT ID ;
e : INT | ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+ ;
WS : (' '|'\t'|'\n')+ -> skip ;

View File

@ -0,0 +1,13 @@
grammar <grammarName>;
s
@init {<LL_EXACT_AMBIG_DETECTION()>}
: expr[0] {<ToStringTree("$expr.ctx"):writeln()>};
expr[int _p]
: ID
(
{5 >= $_p}? '*' expr[6]
| {4 >= $_p}? '+' expr[5]
)*
;
ID : [a-zA-Z]+ ;
WS : [ \r\n\t]+ -> skip ;

View File

@ -0,0 +1,10 @@
grammar <grammarName>;
s
@init {<LL_EXACT_AMBIG_DETECTION()>}
@after {<dumpDFA()>}
: '{' stat* '}' ;
stat: 'if' ID 'then' stat ('else' ID)?
| 'return'
;
ID : 'a'..'z'+ ;
WS : (' '|'\t'|'\n')+ -> skip ;

View File

@ -0,0 +1,15 @@
grammar <grammarName>;
prog
@init {<LL_EXACT_AMBIG_DETECTION()>}
: expr_or_assign*;
expr_or_assign
: expr '++' {<writeln("\"fail.\"")>}
| expr {<writeln("\"pass: \"+$expr.text")>}
;
expr: expr_primary ('\<-' ID)?;
expr_primary
: '(' ID ')'
| ID '(' ID ')'
| ID
;
ID : [a-z]+ ;

View File

@ -0,0 +1,9 @@
grammar <grammarName>;
s @after {<dumpDFA()>}
: a;
a : e ID ;
b : e INT ID ;
e : INT | ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+ ;
WS : (' '|'\t'|'\n')+ -> skip ;

View File

@ -0,0 +1,8 @@
lexer grammar <grammarName>;
I : ({<writeln("\"stuff fail: \" + this.text")>} 'a'
| {<writeln("\"stuff0: \" + this.text")>}
'a' {<writeln("\"stuff1: \" + this.text")>}
'b' {<writeln("\"stuff2: \" + this.text")>})
{<writeln("this.text")>} ;
WS : (' '|'\n') -> skip ;
J : .;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
I : '0'..'9'+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D] -> skip ;

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
I : (~[ab \n]|'a') {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
I : ~[ab \n] ~[ \ncd]* {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
I : '0'..'9'+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
I : [0-9]+ {<writeln("\"I\"")>} ;
ID : [a-zA-Z] [a-zA-Z0-9]* {<writeln("\"ID\"")>} ;
WS : [ \n\u0009\r]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
DASHBRACK : [\\-\]]+ {<writeln("\"DASHBRACK\"")>} ;
WS : [ \u]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
I : [0-]+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
I : [0-9]+ {<writeln("\"I\"")>} ;
WS : [ \u]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
A : ["a-z]+ {<writeln("\"A\"")>} ;
WS : [ \n\t]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
A : [z-a9]+ {<writeln("\"A\"")>} ;
WS : [ \u]+ -> skip ;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
DONE : EOF ;
A : 'a';

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
A : 'a' EOF ;
B : 'a';
C : 'c';

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT*;
WS : (' '|'\t')+;

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
I : ('a' | 'ab') {<writeln("this.text")>} ;
WS : (' '|'\n') -> skip ;
J : .;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT?;
WS : (' '|'\t')+;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : ('//' .*? '\n')+;
WS : (' '|'\t')+;

View File

@ -0,0 +1,8 @@
lexer grammar <grammarName>;
HexLiteral : '0' ('x'|'X') HexDigit+ ;
DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;
FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;
DOT : '.' ;
ID : 'a'..'z'+ ;
fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
KEND : 'end' ; // has priority
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> skip ;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT*?;
WS : (' '|'\t')+;

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
I : .*? ('a' | 'ab') {<writeln("this.text")>} ;
WS : (' '|'\n') -> skip ;
J : . {<writeln("this.text")>};

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT??;
WS : (' '|'\t')+;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : ('//' .*? '\n')+?;
WS : (' '|'\t')+;

View File

@ -0,0 +1,2 @@
lexer grammar <grammarName>;
STRING : '\"' ('\"\"' | .)*? '\"';

View File

@ -0,0 +1,7 @@
lexer grammar <grammarName>;
START_BLOCK: '-.-.-';
ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;
fragment LETTER: L_A|L_K;
fragment L_A: '.-';
fragment L_K: '-.-';
SEPARATOR: '!';

View File

@ -0,0 +1,34 @@
lexer grammar PositionAdjustingLexer;
@members {
<PositionAdjustingLexer()>
}
ASSIGN : '=' ;
PLUS_ASSIGN : '+=' ;
LCURLY: '{';
// 'tokens' followed by '{'
TOKENS : 'tokens' IGNORED '{';
// IDENTIFIER followed by '+=' or '='
LABEL
: IDENTIFIER IGNORED '+'? '='
;
IDENTIFIER
: [a-zA-Z_] [a-zA-Z0-9_]*
;
fragment
IGNORED
: [ \t\r\n]*
;
NEWLINE
: [\r\n]+ -> skip
;
WS
: [ \t]+ -> skip
;

View File

@ -0,0 +1,2 @@
lexer grammar <grammarName>;
QUOTE : '"' ; // make sure this compiles

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : '/*' (CMT | .)+? '*/' ;
WS : (' '|'\t')+;

View File

@ -0,0 +1,3 @@
lexer grammar <grammarName>;
CMT : '/*' (CMT | .)*? '*/' ;
WS : (' '|'\t')+;

View File

@ -0,0 +1,4 @@
lexer grammar <grammarName>;
A : '-' I ;
I : '0'..'9'+ ;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,6 @@
lexer grammar <grammarName>;
Backslash : '\\';
Slash : '/';
Vee : '\\/';
Wedge : '/\\';
WS : [ \t] -> skip;

View File

@ -0,0 +1,6 @@
grammar <grammarName>;
a : ID+ {
<writeln("$text")>
};
ID : 'a'..'z'+;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,6 @@
grammar <grammarName>;
a : ID* {
<writeln("$text")>
};
ID : 'a'..'z'+;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,6 @@
grammar <grammarName>;
a : (ID|ID)+ {
<writeln("$text")>
};
ID : 'a'..'z'+;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,6 @@
grammar <grammarName>;
a : (ID|ID)* {
<writeln("$text")>
};
ID : 'a'..'z'+;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,9 @@
grammar <grammarName>;
a : ID {
<writeln("\"alt 1\"")>
} | INT {
<writeln("\"alt 2\"")>
};
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
a : (ID|INT{
})+ {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
a : (ID|INT{
})* {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,7 @@
grammar <grammarName>;
a : ID INT {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
start : statement+ ;
statement : 'x' | ifStatement;
ifStatement : 'if' 'y' statement ('else' statement)? {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> channel(HIDDEN);

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
start : statement+ ;
statement : 'x' | ifStatement;
ifStatement : 'if' 'y' statement ('else' statement|) {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> channel(HIDDEN);

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
start : statement+ ;
statement : 'x' | ifStatement;
ifStatement : 'if' 'y' statement ('else' statement)?? {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> channel(HIDDEN);

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
start : statement+ ;
statement : 'x' | ifStatement;
ifStatement : 'if' 'y' statement (|'else' statement) {
<writeln("$text")>
};
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> channel(HIDDEN);

View File

@ -0,0 +1,7 @@
grammar <grammarName>;
a : (ID|{}INT)? {
<writeln("$text")>
};
ID : 'a'..'z'+;
INT : '0'..'9'+ ;
WS : (' '|'\n') -> skip;

View File

@ -0,0 +1,8 @@
grammar <grammarName>;
start : a* EOF;
a
: label=subrule { <writeln("$label.text")> } #One
| label='y' { <writeln("$label.text")> } #Two
;
subrule : 'x';
WS : (' '|'\n') -> skip ;

View File

@ -0,0 +1,6 @@
grammar <grammarName>;
a : b1=b b2+=b* b3+=';' ;
b : id_=ID val+=INT*;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') -> skip ;

Some files were not shown because too many files have changed in this diff Show More