ericvergnaud 2014-10-16 23:53:54 +08:00
parent 4f657e3e52
commit 3d04a72d52
8 changed files with 123 additions and 0 deletions

@@ -115,9 +115,99 @@ public class Generator {
list.add(buildParserErrors());
list.add(buildParserExec());
list.add(buildParseTrees());
list.add(buildSemPredEvalLexer());
return list;
}
private TestFile buildSemPredEvalLexer() throws Exception {
TestFile file = new TestFile("SemPredEvalLexer");
file.addLexerTest(input, "DisableRule", "L",
"enum abc",
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<3>,1:5]\n" +
"[@2,8:7='<EOF>',<-1>,1:8]\n" +
"s0-' '->:s5=>4\n" +
"s0-'a'->:s6=>3\n" +
"s0-'e'->:s1=>3\n" +
":s1=>3-'n'->:s2=>3\n" +
":s2=>3-'u'->:s3=>3\n" +
":s6=>3-'b'->:s6=>3\n" +
":s6=>3-'c'->:s6=>3\n",
null);
file.addLexerTest(input, "IDvsEnum", "L",
"enum abc enum",
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<2>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s5=>3\n" +
"s0-'a'->:s4=>2\n" +
"s0-'e'->:s1=>2\n" +
":s1=>2-'n'->:s2=>2\n" +
":s2=>2-'u'->:s3=>2\n" +
":s4=>2-'b'->:s4=>2\n" +
":s4=>2-'c'->:s4=>2\n", // no 'm'-> transition...conflicts with pred
null);
file.addLexerTest(input, "IDnotEnum", "L",
"enum abc enum",
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<2>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s2=>3\n", // no edges in DFA for enum/id. all paths lead to pred.
null);
file.addLexerTest(input, "EnumNotID", "L",
"enum abc enum",
"[@0,0:3='enum',<1>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<1>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s3=>3\n", // no edges in DFA for enum/id. all paths lead to pred.
null);
file.addLexerTest(input, "Indent", "L",
"abc\n def \n",
"INDENT\n" + // action output
"[@0,0:2='abc',<1>,1:0]\n" + // ID
"[@1,3:3='\\n',<3>,1:3]\n" + // NL
"[@2,4:5=' ',<2>,2:0]\n" + // INDENT
"[@3,6:8='def',<1>,2:2]\n" + // ID
"[@4,9:10=' ',<4>,2:5]\n" + // WS
"[@5,11:11='\\n',<3>,2:7]\n" +
"[@6,12:11='<EOF>',<-1>,3:8]\n" +
"s0-'\n" +
"'->:s2=>3\n" +
"s0-'a'->:s1=>1\n" +
"s0-'d'->:s1=>1\n" +
":s1=>1-'b'->:s1=>1\n" +
":s1=>1-'c'->:s1=>1\n" +
":s1=>1-'e'->:s1=>1\n" +
":s1=>1-'f'->:s1=>1\n",
null);
file.addLexerTest(input, "LexerInputPositionSensitivePredicates", "L",
"a cde\nabcde\n",
"a\n" +
"cde\n" +
"ab\n" +
"cde\n" +
"[@0,0:0='a',<1>,1:0]\n" +
"[@1,2:4='cde',<2>,1:2]\n" +
"[@2,6:7='ab',<1>,2:0]\n" +
"[@3,8:10='cde',<2>,2:2]\n" +
"[@4,12:11='<EOF>',<-1>,3:0]\n",
null);
file.addLexerTest(input, "PredicatedKeywords", "L",
"enum enu a",
"enum!\n" +
"ID enu\n" +
"ID a\n" +
"[@0,0:3='enum',<1>,1:0]\n" +
"[@1,5:7='enu',<2>,1:5]\n" +
"[@2,9:9='a',<2>,1:9]\n" +
"[@3,10:9='<EOF>',<-1>,1:10]\n",
null);
return file;
}
private TestFile buildParseTrees() throws Exception {
TestFile file = new TestFile("ParseTrees");
file.addParserTest(input, "TokenAndRuleContextString", "T", "s",

@@ -0,0 +1,5 @@
lexer grammar <grammarName>;
E1 : 'enum' { <False()> }? ;
E2 : 'enum' { <True()> }? ; // winner not E1 or ID
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> skip;
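
The token lines expected by buildSemPredEvalLexer above are ANTLR's default token toString() output; the trailing s0-... lines are the lexer DFA that the test harness prints in addition. As a minimal sketch (not part of this commit) of where such a dump comes from, assuming a lexer class L generated from the DisableRule grammar above:

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

public class DisableRuleSketch {
	public static void main(String[] args) {
		// L is the lexer ANTLR would generate from the grammar above;
		// it is not part of this repository.
		L lexer = new L(new ANTLRInputStream("enum abc"));
		CommonTokenStream tokens = new CommonTokenStream(lexer);
		tokens.fill();                         // lex the whole input, including EOF
		for (Token t : tokens.getTokens()) {
			System.out.println(t);             // e.g. [@0,0:3='enum',<2>,1:0]
		}
	}
}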

@@ -0,0 +1,4 @@
lexer grammar <grammarName>;
ENUM : [a-z]+ { <TextEquals("enum")> }? ;
ID : [a-z]+ ;
WS : (' '|'\n') -> skip;
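
The <TextEquals("enum")> placeholder in the ENUM rule above stands for a target-specific semantic predicate; its actual expansion lives in the per-target test templates. Purely as an illustration, in the Java target the check amounts to something like this hypothetical helper (not the generated code):

import org.antlr.v4.runtime.Lexer;

// Hypothetical helper: keep the ENUM alternative only when the text matched
// so far for the current token is exactly "enum"; otherwise the input falls
// through to ID.
final class TextEqualsCheck {
	static boolean textEquals(Lexer lexer, String expected) {
		return expected.equals(lexer.getText());
	}
}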

@@ -0,0 +1,4 @@
lexer grammar <grammarName>;
ENUM : [a-z]+ { <False()> }? ;
ID : [a-z]+ ;
WS : (' '|'\n') -> skip;

@@ -0,0 +1,4 @@
lexer grammar <grammarName>;
ENUM : 'enum' { <False()> }? ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') -> skip;

@@ -0,0 +1,6 @@
lexer grammar <grammarName>;
ID : [a-z]+ ;
INDENT : [ \t]+ { <TokenStartColumnEquals("0")> }?
	{ <writeln("\"INDENT\"")> } ;
NL : '\n';
WS : [ \t]+ ;
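
Here <TokenStartColumnEquals("0")> is again a target-specific predicate: INDENT may only match when the token being built started in column 0, i.e. when it is leading whitespace. An illustrative Java-target check (hypothetical helper, not the generated code) could look like:

import org.antlr.v4.runtime.Lexer;

// Hypothetical helper: ANTLR's Lexer records the column at which the current
// token started in _tokenStartCharPositionInLine, so an INDENT predicate can
// test that it is 0.
final class TokenStartColumnCheck {
	static boolean tokenStartsInColumn(Lexer lexer, int column) {
		return lexer._tokenStartCharPositionInLine == column;
	}
}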

@@ -0,0 +1,6 @@
lexer grammar <grammarName>;
WORD1 : ID1+ { <Text():writeln()> } ;
WORD2 : ID2+ { <Text():writeln()> } ;
fragment ID1 : { <Column()> \< 2 }? [a-zA-Z];
fragment ID2 : { <Column()> >= 2 }? [a-zA-Z];
WS : (' '|'\n') -> skip;

@@ -0,0 +1,4 @@
lexer grammar <grammarName>;
ENUM : [a-z]+ { <TextEquals("enum")> }? { <writeln("\"enum!\"")> } ;
ID : [a-z]+ { <PlusText("ID"):writeln()> } ;
WS : [ \n] -> skip ;