TestLexerExec for Java is working

Sam Harwell 2015-01-22 06:13:36 -06:00
parent 4ec94e0fae
commit 42ba8c4530
79 changed files with 1058 additions and 5142 deletions

View File

@ -1,5 +1,6 @@
TestFolders ::= [
"CompositeLexers": [],
"LexerExec": [],
"ParseTrees": []
]

View File

@ -0,0 +1,31 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
ab
%>
Output() ::= <<
stuff0: <!required space!>
stuff1: a
stuff2: ab
ab
[@0,0:1='ab',\<1>,1:0]
[@1,2:1='\<EOF>',\<-1>,1:2]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : ({<PlusText("stuff fail: "):writeln()>} 'a'
| {<PlusText("stuff0: "):writeln()>}
'a' {<PlusText("stuff1: "):writeln()>}
'b' {<PlusText("stuff2: "):writeln()>})
{<Text():writeln()>} ;
WS : (' '|'\n') -> skip ;
J : .;
>>
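Each of these .stg files describes one lexer test: TestType(), a grammar template, the Input() to lex, and the expected Output()/Errors(). As a rough illustration only (not code produced by this commit), the JUnit method generated from the ActionPlacement template above would look roughly like the following, assuming the existing BaseTest-style harness with an execLexer(grammarFileName, grammarStr, lexerName, input) helper and the Java target's expansion of <PlusText(...):writeln()>:

import org.junit.Test;
import static org.junit.Assert.assertEquals;

// Hypothetical sketch of a generated test method; BaseTest and execLexer are
// assumed from the existing ANTLR runtime test harness.
public class LexerExecSketch extends BaseTest {
    @Test
    public void testActionPlacement() throws Exception {
        String grammar =
            "lexer grammar L;\n" +
            "I : ({System.out.println(\"stuff fail: \" + getText());} 'a'\n" +
            "  | {System.out.println(\"stuff0: \" + getText());}\n" +
            "    'a' {System.out.println(\"stuff1: \" + getText());}\n" +
            "    'b' {System.out.println(\"stuff2: \" + getText());})\n" +
            "    {System.out.println(getText());} ;\n" +
            "WS : (' '|'\\n') -> skip ;\n" +
            "J : .;\n";
        String found = execLexer("L.g4", grammar, "L", "ab");
        String expecting =
            "stuff0: \n" +
            "stuff1: a\n" +
            "stuff2: ab\n" +
            "ab\n" +
            "[@0,0:1='ab',<1>,1:0]\n" +
            "[@1,2:1='<EOF>',<-1>,1:2]\n";
        assertEquals(expecting, found);
    }
}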

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "34<\n> 34"
Output() ::= <<
I
I
[@0,0:1='34',\<1>,1:0]
[@1,4:5='34',\<1>,2:1]
[@2,6:5='\<EOF>',\<-1>,2:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : '0'..'9'+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D] -> skip ;
>>

View File

@ -0,0 +1,25 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
a x
%>
Output() ::= <<
I
I
[@0,0:0='a',\<1>,1:0]
[@1,2:2='x',\<1>,1:2]
[@2,3:2='\<EOF>',\<-1>,1:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : (~[ab \\n]|'a') {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
xaf
%>
Output() ::= <<
I
[@0,0:2='xaf',\<1>,1:0]
[@1,3:2='\<EOF>',\<-1>,1:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : ~[ab \n] ~[ \ncd]* {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "34<\n> 34"
Output() ::= <<
I
I
[@0,0:1='34',\<1>,1:0]
[@1,4:5='34',\<1>,2:1]
[@2,6:5='\<EOF>',\<-1>,2:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : '0'..'9'+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;
>>

View File

@ -0,0 +1,28 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "34<\n> 34 a2 abc <\n> "
Output() ::= <<
I
I
ID
ID
[@0,0:1='34',\<1>,1:0]
[@1,4:5='34',\<1>,2:1]
[@2,7:8='a2',\<2>,2:4]
[@3,10:12='abc',\<2>,2:7]
[@4,18:17='\<EOF>',\<-1>,3:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : [0-9]+ {<writeln("\"I\"")>} ;
ID : [a-zA-Z] [a-zA-Z0-9]* {<writeln("\"ID\"")>} ;
WS : [ \n\u0009\r]+ -> skip ;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "- ] "
Output() ::= <<
DASHBRACK
DASHBRACK
[@0,0:0='-',\<1>,1:0]
[@1,2:2=']',\<1>,1:2]
[@2,4:3='\<EOF>',\<-1>,1:4]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
DASHBRACK : [\\-\]]+ {<writeln("\"DASHBRACK\"")>} ;
WS : [ \u]+ -> skip ;
>>

View File

@ -0,0 +1,21 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "00<\n>"
Output() ::= <<
I
[@0,0:1='00',\<1>,1:0]
[@1,3:2='\<EOF>',\<-1>,2:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : [0-]+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;
>>

View File

@ -0,0 +1,21 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "34 "
Output() ::= <<
I
[@0,0:1='34',\<1>,1:0]
[@1,3:2='\<EOF>',\<-1>,1:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : [0-9]+ {<writeln("\"I\"")>} ;
WS : [ \u]+ -> skip ;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
b"a
%>
Output() ::= <<
A
[@0,0:2='b"a',\<1>,1:0]
[@1,3:2='\<EOF>',\<-1>,1:3]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
A : ["a-z]+ {<writeln("\"A\"")>} ;
WS : [ \n\t]+ -> skip ;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
b"\a
%>
Output() ::= <<
A
[@0,0:3='b"\a',\<1>,1:0]
[@1,4:3='\<EOF>',\<-1>,1:4]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
A : ["\\ab]+ {<writeln("\"A\"")>} ;
WS : [ \n\t]+ -> skip ;
>>

View File

@ -0,0 +1,21 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "9"
Output() ::= <<
A
[@0,0:0='9',\<1>,1:0]
[@1,1:0='\<EOF>',\<-1>,1:1]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
A : [z-a9]+ {<writeln("\"A\"")>} ;
WS : [ \u]+ -> skip ;
>>

View File

@ -0,0 +1,20 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= ""
Output() ::= <<
[@0,0:-1='\<EOF>',\<1>,1:0]
[@1,0:-1='\<EOF>',\<-1>,1:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
DONE : EOF ;
A : 'a';
>>

View File

@ -0,0 +1,15 @@
import "EOFSuffixInFirstRule.stg"
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= ""
Output() ::= <<
[@0,0:-1='\<EOF>',\<-1>,1:0]<\n>
>>
Errors() ::= ""

View File

@ -0,0 +1,16 @@
import "EOFSuffixInFirstRule.stg"
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "a"
Output() ::= <<
[@0,0:0='a',\<1>,1:0]
[@1,1:0='\<EOF>',\<-1>,1:1]<\n>
>>
Errors() ::= ""

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
//blah
//blah<\n>
>>
Output() ::= <<
[@0,0:13='//blah\n//blah\n',\<1>,1:0]
[@1,14:13='\<EOF>',\<-1>,3:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT*;
WS : (' '|'\t')+;
>>

View File

@ -0,0 +1,24 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
ab
%>
Output() ::= <<
ab
[@0,0:1='ab',\<1>,1:0]
[@1,2:1='\<EOF>',\<-1>,1:2]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : ('a' | 'ab') {<Text():writeln()>} ;
WS : (' '|'\n') -> skip ;
J : .;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
//blah
//blah<\n>
>>
Output() ::= <<
[@0,0:13='//blah\n//blah\n',\<1>,1:0]
[@1,14:13='\<EOF>',\<-1>,3:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT?;
WS : (' '|'\t')+;
>>

View File

@ -0,0 +1,23 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
//blah
//blah<\n>
>>
Output() ::= <<
[@0,0:13='//blah\n//blah\n',\<1>,1:0]
[@1,14:13='\<EOF>',\<-1>,3:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
CMT : ('//' .*? '\n')+;
WS : (' '|'\t')+;
>>

View File

@ -0,0 +1,39 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
x 0 1 a.b a.l
%>
Output() ::= <<
[@0,0:0='x',\<5>,1:0]
[@1,1:1=' ',\<6>,1:1]
[@2,2:2='0',\<2>,1:2]
[@3,3:3=' ',\<6>,1:3]
[@4,4:4='1',\<2>,1:4]
[@5,5:5=' ',\<6>,1:5]
[@6,6:6='a',\<5>,1:6]
[@7,7:7='.',\<4>,1:7]
[@8,8:8='b',\<5>,1:8]
[@9,9:9=' ',\<6>,1:9]
[@10,10:10='a',\<5>,1:10]
[@11,11:11='.',\<4>,1:11]
[@12,12:12='l',\<5>,1:12]
[@13,13:12='\<EOF>',\<-1>,1:13]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
HexLiteral : '0' ('x'|'X') HexDigit+ ;
DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;
FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;
DOT : '.' ;
ID : 'a'..'z'+ ;
fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;
WS : (' '|'\n')+;
>>

View File

@ -0,0 +1,43 @@
//TestFolders ::= [
//]
TestTemplates ::= [
"QuoteTranslation": [],
"RefToRuleDoesNotSetTokenNorEmitAnother": [],
"Slashes": [],
"Parentheses": [],
"NonGreedyTermination1": [],
"NonGreedyTermination2": [],
"GreedyOptional": [],
"NonGreedyOptional": [],
"GreedyClosure": [],
"NonGreedyClosure": [],
"GreedyPositiveClosure": [],
"NonGreedyPositiveClosure": [],
"RecursiveLexerRuleRefWithWildcardStar_1": [],
"RecursiveLexerRuleRefWithWildcardStar_2": [],
"RecursiveLexerRuleRefWithWildcardPlus_1": [],
"RecursiveLexerRuleRefWithWildcardPlus_2": [],
"ActionPlacement": [],
"GreedyConfigs": [],
"NonGreedyConfigs": [],
"KeywordID": [],
"HexVsID": [],
"EOFByItself": [],
"EOFSuffixInFirstRule_1": [],
"EOFSuffixInFirstRule_2": [],
"CharSet": [],
"CharSetPlus": [],
"CharSetNot": [],
"CharSetInSet": [],
"CharSetRange": [],
"CharSetWithMissingEndRange": [],
"CharSetWithMissingEscapeChar": [],
"CharSetWithEscapedChar": [],
"CharSetWithReversedRange": [],
"CharSetWithQuote1": [],
"CharSetWithQuote2": [],
"PositionAdjustingLexer": [],
"LargeLexer": [],
"ZeroLengthToken": []
]

View File

@ -0,0 +1,29 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
end eend ending a
%>
Output() ::= <<
[@0,0:2='end',\<1>,1:0]
[@1,3:3=' ',\<3>,1:3]
[@2,4:7='eend',\<2>,1:4]
[@3,8:8=' ',\<3>,1:8]
[@4,9:14='ending',\<2>,1:9]
[@5,15:15=' ',\<3>,1:15]
[@6,16:16='a',\<2>,1:16]
[@7,17:16='\<EOF>',\<-1>,1:17]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
KEND : 'end' ; // has priority
ID : 'a'..'z'+ ;
WS : (' '|'\n')+;
>>

View File

@ -1,4 +1,20 @@
lexer grammar L;
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= "KW400"
Output() ::= <<
[@0,0:4='KW400',\<402>,1:0]
[@1,5:4='\<EOF>',\<-1>,1:5]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
WS : [ \t\r\n]+ -> skip;
KW0 : 'KW' '0';
KW1 : 'KW' '1';
@ -4000,3 +4016,4 @@ KW3996 : 'KW' '3996';
KW3997 : 'KW' '3997';
KW3998 : 'KW' '3998';
KW3999 : 'KW' '3999';
>>

View File

@ -0,0 +1,24 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
//blah
//blah<\n>
>>
Output() ::= <<
[@0,0:6='//blah\n',\<1>,1:0]
[@1,7:13='//blah\n',\<1>,2:0]
[@2,14:13='\<EOF>',\<-1>,3:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT*?;
WS : (' '|'\t')+;
>>

View File

@ -0,0 +1,26 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
ab
%>
Output() ::= <<
a
b
[@0,0:0='a',\<1>,1:0]
[@1,1:1='b',\<3>,1:1]
[@2,2:1='\<EOF>',\<-1>,1:2]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
I : .*? ('a' | 'ab') {<Text():writeln()>} ;
WS : (' '|'\n') -> skip ;
J : . {<Text():writeln()>};
>>

View File

@ -0,0 +1,24 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
//blah
//blah<\n>
>>
Output() ::= <<
[@0,0:6='//blah\n',\<1>,1:0]
[@1,7:13='//blah\n',\<1>,2:0]
[@2,14:13='\<EOF>',\<-1>,3:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT??;
WS : (' '|'\t')+;
>>

View File

@ -0,0 +1,24 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
//blah
//blah<\n>
>>
Output() ::= <<
[@0,0:6='//blah\n',\<1>,1:0]
[@1,7:13='//blah\n',\<1>,2:0]
[@2,14:13='\<EOF>',\<-1>,3:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
CMT : ('//' .*? '\n')+?;
WS : (' '|'\t')+;
>>

View File

@ -0,0 +1,22 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
"hi""mom"
%>
Output() ::= <<
[@0,0:3='"hi"',\<1>,1:0]
[@1,4:8='"mom"',\<1>,1:4]
[@2,9:8='\<EOF>',\<-1>,1:9]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
STRING : '"' ('""' | .)*? '"';
>>

View File

@ -0,0 +1,21 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
"""mom"
%>
Output() ::= <<
[@0,0:6='"""mom"',\<1>,1:0]
[@1,7:6='\<EOF>',\<-1>,1:7]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
STRING : '"' ('""' | .)+? '"';
>>

View File

@ -0,0 +1,27 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
-.-.-!
%>
Output() ::= <<
[@0,0:4='-.-.-',\<1>,1:0]
[@1,5:5='!',\<3>,1:5]
[@2,6:5='\<EOF>',\<-1>,1:6]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
START_BLOCK: '-.-.-';
ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;
fragment LETTER: L_A|L_K;
fragment L_A: '.-';
fragment L_K: '-.-';
SEPARATOR: '!';
>>

View File

@ -0,0 +1,70 @@
TestType() ::= "Lexer"
Grammar ::= [
"PositionAdjustingLexer": {<grammar("PositionAdjustingLexer")>}
]
Input() ::= <<
tokens
tokens {
notLabel
label1 =
label2 +=
notLabel<\n>
>>
TOKENS() ::= "\<4>"
LABEL() ::= "\<5>"
IDENTIFIER() ::= "\<6>"
Output() ::= <<
[@0,0:5='tokens',<IDENTIFIER()>,1:0]
[@1,7:12='tokens',<TOKENS()>,2:0]
[@2,14:14='{',\<3>,2:7]
[@3,16:23='notLabel',<IDENTIFIER()>,3:0]
[@4,25:30='label1',<LABEL()>,4:0]
[@5,32:32='=',\<1>,4:7]
[@6,34:39='label2',<LABEL()>,5:0]
[@7,41:42='+=',\<2>,5:7]
[@8,44:51='notLabel',<IDENTIFIER()>,6:0]
[@9,53:52='\<EOF>',\<-1>,7:0]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar PositionAdjustingLexer;
@members {
<PositionAdjustingLexer()>
}
ASSIGN : '=' ;
PLUS_ASSIGN : '+=' ;
LCURLY: '{';
// 'tokens' followed by '{'
TOKENS : 'tokens' IGNORED '{';
// IDENTIFIER followed by '+=' or '='
LABEL
: IDENTIFIER IGNORED '+'? '='
;
IDENTIFIER
: [a-zA-Z_] [a-zA-Z0-9_]*
;
fragment
IGNORED
: [ \t\r\n]*
;
NEWLINE
: [\r\n]+ -> skip
;
WS
: [ \t]+ -> skip
;
>>

View File

@ -0,0 +1,21 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
"
%>
Output() ::= <<
[@0,0:0='"',\<1>,1:0]
[@1,1:0='\<EOF>',\<-1>,1:1]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
QUOTE : '"' ; // make sure this compiles
>>

View File

@ -0,0 +1,23 @@
import "RecursiveLexerRuleRefWithWildcardPlus.stg"
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
/* ick */
/* /* */
/* /*nested*/ */<\n>
>>
Output() ::= <<
[@0,0:8='/* ick */',\<1>,1:0]
[@1,9:9='\n',\<2>,1:9]
[@2,10:34='/* /* */\n/* /*nested*/ */',\<1>,2:0]
[@3,35:35='\n',\<2>,3:16]
[@4,36:35='\<EOF>',\<-1>,4:0]<\n>
>>
Errors() ::= ""

View File

@ -0,0 +1,26 @@
import "RecursiveLexerRuleRefWithWildcardPlus.stg"
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
/* ick */x
/* /* */x
/* /*nested*/ */x<\n>
>>
Output() ::= <<
[@0,0:8='/* ick */',\<1>,1:0]
[@1,10:10='\n',\<2>,1:10]
[@2,11:36='/* /* */x\n/* /*nested*/ */',\<1>,2:0]
[@3,38:38='\n',\<2>,3:17]
[@4,39:38='\<EOF>',\<-1>,4:0]<\n>
>>
Errors() ::= <<
line 1:9 token recognition error at: 'x'
line 3:16 token recognition error at: 'x'<\n>
>>

View File

@ -0,0 +1,23 @@
import "RecursiveLexerRuleRefWithWildcardStar.stg"
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
/* ick */
/* /* */
/* /*nested*/ */<\n>
>>
Output() ::= <<
[@0,0:8='/* ick */',\<1>,1:0]
[@1,9:9='\n',\<2>,1:9]
[@2,10:34='/* /* */\n/* /*nested*/ */',\<1>,2:0]
[@3,35:35='\n',\<2>,3:16]
[@4,36:35='\<EOF>',\<-1>,4:0]<\n>
>>
Errors() ::= ""

View File

@ -0,0 +1,26 @@
import "RecursiveLexerRuleRefWithWildcardStar.stg"
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <<
/* ick */x
/* /* */x
/* /*nested*/ */x<\n>
>>
Output() ::= <<
[@0,0:8='/* ick */',\<1>,1:0]
[@1,10:10='\n',\<2>,1:10]
[@2,11:36='/* /* */x\n/* /*nested*/ */',\<1>,2:0]
[@3,38:38='\n',\<2>,3:17]
[@4,39:38='\<EOF>',\<-1>,4:0]<\n>
>>
Errors() ::= <<
line 1:9 token recognition error at: 'x'
line 3:16 token recognition error at: 'x'<\n>
>>

View File

@ -0,0 +1,25 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
34 -21 3
%>
Output() ::= <<
[@0,0:1='34',\<2>,1:0]
[@1,3:5='-21',\<1>,1:3]
[@2,7:7='3',\<2>,1:7]
[@3,8:7='\<EOF>',\<-1>,1:8]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
A : '-' I ;
I : '0'..'9'+ ;
WS : (' '|'\n') -> skip ;
>>

View File

@ -0,0 +1,28 @@
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
\ / \/ /\
%>
Output() ::= <<
[@0,0:0='\',\<1>,1:0]
[@1,2:2='/',\<2>,1:2]
[@2,4:5='\/',\<3>,1:4]
[@3,7:8='/\',\<4>,1:7]
[@4,9:8='\<EOF>',\<-1>,1:9]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
Backslash : '\\\\';
Slash : '/';
Vee : '\\\\/';
Wedge : '/\\\\';
WS : [ \t] -> skip;
>>

View File

@ -0,0 +1,34 @@
/*
* This is a regression test for antlr/antlr4#687 "Empty zero-length tokens
* cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match
* zero-length tokens"
*/
TestType() ::= "Lexer"
Grammar ::= [
"L": {<grammar("L")>}
]
Input() ::= <%
'xxx'
%>
Output() ::= <<
[@0,0:4=''xxx'',\<1>,1:0]
[@1,5:4='\<EOF>',\<-1>,1:5]<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
lexer grammar <grammarName>;
BeginString
: '\'' -> more, pushMode(StringMode)
;
mode StringMode;
StringMode_X : 'x' -> more;
StringMode_Done : -> more, mode(EndStringMode);
mode EndStringMode;
EndString : '\'' -> popMode;
>>

View File

@ -77,6 +77,7 @@ public class Antlr4TestGeneratorMojo extends AbstractMojo {
targetGroup.registerModelAdaptor(STGroup.class, new STGroupModelAdaptor());
targetGroup.defineDictionary("escape", new JavaEscapeStringMap());
targetGroup.defineDictionary("lines", new LinesStringMap());
targetGroup.defineDictionary("strlen", new StrlenStringMap());
String rootFolder = "org/antlr4/runtime/test/templates";
STGroup index = new STGroupFile(rootFolder + "/Index.stg");

View File

@ -0,0 +1,38 @@
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package org.antlr.mojo.antlr4.testgen;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.Set;
/**
*
* @author Sam Harwell
*/
public class StrlenStringMap extends AbstractMap<String, Object> {
@Override
public Object get(Object key) {
if (key instanceof String) {
String str = (String)key;
return str.length();
}
return super.get(key);
}
@Override
public boolean containsKey(Object key) {
return key instanceof String;
}
@Override
public Set<Entry<String, Object>> entrySet() {
return Collections.emptySet();
}
}
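StrlenStringMap backs the "strlen" dictionary registered in Antlr4TestGeneratorMojo above: any string key maps to its own length, which is what lets templates write <strlen.(text)> to pre-size a StringBuilder. A minimal, self-contained sketch of that lookup (not part of the commit; it assumes StringTemplate 4 on the classpath):

import org.antlr.mojo.antlr4.testgen.StrlenStringMap;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
import org.stringtemplate.v4.STGroupString;

// Demonstrates the dictionary mechanism only; the real templates live under
// org/antlr4/runtime/test/templates and are loaded by the mojo.
public class StrlenDictionaryDemo {
    public static void main(String[] args) {
        STGroup group = new STGroupString(
            "len(text) ::= \"new StringBuilder(<strlen.(text)>)\"");
        group.defineDictionary("strlen", new StrlenStringMap());
        ST st = group.getInstanceOf("len");
        st.add("text", "lexer grammar L;");
        System.out.println(st.render()); // prints: new StringBuilder(16)
    }
}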

View File

@ -154,7 +154,6 @@ public class Generator {
list.add(buildFullContextParsing());
list.add(buildLeftRecursion());
list.add(buildLexerErrors());
list.add(buildLexerExec());
list.add(buildListeners());
list.add(buildParserErrors());
list.add(buildParserExec());
@ -941,234 +940,6 @@ public class Generator {
return file;
}
private JUnitTestFile buildLexerExec() throws Exception {
JUnitTestFile file = new JUnitTestFile("LexerExec");
file.addLexerTest(input, "QuoteTranslation", "L", "\"",
"[@0,0:0='\"',<1>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n", null);
file.addLexerTest(input, "RefToRuleDoesNotSetTokenNorEmitAnother", "L", "34 -21 3",
"[@0,0:1='34',<2>,1:0]\n" +
"[@1,3:5='-21',<1>,1:3]\n" +
"[@2,7:7='3',<2>,1:7]\n" +
"[@3,8:7='<EOF>',<-1>,1:8]\n", null);
file.addLexerTest(input, "Slashes", "L", "\\ / \\/ /\\",
"[@0,0:0='\\',<1>,1:0]\n" +
"[@1,2:2='/',<2>,1:2]\n" +
"[@2,4:5='\\/',<3>,1:4]\n" +
"[@3,7:8='/\\',<4>,1:7]\n" +
"[@4,9:8='<EOF>',<-1>,1:9]\n", null);
file.addLexerTest(input, "Parentheses", "L", "-.-.-!",
"[@0,0:4='-.-.-',<1>,1:0]\n" +
"[@1,5:5='!',<3>,1:5]\n" +
"[@2,6:5='<EOF>',<-1>,1:6]\n", null);
file.addLexerTest(input, "NonGreedyTermination1", "L", "\"hi\"\"mom\"",
"[@0,0:3='\"hi\"',<1>,1:0]\n" +
"[@1,4:8='\"mom\"',<1>,1:4]\n" +
"[@2,9:8='<EOF>',<-1>,1:9]\n", null);
file.addLexerTest(input, "NonGreedyTermination2", "L", "\"\"\"mom\"",
"[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" +
"[@1,7:6='<EOF>',<-1>,1:7]\n", null);
file.addLexerTest(input, "GreedyOptional", "L", "//blah\n//blah\n",
"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
"[@1,14:13='<EOF>',<-1>,3:0]\n", null);
file.addLexerTest(input, "NonGreedyOptional", "L", "//blah\n//blah\n",
"[@0,0:6='//blah\\n',<1>,1:0]\n" +
"[@1,7:13='//blah\\n',<1>,2:0]\n" +
"[@2,14:13='<EOF>',<-1>,3:0]\n", null);
file.addLexerTest(input, "GreedyClosure", "L", "//blah\n//blah\n",
"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
"[@1,14:13='<EOF>',<-1>,3:0]\n", null);
file.addLexerTest(input, "NonGreedyClosure", "L", "//blah\n//blah\n",
"[@0,0:6='//blah\\n',<1>,1:0]\n" +
"[@1,7:13='//blah\\n',<1>,2:0]\n" +
"[@2,14:13='<EOF>',<-1>,3:0]\n", null);
file.addLexerTest(input, "GreedyPositiveClosure", "L", "//blah\n//blah\n",
"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
"[@1,14:13='<EOF>',<-1>,3:0]\n", null);
file.addLexerTest(input, "NonGreedyPositiveClosure", "L", "//blah\n//blah\n",
"[@0,0:6='//blah\\n',<1>,1:0]\n" +
"[@1,7:13='//blah\\n',<1>,2:0]\n" +
"[@2,14:13='<EOF>',<-1>,3:0]\n", null);
file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardStar", "L",
"/* ick */\n" +
"/* /* */\n" +
"/* /*nested*/ */\n",
"[@0,0:8='/* ick */',<1>,1:0]\n" +
"[@1,9:9='\\n',<2>,1:9]\n" +
"[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" +
"[@3,35:35='\\n',<2>,3:16]\n" +
"[@4,36:35='<EOF>',<-1>,4:0]\n", null, 1);
file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardStar", "L",
"/* ick */x\n" +
"/* /* */x\n" +
"/* /*nested*/ */x\n",
"[@0,0:8='/* ick */',<1>,1:0]\n" +
"[@1,10:10='\\n',<2>,1:10]\n" +
"[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" +
"[@3,38:38='\\n',<2>,3:17]\n" +
"[@4,39:38='<EOF>',<-1>,4:0]\n",
"line 1:9 token recognition error at: 'x'\n" +
"line 3:16 token recognition error at: 'x'\n", 2);
file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardPlus", "L",
"/* ick */\n" +
"/* /* */\n" +
"/* /*nested*/ */\n",
"[@0,0:8='/* ick */',<1>,1:0]\n" +
"[@1,9:9='\\n',<2>,1:9]\n" +
"[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" +
"[@3,35:35='\\n',<2>,3:16]\n" +
"[@4,36:35='<EOF>',<-1>,4:0]\n", null, 1);
file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardPlus", "L",
"/* ick */x\n" +
"/* /* */x\n" +
"/* /*nested*/ */x\n",
"[@0,0:8='/* ick */',<1>,1:0]\n" +
"[@1,10:10='\\n',<2>,1:10]\n" +
"[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" +
"[@3,38:38='\\n',<2>,3:17]\n" +
"[@4,39:38='<EOF>',<-1>,4:0]\n",
"line 1:9 token recognition error at: 'x'\n" +
"line 3:16 token recognition error at: 'x'\n", 2);
file.addLexerTest(input, "ActionPlacement", "L", "ab",
"stuff0: \n" +
"stuff1: a\n" +
"stuff2: ab\n" +
"ab\n" +
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:1='<EOF>',<-1>,1:2]\n", null);
file.addLexerTest(input, "GreedyConfigs", "L", "ab",
"ab\n" +
"[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:1='<EOF>',<-1>,1:2]\n", null);
file.addLexerTest(input, "NonGreedyConfigs", "L", "ab",
"a\n" +
"b\n" +
"[@0,0:0='a',<1>,1:0]\n" +
"[@1,1:1='b',<3>,1:1]\n" +
"[@2,2:1='<EOF>',<-1>,1:2]\n", null);
file.addLexerTest(input, "KeywordID", "L", "end eend ending a",
"[@0,0:2='end',<1>,1:0]\n" +
"[@1,3:3=' ',<3>,1:3]\n" +
"[@2,4:7='eend',<2>,1:4]\n" +
"[@3,8:8=' ',<3>,1:8]\n" +
"[@4,9:14='ending',<2>,1:9]\n" +
"[@5,15:15=' ',<3>,1:15]\n" +
"[@6,16:16='a',<2>,1:16]\n" +
"[@7,17:16='<EOF>',<-1>,1:17]\n", null);
file.addLexerTest(input, "HexVsID", "L", "x 0 1 a.b a.l",
"[@0,0:0='x',<5>,1:0]\n" +
"[@1,1:1=' ',<6>,1:1]\n" +
"[@2,2:2='0',<2>,1:2]\n" +
"[@3,3:3=' ',<6>,1:3]\n" +
"[@4,4:4='1',<2>,1:4]\n" +
"[@5,5:5=' ',<6>,1:5]\n" +
"[@6,6:6='a',<5>,1:6]\n" +
"[@7,7:7='.',<4>,1:7]\n" +
"[@8,8:8='b',<5>,1:8]\n" +
"[@9,9:9=' ',<6>,1:9]\n" +
"[@10,10:10='a',<5>,1:10]\n" +
"[@11,11:11='.',<4>,1:11]\n" +
"[@12,12:12='l',<5>,1:12]\n" +
"[@13,13:12='<EOF>',<-1>,1:13]\n",null);
file.addLexerTest(input, "EOFByItself", "L", "",
"[@0,0:-1='<EOF>',<1>,1:0]\n" +
"[@1,0:-1='<EOF>',<-1>,1:0]\n", null);
file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "",
"[@0,0:-1='<EOF>',<-1>,1:0]\n", null, 1);
file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "a",
"[@0,0:0='a',<1>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n", null, 2);
file.addLexerTest(input, "CharSet", "L", "34\n 34",
"I\n" +
"I\n" +
"[@0,0:1='34',<1>,1:0]\n" +
"[@1,4:5='34',<1>,2:1]\n" +
"[@2,6:5='<EOF>',<-1>,2:3]\n", null);
file.addLexerTest(input, "CharSetPlus", "L", "34\n 34",
"I\n" +
"I\n" +
"[@0,0:1='34',<1>,1:0]\n" +
"[@1,4:5='34',<1>,2:1]\n" +
"[@2,6:5='<EOF>',<-1>,2:3]\n", null);
file.addLexerTest(input, "CharSetNot", "L", "xaf",
"I\n" +
"[@0,0:2='xaf',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n", null);
file.addLexerTest(input, "CharSetInSet", "L", "a x",
"I\n" +
"I\n" +
"[@0,0:0='a',<1>,1:0]\n" +
"[@1,2:2='x',<1>,1:2]\n" +
"[@2,3:2='<EOF>',<-1>,1:3]\n", null);
file.addLexerTest(input, "CharSetRange", "L", "34\n 34 a2 abc \n ",
"I\n" +
"I\n" +
"ID\n" +
"ID\n" +
"[@0,0:1='34',<1>,1:0]\n" +
"[@1,4:5='34',<1>,2:1]\n" +
"[@2,7:8='a2',<2>,2:4]\n" +
"[@3,10:12='abc',<2>,2:7]\n" +
"[@4,18:17='<EOF>',<-1>,3:3]\n", null);
file.addLexerTest(input, "CharSetWithMissingEndRange", "L", "00\n",
"I\n" +
"[@0,0:1='00',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,2:0]\n", null);
file.addLexerTest(input, "CharSetWithMissingEscapeChar", "L", "34 ",
"I\n" +
"[@0,0:1='34',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n", null);
file.addLexerTest(input, "CharSetWithEscapedChar", "L", "- ] ",
"DASHBRACK\n" +
"DASHBRACK\n" +
"[@0,0:0='-',<1>,1:0]\n" +
"[@1,2:2=']',<1>,1:2]\n" +
"[@2,4:3='<EOF>',<-1>,1:4]\n", null);
file.addLexerTest(input, "CharSetWithReversedRange", "L", "9",
"A\n" +
"[@0,0:0='9',<1>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n", null);
file.addLexerTest(input, "CharSetWithQuote1", "L", "b\"a",
"A\n" +
"[@0,0:2='b\"a',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n", null);
file.addLexerTest(input, "CharSetWithQuote2", "L", "b\"\\a",
"A\n" +
"[@0,0:3='b\"\\a',<1>,1:0]\n" +
"[@1,4:3='<EOF>',<-1>,1:4]\n", null);
final int TOKENS = 4;
final int LABEL = 5;
final int IDENTIFIER = 6;
file.addLexerTest(input, "PositionAdjustingLexer", "PositionAdjustingLexer",
"tokens\n" +
"tokens {\n" +
"notLabel\n" +
"label1 =\n" +
"label2 +=\n" +
"notLabel\n",
"[@0,0:5='tokens',<" + IDENTIFIER + ">,1:0]\n" +
"[@1,7:12='tokens',<" + TOKENS + ">,2:0]\n" +
"[@2,14:14='{',<3>,2:7]\n" +
"[@3,16:23='notLabel',<" + IDENTIFIER + ">,3:0]\n" +
"[@4,25:30='label1',<" + LABEL + ">,4:0]\n" +
"[@5,32:32='=',<1>,4:7]\n" +
"[@6,34:39='label2',<" + LABEL + ">,5:0]\n" +
"[@7,41:42='+=',<2>,5:7]\n" +
"[@8,44:51='notLabel',<" + IDENTIFIER + ">,6:0]\n" +
"[@9,53:52='<EOF>',<-1>,7:0]\n", null);
file.addLexerTest(input, "LargeLexer", "L", "KW400",
"[@0,0:4='KW400',<402>,1:0]\n" +
"[@1,5:4='<EOF>',<-1>,1:5]\n", null);
/**
* This is a regression test for antlr/antlr4#687 "Empty zero-length tokens
* cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match
* zero-length tokens" */
file.addLexerTest(input, "ZeroLengthToken", "L", "'xxx'",
"[@0,0:4=''xxx'',<1>,1:0]\n" +
"[@1,5:4='<EOF>',<-1>,1:5]\n", null);
return file;
}
private JUnitTestFile buildCompositeParsers() throws Exception {
JUnitTestFile file = new JUnitTestFile("CompositeParsers");
file.importErrorQueue = true;

View File

@ -1,8 +0,0 @@
lexer grammar <grammarName>;
I : ({<PlusText("stuff fail: "):writeln()>} 'a'
| {<PlusText("stuff0: "):writeln()>}
'a' {<PlusText("stuff1: "):writeln()>}
'b' {<PlusText("stuff2: "):writeln()>})
{<Text():writeln()>} ;
WS : (' '|'\n') -> skip ;
J : .;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
I : '0'..'9'+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D] -> skip ;

View File

@ -1,4 +0,0 @@
lexer grammar <grammarName>;
I : (~[ab \\n]|'a') {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
I : ~[ab \n] ~[ \ncd]* {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
I : '0'..'9'+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -1,4 +0,0 @@
lexer grammar <grammarName>;
I : [0-9]+ {<writeln("\"I\"")>} ;
ID : [a-zA-Z] [a-zA-Z0-9]* {<writeln("\"ID\"")>} ;
WS : [ \n\u0009\r]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
DASHBRACK : [\\-\]]+ {<writeln("\"DASHBRACK\"")>} ;
WS : [ \u]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
I : [0-]+ {<writeln("\"I\"")>} ;
WS : [ \n\u000D]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
I : [0-9]+ {<writeln("\"I\"")>} ;
WS : [ \u]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
A : ["a-z]+ {<writeln("\"A\"")>} ;
WS : [ \n\t]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
A : ["\\ab]+ {<writeln("\"A\"")>} ;
WS : [ \n\t]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
A : [z-a9]+ {<writeln("\"A\"")>} ;
WS : [ \u]+ -> skip ;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
DONE : EOF ;
A : 'a';

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT*;
WS : (' '|'\t')+;

View File

@ -1,4 +0,0 @@
lexer grammar <grammarName>;
I : ('a' | 'ab') {<Text():writeln()>} ;
WS : (' '|'\n') -> skip ;
J : .;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT?;
WS : (' '|'\t')+;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
CMT : ('//' .*? '\n')+;
WS : (' '|'\t')+;

View File

@ -1,8 +0,0 @@
lexer grammar <grammarName>;
HexLiteral : '0' ('x'|'X') HexDigit+ ;
DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;
FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;
DOT : '.' ;
ID : 'a'..'z'+ ;
fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;
WS : (' '|'\n')+;

View File

@ -1,4 +0,0 @@
lexer grammar <grammarName>;
KEND : 'end' ; // has priority
ID : 'a'..'z'+ ;
WS : (' '|'\n')+;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT*?;
WS : (' '|'\t')+;

View File

@ -1,4 +0,0 @@
lexer grammar <grammarName>;
I : .*? ('a' | 'ab') {<Text():writeln()>} ;
WS : (' '|'\n') -> skip ;
J : . {<Text():writeln()>};

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
CMT : '//' .*? '\n' CMT??;
WS : (' '|'\t')+;

View File

@ -1,3 +0,0 @@
lexer grammar <grammarName>;
CMT : ('//' .*? '\n')+?;
WS : (' '|'\t')+;

View File

@ -1,2 +0,0 @@
lexer grammar <grammarName>;
STRING : '"' ('""' | .)*? '"';

View File

@ -1,2 +0,0 @@
lexer grammar <grammarName>;
STRING : '"' ('""' | .)+? '"';

View File

@ -1,7 +0,0 @@
lexer grammar <grammarName>;
START_BLOCK: '-.-.-';
ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;
fragment LETTER: L_A|L_K;
fragment L_A: '.-';
fragment L_K: '-.-';
SEPARATOR: '!';

View File

@ -1,34 +0,0 @@
lexer grammar PositionAdjustingLexer;
@members {
<PositionAdjustingLexer()>
}
ASSIGN : '=' ;
PLUS_ASSIGN : '+=' ;
LCURLY: '{';
// 'tokens' followed by '{'
TOKENS : 'tokens' IGNORED '{';
// IDENTIFIER followed by '+=' or '='
LABEL
: IDENTIFIER IGNORED '+'? '='
;
IDENTIFIER
: [a-zA-Z_] [a-zA-Z0-9_]*
;
fragment
IGNORED
: [ \t\r\n]*
;
NEWLINE
: [\r\n]+ -> skip
;
WS
: [ \t]+ -> skip
;

View File

@ -1,2 +0,0 @@
lexer grammar <grammarName>;
QUOTE : '"' ; // make sure this compiles

View File

@ -1,4 +0,0 @@
lexer grammar <grammarName>;
A : '-' I ;
I : '0'..'9'+ ;
WS : (' '|'\n') -> skip ;

View File

@ -1,6 +0,0 @@
lexer grammar <grammarName>;
Backslash : '\\\\';
Slash : '/';
Vee : '\\\\/';
Wedge : '/\\\\';
WS : [ \t] -> skip;

View File

@ -1,9 +0,0 @@
lexer grammar <grammarName>;
BeginString
: '\'' -> more, pushMode(StringMode)
;
mode StringMode;
StringMode_X : 'x' -> more;
StringMode_Done : -> more, mode(EndStringMode);
mode EndStringMode;
EndString : '\'' -> popMode;

File diff suppressed because it is too large

View File

@ -29,9 +29,9 @@ public void test<test.name>() throws Exception {
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
}; separator="\n">
<test.Grammar:{grammar |
String grammar = <writeStringLiteral(test.Grammar.(grammar))>;
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
<if(test.AfterGrammar)>
<test.AfterGrammar>
<endif>
@ -54,6 +54,12 @@ CompositeLexerTestMethod(test) ::= <<
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
buildStringLiteral(text, variable) ::= <<
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
String <variable> = <variable>Builder.toString();
>>
writeStringLiteral(text) ::= <%
<if(isEmpty.(text))>
""
@ -85,9 +91,9 @@ public void test<test.name>() throws Exception {
writeFile(tmpdir, "<grammar.grammarName>.g4", slave_<grammar.grammarName>);
<endif>
};separator="\n", wrap, anchor>!>
<test.Grammar:{grammar |
String grammar = <writeStringLiteral(test.Grammar.(grammar))>;
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
<if(test.AfterGrammar)>
<test.AfterGrammar>
<endif>
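The new buildStringLiteral(text, variable) template replaces the old single-literal String grammar = ...; assignment: the grammar text is now appended line by line into a StringBuilder pre-sized via the strlen dictionary, presumably so very large grammars such as LargeLexer no longer need to fit in one Java string constant. Roughly (a hand-written sketch, not captured generator output), the expansion for the two-line QuoteTranslation grammar would be:

public class BuildStringLiteralExpansionSketch {
    public static void main(String[] args) {
        // Approximate expansion of <buildStringLiteral(test.Grammar.("L"), "grammar")>;
        // exact escaping is governed by the "escape" and "lines" dictionaries, so the
        // literals here are indicative only.
        StringBuilder grammarBuilder = new StringBuilder(31); // 31 = <strlen.(text)>
        grammarBuilder.append("lexer grammar L;\n");
        grammarBuilder.append("QUOTE : '\"' ;\n");
        String grammar = grammarBuilder.toString();
        System.out.println(grammar);
    }
}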