rm gunit; hmm...my commits are wacked on this/last one. sorry!

This commit is contained in:
Terence Parr 2012-07-01 09:44:43 -07:00
parent f80166b39c
commit 6053ac5269
15 changed files with 433 additions and 825 deletions

View File

@ -1,80 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.antlr</groupId>
<artifactId>antlr4-gunit</artifactId>
<version>4.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>antlr4-gunit</name>
<url>http://www.antlr.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.10</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr4-runtime</artifactId>
<version>4.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr-runtime</artifactId>
<version>3.4.1-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
<artifactId>ST4</artifactId>
<version>4.0.4</version>
</dependency>
</dependencies>
<build>
<sourceDirectory>src</sourceDirectory>
<resources>
<resource>
<directory>resources</directory>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.antlr</groupId>
<artifactId>antlr3-maven-plugin</artifactId>
<version>3.4</version>
<configuration>
<sourceDirectory>src</sourceDirectory>
<verbose>true</verbose>
</configuration>
<executions>
<execution>
<goals>
<goal>antlr</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.3.2</version>
<configuration>
<source>1.6</source>
<target>1.6</target>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -1,43 +0,0 @@
/** StringTemplate v3 group used by jUnitGen.g to render a gUnit test
 *  script as a JUnit 4 test class; Gen.process() drives the pipeline.
 */
group jUnit;
/* Whole-file template: user @header action first, then imports, then a
 * class extending gUnitBase whose @Before wires the lexer/parser (and
 * optional tree adaptor) class names from the gunit options.
 */
jUnitClass(className, header, options, suites) ::= <<
<header>
import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;
import org.junit.Test;
import org.junit.Before;
import static org.junit.Assert.*;
public class <className> extends org.antlr.v4.gunit.gUnitBase {
@Before public void setup() {
lexerClassName = "<options.lexer>";
parserClassName = "<options.parser>";
<if(options.adaptor)>
adaptorClassName = "<options.adaptor>";
<endif>
}
<suites>
}
>>
/* Emits the user's @header action text (e.g., a package declaration). */
header(action) ::= "<action>"
testSuite(name,cases) ::= <<
<cases:{c | <c>}; separator="\n\n"> <! use {...} iterator to get <i> !>
>>
/* Placeholder: plain "input OK" tests are not generated yet. */
parserRuleTestSuccess(input,expecting) ::= <<
>>
/* One @Test method per tree test.  NOTE(review): "name" and "i" are not
 * declared parameters here; they appear to resolve via ST dynamic
 * scoping from the enclosing testSuite iteration -- confirm before
 * refactoring.
 */
parserRuleTestAST(ruleName,scriptLine,input,expecting) ::= <<
@Test public void test_<name><i>() throws Exception {
// gunit test on line <scriptLine>
RuleReturnScope rstruct = (RuleReturnScope)execParser("<ruleName>", "<input>", <scriptLine>);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "<expecting>";
assertEquals("testing rule <ruleName>", expecting, actual);
}
>>
/* Wraps a literal string value for insertion into generated code. */
string(s) ::= "<s>"

View File

@ -1,46 +0,0 @@
// Tree grammar that walks the AST built by gUnit.g and verifies it has
// the expected shape; driven by Interp as a structural sanity check.
// Produces no output -- recognition failure is the error signal.
tree grammar ASTVerifier;
options {
ASTLabelType=CommonTree;
tokenVocab = gUnit;
}
@header {
package org.antlr.v4.gunit;
}
// Root: ^('gunit' name doc? (options|header)* suite+)
gUnitDef
	: ^('gunit' ID DOC_COMMENT? (optionsSpec|header)* testsuite+)
	;
optionsSpec
	: ^(OPTIONS option+)
	;
option
	: ^('=' ID ID)
	| ^('=' ID STRING)
	;
header : ^('@header' ACTION);
// First alt: "treeRule walks parserRule" suite; second: plain rule suite.
testsuite
	: ^(SUITE ID ID DOC_COMMENT? testcase+)
	| ^(SUITE ID DOC_COMMENT? testcase+)
	;
// One alternative per test flavor created by gUnit.g's testcase rule.
testcase
	: ^(TEST_OK DOC_COMMENT? input)
	| ^(TEST_FAIL DOC_COMMENT? input)
	| ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
	| ^(TEST_STDOUT DOC_COMMENT? input STRING)
	| ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
	| ^(TEST_TREE DOC_COMMENT? input TREE)
	| ^(TEST_ACTION DOC_COMMENT? input ACTION)
	;
// Test input: a quoted string, a <<...>> multiline string, or a file path.
input
	: STRING
	| ML_STRING
	| FILENAME
	;

View File

@ -1,155 +0,0 @@
package org.antlr.v4.gunit;
import org.antlr.runtime.ANTLRFileStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RuleReturnScope;
import org.antlr.runtime.tree.BufferedTreeNodeStream;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeNodeStream;
import org.antlr.stringtemplate.AutoIndentWriter;
import org.antlr.stringtemplate.StringTemplate;
import org.antlr.stringtemplate.StringTemplateGroup;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.List;
/** Command-line tool that translates a gUnit test script into a JUnit 4
 *  Java test class.
 *
 *  Usage: org.antlr.v4.gunit.Gen [-o output-dir] gunit-file
 *
 *  Pipeline: gUnitLexer/gUnitParser build an AST, Semantics (a tree
 *  filter) collects the script name and options, then jUnitGen renders
 *  the AST through the jUnit.stg StringTemplate group.
 */
public class Gen {
	// TODO: don't hardcode
	// NOTE(review): absolute path on the original author's machine; this
	// must become a classpath resource before the tool can run elsewhere.
	public static final String TEMPLATE_FILE =
		"/Users/parrt/antlr/code/antlr4/gunit/resources/org/antlr/v4/gunit/jUnit.stg";

	/** Entry point; parses optional "-o output-dir" then delegates to process(). */
	public static void main(String[] args) throws Exception {
		if ( args.length==0 ) {help(); System.exit(0);}
		String outputDirName = ".";
		String fileName = args[0];
		if ( args[0].equals("-o") ) {
			if ( args.length<3 ) {
				help();
				System.exit(0);
			}
			outputDirName = args[1];
			fileName = args[2];
		}
		new Gen().process(fileName, outputDirName);
	}

	/** Translate one gunit script to outputDirName/&lt;name&gt;.java, where
	 *  name is the script's "gunit name;" identifier (collected by Semantics).
	 *  @throws Exception on I/O or recognition errors
	 */
	public void process(String fileName, String outputDirName) throws Exception {
		// PARSE SCRIPT into an AST
		ANTLRFileStream fs = new ANTLRFileStream(fileName);
		gUnitLexer lexer = new gUnitLexer(fs);
		CommonTokenStream tokens = new CommonTokenStream(lexer);
		gUnitParser parser = new gUnitParser(tokens);
		RuleReturnScope r = parser.gUnitDef();
		CommonTree scriptAST = (CommonTree)r.getTree();
		System.out.println(scriptAST.toStringTree());
		// ANALYZE: collect script name and option name/value pairs
		CommonTreeNodeStream nodes = new CommonTreeNodeStream(r.getTree());
		Semantics sem = new Semantics(nodes);
		sem.downup(scriptAST);
		System.out.println("options="+sem.options);
		// GENERATE CODE: load templates, walk AST again with jUnitGen
		StringTemplateGroup templates;
		FileReader fr = new FileReader(TEMPLATE_FILE);
		try {
			templates = new StringTemplateGroup(fr);
		}
		finally {
			fr.close(); // close the reader even if template loading fails
		}
		BufferedTreeNodeStream bnodes = new BufferedTreeNodeStream(scriptAST);
		jUnitGen gen = new jUnitGen(bnodes);
		gen.setTemplateLib(templates);
		RuleReturnScope r2 = gen.gUnitDef();
		StringTemplate st = (StringTemplate)r2.getTemplate();
		st.setAttribute("options", sem.options);
		BufferedWriter bw = new BufferedWriter(
			new FileWriter(outputDirName+"/"+sem.name+".java"));
		try {
			st.write(new AutoIndentWriter(bw));
		}
		finally {
			bw.close(); // flush/close the generated file even on write error
		}
	}

	/** Borrowed from Leon Su in gunit v3.
	 *  Escape a string so it can be embedded inside a Java double-quoted
	 *  string literal in generated code.
	 */
	public static String escapeForJava(String inputString) {
		// Gotta escape literal backslash before putting in specials that use escape.
		inputString = inputString.replace("\\", "\\\\");
		// Then double quotes need escaping (singles are OK of course).
		inputString = inputString.replace("\"", "\\\"");
		// note: replace newline to String ".\n", replace tab to String ".\t"
		inputString = inputString.replace("\n", "\\n").replace("\t", "\\t").replace("\r", "\\r").replace("\b", "\\b").replace("\f", "\\f");
		return inputString;
	}

	/** Canonicalize whitespace in a tree spec like (A (B "x") C):
	 *  tokenize into parens, quoted strings (surrounding quotes dropped,
	 *  \" unescaped), and bare atoms, then rejoin with single spaces --
	 *  no space after '(' or before ')'.
	 */
	public static String normalizeTreeSpec(String t) {
		List<String> words = new ArrayList<String>();
		int i = 0;
		StringBuilder word = new StringBuilder();
		while ( i<t.length() ) {
			if ( t.charAt(i)=='(' || t.charAt(i)==')' ) {
				// parens are stand-alone words
				if ( word.length()>0 ) {
					words.add(word.toString());
					word.setLength(0);
				}
				words.add(String.valueOf(t.charAt(i)));
				i++;
				continue;
			}
			if ( Character.isWhitespace(t.charAt(i)) ) {
				// upon WS, save word
				if ( word.length()>0 ) {
					words.add(word.toString());
					word.setLength(0);
				}
				i++;
				continue;
			}
			// ... "x" or ...("x  -- a quote starting a string word
			if ( t.charAt(i)=='"' && (i-1)>=0 &&
				 (t.charAt(i-1)=='(' || Character.isWhitespace(t.charAt(i-1))) )
			{
				i++;
				while ( i<t.length() && t.charAt(i)!='"' ) {
					if ( t.charAt(i)=='\\' &&
						 (i+1)<t.length() && t.charAt(i+1)=='"' ) // handle \"
					{
						word.append('"');
						i+=2;
						continue;
					}
					word.append(t.charAt(i));
					i++;
				}
				i++; // skip final "
				words.add(word.toString());
				word.setLength(0);
				continue;
			}
			word.append(t.charAt(i));
			i++;
		}
		if ( word.length()>0 ) {
			words.add(word.toString());
		}
		//System.out.println("words="+words);
		StringBuilder buf = new StringBuilder();
		for (int j=0; j<words.size(); j++) {
			if ( j>0 && !words.get(j).equals(")") &&
				 !words.get(j-1).equals("(") ) {
				buf.append(' ');
			}
			buf.append(words.get(j));
		}
		return buf.toString();
	}

	/** Print command-line usage to stderr. */
	public static void help() {
		System.err.println("org.antlr.v4.gunit.Gen [-o output-dir] gunit-file");
	}
}

View File

@ -1,21 +0,0 @@
package org.antlr.v4.gunit;
import org.antlr.runtime.*;
import org.antlr.runtime.tree.BufferedTreeNodeStream;
import org.antlr.runtime.tree.Tree;
/** Parse a gUnit script and verify its AST structure.
 *
 *  Usage: java org.antlr.v4.gunit.Interp gunit-file
 *
 *  Prints the script's AST, then walks it with ASTVerifier so a
 *  malformed tree fails recognition.
 */
public class Interp {
	public static void main(String[] args) throws Exception {
		// Robustness fix: fail with a usage message instead of an
		// ArrayIndexOutOfBoundsException when no file is given.
		if ( args.length==0 ) {
			System.err.println("java org.antlr.v4.gunit.Interp gunit-file");
			System.exit(1);
		}
		String fileName = args[0];
		// PARSE SCRIPT into an AST
		ANTLRFileStream fs = new ANTLRFileStream(fileName);
		gUnitLexer lexer = new gUnitLexer(fs);
		CommonTokenStream tokens = new CommonTokenStream(lexer);
		gUnitParser parser = new gUnitParser(tokens);
		RuleReturnScope r = parser.gUnitDef();
		System.out.println(((Tree)r.getTree()).toStringTree());
		// WALK AST to verify its structure
		BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(r.getTree());
		ASTVerifier verifier = new ASTVerifier(nodes);
		verifier.gUnitDef();
	}
}

View File

@ -1,36 +0,0 @@
// Filter pass (filter=true) over a parsed gUnit script's AST: records
// the script name and the option name/value pairs for Gen to use
// during code generation.
tree grammar Semantics;
options {
filter=true;
ASTLabelType=CommonTree;
tokenVocab = gUnit;
}
@header {
package org.antlr.v4.gunit;
import java.util.Map;
import java.util.HashMap;
}
@members {
// Results of the pass, read by Gen.process() after downup().
public String name;
public Map<String,String> options = new HashMap<String,String>();
}
// Patterns matched on the way down the tree.
topdown
	: optionsSpec
	| gUnitDef
	;
// Capture the script name from ^('gunit' ID ...).
gUnitDef
	: ^('gunit' ID .*) {name = $ID.text;}
	;
optionsSpec
	: ^(OPTIONS option+)
	;
// Record each name=value pair (value may be an ID or a STRING).
option
	: ^('=' o=ID v=ID) {options.put($o.text, $v.text);}
	| ^('=' o=ID v=STRING) {options.put($o.text, $v.text);}
	;

View File

@ -1,155 +0,0 @@
// Combined lexer/parser grammar for gUnit test scripts.  Builds an AST
// (output=AST) that Semantics.g analyzes and jUnitGen.g translates to
// JUnit code; ASTVerifier.g checks the resulting tree shape.
grammar gUnit;
options {
output=AST;
ASTLabelType=CommonTree;
}
// Imaginary tokens used as AST root nodes for suites and test flavors.
tokens { SUITE; TEST_OK; TEST_FAIL; TEST_RETVAL; TEST_STDOUT; TEST_TREE; TEST_ACTION; }
@header {
package org.antlr.v4.gunit;
}
@lexer::header {
package org.antlr.v4.gunit;
}
// ----- parser rules -----
gUnitDef
	: DOC_COMMENT? 'gunit' ID ';' (optionsSpec|header)* testsuite+
		-> ^('gunit' ID DOC_COMMENT? optionsSpec? header? testsuite+)
	;
optionsSpec
	: OPTIONS (option ';')+ '}' -> ^(OPTIONS option+)
	;
option
	: ID '=' optionValue -> ^('=' ID optionValue)
	;
optionValue
	: ID
	| STRING
	;
header : '@header' ACTION -> ^('@header' ACTION);
// Either "treeRule walks parserRule:" or a plain "rule:" suite header.
testsuite
	: DOC_COMMENT? treeRule=ID 'walks' parserRule=ID ':' testcase+
		-> ^(SUITE $treeRule $parserRule DOC_COMMENT? testcase+)
	| DOC_COMMENT? ID ':' testcase+ -> ^(SUITE ID DOC_COMMENT? testcase+)
	;
// One alternative per test flavor; '->' tests split on the payload type.
testcase
	: DOC_COMMENT? input 'OK' -> ^(TEST_OK DOC_COMMENT? input)
	| DOC_COMMENT? input 'FAIL' -> ^(TEST_FAIL DOC_COMMENT? input)
	| DOC_COMMENT? input 'returns' RETVAL -> ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
	| DOC_COMMENT? input '->' STRING -> ^(TEST_STDOUT DOC_COMMENT? input STRING)
	| DOC_COMMENT? input '->' ML_STRING -> ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
	| DOC_COMMENT? input '->' TREE -> ^(TEST_TREE DOC_COMMENT? input TREE)
	| DOC_COMMENT? input '->' ACTION -> ^(TEST_ACTION DOC_COMMENT? input ACTION)
	;
input
	: STRING
	| ML_STRING
	| FILENAME
	;
// ----- lexer rules -----
// Several rules strip their delimiters via setText() so the AST holds
// only the payload text.
ACTION
	: '{' ('\\}'|'\\' ~'}'|~('\\'|'}'))* '}' {setText(getText().substring(1, getText().length()-1));}
	;
RETVAL
	: NESTED_RETVAL {setText(getText().substring(1, getText().length()-1));}
	;
fragment
NESTED_RETVAL :
	'['
	( options {greedy=false;}
	: NESTED_RETVAL
	| .
	)*
	']'
	;
TREE : NESTED_AST (' '? NESTED_AST)*;
fragment
NESTED_AST
	: '('
	( NESTED_AST
	| STRING_
	| ~('('|')'|'"')
	)*
	')'
	;
// OPTIONS eats 'options {' so option values need no keyword lookahead.
OPTIONS : 'options' WS* '{' ;
ID : ID_ ('.' ID_)* ;
fragment
ID_ : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
	;
WS : ( ' '
	| '\t'
	| '\r'
	| '\n'
	) {$channel=HIDDEN;}
	;
SL_COMMENT
	: '//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;}
	;
// Doc comments stay on the default channel so the parser can attach them.
DOC_COMMENT
	: '/**' (options {greedy=false;}:.)* '*/'
	;
ML_COMMENT
	: '/*' ~'*' (options {greedy=false;}:.)* '*/' {$channel=HIDDEN;}
	;
STRING : STRING_ {setText(getText().substring(1, getText().length()-1));} ;
fragment
STRING_
	: '"' ('\\"'|'\\' ~'"'|~('\\'|'"'))+ '"'
	;
ML_STRING
	: '<<' .* '>>' {setText(getText().substring(2, getText().length()-2));}
	;
FILENAME
	: '/' ID ('/' ID)*
	| ID ('/' ID)+
	;
/*
fragment
ESC : '\\'
	( 'n'
	| 'r'
	| 't'
	| 'b'
	| 'f'
	| '"'
	| '\''
	| '\\'
	| '>'
	| 'u' XDIGIT XDIGIT XDIGIT XDIGIT
	| . // unknown, leave as it is
	)
	;
*/
fragment
XDIGIT :
	'0' .. '9'
	| 'a' .. 'f'
	| 'A' .. 'F'
	;

View File

@ -1,49 +0,0 @@
package org.antlr.v4.gunit;
import org.antlr.runtime.*;
import org.antlr.runtime.tree.TreeAdaptor;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
/** Runtime base class for the JUnit classes generated from gUnit
 *  scripts (see jUnit.stg).  The generated subclass's @Before method
 *  fills in the class-name fields below; each generated test then calls
 *  execParser() to invoke a parser rule by name via reflection.
 */
public class gUnitBase {
	// Fully qualified class names, assigned by the generated setup().
	public String lexerClassName;
	public String parserClassName;
	public String adaptorClassName; // optional; null means no custom adaptor

	/** Reflectively construct lexer+parser over input and invoke the
	 *  no-argument parser method named ruleName, returning its result
	 *  (typically a RuleReturnScope).  scriptLine is the line of the test
	 *  in the gunit script; it is set on the char stream so error
	 *  messages line up with the script, not the embedded input.
	 *  @throws Exception any reflection or recognition failure
	 */
	public Object execParser(
		String ruleName,
		String input,
		int scriptLine)
		throws Exception
	{
		ANTLRStringStream is = new ANTLRStringStream(input);
		// lexer takes a CharStream; find and call that constructor
		Class lexerClass = Class.forName(lexerClassName);
		Class[] lexArgTypes = new Class[]{CharStream.class};
		Constructor lexConstructor = lexerClass.getConstructor(lexArgTypes);
		Object[] lexArgs = new Object[]{is};
		TokenSource lexer = (TokenSource)lexConstructor.newInstance(lexArgs);
		is.setLine(scriptLine);
		CommonTokenStream tokens = new CommonTokenStream(lexer);
		// parser takes a TokenStream
		Class parserClass = Class.forName(parserClassName);
		Class[] parArgTypes = new Class[]{TokenStream.class};
		Constructor parConstructor = parserClass.getConstructor(parArgTypes);
		Object[] parArgs = new Object[]{tokens};
		Parser parser = (Parser)parConstructor.newInstance(parArgs);
		// set up customized tree adaptor if necessary
		if ( adaptorClassName!=null ) {
			parArgTypes = new Class[]{TreeAdaptor.class};
			Method m = parserClass.getMethod("setTreeAdaptor", parArgTypes);
			Class adaptorClass = Class.forName(adaptorClassName);
			m.invoke(parser, adaptorClass.newInstance());
		}
		Method ruleMethod = parserClass.getMethod(ruleName);
		// INVOKE RULE
		return ruleMethod.invoke(parser);
	}
}

View File

@ -1,53 +0,0 @@
tree grammar jUnitGen;
options {
output=template;
ASTLabelType=CommonTree;
tokenVocab = gUnit;
}
@header {
package org.antlr.v4.gunit;
}
gUnitDef
: ^('gunit' ID DOC_COMMENT? (optionsSpec|header)* suites+=testsuite+)
-> jUnitClass(className={$ID.text}, header={$header.st}, suites={$suites})
;
optionsSpec
: ^(OPTIONS option+)
;
option
: ^('=' ID ID)
| ^('=' ID STRING)
;
header : ^('@header' ACTION) -> header(action={$ACTION.text});
testsuite
: ^(SUITE rule=ID ID DOC_COMMENT? cases+=testcase[$rule.text]+)
| ^(SUITE rule=ID DOC_COMMENT? cases+=testcase[$rule.text]+)
-> testSuite(name={$rule.text}, cases={$cases})
;
testcase[String ruleName]
: ^(TEST_OK DOC_COMMENT? input)
| ^(TEST_FAIL DOC_COMMENT? input)
| ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
| ^(TEST_STDOUT DOC_COMMENT? input STRING)
| ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
| ^(TEST_TREE DOC_COMMENT? input TREE)
-> parserRuleTestAST(ruleName={$ruleName},
input={$input.st},
expecting={Gen.normalizeTreeSpec($TREE.text)},
scriptLine={$input.start.getLine()})
| ^(TEST_ACTION DOC_COMMENT? input ACTION)
;
input
: STRING -> string(s={Gen.escapeForJava($STRING.text)})
| ML_STRING -> string(s={Gen.escapeForJava($ML_STRING.text)})
| FILENAME
;

View File

@ -1,6 +1,9 @@
/** Test ANTLRParser's AST construction. Translate to junit tests with: /** Test ANTLRParser's AST construction. Translate to junit tests with:
* *
* $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit * $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit
NO LONGER using gunit!!!
*/ */
gunit TestASTStructure; gunit TestASTStructure;

View File

@ -1,17 +1,65 @@
package org.antlr.v4.test; package org.antlr.v4.test;
import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.Parser;
import org.antlr.runtime.RuleReturnScope; import org.antlr.runtime.RuleReturnScope;
import org.antlr.runtime.TokenSource;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.Tree; import org.antlr.runtime.tree.Tree;
import org.antlr.runtime.tree.TreeAdaptor;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
public class TestASTStructure extends org.antlr.v4.gunit.gUnitBase { // NO LONGER using gunit!!!
@Before public void setup() {
lexerClassName = "org.antlr.v4.parse.ANTLRLexer"; public class TestASTStructure {
parserClassName = "org.antlr.v4.parse.ANTLRParser"; String lexerClassName = "org.antlr.v4.parse.ANTLRLexer";
adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor"; } String parserClassName = "org.antlr.v4.parse.ANTLRParser";
String adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor";
public Object execParser(
String ruleName,
String input,
int scriptLine)
throws Exception
{
ANTLRStringStream is = new ANTLRStringStream(input);
Class lexerClass = Class.forName(lexerClassName);
Class[] lexArgTypes = new Class[]{CharStream.class};
Constructor lexConstructor = lexerClass.getConstructor(lexArgTypes);
Object[] lexArgs = new Object[]{is};
TokenSource lexer = (TokenSource)lexConstructor.newInstance(lexArgs);
is.setLine(scriptLine);
CommonTokenStream tokens = new CommonTokenStream(lexer);
Class parserClass = Class.forName(parserClassName);
Class[] parArgTypes = new Class[]{TokenStream.class};
Constructor parConstructor = parserClass.getConstructor(parArgTypes);
Object[] parArgs = new Object[]{tokens};
Parser parser = (Parser)parConstructor.newInstance(parArgs);
// set up customized tree adaptor if necessary
if ( adaptorClassName!=null ) {
parArgTypes = new Class[]{TreeAdaptor.class};
Method m = parserClass.getMethod("setTreeAdaptor", parArgTypes);
Class adaptorClass = Class.forName(adaptorClassName);
m.invoke(parser, adaptorClass.newInstance());
}
Method ruleMethod = parserClass.getMethod(ruleName);
// INVOKE RULE
return ruleMethod.invoke(parser);
}
@Test public void test_grammarSpec1() throws Exception { @Test public void test_grammarSpec1() throws Exception {
// gunit test on line 15 // gunit test on line 15
RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15); RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15);

View File

@ -0,0 +1,186 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.BufferedTokenStream;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenSource;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;
/** Unit tests for BufferedTokenStream lookahead, buffer filling, and
 *  lookback.  Subclasses (e.g., TestCommonTokenStream) override
 *  createTokenStream() to run the same suite against another
 *  TokenStream implementation.
 */
public class TestBufferedTokenStream extends BaseTest {

	/** Factory hook so subclasses can substitute the stream under test. */
	protected TokenStream createTokenStream(TokenSource src) {
		return new BufferedTokenStream<Token>(src);
	}

	/** Build the stream under test over input, lexed with a small
	 *  expression lexer grammar.  Extracted to remove the grammar and
	 *  lexer setup that was duplicated verbatim in every test method.
	 */
	private TokenStream createTokenStreamForInput(String input) throws Exception {
		LexerGrammar g = new LexerGrammar(
			"lexer grammar t;\n"+
			"ID : 'a'..'z'+;\n" +
			"INT : '0'..'9'+;\n" +
			"SEMI : ';';\n" +
			"ASSIGN : '=';\n" +
			"PLUS : '+';\n" +
			"MULT : '*';\n" +
			"WS : ' '+;\n");
		CharStream in = new ANTLRInputStream(input);
		LexerInterpreter lexEngine = new LexerInterpreter(g);
		lexEngine.setInput(in);
		return createTokenStream(lexEngine);
	}

	@Test public void testFirstToken() throws Exception {
		// Tokens: 012345678901234567
		// Input:  x = 3 * 0 + 2 * 0;
		TokenStream tokens = createTokenStreamForInput("x = 3 * 0 + 2 * 0;");
		String result = tokens.LT(1).getText();
		String expecting = "x";
		assertEquals(expecting, result);
	}

	@Test public void test2ndToken() throws Exception {
		TokenStream tokens = createTokenStreamForInput("x = 3 * 0 + 2 * 0;");
		// raw 2nd token is the WS between 'x' and '='
		String result = tokens.LT(2).getText();
		String expecting = " ";
		assertEquals(expecting, result);
	}

	@Test public void testCompleteBuffer() throws Exception {
		TokenStream tokens = createTokenStreamForInput("x = 3 * 0 + 2 * 0;");
		// force the buffer to fill by walking LT(i) out to EOF
		int i = 1;
		Token t = tokens.LT(i);
		while ( t.getType()!=Token.EOF ) {
			i++;
			t = tokens.LT(i);
		}
		tokens.LT(i++); // push it past end
		tokens.LT(i++);
		String result = tokens.getText();
		String expecting = "x = 3 * 0 + 2 * 0;";
		assertEquals(expecting, result);
	}

	@Test public void testCompleteBufferAfterConsuming() throws Exception {
		TokenStream tokens = createTokenStreamForInput("x = 3 * 0 + 2 * 0;");
		// consume to EOF, then keep poking past the end
		Token t = tokens.LT(1);
		while ( t.getType()!=Token.EOF ) {
			tokens.consume();
			t = tokens.LT(1);
		}
		tokens.consume();
		tokens.LT(1); // push it past end
		tokens.consume();
		tokens.LT(1);
		String result = tokens.getText();
		String expecting = "x = 3 * 0 + 2 * 0;";
		assertEquals(expecting, result);
	}

	@Test public void testLookback() throws Exception {
		TokenStream tokens = createTokenStreamForInput("x = 3 * 0 + 2 * 0;");
		tokens.consume(); // get x into buffer
		Token t = tokens.LT(-1);
		assertEquals("x", t.getText());
		tokens.consume();
		tokens.consume(); // consume '='
		t = tokens.LT(-3);
		assertEquals("x", t.getText());
		t = tokens.LT(-2);
		assertEquals(" ", t.getText());
		t = tokens.LT(-1);
		assertEquals("=", t.getText());
	}
}

View File

@ -1,36 +1,5 @@
/*
[The "BSD license"]
Copyright (c) 2011 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test; package org.antlr.v4.test;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.BufferedTokenStream;
import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CommonToken; import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.CommonTokenStream; import org.antlr.v4.runtime.CommonTokenStream;
@ -38,148 +7,15 @@ import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenFactory; import org.antlr.v4.runtime.TokenFactory;
import org.antlr.v4.runtime.TokenSource; import org.antlr.v4.runtime.TokenSource;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.WritableToken; import org.antlr.v4.runtime.WritableToken;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test; import org.junit.Test;
public class TestCommonTokenStream extends BaseTest { public class TestCommonTokenStream extends TestBufferedTokenStream {
@Test public void testFirstToken() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
String result = tokens.LT(1).getText(); @Override
String expecting = "x"; protected TokenStream createTokenStream(TokenSource src) {
assertEquals(expecting, result); return new CommonTokenStream(src);
}
@Test public void test2ndToken() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
String result = tokens.LT(2).getText();
String expecting = " ";
assertEquals(expecting, result);
}
@Test public void testCompleteBuffer() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
int i = 1;
Token t = tokens.LT(i);
while ( t.getType()!=Token.EOF ) {
i++;
t = tokens.LT(i);
}
tokens.LT(i++); // push it past end
tokens.LT(i++);
String result = tokens.getText();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);
}
@Test public void testCompleteBufferAfterConsuming() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
Token t = tokens.LT(1);
while ( t.getType()!=Token.EOF ) {
tokens.consume();
t = tokens.LT(1);
}
tokens.consume();
tokens.LT(1); // push it past end
tokens.consume();
tokens.LT(1);
String result = tokens.getText();
String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result);
}
@Test public void testLookback() throws Exception {
LexerGrammar g = new LexerGrammar(
"lexer grammar t;\n"+
"ID : 'a'..'z'+;\n" +
"INT : '0'..'9'+;\n" +
"SEMI : ';';\n" +
"ASSIGN : '=';\n" +
"PLUS : '+';\n" +
"MULT : '*';\n" +
"WS : ' '+;\n");
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
tokens.consume(); // get x into buffer
Token t = tokens.LT(-1);
assertEquals("x", t.getText());
tokens.consume();
tokens.consume(); // consume '='
t = tokens.LT(-3);
assertEquals("x", t.getText());
t = tokens.LT(-2);
assertEquals(" ", t.getText());
t = tokens.LT(-1);
assertEquals("=", t.getText());
} }
@Test public void testOffChannel() throws Exception { @Test public void testOffChannel() throws Exception {

View File

@ -92,7 +92,7 @@ public class TestUnbufferedCharStream extends BaseTest {
assertEquals('2', input.LA(3)); assertEquals('2', input.LA(3));
assertEquals('3', input.LA(4)); assertEquals('3', input.LA(4));
assertEquals('4', input.LA(5)); assertEquals('4', input.LA(5));
assertEquals("4", input.getBuffer()); // shouldn't include x assertEquals("01234", input.getBuffer());
assertEquals(CharStream.EOF, input.LA(6)); assertEquals(CharStream.EOF, input.LA(6));
} }
@ -145,6 +145,19 @@ public class TestUnbufferedCharStream extends BaseTest {
assertEquals("xyz\uFFFF", input.getBuffer()); assertEquals("xyz\uFFFF", input.getBuffer());
} }
@Test public void test1MarkWithConsumesInSequence() throws Exception {
UnbufferedCharStream input = new UnbufferedCharStream(
new StringReader("xyz")
);
int m = input.mark();
input.consume(); // x, moves to y
input.consume(); // y
input.consume(); // z, moves to EOF
assertEquals(CharStream.EOF, input.LA(1));
input.release(m);
assertEquals("xyz\uFFFF", input.getBuffer());
}
@Test public void test2Mark() throws Exception { @Test public void test2Mark() throws Exception {
UnbufferedCharStream input = new UnbufferedCharStream( UnbufferedCharStream input = new UnbufferedCharStream(
new StringReader("xyz"), new StringReader("xyz"),
@ -158,7 +171,7 @@ public class TestUnbufferedCharStream extends BaseTest {
input.consume(); input.consume();
int m2 = input.mark(); int m2 = input.mark();
assertEquals(1, m2); // 2nd marker dropped at buffer index 1 assertEquals(1, m2); // 2nd marker dropped at buffer index 1
assertEquals("y", input.getBuffer()); assertEquals("yz", input.getBuffer());
assertEquals('z', input.LA(1)); // forces load assertEquals('z', input.LA(1)); // forces load
assertEquals("yz", input.getBuffer()); assertEquals("yz", input.getBuffer());
input.release(m2); // noop since not earliest in buf input.release(m2); // noop since not earliest in buf

View File

@ -0,0 +1,160 @@
package org.antlr.v4.test;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.UnbufferedTokenStream;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;
import java.io.StringReader;
/**
 * Tests for {@link UnbufferedTokenStream}: lookahead, minimal buffering during
 * consume, and mark/release semantics that pin tokens in the buffer.
 */
public class TestUnbufferedTokenStream extends BaseTest {
	// Shared lexer grammar: ids, ints, and single-char operators/punctuation.
	private static final String LEXER_GRAMMAR =
		"lexer grammar t;\n"+
		"ID : 'a'..'z'+;\n" +
		"INT : '0'..'9'+;\n" +
		"SEMI : ';';\n" +
		"ASSIGN : '=';\n" +
		"PLUS : '+';\n" +
		"MULT : '*';\n" +
		"WS : ' '+;\n";

	/**
	 * Builds an unbuffered token stream over {@code text} using the shared
	 * lexer grammar, driven by a grammar interpreter (no generated lexer).
	 */
	private UnbufferedTokenStream<Token> createTokenStream(String text) throws Exception {
		LexerGrammar g = new LexerGrammar(LEXER_GRAMMAR);
		CharStream chars = new ANTLRInputStream(new StringReader(text));
		LexerInterpreter lexEngine = new LexerInterpreter(g);
		lexEngine.setInput(chars);
		return new UnbufferedTokenStream<Token>(lexEngine);
	}

	@Test public void testLookahead() throws Exception {
		// Input: x = 302;  LT(k) walks ahead without consuming.
		TokenStream tokens = createTokenStream("x = 302;");
		assertEquals("x", tokens.LT(1).getText());
		assertEquals(" ", tokens.LT(2).getText());
		assertEquals("=", tokens.LT(3).getText());
		assertEquals(" ", tokens.LT(4).getText());
		assertEquals("302", tokens.LT(5).getText());
		assertEquals(";", tokens.LT(6).getText());
	}

	@Test public void testNoBuffering() throws Exception {
		// With no outstanding marks, each consume() drops the previous token:
		// the internal buffer should always hold exactly the current token.
		UnbufferedTokenStream<Token> tokens = createTokenStream("x = 302;");
		assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
		assertEquals("x", tokens.LT(1).getText());
		assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
		tokens.consume();
		assertEquals(" ", tokens.LT(1).getText());
		assertEquals("[[@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString());
		tokens.consume();
		assertEquals("=", tokens.LT(1).getText());
		assertEquals("[[@2,2:2='=',<4>,1:2]]", tokens.getBuffer().toString());
		tokens.consume();
		assertEquals(" ", tokens.LT(1).getText());
		assertEquals("[[@3,3:3=' ',<7>,1:3]]", tokens.getBuffer().toString());
		tokens.consume();
		assertEquals("302", tokens.LT(1).getText());
		assertEquals("[[@4,4:6='302',<2>,1:4]]", tokens.getBuffer().toString());
		tokens.consume();
		assertEquals(";", tokens.LT(1).getText());
		assertEquals("[[@5,7:7=';',<3>,1:7]]", tokens.getBuffer().toString());
	}

	@Test public void testMarkStart() throws Exception {
		// A mark taken before any consume pins every token (through EOF)
		// in the buffer while we consume the whole input.
		UnbufferedTokenStream<Token> tokens = createTokenStream("x = 302;");
		int m = tokens.mark(); // never released; buffer must keep growing
		assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
		assertEquals("x", tokens.LT(1).getText());
		tokens.consume(); // consume x
		assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString());
		tokens.consume(); // ' '
		tokens.consume(); // =
		tokens.consume(); // ' '
		tokens.consume(); // 302
		tokens.consume(); // ;
		assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]," +
					 " [@2,2:2='=',<4>,1:2], [@3,3:3=' ',<7>,1:3]," +
					 " [@4,4:6='302',<2>,1:4], [@5,7:7=';',<3>,1:7]," +
					 " [@6,8:7='<EOF>',<-1>,1:8]]",
					 tokens.getBuffer().toString());
	}

	@Test public void testMarkThenRelease() throws Exception {
		// Releasing the only mark lets the buffer flush on the next consume;
		// a second mark then starts accumulating again from that point.
		UnbufferedTokenStream<Token> tokens = createTokenStream("x = 302 + 1;");
		int m = tokens.mark();
		assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
		assertEquals("x", tokens.LT(1).getText());
		tokens.consume(); // consume x
		assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString());
		tokens.consume(); // ' '
		tokens.consume(); // =
		tokens.consume(); // ' '
		assertEquals("302", tokens.LT(1).getText());
		tokens.release(m); // "x = 302" is in buffer. next consume() should kill buffer
		tokens.consume(); // 302
		tokens.consume(); // ' '
		m = tokens.mark(); // mark at the +
		assertEquals("+", tokens.LT(1).getText());
		tokens.consume(); // '+'
		tokens.consume(); // ' '
		tokens.consume(); // 1
		tokens.consume(); // ;
		assertEquals("<EOF>", tokens.LT(1).getText());
		assertEquals("[[@6,8:8='+',<5>,1:8], [@7,9:9=' ',<7>,1:9]," +
					 " [@8,10:10='1',<2>,1:10], [@9,11:11=';',<3>,1:11]," +
					 " [@10,12:11='<EOF>',<-1>,1:12]]",
					 tokens.getBuffer().toString());
	}
}