diff --git a/gunit/pom.xml b/gunit/pom.xml
deleted file mode 100644
index ebd21cb79..000000000
--- a/gunit/pom.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
- 4.0.0
-
- org.antlr
- antlr4-gunit
- 4.0-SNAPSHOT
- jar
-
- antlr4-gunit
- http://www.antlr.org
-
-
- UTF-8
-
-
-
-
- junit
- junit
- 4.10
- test
-
-
- org.antlr
- antlr4-runtime
- 4.0-SNAPSHOT
-
-
- org.antlr
- antlr-runtime
- 3.4.1-SNAPSHOT
-
-
- org.antlr
- ST4
- 4.0.4
-
-
-
-
-
- src
-
-
- resources
-
-
-
-
-
- org.antlr
- antlr3-maven-plugin
- 3.4
-
- src
- true
-
-
-
-
- antlr
-
-
-
-
-
-
- maven-compiler-plugin
- 2.3.2
-
-
- 1.6
-
-
-
-
-
-
diff --git a/gunit/resources/org/antlr/v4/gunit/jUnit.stg b/gunit/resources/org/antlr/v4/gunit/jUnit.stg
deleted file mode 100644
index 9aeb8d874..000000000
--- a/gunit/resources/org/antlr/v4/gunit/jUnit.stg
+++ /dev/null
@@ -1,43 +0,0 @@
-group jUnit;
-
-jUnitClass(className, header, options, suites) ::= <<
-
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.*;
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.*;
-
-public class extends org.antlr.v4.gunit.gUnitBase {
- @Before public void setup() {
- lexerClassName = "";
- parserClassName = "";
-
- adaptorClassName = "";
-
- }
-
-}
->>
-
-header(action) ::= ""
-
-testSuite(name,cases) ::= <<
-}; separator="\n\n"> !>
->>
-
-parserRuleTestSuccess(input,expecting) ::= <<
->>
-
-parserRuleTestAST(ruleName,scriptLine,input,expecting) ::= <<
-@Test public void test_() throws Exception {
- // gunit test on line
- RuleReturnScope rstruct = (RuleReturnScope)execParser("", "", );
- Object actual = ((Tree)rstruct.getTree()).toStringTree();
- Object expecting = "";
- assertEquals("testing rule ", expecting, actual);
-}
->>
-
-string(s) ::= ""
diff --git a/gunit/src/org/antlr/v4/gunit/ASTVerifier.g b/gunit/src/org/antlr/v4/gunit/ASTVerifier.g
deleted file mode 100644
index 876deeb02..000000000
--- a/gunit/src/org/antlr/v4/gunit/ASTVerifier.g
+++ /dev/null
@@ -1,46 +0,0 @@
-tree grammar ASTVerifier;
-
-options {
- ASTLabelType=CommonTree;
- tokenVocab = gUnit;
-}
-
-@header {
-package org.antlr.v4.gunit;
-}
-
-gUnitDef
- : ^('gunit' ID DOC_COMMENT? (optionsSpec|header)* testsuite+)
- ;
-
-optionsSpec
- : ^(OPTIONS option+)
- ;
-
-option
- : ^('=' ID ID)
- | ^('=' ID STRING)
- ;
-
-header : ^('@header' ACTION);
-
-testsuite
- : ^(SUITE ID ID DOC_COMMENT? testcase+)
- | ^(SUITE ID DOC_COMMENT? testcase+)
- ;
-
-testcase
- : ^(TEST_OK DOC_COMMENT? input)
- | ^(TEST_FAIL DOC_COMMENT? input)
- | ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
- | ^(TEST_STDOUT DOC_COMMENT? input STRING)
- | ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
- | ^(TEST_TREE DOC_COMMENT? input TREE)
- | ^(TEST_ACTION DOC_COMMENT? input ACTION)
- ;
-
-input
- : STRING
- | ML_STRING
- | FILENAME
- ;
\ No newline at end of file
diff --git a/gunit/src/org/antlr/v4/gunit/Gen.java b/gunit/src/org/antlr/v4/gunit/Gen.java
deleted file mode 100644
index 39ceb705f..000000000
--- a/gunit/src/org/antlr/v4/gunit/Gen.java
+++ /dev/null
@@ -1,155 +0,0 @@
-package org.antlr.v4.gunit;
-
-import org.antlr.runtime.ANTLRFileStream;
-import org.antlr.runtime.CommonTokenStream;
-import org.antlr.runtime.RuleReturnScope;
-import org.antlr.runtime.tree.BufferedTreeNodeStream;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.antlr.stringtemplate.AutoIndentWriter;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.stringtemplate.StringTemplateGroup;
-
-import java.io.BufferedWriter;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.util.ArrayList;
-import java.util.List;
-
-public class Gen {
- // TODO: don't hardcode
- public static final String TEMPLATE_FILE =
- "/Users/parrt/antlr/code/antlr4/gunit/resources/org/antlr/v4/gunit/jUnit.stg";
-
- public static void main(String[] args) throws Exception {
- if ( args.length==0 ) {help(); System.exit(0);}
- String outputDirName = ".";
- String fileName = args[0];
- if ( args[0].equals("-o") ) {
- if ( args.length<3 ) {
- help();
- System.exit(0);
- }
- outputDirName = args[1];
- fileName = args[2];
- }
-
- new Gen().process(fileName, outputDirName);
- }
-
- public void process(String fileName, String outputDirName) throws Exception {
- // PARSE SCRIPT
- ANTLRFileStream fs = new ANTLRFileStream(fileName);
- gUnitLexer lexer = new gUnitLexer(fs);
- CommonTokenStream tokens = new CommonTokenStream(lexer);
- gUnitParser parser = new gUnitParser(tokens);
- RuleReturnScope r = parser.gUnitDef();
-
- CommonTree scriptAST = (CommonTree)r.getTree();
- System.out.println(scriptAST.toStringTree());
-
- // ANALYZE
- CommonTreeNodeStream nodes = new CommonTreeNodeStream(r.getTree());
- Semantics sem = new Semantics(nodes);
- sem.downup(scriptAST);
-
- System.out.println("options="+sem.options);
-
- // GENERATE CODE
- FileReader fr = new FileReader(TEMPLATE_FILE);
- StringTemplateGroup templates =
- new StringTemplateGroup(fr);
- fr.close();
-
- BufferedTreeNodeStream bnodes = new BufferedTreeNodeStream(scriptAST);
- jUnitGen gen = new jUnitGen(bnodes);
- gen.setTemplateLib(templates);
- RuleReturnScope r2 = gen.gUnitDef();
- StringTemplate st = (StringTemplate)r2.getTemplate();
- st.setAttribute("options", sem.options);
-
- FileWriter fw = new FileWriter(outputDirName+"/"+sem.name+".java");
- BufferedWriter bw = new BufferedWriter(fw);
- st.write(new AutoIndentWriter(bw));
- bw.close();
- }
-
- /** Borrowed from Leon Su in gunit v3 */
- public static String escapeForJava(String inputString) {
- // Gotta escape literal backslash before putting in specials that use escape.
- inputString = inputString.replace("\\", "\\\\");
- // Then double quotes need escaping (singles are OK of course).
- inputString = inputString.replace("\"", "\\\"");
- // note: replace newline to String ".\n", replace tab to String ".\t"
- inputString = inputString.replace("\n", "\\n").replace("\t", "\\t").replace("\r", "\\r").replace("\b", "\\b").replace("\f", "\\f");
-
- return inputString;
- }
-
- public static String normalizeTreeSpec(String t) {
- List words = new ArrayList();
- int i = 0;
- StringBuilder word = new StringBuilder();
- while ( i0 ) {
- words.add(word.toString());
- word.setLength(0);
- }
- words.add(String.valueOf(t.charAt(i)));
- i++;
- continue;
- }
- if ( Character.isWhitespace(t.charAt(i)) ) {
- // upon WS, save word
- if ( word.length()>0 ) {
- words.add(word.toString());
- word.setLength(0);
- }
- i++;
- continue;
- }
-
- // ... "x" or ...("x"
- if ( t.charAt(i)=='"' && (i-1)>=0 &&
- (t.charAt(i-1)=='(' || Character.isWhitespace(t.charAt(i-1))) )
- {
- i++;
- while ( i0 ) {
- words.add(word.toString());
- }
- //System.out.println("words="+words);
- StringBuilder buf = new StringBuilder();
- for (int j=0; j0 && !words.get(j).equals(")") &&
- !words.get(j-1).equals("(") ) {
- buf.append(' ');
- }
- buf.append(words.get(j));
- }
- return buf.toString();
- }
-
- public static void help() {
- System.err.println("org.antlr.v4.gunit.Gen [-o output-dir] gunit-file");
- }
-}
diff --git a/gunit/src/org/antlr/v4/gunit/Interp.java b/gunit/src/org/antlr/v4/gunit/Interp.java
deleted file mode 100644
index 6cff54d29..000000000
--- a/gunit/src/org/antlr/v4/gunit/Interp.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package org.antlr.v4.gunit;
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.BufferedTreeNodeStream;
-import org.antlr.runtime.tree.Tree;
-
-public class Interp {
- public static void main(String[] args) throws Exception {
- String fileName = args[0];
- ANTLRFileStream fs = new ANTLRFileStream(fileName);
- gUnitLexer lexer = new gUnitLexer(fs);
- CommonTokenStream tokens = new CommonTokenStream(lexer);
- gUnitParser parser = new gUnitParser(tokens);
- RuleReturnScope r = parser.gUnitDef();
- System.out.println(((Tree)r.getTree()).toStringTree());
-
- BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(r.getTree());
- ASTVerifier verifier = new ASTVerifier(nodes);
- verifier.gUnitDef();
- }
-}
diff --git a/gunit/src/org/antlr/v4/gunit/Semantics.g b/gunit/src/org/antlr/v4/gunit/Semantics.g
deleted file mode 100644
index 7dbb0d4c8..000000000
--- a/gunit/src/org/antlr/v4/gunit/Semantics.g
+++ /dev/null
@@ -1,36 +0,0 @@
-tree grammar Semantics;
-
-options {
- filter=true;
- ASTLabelType=CommonTree;
- tokenVocab = gUnit;
-}
-
-@header {
-package org.antlr.v4.gunit;
-import java.util.Map;
-import java.util.HashMap;
-}
-
-@members {
- public String name;
- public Map options = new HashMap();
-}
-
-topdown
- : optionsSpec
- | gUnitDef
- ;
-
-gUnitDef
- : ^('gunit' ID .*) {name = $ID.text;}
- ;
-
-optionsSpec
- : ^(OPTIONS option+)
- ;
-
-option
- : ^('=' o=ID v=ID) {options.put($o.text, $v.text);}
- | ^('=' o=ID v=STRING) {options.put($o.text, $v.text);}
- ;
diff --git a/gunit/src/org/antlr/v4/gunit/gUnit.g b/gunit/src/org/antlr/v4/gunit/gUnit.g
deleted file mode 100644
index 1c313788a..000000000
--- a/gunit/src/org/antlr/v4/gunit/gUnit.g
+++ /dev/null
@@ -1,155 +0,0 @@
-grammar gUnit;
-options {
- output=AST;
- ASTLabelType=CommonTree;
-}
-
-tokens { SUITE; TEST_OK; TEST_FAIL; TEST_RETVAL; TEST_STDOUT; TEST_TREE; TEST_ACTION; }
-
-@header {
-package org.antlr.v4.gunit;
-}
-@lexer::header {
-package org.antlr.v4.gunit;
-}
-
-gUnitDef
- : DOC_COMMENT? 'gunit' ID ';' (optionsSpec|header)* testsuite+
- -> ^('gunit' ID DOC_COMMENT? optionsSpec? header? testsuite+)
- ;
-
-optionsSpec
- : OPTIONS (option ';')+ '}' -> ^(OPTIONS option+)
- ;
-
-option
- : ID '=' optionValue -> ^('=' ID optionValue)
- ;
-
-optionValue
- : ID
- | STRING
- ;
-
-header : '@header' ACTION -> ^('@header' ACTION);
-
-testsuite
- : DOC_COMMENT? treeRule=ID 'walks' parserRule=ID ':' testcase+
- -> ^(SUITE $treeRule $parserRule DOC_COMMENT? testcase+)
- | DOC_COMMENT? ID ':' testcase+ -> ^(SUITE ID DOC_COMMENT? testcase+)
- ;
-
-testcase
- : DOC_COMMENT? input 'OK' -> ^(TEST_OK DOC_COMMENT? input)
- | DOC_COMMENT? input 'FAIL' -> ^(TEST_FAIL DOC_COMMENT? input)
- | DOC_COMMENT? input 'returns' RETVAL -> ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
- | DOC_COMMENT? input '->' STRING -> ^(TEST_STDOUT DOC_COMMENT? input STRING)
- | DOC_COMMENT? input '->' ML_STRING -> ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
- | DOC_COMMENT? input '->' TREE -> ^(TEST_TREE DOC_COMMENT? input TREE)
- | DOC_COMMENT? input '->' ACTION -> ^(TEST_ACTION DOC_COMMENT? input ACTION)
- ;
-
-input
- : STRING
- | ML_STRING
- | FILENAME
- ;
-
-ACTION
- : '{' ('\\}'|'\\' ~'}'|~('\\'|'}'))* '}' {setText(getText().substring(1, getText().length()-1));}
- ;
-
-RETVAL
- : NESTED_RETVAL {setText(getText().substring(1, getText().length()-1));}
- ;
-
-fragment
-NESTED_RETVAL :
- '['
- ( options {greedy=false;}
- : NESTED_RETVAL
- | .
- )*
- ']'
- ;
-
-TREE : NESTED_AST (' '? NESTED_AST)*;
-
-fragment
-NESTED_AST
- : '('
- ( NESTED_AST
- | STRING_
- | ~('('|')'|'"')
- )*
- ')'
- ;
-
-OPTIONS : 'options' WS* '{' ;
-
-ID : ID_ ('.' ID_)* ;
-
-fragment
-ID_ : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
- ;
-
-WS : ( ' '
- | '\t'
- | '\r'
- | '\n'
- ) {$channel=HIDDEN;}
- ;
-
-SL_COMMENT
- : '//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;}
- ;
-
-DOC_COMMENT
- : '/**' (options {greedy=false;}:.)* '*/'
- ;
-
-ML_COMMENT
- : '/*' ~'*' (options {greedy=false;}:.)* '*/' {$channel=HIDDEN;}
- ;
-
-STRING : STRING_ {setText(getText().substring(1, getText().length()-1));} ;
-
-fragment
-STRING_
- : '"' ('\\"'|'\\' ~'"'|~('\\'|'"'))+ '"'
- ;
-
-ML_STRING
- : '<<' .* '>>' {setText(getText().substring(2, getText().length()-2));}
- ;
-
-FILENAME
- : '/' ID ('/' ID)*
- | ID ('/' ID)+
- ;
-
-/*
-fragment
-ESC : '\\'
- ( 'n'
- | 'r'
- | 't'
- | 'b'
- | 'f'
- | '"'
- | '\''
- | '\\'
- | '>'
- | 'u' XDIGIT XDIGIT XDIGIT XDIGIT
- | . // unknown, leave as it is
- )
- ;
-*/
-
-fragment
-XDIGIT :
- '0' .. '9'
- | 'a' .. 'f'
- | 'A' .. 'F'
- ;
-
diff --git a/gunit/src/org/antlr/v4/gunit/gUnitBase.java b/gunit/src/org/antlr/v4/gunit/gUnitBase.java
deleted file mode 100644
index 8d92509d3..000000000
--- a/gunit/src/org/antlr/v4/gunit/gUnitBase.java
+++ /dev/null
@@ -1,49 +0,0 @@
-package org.antlr.v4.gunit;
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.TreeAdaptor;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-
-public class gUnitBase {
- public String lexerClassName;
- public String parserClassName;
- public String adaptorClassName;
-
- public Object execParser(
- String ruleName,
- String input,
- int scriptLine)
- throws Exception
- {
- ANTLRStringStream is = new ANTLRStringStream(input);
- Class lexerClass = Class.forName(lexerClassName);
- Class[] lexArgTypes = new Class[]{CharStream.class};
- Constructor lexConstructor = lexerClass.getConstructor(lexArgTypes);
- Object[] lexArgs = new Object[]{is};
- TokenSource lexer = (TokenSource)lexConstructor.newInstance(lexArgs);
- is.setLine(scriptLine);
-
- CommonTokenStream tokens = new CommonTokenStream(lexer);
-
- Class parserClass = Class.forName(parserClassName);
- Class[] parArgTypes = new Class[]{TokenStream.class};
- Constructor parConstructor = parserClass.getConstructor(parArgTypes);
- Object[] parArgs = new Object[]{tokens};
- Parser parser = (Parser)parConstructor.newInstance(parArgs);
-
- // set up customized tree adaptor if necessary
- if ( adaptorClassName!=null ) {
- parArgTypes = new Class[]{TreeAdaptor.class};
- Method m = parserClass.getMethod("setTreeAdaptor", parArgTypes);
- Class adaptorClass = Class.forName(adaptorClassName);
- m.invoke(parser, adaptorClass.newInstance());
- }
-
- Method ruleMethod = parserClass.getMethod(ruleName);
-
- // INVOKE RULE
- return ruleMethod.invoke(parser);
- }
-}
diff --git a/gunit/src/org/antlr/v4/gunit/jUnitGen.g b/gunit/src/org/antlr/v4/gunit/jUnitGen.g
deleted file mode 100644
index adb40d0a0..000000000
--- a/gunit/src/org/antlr/v4/gunit/jUnitGen.g
+++ /dev/null
@@ -1,53 +0,0 @@
-tree grammar jUnitGen;
-
-options {
- output=template;
- ASTLabelType=CommonTree;
- tokenVocab = gUnit;
-}
-
-@header {
-package org.antlr.v4.gunit;
-}
-
-gUnitDef
- : ^('gunit' ID DOC_COMMENT? (optionsSpec|header)* suites+=testsuite+)
- -> jUnitClass(className={$ID.text}, header={$header.st}, suites={$suites})
- ;
-
-optionsSpec
- : ^(OPTIONS option+)
- ;
-
-option
- : ^('=' ID ID)
- | ^('=' ID STRING)
- ;
-
-header : ^('@header' ACTION) -> header(action={$ACTION.text});
-
-testsuite
- : ^(SUITE rule=ID ID DOC_COMMENT? cases+=testcase[$rule.text]+)
- | ^(SUITE rule=ID DOC_COMMENT? cases+=testcase[$rule.text]+)
- -> testSuite(name={$rule.text}, cases={$cases})
- ;
-
-testcase[String ruleName]
- : ^(TEST_OK DOC_COMMENT? input)
- | ^(TEST_FAIL DOC_COMMENT? input)
- | ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
- | ^(TEST_STDOUT DOC_COMMENT? input STRING)
- | ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
- | ^(TEST_TREE DOC_COMMENT? input TREE)
- -> parserRuleTestAST(ruleName={$ruleName},
- input={$input.st},
- expecting={Gen.normalizeTreeSpec($TREE.text)},
- scriptLine={$input.start.getLine()})
- | ^(TEST_ACTION DOC_COMMENT? input ACTION)
- ;
-
-input
- : STRING -> string(s={Gen.escapeForJava($STRING.text)})
- | ML_STRING -> string(s={Gen.escapeForJava($ML_STRING.text)})
- | FILENAME
- ;
\ No newline at end of file
diff --git a/tool/test/org/antlr/v4/test/TestASTStructure.gunit b/tool/test/org/antlr/v4/test/TestASTStructure.gunit
index 0d2b0dc74..9641b13d7 100644
--- a/tool/test/org/antlr/v4/test/TestASTStructure.gunit
+++ b/tool/test/org/antlr/v4/test/TestASTStructure.gunit
@@ -1,6 +1,9 @@
/** Test ANTLRParser's AST construction. Translate to junit tests with:
*
* $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit
+ *
+ * NO LONGER using gunit!!!
+ *
*/
gunit TestASTStructure;
diff --git a/tool/test/org/antlr/v4/test/TestASTStructure.java b/tool/test/org/antlr/v4/test/TestASTStructure.java
index bf2c7e828..49a7bf729 100644
--- a/tool/test/org/antlr/v4/test/TestASTStructure.java
+++ b/tool/test/org/antlr/v4/test/TestASTStructure.java
@@ -1,17 +1,65 @@
package org.antlr.v4.test;
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.Parser;
import org.antlr.runtime.RuleReturnScope;
+import org.antlr.runtime.TokenSource;
+import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.Tree;
+import org.antlr.runtime.tree.TreeAdaptor;
import org.junit.Before;
import org.junit.Test;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
import static org.junit.Assert.assertEquals;
-public class TestASTStructure extends org.antlr.v4.gunit.gUnitBase {
- @Before public void setup() {
- lexerClassName = "org.antlr.v4.parse.ANTLRLexer";
- parserClassName = "org.antlr.v4.parse.ANTLRParser";
- adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor"; }
+// NO LONGER using gunit!!!
+
+public class TestASTStructure {
+ String lexerClassName = "org.antlr.v4.parse.ANTLRLexer";
+ String parserClassName = "org.antlr.v4.parse.ANTLRParser";
+ String adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor";
+
+ public Object execParser(
+ String ruleName,
+ String input,
+ int scriptLine)
+ throws Exception
+ {
+ ANTLRStringStream is = new ANTLRStringStream(input);
+ Class lexerClass = Class.forName(lexerClassName);
+ Class[] lexArgTypes = new Class[]{CharStream.class};
+ Constructor lexConstructor = lexerClass.getConstructor(lexArgTypes);
+ Object[] lexArgs = new Object[]{is};
+ TokenSource lexer = (TokenSource)lexConstructor.newInstance(lexArgs);
+ is.setLine(scriptLine);
+
+ CommonTokenStream tokens = new CommonTokenStream(lexer);
+
+ Class parserClass = Class.forName(parserClassName);
+ Class[] parArgTypes = new Class[]{TokenStream.class};
+ Constructor parConstructor = parserClass.getConstructor(parArgTypes);
+ Object[] parArgs = new Object[]{tokens};
+ Parser parser = (Parser)parConstructor.newInstance(parArgs);
+
+ // set up customized tree adaptor if necessary
+ if ( adaptorClassName!=null ) {
+ parArgTypes = new Class[]{TreeAdaptor.class};
+ Method m = parserClass.getMethod("setTreeAdaptor", parArgTypes);
+ Class adaptorClass = Class.forName(adaptorClassName);
+ m.invoke(parser, adaptorClass.newInstance());
+ }
+
+ Method ruleMethod = parserClass.getMethod(ruleName);
+
+ // INVOKE RULE
+ return ruleMethod.invoke(parser);
+ }
+
@Test public void test_grammarSpec1() throws Exception {
// gunit test on line 15
RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15);
diff --git a/tool/test/org/antlr/v4/test/TestBufferedTokenStream.java b/tool/test/org/antlr/v4/test/TestBufferedTokenStream.java
new file mode 100644
index 000000000..9e5625f60
--- /dev/null
+++ b/tool/test/org/antlr/v4/test/TestBufferedTokenStream.java
@@ -0,0 +1,186 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2011 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.v4.test;
+
+import org.antlr.v4.runtime.ANTLRInputStream;
+import org.antlr.v4.runtime.BufferedTokenStream;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenSource;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.tool.LexerGrammar;
+import org.antlr.v4.tool.interp.LexerInterpreter;
+import org.junit.Test;
+
+public class TestBufferedTokenStream extends BaseTest {
+
+ protected TokenStream createTokenStream(TokenSource src) {
+ return new BufferedTokenStream(src);
+ }
+
+ @Test public void testFirstToken() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 3 * 0 + 2 * 0;
+ CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ TokenStream tokens = createTokenStream(lexEngine);
+
+ String result = tokens.LT(1).getText();
+ String expecting = "x";
+ assertEquals(expecting, result);
+ }
+
+ @Test public void test2ndToken() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 3 * 0 + 2 * 0;
+ CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ TokenStream tokens = createTokenStream(lexEngine);
+
+ String result = tokens.LT(2).getText();
+ String expecting = " ";
+ assertEquals(expecting, result);
+ }
+
+ @Test public void testCompleteBuffer() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 3 * 0 + 2 * 0;
+ CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ TokenStream tokens = createTokenStream(lexEngine);
+
+ int i = 1;
+ Token t = tokens.LT(i);
+ while ( t.getType()!=Token.EOF ) {
+ i++;
+ t = tokens.LT(i);
+ }
+ tokens.LT(i++); // push it past end
+ tokens.LT(i++);
+
+ String result = tokens.getText();
+ String expecting = "x = 3 * 0 + 2 * 0;";
+ assertEquals(expecting, result);
+ }
+
+ @Test public void testCompleteBufferAfterConsuming() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 3 * 0 + 2 * 0;
+ CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ TokenStream tokens = createTokenStream(lexEngine);
+
+ Token t = tokens.LT(1);
+ while ( t.getType()!=Token.EOF ) {
+ tokens.consume();
+ t = tokens.LT(1);
+ }
+ tokens.consume();
+ tokens.LT(1); // push it past end
+ tokens.consume();
+ tokens.LT(1);
+
+ String result = tokens.getText();
+ String expecting = "x = 3 * 0 + 2 * 0;";
+ assertEquals(expecting, result);
+ }
+
+ @Test public void testLookback() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 3 * 0 + 2 * 0;
+ CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ TokenStream tokens = createTokenStream(lexEngine);
+
+ tokens.consume(); // get x into buffer
+ Token t = tokens.LT(-1);
+ assertEquals("x", t.getText());
+
+ tokens.consume();
+ tokens.consume(); // consume '='
+ t = tokens.LT(-3);
+ assertEquals("x", t.getText());
+ t = tokens.LT(-2);
+ assertEquals(" ", t.getText());
+ t = tokens.LT(-1);
+ assertEquals("=", t.getText());
+ }
+
+}
diff --git a/tool/test/org/antlr/v4/test/TestCommonTokenStream.java b/tool/test/org/antlr/v4/test/TestCommonTokenStream.java
index 3190591d6..b73efcd77 100644
--- a/tool/test/org/antlr/v4/test/TestCommonTokenStream.java
+++ b/tool/test/org/antlr/v4/test/TestCommonTokenStream.java
@@ -1,36 +1,5 @@
-/*
- [The "BSD license"]
- Copyright (c) 2011 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
package org.antlr.v4.test;
-import org.antlr.v4.runtime.ANTLRInputStream;
-import org.antlr.v4.runtime.BufferedTokenStream;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.CommonTokenStream;
@@ -38,151 +7,18 @@ import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenFactory;
import org.antlr.v4.runtime.TokenSource;
+import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.WritableToken;
-import org.antlr.v4.tool.LexerGrammar;
-import org.antlr.v4.tool.interp.LexerInterpreter;
import org.junit.Test;
-public class TestCommonTokenStream extends BaseTest {
- @Test public void testFirstToken() throws Exception {
- LexerGrammar g = new LexerGrammar(
- "lexer grammar t;\n"+
- "ID : 'a'..'z'+;\n" +
- "INT : '0'..'9'+;\n" +
- "SEMI : ';';\n" +
- "ASSIGN : '=';\n" +
- "PLUS : '+';\n" +
- "MULT : '*';\n" +
- "WS : ' '+;\n");
- // Tokens: 012345678901234567
- // Input: x = 3 * 0 + 2 * 0;
- CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
- LexerInterpreter lexEngine = new LexerInterpreter(g);
- lexEngine.setInput(input);
- BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
+public class TestCommonTokenStream extends TestBufferedTokenStream {
- String result = tokens.LT(1).getText();
- String expecting = "x";
- assertEquals(expecting, result);
- }
+ @Override
+ protected TokenStream createTokenStream(TokenSource src) {
+ return new CommonTokenStream(src);
+ }
- @Test public void test2ndToken() throws Exception {
- LexerGrammar g = new LexerGrammar(
- "lexer grammar t;\n"+
- "ID : 'a'..'z'+;\n" +
- "INT : '0'..'9'+;\n" +
- "SEMI : ';';\n" +
- "ASSIGN : '=';\n" +
- "PLUS : '+';\n" +
- "MULT : '*';\n" +
- "WS : ' '+;\n");
- // Tokens: 012345678901234567
- // Input: x = 3 * 0 + 2 * 0;
- CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
- LexerInterpreter lexEngine = new LexerInterpreter(g);
- lexEngine.setInput(input);
- BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
- String result = tokens.LT(2).getText();
- String expecting = " ";
- assertEquals(expecting, result);
- }
-
- @Test public void testCompleteBuffer() throws Exception {
- LexerGrammar g = new LexerGrammar(
- "lexer grammar t;\n"+
- "ID : 'a'..'z'+;\n" +
- "INT : '0'..'9'+;\n" +
- "SEMI : ';';\n" +
- "ASSIGN : '=';\n" +
- "PLUS : '+';\n" +
- "MULT : '*';\n" +
- "WS : ' '+;\n");
- // Tokens: 012345678901234567
- // Input: x = 3 * 0 + 2 * 0;
- CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
- LexerInterpreter lexEngine = new LexerInterpreter(g);
- lexEngine.setInput(input);
- BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
- int i = 1;
- Token t = tokens.LT(i);
- while ( t.getType()!=Token.EOF ) {
- i++;
- t = tokens.LT(i);
- }
- tokens.LT(i++); // push it past end
- tokens.LT(i++);
-
- String result = tokens.getText();
- String expecting = "x = 3 * 0 + 2 * 0;";
- assertEquals(expecting, result);
- }
-
- @Test public void testCompleteBufferAfterConsuming() throws Exception {
- LexerGrammar g = new LexerGrammar(
- "lexer grammar t;\n"+
- "ID : 'a'..'z'+;\n" +
- "INT : '0'..'9'+;\n" +
- "SEMI : ';';\n" +
- "ASSIGN : '=';\n" +
- "PLUS : '+';\n" +
- "MULT : '*';\n" +
- "WS : ' '+;\n");
- // Tokens: 012345678901234567
- // Input: x = 3 * 0 + 2 * 0;
- CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
- LexerInterpreter lexEngine = new LexerInterpreter(g);
- lexEngine.setInput(input);
- BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
- Token t = tokens.LT(1);
- while ( t.getType()!=Token.EOF ) {
- tokens.consume();
- t = tokens.LT(1);
- }
- tokens.consume();
- tokens.LT(1); // push it past end
- tokens.consume();
- tokens.LT(1);
-
- String result = tokens.getText();
- String expecting = "x = 3 * 0 + 2 * 0;";
- assertEquals(expecting, result);
- }
-
- @Test public void testLookback() throws Exception {
- LexerGrammar g = new LexerGrammar(
- "lexer grammar t;\n"+
- "ID : 'a'..'z'+;\n" +
- "INT : '0'..'9'+;\n" +
- "SEMI : ';';\n" +
- "ASSIGN : '=';\n" +
- "PLUS : '+';\n" +
- "MULT : '*';\n" +
- "WS : ' '+;\n");
- // Tokens: 012345678901234567
- // Input: x = 3 * 0 + 2 * 0;
- CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
- LexerInterpreter lexEngine = new LexerInterpreter(g);
- lexEngine.setInput(input);
- BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
- tokens.consume(); // get x into buffer
- Token t = tokens.LT(-1);
- assertEquals("x", t.getText());
-
- tokens.consume();
- tokens.consume(); // consume '='
- t = tokens.LT(-3);
- assertEquals("x", t.getText());
- t = tokens.LT(-2);
- assertEquals(" ", t.getText());
- t = tokens.LT(-1);
- assertEquals("=", t.getText());
- }
-
- @Test public void testOffChannel() throws Exception {
+ @Test public void testOffChannel() throws Exception {
TokenSource lexer = // simulate input " x =34 ;\n"
new TokenSource() {
int i = 0;
diff --git a/tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java b/tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java
index f2c41d826..13ed63110 100644
--- a/tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java
+++ b/tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java
@@ -92,7 +92,7 @@ public class TestUnbufferedCharStream extends BaseTest {
assertEquals('2', input.LA(3));
assertEquals('3', input.LA(4));
assertEquals('4', input.LA(5));
- assertEquals("4", input.getBuffer()); // shouldn't include x
+        assertEquals("01234", input.getBuffer()); // shouldn't include x
assertEquals(CharStream.EOF, input.LA(6));
}
@@ -132,18 +132,31 @@ public class TestUnbufferedCharStream extends BaseTest {
assertEquals(CharStream.EOF, input.LA(1));
}
- @Test public void test1Mark() throws Exception {
+ @Test public void test1Mark() throws Exception {
UnbufferedCharStream input = new UnbufferedCharStream(
- new StringReader("xyz")
- );
- int m = input.mark();
- assertEquals('x', input.LA(1));
- assertEquals('y', input.LA(2));
- assertEquals('z', input.LA(3));
- input.release(m);
+ new StringReader("xyz")
+ );
+ int m = input.mark();
+ assertEquals('x', input.LA(1));
+ assertEquals('y', input.LA(2));
+ assertEquals('z', input.LA(3));
+ input.release(m);
assertEquals(CharStream.EOF, input.LA(4));
assertEquals("xyz\uFFFF", input.getBuffer());
- }
+ }
+
+ @Test public void test1MarkWithConsumesInSequence() throws Exception {
+ UnbufferedCharStream input = new UnbufferedCharStream(
+ new StringReader("xyz")
+ );
+ int m = input.mark();
+ input.consume(); // x, moves to y
+ input.consume(); // y
+ input.consume(); // z, moves to EOF
+ assertEquals(CharStream.EOF, input.LA(1));
+ input.release(m);
+ assertEquals("xyz\uFFFF", input.getBuffer());
+ }
@Test public void test2Mark() throws Exception {
UnbufferedCharStream input = new UnbufferedCharStream(
@@ -158,7 +171,7 @@ public class TestUnbufferedCharStream extends BaseTest {
input.consume();
int m2 = input.mark();
assertEquals(1, m2); // 2nd marker dropped at buffer index 1
- assertEquals("y", input.getBuffer());
+ assertEquals("yz", input.getBuffer());
assertEquals('z', input.LA(1)); // forces load
assertEquals("yz", input.getBuffer());
input.release(m2); // noop since not earliest in buf
diff --git a/tool/test/org/antlr/v4/test/TestUnbufferedTokenStream.java b/tool/test/org/antlr/v4/test/TestUnbufferedTokenStream.java
new file mode 100644
index 000000000..28ef4974d
--- /dev/null
+++ b/tool/test/org/antlr/v4/test/TestUnbufferedTokenStream.java
@@ -0,0 +1,160 @@
+package org.antlr.v4.test;
+
+import org.antlr.v4.runtime.ANTLRInputStream;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.UnbufferedTokenStream;
+import org.antlr.v4.tool.LexerGrammar;
+import org.antlr.v4.tool.interp.LexerInterpreter;
+import org.junit.Test;
+
+import java.io.StringReader;
+
+public class TestUnbufferedTokenStream extends BaseTest {
+ @Test public void testLookahead() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 302;
+ CharStream input = new ANTLRInputStream(
+ new StringReader("x = 302;")
+ );
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ TokenStream tokens = new UnbufferedTokenStream(lexEngine);
+
+ assertEquals("x", tokens.LT(1).getText());
+ assertEquals(" ", tokens.LT(2).getText());
+ assertEquals("=", tokens.LT(3).getText());
+ assertEquals(" ", tokens.LT(4).getText());
+ assertEquals("302", tokens.LT(5).getText());
+ assertEquals(";", tokens.LT(6).getText());
+ }
+
+ @Test public void testNoBuffering() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 302;
+ CharStream input = new ANTLRInputStream(
+ new StringReader("x = 302;")
+ );
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexEngine);
+
+ assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
+ assertEquals("x", tokens.LT(1).getText());
+ assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
+ tokens.consume();
+ assertEquals(" ", tokens.LT(1).getText());
+ assertEquals("[[@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString());
+ tokens.consume();
+ assertEquals("=", tokens.LT(1).getText());
+ assertEquals("[[@2,2:2='=',<4>,1:2]]", tokens.getBuffer().toString());
+ tokens.consume();
+ assertEquals(" ", tokens.LT(1).getText());
+ assertEquals("[[@3,3:3=' ',<7>,1:3]]", tokens.getBuffer().toString());
+ tokens.consume();
+ assertEquals("302", tokens.LT(1).getText());
+ assertEquals("[[@4,4:6='302',<2>,1:4]]", tokens.getBuffer().toString());
+ tokens.consume();
+ assertEquals(";", tokens.LT(1).getText());
+ assertEquals("[[@5,7:7=';',<3>,1:7]]", tokens.getBuffer().toString());
+ }
+
+ @Test public void testMarkStart() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+ // Input: x = 302;
+ CharStream input = new ANTLRInputStream(
+ new StringReader("x = 302;")
+ );
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexEngine);
+
+ int m = tokens.mark();
+ assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
+ assertEquals("x", tokens.LT(1).getText());
+ tokens.consume(); // consume x
+ assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString());
+ tokens.consume(); // ' '
+ tokens.consume(); // =
+ tokens.consume(); // ' '
+ tokens.consume(); // 302
+ tokens.consume(); // ;
+ assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]," +
+ " [@2,2:2='=',<4>,1:2], [@3,3:3=' ',<7>,1:3]," +
+ " [@4,4:6='302',<2>,1:4], [@5,7:7=';',<3>,1:7]," +
+ " [@6,8:7='',<-1>,1:8]]",
+ tokens.getBuffer().toString());
+ }
+
+ @Test public void testMarkThenRelease() throws Exception {
+ LexerGrammar g = new LexerGrammar(
+ "lexer grammar t;\n"+
+ "ID : 'a'..'z'+;\n" +
+ "INT : '0'..'9'+;\n" +
+ "SEMI : ';';\n" +
+ "ASSIGN : '=';\n" +
+ "PLUS : '+';\n" +
+ "MULT : '*';\n" +
+ "WS : ' '+;\n");
+ // Tokens: 012345678901234567
+        // Input:  x = 302 + 1;
+ CharStream input = new ANTLRInputStream(
+ new StringReader("x = 302 + 1;")
+ );
+ LexerInterpreter lexEngine = new LexerInterpreter(g);
+ lexEngine.setInput(input);
+ UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexEngine);
+
+ int m = tokens.mark();
+ assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
+ assertEquals("x", tokens.LT(1).getText());
+ tokens.consume(); // consume x
+ assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString());
+ tokens.consume(); // ' '
+ tokens.consume(); // =
+ tokens.consume(); // ' '
+ assertEquals("302", tokens.LT(1).getText());
+        tokens.release(m); // "x = 302" is in the buffer; the next consume() should kill the buffer
+ tokens.consume(); // 302
+ tokens.consume(); // ' '
+ m = tokens.mark(); // mark at the +
+ assertEquals("+", tokens.LT(1).getText());
+ tokens.consume(); // '+'
+ tokens.consume(); // ' '
+ tokens.consume(); // 1
+ tokens.consume(); // ;
+        assertEquals("", tokens.LT(1).getText()); // EOF token has empty text
+ assertEquals("[[@6,8:8='+',<5>,1:8], [@7,9:9=' ',<7>,1:9]," +
+ " [@8,10:10='1',<2>,1:10], [@9,11:11=';',<3>,1:11]," +
+ " [@10,12:11='',<-1>,1:12]]",
+ tokens.getBuffer().toString());
+ }
+}