Factored out parse-tree node construction into a consume() method, added a start on the parse-tree tests, and added the notion of an error node to the parse tree.
[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 9104]
commit 2707837bc8
parent 473e56d59f

@@ -81,9 +81,9 @@ public interface ANTLRErrorStrategy {
*/
void sync(BaseRecognizer recognizer);

/** Reset the error handler. The parser invokes this
* when it matches a valid token (indicating no longer in recovery mode)
/** Reset the error handler. Call this when the parser
* matches a valid token (indicating no longer in recovery mode)
* and from its own reset method.
*/
void endErrorCondition();
void endErrorCondition(BaseRecognizer recognizer);
}

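The reset hook now takes the recognizer because the in-recovery flag it clears lives on BaseRecognizer rather than on the strategy object. A minimal standalone sketch of that split, with illustrative names only (this is not the ANTLR API):

    // The recognizer owns the recovery flag; every strategy callback that flips it
    // therefore needs the recognizer handed in.
    class MiniRecognizer {
        boolean errorRecoveryMode = false;   // mirrors BaseRecognizer.errorRecoveryMode later in this diff
        int syntaxErrors = 0;
    }

    interface MiniErrorStrategy {
        void endErrorCondition(MiniRecognizer recognizer);   // the old form took no argument
    }

    class MiniDefaultStrategy implements MiniErrorStrategy {
        @Override
        public void endErrorCondition(MiniRecognizer recognizer) {
            recognizer.errorRecoveryMode = false;            // clear per-recognizer state
        }
    }
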
@@ -29,10 +29,14 @@
package org.antlr.v4.runtime;

import com.sun.istack.internal.Nullable;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.misc.*;
import org.antlr.v4.runtime.atn.ATNConfig;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.OrderedHashSet;

import java.util.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/** A generic recognizer that can handle recognizers generated from
* parser and tree grammars. This is all the parsing

@@ -54,11 +58,11 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
protected boolean buildParseTrees;
protected boolean traceATNStates;

/** This is true when we see an error and before having successfully
* matched a token. Prevents generation of more than one error message
/** This is true after we see an error and before having successfully
* matched a token. Prevents generation of more than one error message
* per error.
*/
// protected boolean errorRecovery = false;
protected boolean errorRecoveryMode = false;

/** Did the recognizer encounter a syntax error? Track how many. */
protected int syntaxErrors = 0;

@@ -70,8 +74,7 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
/** reset the parser's state */
public void reset() {
if ( getInputStream()!=null ) getInputStream().seek(0);
_errHandler.endErrorCondition();
// errorRecovery = false;
_errHandler.endErrorCondition(this);
_ctx = null;
}

@@ -91,20 +94,14 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
// System.out.println("match "+((TokenStream)input).LT(1)+" vs expected "+ttype);
Object matchedSymbol = getCurrentInputSymbol();
if ( getInputStream().LA(1)==ttype ) {
getInputStream().consume();
// errorRecovery = false;
_errHandler.endErrorCondition();
if ( buildParseTrees ) _ctx.addChild((Token)matchedSymbol);
return matchedSymbol;
_errHandler.endErrorCondition(this);
consume();
}
return _errHandler.recoverInline(this);
}

/** Match the wildcard: in a symbol */
public void matchAny() {
// errorRecovery = false;
_errHandler.endErrorCondition();
getInputStream().consume();
else {
matchedSymbol = _errHandler.recoverInline(this);
}
// if ( buildParseTrees ) _ctx.addChild((Token)matchedSymbol);
return matchedSymbol;
}

/** Track the RuleContext objects during the parse and hook them up

@@ -112,10 +109,10 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
* The RuleContext returned from the start rule represents the root
* of the parse tree.
*
* To built parse trees, all we have to do is put a hook in move()
* and enterRule(). In move(), we had tokens to the current context
* To built parse trees, all we have to do is put a hook in setState()
* and enterRule(). In setState(), we add tokens to the current context
* as children. By the time we get to enterRule(), we are already
* in in invoke rule so we add this context As a child of the parent
* in an invoked rule so we add this context as a child of the parent
* (invoking) context. Simple and effective.
*
* Note that if we are not building parse trees, rule contexts

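The comment above lays out the whole tree-building scheme: enterRule() hangs the fresh context under its invoking context, and token consumption adds leaves to the current context. A standalone sketch of that shape, with illustrative names (not the ANTLR API):

    import java.util.ArrayList;
    import java.util.List;

    class SketchNode {
        Object payload;                      // rule name or token text
        SketchNode parent;
        List<SketchNode> children = new ArrayList<SketchNode>();
        SketchNode(Object payload) { this.payload = payload; }
        void addChild(SketchNode n) { n.parent = this; children.add(n); }
    }

    class TreeHooksSketch {
        SketchNode ctx;                      // plays the role of _ctx

        void enterRule(String ruleName) {    // hook #1: nest the new context under its parent
            SketchNode child = new SketchNode(ruleName);
            if (ctx != null) ctx.addChild(child);
            ctx = child;
        }

        void consumeToken(String token) {    // hook #2: matched tokens become leaves of the current context
            ctx.addChild(new SketchNode(token));
        }

        void exitRule() {                    // pop back to the invoking context
            if (ctx.parent != null) ctx = ctx.parent;
        }
    }
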
@@ -183,6 +180,32 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
public void enterOuterAlt(ParserRuleContext localctx, int altNum) {
_ctx = localctx;
_ctx.altNum = altNum;
addContextToParseTree();
}

/** Consume the current symbol and return it. E.g., given the following
* input with A being the current lookahead symbol:
*
*  A B
*  ^
*
* this function moves the cursor to B and returns A.
*
* If the parser is creating parse trees, the current symbol
* would also be added as a child to the current context (node).
*/
protected Object consume() {
Object o = getCurrentInputSymbol();
getInputStream().consume();
if ( buildParseTrees ) {
// TODO: tree parsers?
if ( errorRecoveryMode ) _ctx.addErrorNode((Token) o);
else _ctx.addChild((Token)o);
}
return o;
}

protected void addContextToParseTree() {
if ( buildParseTrees ) {
if ( _ctx.parent!=null ) _ctx.parent.addChild(_ctx);
}

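The new consume() is also where error nodes enter the tree: while the recognizer is in recovery mode, the symbol being eaten is recorded as an error node rather than an ordinary child. A hedged sketch of that routing, traced against the "aab" input used by the test added near the end of this diff (the mechanism is the one above; the labels are illustrative):

    // Presumed child sequence for rule  a : 'a' 'b' ;  while parsing "aab":
    //   consume('a')  -> normal child                   (match succeeds, not recovering)
    //   consume('a')  -> error node for the extra 'a'   (single-token deletion has begun recovery)
    //   consume('b')  -> normal child                   (endErrorCondition ran, recovery is over)
    class ConsumeRoutingSketch {
        boolean errorRecoveryMode;                       // set/cleared by the error strategy

        String consume(String token, java.util.List<String> children) {
            if (errorRecoveryMode) children.add("<ERROR: " + token + ">");  // mirrors ErrorNodeImpl
            else children.add(token);
            return token;                                // caller still receives the matched symbol
        }
    }
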
@@ -192,10 +215,6 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
_ctx = (ParserRuleContext)_ctx.parent;
}

public IntervalSet getExpectedTokens() {
return _interp.atn.nextTokens(_ctx);
}

public ParserRuleContext getInvokingContext(int ruleIndex) {
ParserRuleContext p = _ctx;
while ( p!=null ) {

@@ -210,6 +229,10 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
return false;
}

public IntervalSet getExpectedTokens() {
return _interp.atn.nextTokens(_ctx);
}

/** Return List<String> of the rules in your parser instance
* leading up to a call to this method. You could override if
* you want more details such as the file/line info of where

@@ -9,12 +9,6 @@ import org.antlr.v4.runtime.misc.IntervalSet;
* and tree parsers.
*/
public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
/** This is true when we see an error and before having successfully
* matched a token. Prevents generation of more than one error message
* per error.
*/
protected boolean errorRecovery = false;

/** The index into the input stream where the last error occurred.
* This is used to prevent infinite loops where an error is found
* but no token is consumed during recovery...another error is found,

@@ -25,13 +19,13 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {

protected IntervalSet lastErrorStates;

protected void beginErrorCondition() {
errorRecovery = true;
protected void beginErrorCondition(BaseRecognizer recognizer) {
recognizer.errorRecoveryMode = true;
}

@Override
public void endErrorCondition() {
errorRecovery = false;
public void endErrorCondition(BaseRecognizer recognizer) {
recognizer.errorRecoveryMode = false;
lastErrorStates = null;
lastErrorIndex = -1;
}

@@ -43,9 +37,9 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
{
// if we've already reported an error and have not matched a token
// yet successfully, don't report any errors.
if ( errorRecovery ) return; // don't count spurious errors
if (recognizer.errorRecoveryMode) return; // don't count spurious errors
recognizer.syntaxErrors++;
beginErrorCondition();
beginErrorCondition(recognizer);
if ( e instanceof NoViableAltException ) {
reportNoViableAlternative(recognizer, (NoViableAltException) e);
}

@@ -75,7 +69,8 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
// state in ATN; must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop; this is a failsafe.
recognizer.getInputStream().consume();
// recognizer.getInputStream().consume();
recognizer.consume();
}
lastErrorIndex = recognizer.getInputStream().index();
if ( lastErrorStates==null ) lastErrorStates = new IntervalSet();

@@ -89,14 +84,13 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
*/
@Override
public void sync(BaseRecognizer recognizer) {
System.out.println("sync");
// TODO: CACHE THESE RESULTS!!
IntervalSet expecting = getExpectedTokens(recognizer);
// TODO: subclass this class for treeparsers
TokenStream tokens = (TokenStream)recognizer.getInputStream();
Token la = tokens.LT(1);
if ( expecting.contains(la.getType()) ) {
endErrorCondition();
endErrorCondition(recognizer);
return;
}
reportUnwantedToken(recognizer);

@@ -134,9 +128,9 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
}

public void reportUnwantedToken(BaseRecognizer recognizer) {
if ( errorRecovery ) return;
if (recognizer.errorRecoveryMode) return;
recognizer.syntaxErrors++;
beginErrorCondition();
beginErrorCondition(recognizer);

Token t = (Token)recognizer.getCurrentInputSymbol();
String tokenName = getTokenErrorDisplay(t);

@@ -147,9 +141,9 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
}

public void reportMissingToken(BaseRecognizer recognizer) {
if ( errorRecovery ) return;
if (recognizer.errorRecoveryMode) return;
recognizer.syntaxErrors++;
beginErrorCondition();
beginErrorCondition(recognizer);

Token t = (Token)recognizer.getCurrentInputSymbol();
IntervalSet expecting = getExpectedTokens(recognizer);

@@ -192,11 +186,12 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
public Object recoverInline(BaseRecognizer recognizer)
throws RecognitionException
{
IntervalSet expecting = getExpectedTokens(recognizer);
Object currentSymbol = recognizer.getCurrentInputSymbol();

// SINGLE TOKEN DELETION
// if next token is what we are looking for then "delete" this token
int nextTokenType = recognizer.getInputStream().LA(2);
IntervalSet expecting = getExpectedTokens(recognizer);
if ( expecting.contains(nextTokenType) ) {
reportUnwantedToken(recognizer);
/*

@@ -205,18 +200,20 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
" since "+((TokenStream)recognizer.getInputStream()).LT(2)+
" is what we want");
*/
recognizer.getInputStream().consume(); // simply delete extra token
recognizer.consume(); // simply delete extra token
// recognizer.getInputStream().consume(); // simply delete extra token
// we want to return the token we're actually matching
Object matchedSymbol = recognizer.getCurrentInputSymbol();
recognizer.getInputStream().consume(); // move past ttype token as if all were ok
endErrorCondition(recognizer); // we know next token is correct
recognizer.consume(); // move past ttype token as if all were ok
// recognizer.getInputStream().consume(); // move past ttype token as if all were ok
return matchedSymbol;
}

// can't recover with single token deletion, try insertion
// SINGLE TOKEN INSERTION
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token; error recovery
// is free to conjure up and insert the missing token

ATNState currentState = recognizer._interp.atn.states.get(recognizer._ctx.s);
ATNState next = currentState.transition(0).target;
IntervalSet expectingAtLL2 = recognizer._interp.atn.nextTokens(next, recognizer._ctx);

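The code above implements both inline repairs: single-token deletion fires when the token after the offender is what the rule wants (the "aab" case in the tests below), and single-token insertion fires when the offending token could legally follow the missing one, so the parser conjures the missing token and leaves the input alone. A compact, standalone decision sketch (illustrative, not the ANTLR control flow verbatim):

    class InlineRecoverySketch {
        // la2Expected:    is LA(2) in the expected set?          -> delete the offender
        // la1FollowsMiss: could LA(1) follow the missing token?  -> insert the missing token
        static String decide(boolean la2Expected, boolean la1FollowsMiss) {
            if (la2Expected)    return "single token deletion";   // consume offender as an error node
            if (la1FollowsMiss) return "single token insertion";  // fabricate the missing token
            return "give up inline, fall back to coarser recovery";
        }

        public static void main(String[] args) {
            // grammar  a : 'a' 'b' ;  input "aab": LA(1)='a' is wrong, LA(2)='b' is expected
            System.out.println(decide(true, false));              // -> single token deletion
        }
    }
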
@@ -403,7 +400,7 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
recoverSet.addAll(follow);
ctx = ctx.parent;
}
System.out.println("recover set "+recoverSet.toString(recognizer.getTokenNames()));
// System.out.println("recover set "+recoverSet.toString(recognizer.getTokenNames()));
return recoverSet;
}

@@ -413,7 +410,8 @@ public class DefaultANTLRErrorStrategy implements ANTLRErrorStrategy {
int ttype = recognizer.getInputStream().LA(1);
while (ttype != Token.EOF && !set.contains(ttype) ) {
//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
recognizer.getInputStream().consume();
// recognizer.getInputStream().consume();
recognizer.consume();
ttype = recognizer.getInputStream().LA(1);
}
}

@@ -28,11 +28,15 @@
*/
package org.antlr.v4.runtime;

import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.tree.*;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.ParseTreeListener;
import org.antlr.v4.runtime.tree.Trees;

import java.util.*;
import java.util.ArrayList;
import java.util.List;

/** Rules can return start/stop info as well as possible trees and templates.
* Each context knows about invoking context and pointer into ATN so we

@@ -143,10 +147,17 @@ public class RuleContext implements ParseTree.RuleNode {
}

public void addChild(Token matchedToken) {
if ( children==null ) children = new ArrayList<ParseTree>();
TokenNodeImpl t = new TokenNodeImpl(matchedToken);
t.parent = this;
t.s = this.s;
addChild(t);
}

public void addErrorNode(Token badToken) {
TokenNodeImpl t = new ErrorNodeImpl(badToken);
addChild(t);
}

public void addChild(TokenNode t) {
if ( children==null ) children = new ArrayList<ParseTree>();
children.add(t);
}

@@ -29,7 +29,8 @@

package org.antlr.v4.runtime.tree;

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.Interval;

/** An interface to access the tree of RuleContext objects created

@@ -44,9 +45,11 @@ public interface ParseTree extends SyntaxTree {
public interface RuleNode extends ParseTree {
RuleContext getRuleContext();
}

public interface TokenNode extends ParseTree {
Token getToken();
}

public static class TokenNodeImpl implements TokenNode {
public Token token;
public ParseTree parent;

@@ -80,6 +83,20 @@ public interface ParseTree extends SyntaxTree {
}
}

/** Represents a token that was consumed during resynchronization
* rather than during a valid match operation. For example,
* we will create this kind of a node during single token insertion
* and deletion as well as during "consume until error recovery set"
* upon no viable alternative exceptions.
*/
public static class ErrorNodeImpl extends TokenNodeImpl {
public ErrorNodeImpl(Token token) {
super(token);
}
@Override
public String toString() { return "<ERROR: "+super.toString()+">"; }
}

// the following methods narrow the return type; they are not additional methods
ParseTree getParent();
ParseTree getChild(int i);

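Because resynchronization tokens get their own node class, anything walking the finished tree can tell repaired regions from clean ones with a plain instanceof test. A hedged sketch against the interfaces in this hunk; it assumes only what the diff shows, namely that ErrorNodeImpl is a public nested class of ParseTree:

    import org.antlr.v4.runtime.tree.ParseTree;

    class ErrorNodeCheckSketch {
        // true if this node records a token consumed during resynchronization
        static boolean isResyncToken(ParseTree node) {
            return node instanceof ParseTree.ErrorNodeImpl;
        }
    }
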
@@ -31,9 +31,10 @@ package org.antlr.v4.runtime.tree;

import org.antlr.v4.runtime.*;

import java.util.regex.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** A parser for a stream of tree nodes. "tree grammars" result in a subclass
/** A parser for a stream of tree nodes. "tree grammars" result in a subclass
* of this. All the error reporting and recovery is shared with Parser via
* the BaseRecognizer superclass.
*/

@@ -106,7 +107,7 @@ public class TreeParser extends BaseRecognizer {
* corresponding UP node.
*/
public void matchAny(IntStream ignore) { // ignore stream, copy of input
_errHandler.endErrorCondition();
_errHandler.endErrorCondition(this);
Object look = _input.LT(1);
if ( _input.getTreeAdaptor().getChildCount(look)==0 ) {
_input.consume(); // not subtree, consume 1 node and return

@@ -427,12 +427,12 @@ setState(<m.stateNumber>);
if ( <if(!invert)>!<endif>(<expr>) ) {
<if(m.labels)><m.labels:{l | <labelref(l)> = (Token)}><endif>_errHandler.recoverInline(this);
}
_input.consume();
consume();
>>

Wildcard(w) ::= <<
setState(<w.stateNumber>);
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>_input.LT(1); _input.consume();
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>_input.LT(1); consume();
>>

// ACTION STUFF

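Both templates now emit the recognizer-level consume() instead of _input.consume(), so every token the generated parser eats flows through the tree-building and error-node logic added to BaseRecognizer. A rough, stubbed-out sketch of the emitted shape (state numbers, the token constant, and the stub bodies are assumptions, not verbatim ANTLR output):

    class EmittedFragmentSketch {
        static final int B = 2;                    // a token type constant (assumed)
        int la1 = B;                               // stand-in for _input.LA(1)

        void setState(int s) { /* ATN bookkeeping in the real parser */ }
        void recoverInline() { /* _errHandler.recoverInline(this) in real output */ }
        void consume() { /* the recognizer-level consume() this commit introduces */ }

        void matchTokenB() {
            setState(427);                         // <m.stateNumber> / <w.stateNumber>
            if ( !(la1 == B) ) {
                recoverInline();
            }
            consume();                             // previously: _input.consume();
        }
    }
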
@@ -202,4 +202,17 @@ public class TestParseErrors extends BaseTest {
String expecting = "{'hardware', 'software'}\n";
assertEquals(expecting, result);
}

// AST nodes with missing information

@Test public void testASTHasErrorNode() throws Exception {
String grammar =
"grammar T;\n" +
"options { output=AST; }\n"+
"a : 'a' 'b' ;";
String result = execParser("T.g", grammar, "TParser", "TLexer", "a", "aab", false);
String expecting = "a b\n";
assertEquals(expecting, result);
}

}

@@ -0,0 +1,15 @@
package org.antlr.v4.test;

import org.junit.Test;

public class TestParseTrees extends BaseTest {
@Test public void testTokenMismatch() throws Exception {
String grammar =
"grammar T;\n" +
"a : 'a' 'b' ;";
// String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aa", false);
// String expecting = "line 1:1 mismatched input 'a' expecting 'b'\n";
// String result = stderrDuringParse;
// assertEquals(expecting, result);
}
}

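TestParseTrees is only a stub at this change; its body is commented out. Read uncommented, the intended test would presumably be the following (taken directly from the commented lines above and not verified against the runtime at this change):

    @Test public void testTokenMismatch() throws Exception {
        String grammar =
            "grammar T;\n" +
            "a : 'a' 'b' ;";
        String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aa", false);
        String expecting = "line 1:1 mismatched input 'a' expecting 'b'\n";
        String result = stderrDuringParse;
        assertEquals(expecting, result);
    }
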
@@ -38,7 +38,7 @@ public class TestParserExec extends BaseTest {
@Test public void testBasic() throws Exception {
String grammar =
"grammar T;\n" +
"a : ID INT {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : ID INT {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -51,7 +51,7 @@ public class TestParserExec extends BaseTest {
@Test public void testAPlus() throws Exception {
String grammar =
"grammar T;\n" +
"a : ID+ {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : ID+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -64,7 +64,7 @@ public class TestParserExec extends BaseTest {
@Test public void testAorAPlus() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|ID)+ {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : (ID|ID)+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -76,7 +76,7 @@ public class TestParserExec extends BaseTest {
@Test public void testAStar() throws Exception {
String grammar =
"grammar T;\n" +
"a : ID* {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : ID* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -92,7 +92,7 @@ public class TestParserExec extends BaseTest {
@Test public void testAorAStar() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|ID)* {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : (ID|ID)* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -107,7 +107,7 @@ public class TestParserExec extends BaseTest {
@Test public void testAorBPlus() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|INT{;})+ {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : (ID|INT{;})+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -120,7 +120,7 @@ public class TestParserExec extends BaseTest {
@Test public void testAorBStar() throws Exception {
String grammar =
"grammar T;\n" +
"a : (ID|INT{;})* {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : (ID|INT{;})* {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ID : 'a'..'z'+ ;\n" +
"INT : '0'..'9'+;\n" +
"WS : (' '|'\\n') {skip();} ;\n";

@@ -136,7 +136,7 @@ public class TestParserExec extends BaseTest {
@Test public void testNongreedyLoopCantSeeEOF() throws Exception {
String grammar =
"grammar T;\n" +
"s : block EOF {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"s : block EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"block : '{' .* '}' ;\n"+
"EQ : '=' ;\n" +
"INT : '0'..'9'+ ;\n" +

@@ -162,7 +162,7 @@ public class TestParserExec extends BaseTest {
@Test public void testNongreedyLoop() throws Exception {
String grammar =
"grammar T;\n" +
"s : ifstat ';' EOF {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"s : ifstat ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ifstat : 'if' '(' .* ')' block ;\n" +
"block : '{' '}' ;\n"+
"EQ : '=' ;\n" +

@@ -189,7 +189,7 @@ public class TestParserExec extends BaseTest {
@Test public void testNongreedyLoopPassingThroughAnotherNongreedy() throws Exception {
String grammar =
"grammar T;\n" +
"s : ifstat ';' EOF {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"s : ifstat ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ifstat : 'if' '(' .* ')' block ;\n" +
"block : '{' (block|.)* '}' ;\n"+
"EQ : '=' ;\n" +

@@ -207,7 +207,7 @@ public class TestParserExec extends BaseTest {
// EOF on end means LL(*) can identify when to stop the loop.
String grammar =
"grammar T;\n" +
"s : stat* ID '=' ID ';' EOF {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"s : stat* ID '=' ID ';' EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"stat : 'if' '(' INT ')' stat\n" +
" | 'return' INT ';'\n" +
" | ID '=' (INT|ID) ';'\n" +

@@ -234,12 +234,12 @@ public class TestParserExec extends BaseTest {
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
input, false);
// can't match EOF to ID '=' '0' ';'
assertEquals("no viable token at input [@8,9:9='<EOF>',<-1>,1:9], index 8\n", found);
assertEquals("no viable token at input EOF, index 8\n", found);
input =
"x=1; a=b; z=3;"; // FAILS to match since it can't match last element
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
input, false);
assertEquals("no viable token at input [@12,14:14='<EOF>',<-1>,1:14], index 12\n", found); // should not finish to print output
assertEquals("no viable token at input EOF, index 12\n", found); // should not finish to print output
}

@Test public void testStatLoopNongreedyNecessary() throws Exception {

@@ -250,7 +250,7 @@ public class TestParserExec extends BaseTest {
"grammar T;\n" +
"random : s ;" + // call s so s isn't followed by EOF directly
"s : (options {greedy=false;} : stat)* ID '=' ID ';'\n" +
" {System.out.println(input.toString(0,input.index()-1));} ;\n" +
" {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"stat : 'if' '(' INT ')' stat\n" +
" | 'return' INT ';'\n" +
" | ID '=' (INT|ID) ';'\n" +

@@ -277,7 +277,7 @@ public class TestParserExec extends BaseTest {
found = execParser("T.g", grammar, "TParser", "TLexer", "s",
input, false);
// can't match EOF to ID '=' '0' ';'
assertEquals("no viable token at input [@8,9:9='<EOF>',<-1>,1:9], index 8\n", found);
assertEquals("no viable token at input EOF, index 8\n", found);
input =
"x=1; a=b; z=3;"; // stops at a=b; ignores z=3;
found = execParser("T.g", grammar, "TParser", "TLexer", "s",

@@ -288,7 +288,7 @@ public class TestParserExec extends BaseTest {
@Test public void testHTMLTags() throws Exception {
String grammar =
"grammar T;\n" +
"a : tag+ {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"a : tag+ {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"tag : '<' '/'? .* '>' ;\n" +
"EQ : '=' ;\n" +
"COMMA : ',' ;\n" +

@@ -300,7 +300,7 @@ public class TestParserExec extends BaseTest {
String found = null;
found = execParser("T.g", grammar, "TParser", "TLexer", "a",
"<a>foo</a>", false);
assertEquals("<a>\n", found);
assertEquals("<a>foo</a>\n", found);
found = execParser("T.g", grammar, "TParser", "TLexer", "a",
"<a></a>", false);
assertEquals("<a></a>\n", found);

@@ -347,7 +347,7 @@ public class TestParserExec extends BaseTest {
@Test public void testNongreedyLoopEndOfRule() throws Exception {
String grammar =
"grammar T;\n" +
"s : ifstat EOF {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"s : ifstat EOF {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ifstat : 'if' '(' INT ')' .* ;\n" +
"EQ : '=' ;\n" +
"INT : '0'..'9'+ ;\n" +

@@ -375,7 +375,7 @@ public class TestParserExec extends BaseTest {
@Test public void testNongreedyLoopEndOfRuleStuffFollowing() throws Exception {
String grammar =
"grammar T;\n" +
"s : ifstat '.' {System.out.println(input.toString(0,input.index()-1));} ;\n" +
"s : ifstat '.' {System.out.println(_input.toString(0,_input.index()-1));} ;\n" +
"ifstat : 'if' '(' INT ')' .* ;\n" +
"EQ : '=' ;\n" +
"INT : '0'..'9'+ ;\n" +

@@ -385,6 +385,6 @@ public class TestParserExec extends BaseTest {
"if ( 34 ) a b .";
String found = execParser("T.g", grammar, "TParser", "TLexer", "s",
input, false);
assertEquals("no viable token at input [@7,15:15='<EOF>',<-1>,1:15], index 7\n", found);
assertEquals("no viable token at input EOF, index 7\nif(34)ab.\n", found);
}
}