Merge branch 'master' into interactive

Terence Parr · 2012-06-16 17:20:04 -07:00 · commit b18475113b
37 changed files with 586 additions and 556 deletions

Lexer.java

@@ -266,6 +266,14 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>
 		return getInterpreter().getCharPositionInLine();
 	}
 
+	public void setLine(int line) {
+		getInterpreter().setLine(line);
+	}
+
+	public void setCharPositionInLine(int charPositionInLine) {
+		getInterpreter().setCharPositionInLine(charPositionInLine);
+	}
+
 	/** What is the index of the current character of lookahead? */
 	public int getCharIndex() {
 		return _input.index();
@@ -279,7 +287,13 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>
 			return _text;
 		}
 		return getInterpreter().getText(_input);
-//		return ((CharStream)input).substring(tokenStartCharIndex,getCharIndex()-1);
 	}
+
+	/** Get the text from start of token to current lookahead char.
+	 *  Use this in predicates to test text matched so far in a lexer rule.
+	 */
+	public String getSpeculativeText() {
+		return getInterpreter().getSpeculativeText(_input);
+	}
 
 	/** Set the complete text of this token; it wipes any previous
@@ -289,6 +303,10 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>
 		this._text = text;
 	}
 
+	public void setToken(Token _token) {
+		this._token = _token;
+	}
+
 	public String[] getModeNames() {
 		return null;
 	}

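The new getSpeculativeText() hook exposes in-flight token text to semantic predicates. A minimal sketch of how a lexer @members helper might use it; the rule and helper names here are hypothetical, not part of this commit:

    // Hypothetical grammar fragment:
    //   RESERVED : [a-z]+ {isReserved()}? ;
    // Helper in the lexer's @members block. getSpeculativeText() returns the
    // text from token start through the current lookahead char, so it can be
    // tested before the token is finished.
    public boolean isReserved() {
        return getSpeculativeText().equals("while");
    }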
Token.java

@@ -35,21 +35,13 @@ package org.antlr.v4.runtime;
  */
 public interface Token {
 	public static final int INVALID_TYPE = 0;
-//	public static final Token INVALID_TOKEN = new CommonToken(INVALID_TYPE);
-	public static final int MIN_TOKEN_TYPE = 1;
 
 	/** During lookahead operations, this "token" signifies we hit rule end ATN state
 	 *  and did not follow it despite needing to.
 	 */
 	public static final int EPSILON = -2;
 
-	/** imaginary tree navigation type; traverse "get child" link */
-	public static final int DOWN = 1;
-	/** imaginary tree navigation type; finish with a child list */
-	public static final int UP = 2;
-	public static final int MIN_USER_TOKEN_TYPE = UP+1;
+	public static final int MIN_USER_TOKEN_TYPE = 1;
 
 	public static final int EOF = CharStream.EOF;

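Why so many test expectations change later in this diff: dropping DOWN and UP frees token types 1 and 2, so MIN_USER_TOKEN_TYPE falls from 3 to 1. A worked note (our arithmetic, not code from the commit):

    public class TokenTypeShift {
        // Old scheme reserved two imaginary tree-navigation token types
        // (DOWN=1, UP=2), so user token types began at UP+1 = 3.
        static final int OLD_MIN_USER_TOKEN_TYPE = 3;
        // New scheme starts user token types right after INVALID_TYPE=0.
        static final int NEW_MIN_USER_TOKEN_TYPE = 1;

        public static void main(String[] args) {
            // Every token type in the serialization/interpreter tests below
            // shifts down by exactly this delta.
            System.out.println(OLD_MIN_USER_TOKEN_TYPE - NEW_MIN_USER_TOKEN_TYPE); // prints 2
        }
    }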
ATNConfigSet.java

@@ -43,6 +43,8 @@ public class ATNConfigSet extends OrderedHashSet<ATNConfig> {
 	// TODO: can we track conflicts as they are added to save scanning configs later?
 	public int uniqueAlt;
 	public IntervalSet conflictingAlts;
+	// Used in parser and lexer. In lexer, it indicates we hit a pred
+	// while computing a closure operation. Don't make a DFA state from this.
 	public boolean hasSemanticContext;
 	public boolean dipsIntoOuterContext;

LL1Analyzer.java

@@ -40,10 +40,10 @@ import java.util.HashSet;
 import java.util.Set;
 
 public class LL1Analyzer {
-	/** Used during LOOK to detect computation cycles. E.g., ()* causes
-	 *  infinite loop without it. If we get to same state would be infinite
-	 *  loop.
+	/** Special value added to the lookahead sets to indicate that we hit
+	 *  a predicate during analysis if seeThruPreds==false.
 	 */
+	public static final int HIT_PRED = Token.INVALID_TYPE;
 
 	@NotNull
 	public final ATN atn;
@@ -65,7 +65,11 @@ public class LL1Analyzer {
 			_LOOK(s.transition(alt - 1).target,
 				  ParserRuleContext.EMPTY,
 				  look[alt], lookBusy, seeThruPreds);
-			if ( look[alt].size()==0 ) look[alt] = null;
+			// Wipe out lookahead for this alternative if we found nothing
+			// or we had a predicate when we !seeThruPreds
+			if ( look[alt].size()==0 || look[alt].contains(HIT_PRED) ) {
+				look[alt] = null;
+			}
 		}
 		return look;
 	}
@@ -116,14 +120,16 @@ public class LL1Analyzer {
 		for (int i=0; i<n; i++) {
 			Transition t = s.transition(i);
 			if ( t.getClass() == RuleTransition.class ) {
-				RuleContext newContext =
-					new RuleContext(ctx, s.stateNumber);
+				RuleContext newContext = new RuleContext(ctx, s.stateNumber);
 				_LOOK(t.target, newContext, look, lookBusy, seeThruPreds);
 			}
 			else if ( t instanceof PredicateTransition ) {
 				if ( seeThruPreds ) {
 					_LOOK(t.target, ctx, look, lookBusy, seeThruPreds);
 				}
+				else {
+					look.add(HIT_PRED);
+				}
 			}
 			else if ( t.isEpsilon() ) {
 				_LOOK(t.target, ctx, look, lookBusy, seeThruPreds);

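Downstream code never sees HIT_PRED directly: an alternative whose LOOK set contains it is nulled out above, so callers only ever observe a null entry. An illustrative check against the getDecisionLookahead() contract (the 1-based alt indexing follows the loop above):

    import org.antlr.v4.runtime.misc.IntervalSet;

    public class LL1Check {
        /** True only if every alternative produced a usable LL(1) set; a null
         *  entry means the alt matched nothing or touched a predicate while
         *  seeThruPreds was false (the HIT_PRED case above). */
        public static boolean allAltsAnalyzable(IntervalSet[] look) {
            for (int alt = 1; alt < look.length; alt++) {
                if (look[alt] == null) return false;
            }
            return true;
        }
    }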
LexerATNSimulator.java

@@ -58,11 +58,11 @@ public class LexerATNSimulator extends ATNSimulator {
 	/** When we hit an accept state in either the DFA or the ATN, we
 	 *  have to notify the character stream to start buffering characters
-	 *  via mark() and record the current state. The current state includes
-	 *  the current index into the input, the current line, and current
-	 *  character position in that line. Note that the Lexer is tracking
-	 *  the starting line and characterization of the token. These
-	 *  variables track the state of the simulator when it hits an accept state.
+	 *  via mark() and record the current state. The current sim state
+	 *  includes the current index into the input, the current line,
+	 *  and current character position in that line. Note that the Lexer is
+	 *  tracking the starting line and characterization of the token. These
+	 *  variables track the "state" of the simulator when it hits an accept state.
 	 *
 	 *  We track these variables separately for the DFA and ATN simulation
 	 *  because the DFA simulation often has to fail over to the ATN
@@ -71,18 +71,18 @@ public class LexerATNSimulator extends ATNSimulator {
 	 *  then the ATN does the accept and the DFA simulator that invoked it
 	 *  can simply return the predicted token type.
 	 */
-	protected static class ExecState {
+	protected static class SimState {
 		protected int index = -1;
 		protected int line = 0;
 		protected int charPos = -1;
-		protected DFAState state;
+		protected DFAState dfaState;
 		protected ATNConfig config;
 
 		protected void reset() {
 			index = -1;
 			line = 0;
 			charPos = -1;
-			state = null;
+			dfaState = null;
 			config = null;
 		}
 	}
@@ -109,7 +109,7 @@ public class LexerATNSimulator extends ATNSimulator {
 	/** Used during DFA/ATN exec to record the most recent accept configuration info */
 	@NotNull
-	protected final ExecState prevAccept = new ExecState();
+	protected final SimState prevAccept = new SimState();
 
 	public static int ATN_failover = 0;
 	public static int match_calls = 0;
@@ -184,7 +184,7 @@ public class LexerATNSimulator extends ATNSimulator {
 		ATNState startState = atn.modeToStartState.get(mode);
 
 		if ( debug ) {
-			System.out.format("mode %d start: %s\n", mode, startState);
+			System.out.format("matchATN mode %d start: %s\n", mode, startState);
 		}
 
 		ATNConfigSet s0_closure = computeStartState(input, startState);
@@ -236,7 +236,7 @@ public class LexerATNSimulator extends ATNSimulator {
 				System.out.format("accept; predict %d in state %d\n", s.prediction, s.stateNumber);
 			}
 
-			markAcceptState(prevAccept, input, s);
+			captureSimState(prevAccept, input, s);
 			// keep going unless we're at EOF; check if something else could match
 			// EOF never in DFA
 			if ( t==CharStream.EOF ) break;
@@ -247,7 +247,7 @@ public class LexerATNSimulator extends ATNSimulator {
 			t = input.LA(1);
 		}
 
-		ATNConfigSet reach = prevAccept.state != null ? prevAccept.state.configset : null;
+		ATNConfigSet reach = prevAccept.dfaState != null ? prevAccept.dfaState.configset : null;
 		return failOrAccept(prevAccept, input, reach, t);
 	}
@@ -265,7 +265,7 @@ public class LexerATNSimulator extends ATNSimulator {
 		while ( true ) { // while more work
 			if ( debug ) {
-				System.out.format("in reach starting closure: %s\n", closure);
+				System.out.format("execATN loop starting closure: %s\n", closure);
 			}
 
 			// As we move src->trg, src->trg, we keep track of the previous trg to
@@ -295,6 +295,10 @@ public class LexerATNSimulator extends ATNSimulator {
 					break;
 				}
 				else if (target != null) {
+					if ( debug ) {
+						System.out.println("reuse state "+s.stateNumber+
+										   " edge to "+target.stateNumber);
+					}
 					reach = target.configset;
 				}
 			}
@@ -317,18 +321,18 @@ public class LexerATNSimulator extends ATNSimulator {
 					if (from != null) {
 						addDFAEdge(from, t, ERROR);
 					}
-					break;
+					break; // stop when we can't match any more char
 				}
 
 				// Did we hit a stop state during reach op?
-				processAcceptStates(input, reach);
+				processAcceptConfigs(input, reach);
 
 				// Add an edge from s to target DFA found/created for reach
 				target = addDFAEdge(s, t, reach);
 			}
 			else if (target.isAcceptState) {
 				traceAcceptState(target.prediction);
-				markAcceptState(prevAccept, input, target);
+				captureSimState(prevAccept, input, target);
 			}
 
 			consume(input);
@@ -342,16 +346,16 @@ public class LexerATNSimulator extends ATNSimulator {
 		return failOrAccept(prevAccept, input, closure, t);
 	}
 
-	protected int failOrAccept(ExecState prevAccept, CharStream input,
+	protected int failOrAccept(SimState prevAccept, CharStream input,
 							   ATNConfigSet reach, int t)
 	{
-		if (prevAccept.state != null) {
-			int ruleIndex = prevAccept.state.lexerRuleIndex;
-			int actionIndex = prevAccept.state.lexerActionIndex;
+		if (prevAccept.dfaState != null) {
+			int ruleIndex = prevAccept.dfaState.lexerRuleIndex;
+			int actionIndex = prevAccept.dfaState.lexerActionIndex;
 			accept(input, ruleIndex, actionIndex,
 				   prevAccept.index, prevAccept.line, prevAccept.charPos);
-			tracePredict(prevAccept.state.prediction);
-			return prevAccept.state.prediction;
+			tracePredict(prevAccept.dfaState.prediction);
+			return prevAccept.dfaState.prediction;
 		}
 		else if (prevAccept.config != null) {
 			int ruleIndex = prevAccept.config.state.ruleIndex;
@@ -389,23 +393,33 @@ public class LexerATNSimulator extends ATNSimulator {
 		}
 	}
 
-	protected void processAcceptStates(@NotNull CharStream input, @NotNull ATNConfigSet reach) {
+	protected void processAcceptConfigs(@NotNull CharStream input, @NotNull ATNConfigSet reach) {
+		if ( debug ) {
+			System.out.format("processAcceptConfigs: reach=%s, prevAccept=%s, prevIndex=%d\n",
+							  reach, prevAccept.config, prevAccept.index);
+		}
 		for (int ci=0; ci<reach.size(); ci++) {
 			ATNConfig c = reach.get(ci);
 			if ( c.state instanceof RuleStopState) {
 				if ( debug ) {
-					System.out.format("in reach we hit accept state %s index %d, reach=%s, prevAccept=%s, prevIndex=%d\n",
-									  c, input.index(), reach, prevAccept.config, prevAccept.index);
+					System.out.format("processAcceptConfigs: hit accept config %s index %d\n",
+									  c, input.index());
 				}
 
 				int index = input.index();
 				if ( index > prevAccept.index ) {
-					traceAcceptState(c.alt);
-					// will favor prev accept at same index so "int" is keyword not ID
-					markAcceptState(prevAccept, input, c);
 					if ( debug ) {
-						System.out.format("mark %s @ index=%d, %d:%d\n", c, index, prevAccept.line, prevAccept.charPos);
+						if ( prevAccept.index>=0 ) {
+							System.out.println("processAcceptConfigs: found longer token");
+						}
 					}
+					// condition > not >= will favor prev accept at same index.
+					// This way, "int" is keyword not ID if listed first.
+					traceAcceptState(c.alt);
+					if ( debug ) {
+						System.out.format("markExecSettings for %s @ index=%d, line %d:%d\n", c, index, prevAccept.line, prevAccept.charPos);
+					}
+					captureSimState(prevAccept, input, reach, c);
 				}
 
 				// if we reach lexer accept state, toss out any configs in rest
@@ -546,13 +560,17 @@ public class LexerATNSimulator extends ATNSimulator {
 		ATNState p = config.state;
 		for (int i=0; i<p.getNumberOfTransitions(); i++) {
 			Transition t = p.transition(i);
-			ATNConfig c = getEpsilonTarget(config, t);
+			ATNConfig c = getEpsilonTarget(config, t, configs);
 			if ( c!=null ) closure(c, configs);
 		}
 	}
 
+	// side-effect: can alter configs.hasSemanticContext
 	@Nullable
-	public ATNConfig getEpsilonTarget(@NotNull ATNConfig config, @NotNull Transition t) {
+	public ATNConfig getEpsilonTarget(@NotNull ATNConfig config,
+									  @NotNull Transition t,
+									  @NotNull ATNConfigSet configs)
+	{
 		ATNState p = config.state;
 		ATNConfig c = null;
 		if ( t.getClass() == RuleTransition.class ) {
@@ -565,7 +583,29 @@ public class LexerATNSimulator extends ATNSimulator {
 				System.out.format("Predicates cannot be evaluated without a recognizer; assuming true.\n");
 			}
 
+			/* Track traversing semantic predicates. If we traverse,
+			   we cannot add a DFA state for this "reach" computation
+			   because the DFA would not test the predicate again in the
+			   future. Rather than creating collections of semantic predicates
+			   like v3 and testing them on prediction, v4 will test them on the
+			   fly all the time using the ATN not the DFA. This is slower but
+			   semantically it's not used that often. One of the key elements to
+			   this predicate mechanism is not adding DFA states that see
+			   predicates immediately afterwards in the ATN. For example,
+			   a : ID {p1}? | ID {p2}? ;
+			   should create the start state for rule 'a' (to save start state
+			   competition), but should not create target of ID state. The
+			   collection of ATN states the following ID references includes
+			   states reached by traversing predicates. Since this is when we
+			   test them, we cannot cache the DFA state target of ID.
+			 */
 			PredicateTransition pt = (PredicateTransition)t;
+			if ( debug ) {
+				System.out.println("EVAL rule "+pt.ruleIndex+":"+pt.predIndex);
+			}
+			configs.hasSemanticContext = true;
 			if ( recog == null || recog.sempred(null, pt.ruleIndex, pt.predIndex) ) {
 				c = new ATNConfig(config, t.target, pt.getPredicate());
 			}
@@ -603,20 +643,27 @@ public class LexerATNSimulator extends ATNSimulator {
 		return ttype;
 	}
 
-	protected void markAcceptState(@NotNull ExecState state, @NotNull CharStream input, @NotNull DFAState dfaState) {
-		state.index = input.index();
-		state.line = line;
-		state.charPos = charPositionInLine;
-		state.config = null;
-		state.state = dfaState;
+	protected void captureSimState(@NotNull SimState settings,
+								   @NotNull CharStream input,
+								   @NotNull DFAState dfaState)
+	{
+		settings.index = input.index();
+		settings.line = line;
+		settings.charPos = charPositionInLine;
+		settings.config = null;
+		settings.dfaState = dfaState;
 	}
 
-	protected void markAcceptState(@NotNull ExecState state, @NotNull CharStream input, @NotNull ATNConfig config) {
-		state.index = input.index();
-		state.line = line;
-		state.charPos = charPositionInLine;
-		state.config = config;
-		state.state = null;
+	protected void captureSimState(@NotNull SimState settings,
+								   @NotNull CharStream input,
+								   @NotNull ATNConfigSet ATNConfigs,
+								   @NotNull ATNConfig config)
+	{
+		settings.index = input.index();
+		settings.line = line;
+		settings.charPos = charPositionInLine;
+		settings.config = config;
+		settings.dfaState = null;
 	}
 
 	protected DFAState addDFAEdge(@NotNull DFAState from,
@@ -630,12 +677,13 @@ public class LexerATNSimulator extends ATNSimulator {
 			return to;
 		}
 
-//		System.out.println("MOVE "+p+" -> "+q+" upon "+getTokenName(t));
 		if (from == null || to == null) {
 			return to;
 		}
 
+		if ( debug ) System.out.println("EDGE "+from+" -> "+to+" upon "+((char)t));
 		addDFAEdge(from, t, to);
 		return to;
 	}
@@ -645,37 +693,19 @@ public class LexerATNSimulator extends ATNSimulator {
 			// make room for tokens 1..n and -1 masquerading as index 0
 			p.edges = new DFAState[MAX_DFA_EDGE+1]; // TODO: make adaptive
 		}
-//		if ( t==Token.EOF ) {
-//			System.out.println("state "+p+" has EOF edge");
-//			t = 0;
-//		}
 		p.edges[t] = q; // connect
 	}
 
 	/** Add a new DFA state if there isn't one with this set of
 		configurations already. This method also detects the first
 		configuration containing an ATN rule stop state. Later, when
-		traversing the DFA, we will know which rule to accept. Also, we
-		detect if any of the configurations derived from traversing a
-		semantic predicate. If so, we cannot add a DFA state for this
-		because the DFA would not test the predicate again in the
-		future. Rather than creating collections of semantic predicates
-		like v3 and testing them on prediction, v4 will test them on the
-		fly all the time using the ATN not the DFA. This is slower but
-		semantically it's not used that often. One of the key elements to
-		this predicate mechanism is not adding DFA states that see
-		predicates immediately afterwards in the ATN. For example,
-		a : ID {p1}? | ID {p2}? ;
-		should create the start state for rule 'a' (to save start state
-		competition), but should not create target of ID state. The
-		collection of ATN states the following ID references includes
-		states reached by traversing predicates. Since this is when we
-		test them, we cannot cache the DFA state target of ID.
+		traversing the DFA, we will know which rule to accept.
 	 */
 	@Nullable
 	protected DFAState addDFAState(@NotNull ATNConfigSet configs) {
+		// If we eval'd a predicate while filling configs, mustn't create DFA state
+		if ( configs.hasSemanticContext ) return null;
+
 		DFAState proposed = new DFAState(configs);
 		DFAState existing = dfa[mode].states.get(proposed);
 		if ( existing!=null ) return existing;
@@ -683,15 +713,10 @@ public class LexerATNSimulator extends ATNSimulator {
 		DFAState newState = proposed;
 
 		ATNConfig firstConfigWithRuleStopState = null;
-		boolean traversedPredicate = false;
 		for (ATNConfig c : configs) {
-			if ( firstConfigWithRuleStopState==null &&
-				 c.state instanceof RuleStopState )
-			{
+			if ( c.state instanceof RuleStopState ) {
 				firstConfigWithRuleStopState = c;
-			}
-			if ( c.semanticContext!=null && c.semanticContext!=SemanticContext.NONE ) {
-				traversedPredicate = true;
+				break;
 			}
 		}
@@ -702,8 +727,6 @@ public class LexerATNSimulator extends ATNSimulator {
 			newState.prediction = atn.ruleToTokenType[newState.lexerRuleIndex];
 		}
 
-		if ( traversedPredicate ) return null; // cannot cache
-
 		newState.stateNumber = dfa[mode].states.size();
 		newState.configset = new ATNConfigSet();
 		newState.configset.addAll(configs);
@@ -716,9 +739,21 @@ public class LexerATNSimulator extends ATNSimulator {
 		return dfa[mode];
 	}
 
-	/** Get the text of the current token */
+	/** Get the text of the current token from an *action* in lexer not
+	 *  predicate.
+	 */
 	@NotNull
 	public String getText(@NotNull CharStream input) {
+		// index is first lookahead char, don't include.
+		return input.getText(Interval.of(startIndex, input.index()-1));
+	}
+
+	/** Get the text from start of token to current lookahead char.
+	 *  Use this in predicates to test text matched so far in a lexer rule.
+	 */
+	@NotNull
+	public String getSpeculativeText(@NotNull CharStream input) {
+		// index is first lookahead char, don't include.
 		return input.getText(Interval.of(startIndex, input.index()));
 	}
@@ -726,10 +761,18 @@ public class LexerATNSimulator extends ATNSimulator {
 		return line;
 	}
 
+	public void setLine(int line) {
+		this.line = line;
+	}
+
 	public int getCharPositionInLine() {
 		return charPositionInLine;
 	}
 
+	public void setCharPositionInLine(int charPositionInLine) {
+		this.charPositionInLine = charPositionInLine;
+	}
+
 	public void consume(@NotNull CharStream input) {
 		int curChar = input.LA(1);
 		if ( curChar=='\n' ) {

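Two behaviors above are easy to miss: getEpsilonTarget() sets configs.hasSemanticContext on every PredicateTransition and addDFAState() refuses to cache such sets, so lexer predicates are re-evaluated in the ATN on every match; and processAcceptConfigs() compares accept indexes with a strict >. A sketch of that tie-break in isolation (hypothetical helper, not code from the commit):

    // A later accept config only displaces the previous one when it matched
    // MORE input; at equal length the rule listed first keeps the token, so
    // with INT_KW : 'int' ; listed before ID : [a-z]+ ;, "int" stays a keyword.
    static int pickAccept(int prevAcceptIndex, int prevRule, int newIndex, int newRule) {
        return newIndex > prevAcceptIndex ? newRule : prevRule;
    }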
ParserATNSimulator.java

@@ -750,7 +750,7 @@ public class ParserATNSimulator<Symbol extends Token> extends ATNSimulator {
 				return st.target;
 			}
 		}
-		else if ( trans instanceof RangeTransition ) {
+		else if ( trans instanceof RangeTransition ) { // TODO: can't happen in parser, right? remove
 			RangeTransition rt = (RangeTransition)trans;
 			if ( ttype>=rt.from && ttype<=rt.to ) return rt.target;
 		}

TestRig.java

@@ -127,7 +127,15 @@ public class TestRig {
 		//	System.out.println("exec "+grammarName+"."+startRuleName);
 		String lexerName = grammarName+"Lexer";
 		ClassLoader cl = Thread.currentThread().getContextClassLoader();
-		Class lexerClass = cl.loadClass(lexerName);
+		Class lexerClass = null;
+		try {
+			lexerClass = cl.loadClass(lexerName);
+		}
+		catch (java.lang.ClassNotFoundException cnfe) {
+			// might be pure lexer grammar; no Lexer suffix then
+			lexerName = grammarName;
+			lexerClass = cl.loadClass(lexerName);
+		}
 		if ( lexerClass==null ) {
 			System.err.println("Can't load "+lexerName);
 		}
@@ -151,8 +159,9 @@ public class TestRig {
 		Lexer lexer = lexerCtor.newInstance(input);
 		CommonTokenStream tokens = new CommonTokenStream(lexer);
 
+		tokens.fill();
+
 		if ( showTokens ) {
-			tokens.fill();
 			for (Object tok : tokens.getTokens()) {
 				System.out.println(tok);
 			}

T.g4

@@ -1,9 +1,5 @@
-grammar T;
-s : r=e ;
-e : e '(' INT ')'
-  | INT
-  ;
-MULT: '*' ;
-ADD : '+' ;
-INT : [0-9]+ ;
-WS : [ \t\n]+ -> skip ;
+lexer grammar T;
+
+A : 'a' {false}? ;
+B : 'a' ;
+WS : [ \n] ;

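What this grammar exercises: A is listed first, but its {false}? predicate fails during ATN simulation, so every 'a' lexes as B; and because the config set crossed a predicate, no DFA state is cached, meaning the predicate really runs on each token. A checking sketch, assuming the generated T lexer and its generated token-type constants:

    import org.antlr.v4.runtime.ANTLRFileStream;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.Token;

    public class CheckT {
        public static void main(String[] args) throws Exception {
            T lex = new T(new ANTLRFileStream(args[0])); // input file of 'a's
            CommonTokenStream tokens = new CommonTokenStream(lex);
            tokens.fill();
            for (Object o : tokens.getTokens()) {
                // every 'a' must have matched rule B, never A
                if ( ((Token)o).getType() == T.A ) throw new AssertionError("predicate ignored");
            }
        }
    }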
TestA2.java (deleted)

@@ -1,86 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2011 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-import org.antlr.v4.runtime.ANTLRFileStream;
-import org.antlr.v4.runtime.CommonTokenStream;
-import org.antlr.v4.runtime.ParserRuleContext;
-import org.antlr.v4.runtime.Token;
-import org.antlr.v4.runtime.tree.ParseTreeWalker;
-
-public class TestA2 {
-	/** An example listener that squirrels away a return value in a field
-	 *  called v that gets added to the expression context objects
-	 *  by adding a return value to rule e. This is a version of A.g4
-	 *  that performs actions during the parse with user-defined actions.
-	 *  AND, we pass in a listener that gets executed during the parse
-	 *  and we use a listener on a tree walk that executes after the parse.
-	 *  So, in effect, we compute the result of the expression 3 times.
-	 */
-	public static class Do extends A2BaseListener {
-		A2Parser p;
-		public Do(A2Parser p) { this.p = p; }
-
-		@Override
-		public void exitAdd(A2Parser.AddContext ctx) {
-			ctx.v = ctx.e(0).v + ctx.e(1).v;
-			System.out.println("Add: " + ctx.v);
-		}
-
-		@Override
-		public void exitInt(A2Parser.IntContext ctx) {
-			ctx.v = Integer.valueOf(ctx.INT().getSymbol().getText());
-			System.out.println("Int: "+ctx.v);
-		}
-
-		@Override
-		public void exitMult(A2Parser.MultContext ctx) {
-			ctx.v = ctx.e(0).v * ctx.e(1).v;
-			System.out.println("Mult: " + ctx.v);
-		}
-
-		@Override
-		public void exitParens(A2Parser.ParensContext ctx) {
-			ctx.v = ctx.e().v;
-			System.out.println("Parens: "+ctx.v);
-		}
-	}
-
-	public static void main(String[] args) throws Exception {
-		A2Lexer lexer = new A2Lexer(new ANTLRFileStream(args[0]));
-		CommonTokenStream tokens = new CommonTokenStream(lexer);
-		A2Parser p = new A2Parser(tokens);
-		p.setBuildParseTree(true);
-		ParserRuleContext<Token> t = p.s();
-		System.out.println("tree = "+t.toStringTree(p));
-
-		ParseTreeWalker walker = new ParseTreeWalker();
-		Do doer = new Do(p);
-		walker.walk(doer, t);
-		A2Parser.EContext ectx = (A2Parser.EContext)t.getChild(0);
-		System.out.println("result from tree walk = "+ ectx.v);
-	}
-}

TestT.java

@@ -5,11 +5,9 @@ import org.antlr.v4.runtime.CommonTokenStream;
 public class TestT {
 	public static void main(String[] args) throws Exception {
 		CharStream input = new ANTLRFileStream(args[0]);
-		TLexer lex = new TLexer(input);
+		T lex = new T(input);
 		CommonTokenStream tokens = new CommonTokenStream(lex);
-		TParser parser = new TParser(tokens);
-		parser.setBuildParseTree(true);
-		parser.s();
+		tokens.fill();
+		System.out.println(tokens.getTokens());
 	}
 }

TestU.java

@@ -1,6 +1,5 @@
 import org.antlr.v4.runtime.ANTLRFileStream;
 import org.antlr.v4.runtime.CommonTokenStream;
-import org.antlr.v4.runtime.ParserRuleContext;
 
 public class TestU {
 	public static void main(String[] args) throws Exception {
@@ -8,7 +7,7 @@ public class TestU {
 		CommonTokenStream tokens = new CommonTokenStream(t);
 		UParser p = new UParser(tokens);
 		p.setBuildParseTree(true);
-		ParserRuleContext r = p.s();
-		System.out.println(r.toStringTree(p));
+//		ParserRuleContext r = p.s();
+//		System.out.println(r.toStringTree(p));
 	}
 }

U.g4

@@ -1,13 +1,32 @@
 grammar U;
-s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;
-e : e '.' ID
-  | e '.' 'this'
-  | '-' e
-  | e '*' e
-  | e ('+'|'-') e
-  | INT
-  | ID
-  ;
-ID : 'a'..'z'+ ;
-INT : '0'..'9'+ ;
-WS : (' '|'\n') {skip();} ;
+
+@members {public static boolean java5 = true;}
+
+prog:	( enumDecl
+		| stat
+		)*
+		EOF
+	;
+
+enumDecl
+	:	{java5}? 'enum' ID '{' ID (',' ID)* '}'
+	;
+
+args
+	:	arg (',' arg )*
+	;
+
+arg
+	:	INT
+	;
+
+stat:	ID '=' expr ';' ;
+
+expr:	ID {System.out.println("ID "+$ID.text);}
+	|	{!java5}? 'enum' {System.out.println("ID enum");}
+	|	INT
+	;
+
+ID	:	[a-zA-Z]+ ;
+INT	:	[0-9]+ ;
+WS	:	[ \t\n\r]+ -> skip ;

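Because the flag lives in @members, a driver can flip it to switch dialects at runtime. A sketch in the style of TestU; the ULexer class name is an assumption:

    import org.antlr.v4.runtime.ANTLRFileStream;
    import org.antlr.v4.runtime.CommonTokenStream;

    public class TestUJava4 {
        public static void main(String[] args) throws Exception {
            // With java5=false, enumDecl's {java5}? alternative dies and expr's
            // {!java5}? alternative comes alive, so "x = enum ;" parses as a stat.
            UParser.java5 = false;
            ULexer lex = new ULexer(new ANTLRFileStream(args[0]));
            UParser p = new UParser(new CommonTokenStream(lex));
            p.setBuildParseTree(true);
            p.prog();
        }
    }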
Java.stg

@@ -123,8 +123,8 @@ public <if(parser.abstractRecognizer)>abstract <endif>class <parser.name> extend
 	<parser.tokens:{k | <k>=<parser.tokens.(k)>}; separator=", ", wrap, anchor>;
 	<endif>
 	public static final String[] tokenNames = {
-		"\<INVALID>", "\<INVALID>", "\<INVALID>",
+		"\<INVALID>",
 		<parser.tokenNames:{t | <t>}; separator=", ", wrap, anchor>
 	};
 	public static final int
 		<parser.rules:{r | RULE_<r.name> = <r.index>}; separator=", ", wrap, anchor>;
@@ -723,7 +723,7 @@ public <if(lexer.abstractRecognizer)>abstract <endif>class <lexer.name> extends
 	};
 	public static final String[] tokenNames = {
-		"\<INVALID>", "\<INVALID>", "\<INVALID>",
+		"\<INVALID>",
 		<lexer.tokenNames:{t | <t>}; separator=", ", wrap, anchor>
 	};
 	public static final String[] ruleNames = {

Tool.java

@@ -128,7 +128,7 @@ public class Tool {
 	public static Option[] optionDefs = {
 		new Option("outputDirectory", "-o", OptionArgType.STRING, "specify output directory where all output is generated"),
-		new Option("libDirectory", "-lib", OptionArgType.STRING, "specify location of .token files"),
+		new Option("libDirectory", "-lib", OptionArgType.STRING, "specify location of grammars, tokens files"),
 		new Option("report", "-report", "print out a report about the grammar(s) processed"),
 		new Option("printGrammar", "-print", "print out the grammar without actions"),
 		new Option("debug", "-debug", "generate a parser that emits debugging events"),

AnalysisPipeline.java

@@ -29,10 +29,10 @@
 package org.antlr.v4.analysis;
 
+import org.antlr.v4.misc.Utils;
 import org.antlr.v4.runtime.atn.DecisionState;
 import org.antlr.v4.runtime.atn.LL1Analyzer;
 import org.antlr.v4.runtime.misc.IntervalSet;
-import org.antlr.v4.misc.Utils;
 import org.antlr.v4.tool.Grammar;
 
 import java.util.ArrayList;
@@ -52,15 +52,13 @@ public class AnalysisPipeline {
 		if ( lr.listOfRecursiveCycles.size()>0 ) return; // bail out
 
 		// BUILD DFA FOR EACH DECISION
-		if ( !g.isLexer() ) processParserOrTreeParser();
+		if ( !g.isLexer() ) processParser();
 	}
 
-	void processParserOrTreeParser() {
-		g.decisionLOOK =
-			new ArrayList<IntervalSet[]>(g.atn.getNumberOfDecisions()+1);
+	void processParser() {
+		g.decisionLOOK = new ArrayList<IntervalSet[]>(g.atn.getNumberOfDecisions()+1);
 		for (DecisionState s : g.atn.decisionToState) {
 			g.tool.log("LL1", "\nDECISION "+s.decision+" in rule "+g.getRule(s.ruleIndex).name);
 			LL1Analyzer anal = new LL1Analyzer(g.atn);
 			IntervalSet[] look = anal.getDecisionLookahead(s);
 			g.tool.log("LL1", "look=" + Arrays.toString(look));

ANTLRLexer.g

@@ -243,7 +243,14 @@ COMMENT
 ARG_OR_CHARSET
 options {k=1;}
 	:   {isLexerRule}?=> LEXER_CHAR_SET {$type=LEXER_CHAR_SET;}
-	|   {!isLexerRule}?=> ARG_ACTION {$type=ARG_ACTION;}
+	|   {!isLexerRule}?=> ARG_ACTION
+		{
+		$type=ARG_ACTION;
+		// Set the token text to our gathered string minus outer [ ]
+		String t = $text;
+		t = t.substring(1,t.length()-1);
+		setText(t);
+		}
 	;
 
 fragment
@@ -261,57 +268,18 @@ LEXER_CHAR_SET
 //
 fragment
 ARG_ACTION
-@init
-{
-	StringBuffer theText = new StringBuffer();
-}
 	: '['
          (
-             ('\\')=>'\\'
-                 (
-                     (']')=>']'
-                     {
-                         // We do not include the \ character itself when picking up an escaped ]
-                         //
-                         theText.append(']');
-                     }
-                   | c=.
-                     {
-                         // We DO include the \ character when finding any other escape
-                         //
-                         theText.append('\\');
-                         theText.append((char)$c);
-                     }
-                 )
-           | ('"')=>as=ACTION_STRING_LITERAL
-             {
-                 // Append the embedded string literal text
-                 //
-                 theText.append($as.text);
-             }
-           | ('\'')=>ac=ACTION_CHAR_LITERAL
-             {
-                 // Append the embedded character literal text
-                 //
-                 theText.append($ac.text);
-             }
-           | c=~']'
-             {
-                 // Whatever else we found in the scan
-                 //
-                 theText.append((char)$c);
-             }
+             ARG_ACTION
+           | ('"')=>ACTION_STRING_LITERAL
+           | ('\'')=>ACTION_CHAR_LITERAL
+           | ~('['|']')
         )*
      ']'
-      {
-         // Set the token text to our gathered string
-         //
-         setText(theText.toString());
-      }
	;
 
 // -------
// ------- // -------

ScopeParser.java

@@ -29,19 +29,19 @@
 package org.antlr.v4.parse;
 
-import org.antlr.v4.tool.*;
+import org.antlr.v4.tool.Attribute;
+import org.antlr.v4.tool.AttributeDict;
+import org.antlr.v4.tool.ErrorManager;
+import org.antlr.v4.tool.ErrorType;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.List;
 
-/** Parse args, return values, and dynamic scopes.
+/** Parse args, return values, locals
  *
  *  rule[arg1, arg2, ..., argN] returns [ret1, ..., retN]
- *  scope { decl1; decl2; ... declN; }
  *
- *  The ',' and ';' are significant. Use \, and \; to use within
- *  types if necessary like [Map<String\,String> foo, int y].
- *
- *  arg, ret, and decl are target language dependent. Java/C#/C/C++ would
+ *  text is target language dependent. Java/C#/C/C++ would
  *  use "int i" but ruby/python would use "i".
  */
 public class ScopeParser {
@@ -58,32 +58,15 @@ public class ScopeParser {
 	public static AttributeDict parseTypedArgList(String s, ErrorManager errMgr) { return parse(s, ',', errMgr); }
 
 	public static AttributeDict parse(String s, char separator, ErrorManager errMgr) {
-		int i = 0;
-		int n = s.length();
 		AttributeDict dict = new AttributeDict();
-		while ( i<n ) {
-			StringBuilder buf = new StringBuilder();
-			while ( i<n && s.charAt(i)!=separator ) {
-				if ( s.charAt(i)=='\\' ) {
-					i++;
-					if ( i<n && s.charAt(i)==separator ) {
-						buf.append(s.charAt(i));
-						i++;
-						continue;
-					}
-					buf.append('\\');
-				}
-				buf.append(s.charAt(i));
-				i++;
-			}
-			i++; // skip separator
-			String def = buf.toString();
-			//System.out.println("def="+ def);
-			if ( def.trim().length()>0 ) {
-				Attribute a = parseAttributeDef(def, errMgr);
+		List<String> decls = splitDecls(s, separator);
+		for (String decl : decls) {
+			// System.out.println("decl="+decl);
+			if ( decl.trim().length()>0 ) {
+				Attribute a = parseAttributeDef(decl, errMgr);
 				dict.add(a);
 			}
 		}
 		return dict;
 	}
@@ -163,13 +146,12 @@ public class ScopeParser {
 	 *  convert to a list of attributes. Allow nested square brackets etc...
 	 *  Set separatorChar to ';' or ',' or whatever you want.
 	 */
-	public static List<String> splitArgumentList(String s, int separatorChar) {
+	public static List<String> splitDecls(String s, int separatorChar) {
 		List<String> args = new ArrayList<String>();
 		_splitArgumentList(s, 0, -1, separatorChar, args);
 		return args;
 	}
 
 	public static int _splitArgumentList(String actionText,
 										 int start,
 										 int targetChar,

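The simplified pipeline is: splitDecls() (the renamed splitArgumentList()) cuts the declaration string at top-level separators, then parse() trims each piece and hands it to parseAttributeDef(). A minimal call sketch (error handling omitted; the printed spacing is illustrative):

    import org.antlr.v4.parse.ScopeParser;
    import java.util.List;

    public class SplitDeclsDemo {
        public static void main(String[] args) {
            // Nested brackets do not split, per _splitArgumentList's contract above.
            List<String> decls = ScopeParser.splitDecls("int x, String name", ',');
            System.out.println(decls); // e.g. [int x,  String name]
        }
    }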
SemanticPipeline.java

@@ -32,10 +32,13 @@ package org.antlr.v4.semantics;
 import org.antlr.v4.analysis.LeftRecursiveRuleTransformer;
 import org.antlr.v4.parse.ANTLRParser;
 import org.antlr.v4.runtime.Token;
-import org.antlr.v4.tool.*;
+import org.antlr.v4.tool.ErrorType;
+import org.antlr.v4.tool.Grammar;
+import org.antlr.v4.tool.Rule;
 import org.antlr.v4.tool.ast.GrammarAST;
 
-import java.util.*;
+import java.util.List;
+import java.util.Map;
 
 /** Do as much semantic checking as we can and fill in grammar
  *  with rules, actions, and token definitions.
@@ -180,16 +183,6 @@ public class SemanticPipeline {
 			}
 		}
 
-		// DEFINE TOKEN TYPES FOR X : 'x' ; RULES
-		/* done by previous import
-		Map<String,String> litAliases = Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
-		if ( litAliases!=null ) {
-			for (String lit : litAliases.keySet()) {
-				G.defineTokenAlias(litAliases.get(lit), lit);
-			}
-		}
-		*/
-
 		// DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
 		for (GrammarAST idAST : tokenIDs) {
 			if (g.getTokenType(idAST.getText()) == Token.INVALID_TYPE) {

AttributeDict.java

@@ -59,7 +59,6 @@ public class AttributeDict {
 		add(new Attribute("index"));
 		add(new Attribute("pos"));
 		add(new Attribute("channel"));
-		add(new Attribute("tree"));
 		add(new Attribute("int"));
 	}};

Grammar.java

@@ -51,13 +51,11 @@ import org.antlr.v4.runtime.misc.NotNull;
 import org.antlr.v4.runtime.misc.Nullable;
 import org.antlr.v4.tool.ast.ActionAST;
 import org.antlr.v4.tool.ast.GrammarAST;
-import org.antlr.v4.tool.ast.GrammarASTErrorNode;
 import org.antlr.v4.tool.ast.GrammarASTWithOptions;
 import org.antlr.v4.tool.ast.GrammarRootAST;
 import org.antlr.v4.tool.ast.PredAST;
 import org.antlr.v4.tool.ast.TerminalAST;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -450,7 +448,7 @@ public class Grammar implements AttributeResolver {
 			tokenName = "EOF";
 		}
 		else {
-			if ( ttype<typeToTokenList.size() ) {
+			if ( ttype>0 && ttype<typeToTokenList.size() ) {
 				tokenName = typeToTokenList.get(ttype);
 				if ( tokenName!=null &&
 					 tokenName.startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX) &&
@@ -769,6 +767,16 @@ public class Grammar implements AttributeResolver {
 			// try with action in there
 			isLitRule =
 				wiz.parse(r, "(RULE %name:TOKEN_REF (BLOCK (ALT %lit:STRING_LITERAL ACTION)))", nodes);
+			if ( isLitRule ) {
+				GrammarAST litNode = (GrammarAST)nodes.get("lit");
+				GrammarAST nameNode = (GrammarAST)nodes.get("name");
+				lexerRuleToStringLiteral.put(litNode.getText(), nameNode.getText());
+				continue;
+			}
+			nodes = new HashMap();
+			// try with pred in there
+			isLitRule =
+				wiz.parse(r, "(RULE %name:TOKEN_REF (BLOCK (ALT %lit:STRING_LITERAL SEMPRED)))", nodes);
 			if ( isLitRule ) {
 				GrammarAST litNode = (GrammarAST)nodes.get("lit");
 				GrammarAST nameNode = (GrammarAST)nodes.get("name");

GrammarTransformPipeline.java

@@ -38,9 +38,20 @@ import org.antlr.v4.misc.DoubleKeyMap;
 import org.antlr.v4.parse.ANTLRParser;
 import org.antlr.v4.parse.BlockSetTransformer;
 import org.antlr.v4.parse.GrammarASTAdaptor;
-import org.antlr.v4.tool.ast.*;
+import org.antlr.v4.tool.ast.AltAST;
+import org.antlr.v4.tool.ast.BlockAST;
+import org.antlr.v4.tool.ast.GrammarAST;
+import org.antlr.v4.tool.ast.GrammarASTWithOptions;
+import org.antlr.v4.tool.ast.GrammarRootAST;
+import org.antlr.v4.tool.ast.RuleAST;
+import org.antlr.v4.tool.ast.TerminalAST;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 /** Handle left-recursion and block-set transforms */
 public class GrammarTransformPipeline {
@@ -257,7 +268,7 @@ public class GrammarTransformPipeline {
 	 *
 	 *  Move rules and actions to new tree, don't dup. Split AST apart.
 	 *  We'll have this Grammar share token symbols later; don't generate
-	 *  tokenVocab or tokens{} section.
+	 *  tokenVocab or tokens{} section. Copy over named actions.
 	 *
 	 *  Side-effects: it removes children from GRAMMAR & RULES nodes
 	 *               in combined AST. Anything cut out is dup'd before
@@ -277,7 +288,7 @@ public class GrammarTransformPipeline {
 		lexerAST.token.setInputStream(combinedAST.token.getInputStream());
 		lexerAST.addChild((GrammarAST)adaptor.create(ANTLRParser.ID, lexerName));
 
-		// MOVE OPTIONS
+		// COPY OPTIONS
 		GrammarAST optionsRoot =
 			(GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.OPTIONS);
 		if ( optionsRoot!=null ) {
@@ -292,12 +303,12 @@ public class GrammarTransformPipeline {
 			}
 		}
 
-		// MOVE lexer:: actions
+		// COPY all named actions, but only move those with lexer:: scope
 		List<GrammarAST> actionsWeMoved = new ArrayList<GrammarAST>();
 		for (GrammarAST e : elements) {
 			if ( e.getType()==ANTLRParser.AT ) {
+				lexerAST.addChild((Tree)adaptor.dupTree(e));
 				if ( e.getChild(0).getText().equals("lexer") ) {
-					lexerAST.addChild((Tree)adaptor.dupTree(e));
 					actionsWeMoved.add(e);
 				}
 			}

Rule.java

@@ -55,8 +55,6 @@ public class Rule implements AttributeResolver {
 		add(new Attribute("text"));
 		add(new Attribute("start"));
 		add(new Attribute("stop"));
-		add(new Attribute("tree"));
-		add(new Attribute("st"));
 		add(new Attribute("ctx"));
 	}};
@@ -64,13 +62,8 @@ public class Rule implements AttributeResolver {
 	new AttributeDict(AttributeDict.DictType.PREDEFINED_LEXER_RULE) {{
 		add(new Attribute("text"));
 		add(new Attribute("type"));
-		add(new Attribute("line"));
-		add(new Attribute("index"));
-		add(new Attribute("pos"));
-		add(new Attribute("channel"));
-		add(new Attribute("start"));
-		add(new Attribute("stop"));
-		add(new Attribute("int"));
+		add(new Attribute("channel"));
+		add(new Attribute("mode"));
 	}};
 
 	public static Set<String> validLexerCommands = new HashSet<String>() {{

TestATNInterpreter.java

@@ -80,7 +80,7 @@ public class TestATNInterpreter extends BaseTest {
 			errorTokenType = re.getOffendingToken().getType();
 		}
 		assertEquals(1, errorIndex);
-		assertEquals(errorTokenType, 5);
+		assertEquals(3, errorTokenType);
 	}
 
 	@Test public void testMustTrackPreviousGoodAlt2() throws Exception {
@@ -106,7 +106,7 @@ public class TestATNInterpreter extends BaseTest {
 			errorTokenType = re.getOffendingToken().getType();
 		}
 		assertEquals(2, errorIndex);
-		assertEquals(errorTokenType, 6);
+		assertEquals(4, errorTokenType);
 	}
 
 	@Test public void testMustTrackPreviousGoodAlt3() throws Exception {
@@ -129,7 +129,7 @@ public class TestATNInterpreter extends BaseTest {
 			errorTokenType = re.getOffendingToken().getType();
 		}
 		assertEquals(2, errorIndex);
-		assertEquals(errorTokenType, 6);
+		assertEquals(4, errorTokenType);
 	}
 
 	@Test public void testAmbigAltChooseFirst() throws Exception {
@@ -198,7 +198,7 @@ public class TestATNInterpreter extends BaseTest {
 			errorTokenType = re.getOffendingToken().getType();
 		}
 		assertEquals(2, errorIndex);
-		assertEquals(6, errorTokenType);
+		assertEquals(4, errorTokenType);
 		checkMatchedAlt(lg, g, "abcd", 3); // ignores d on end
 	}

TestATNSerialization.java

@@ -42,7 +42,7 @@ public class TestATNSerialization extends BaseTest {
 			"parser grammar T;\n"+
 			"a : A B ;");
 		String expecting =
-			"max type 4\n" +
+			"max type 2\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -52,8 +52,8 @@ public class TestATNSerialization extends BaseTest {
 			"rule 0:0\n" +
 			"0->2 EPSILON 0,0,0\n" +
 			"1->6 ATOM -1,0,0\n" +
-			"2->4 ATOM 3,0,0\n" +
-			"4->5 ATOM 4,0,0\n" +
+			"2->4 ATOM 1,0,0\n" +
+			"4->5 ATOM 2,0,0\n" +
 			"5->1 EPSILON 0,0,0\n";
 		ATN atn = createATN(g);
 		String result = ATNSerializer.getDecoded(g, atn);
@@ -65,7 +65,7 @@ public class TestATNSerialization extends BaseTest {
 			"parser grammar T;\n"+
 			"a : A EOF ;");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -75,7 +75,7 @@ public class TestATNSerialization extends BaseTest {
 			"rule 0:0\n" +
 			"0->2 EPSILON 0,0,0\n" +
 			"1->6 ATOM -1,0,0\n" +
-			"2->4 ATOM 3,0,0\n" +
+			"2->4 ATOM 1,0,0\n" +
 			"4->5 ATOM -1,0,0\n" +
 			"5->1 EPSILON 0,0,0\n";
 		ATN atn = createATN(g);
@@ -88,7 +88,7 @@ public class TestATNSerialization extends BaseTest {
 			"parser grammar T;\n"+
 			"a : (A|EOF) ;");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -111,7 +111,7 @@ public class TestATNSerialization extends BaseTest {
 			"tokens {A; B; C;}\n" +
 			"a : ~A ;");
 		String expecting =
-			"max type 5\n" +
+			"max type 3\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -136,7 +136,7 @@ public class TestATNSerialization extends BaseTest {
 			"tokens {A; B; C;}\n" +
 			"a : . ;");
 		String expecting =
-			"max type 5\n" +
+			"max type 3\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -157,7 +157,7 @@ public class TestATNSerialization extends BaseTest {
 			"parser grammar T;\n"+
 			"a : A | A B ;");
 		String expecting =
-			"max type 4\n" +
+			"max type 2\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -169,9 +169,9 @@ public class TestATNSerialization extends BaseTest {
 			"rule 0:0\n" +
 			"0->8 EPSILON 0,0,0\n" +
 			"1->10 ATOM -1,0,0\n" +
-			"2->9 ATOM 3,0,0\n" +
-			"4->6 ATOM 3,0,0\n" +
-			"6->9 ATOM 4,0,0\n" +
+			"2->9 ATOM 1,0,0\n" +
+			"4->6 ATOM 1,0,0\n" +
+			"6->9 ATOM 2,0,0\n" +
 			"8->2 EPSILON 0,0,0\n" +
 			"8->4 EPSILON 0,0,0\n" +
 			"9->1 EPSILON 0,0,0\n" +
@@ -186,7 +186,7 @@ public class TestATNSerialization extends BaseTest {
 			"parser grammar T;\n"+
 			"a : A | A B | A B C ;");
 		String expecting =
-			"max type 5\n" +
+			"max type 3\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -201,12 +201,12 @@ public class TestATNSerialization extends BaseTest {
 			"rule 0:0\n" +
 			"0->14 EPSILON 0,0,0\n" +
 			"1->16 ATOM -1,0,0\n" +
-			"2->15 ATOM 3,0,0\n" +
-			"4->6 ATOM 3,0,0\n" +
-			"6->15 ATOM 4,0,0\n" +
-			"8->10 ATOM 3,0,0\n" +
-			"10->12 ATOM 4,0,0\n" +
-			"12->15 ATOM 5,0,0\n" +
+			"2->15 ATOM 1,0,0\n" +
+			"4->6 ATOM 1,0,0\n" +
+			"6->15 ATOM 2,0,0\n" +
+			"8->10 ATOM 1,0,0\n" +
+			"10->12 ATOM 2,0,0\n" +
+			"12->15 ATOM 3,0,0\n" +
 			"14->2 EPSILON 0,0,0\n" +
 			"14->4 EPSILON 0,0,0\n" +
 			"14->8 EPSILON 0,0,0\n" +
@@ -222,7 +222,7 @@ public class TestATNSerialization extends BaseTest {
 			"parser grammar T;\n"+
 			"a : A+ B ;");
 		String expecting =
-			"max type 4\n" +
+			"max type 2\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:BASIC 0\n" +
@@ -236,13 +236,13 @@ public class TestATNSerialization extends BaseTest {
 			"rule 0:0\n" +
 			"0->4 EPSILON 0,0,0\n" +
 			"1->10 ATOM -1,0,0\n" +
-			"2->5 ATOM 3,0,0\n" +
+			"2->5 ATOM 1,0,0\n" +
 			"4->2 EPSILON 0,0,0\n" +
 			"5->6 EPSILON 0,0,0\n" +
 			"6->4 EPSILON 0,0,0\n" +
 			"6->7 EPSILON 0,0,0\n" +
 			"7->8 EPSILON 0,0,0\n" +
-			"8->9 ATOM 4,0,0\n" +
+			"8->9 ATOM 2,0,0\n" +
 			"9->1 EPSILON 0,0,0\n" +
 			"0:6 1\n";
 		ATN atn = createATN(g);
@@ -256,7 +256,7 @@ public class TestATNSerialization extends BaseTest {
 			"a : e ;\n" +
 			"e : E ;\n");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:RULE_START 0\n" +
 			"1:RULE_STOP 0\n" +
 			"2:RULE_START 1\n" +
@@ -274,7 +274,7 @@ public class TestATNSerialization extends BaseTest {
 			"3->5 EPSILON 0,0,0\n" +
 			"4->5 RULE 2,1,0\n" +
 			"5->1 EPSILON 0,0,0\n" +
-			"6->7 ATOM 3,0,0\n" +
+			"6->7 ATOM 1,0,0\n" +
 			"7->3 EPSILON 0,0,0\n";
 		ATN atn = createATN(g);
 		String result = ATNSerializer.getDecoded(g, atn);
@@ -287,7 +287,7 @@ public class TestATNSerialization extends BaseTest {
 			"A : 'a' ;\n" +
 			"B : 'b' ;\n");
 		String expecting =
-			"max type 4\n" +
+			"max type 2\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
@@ -297,8 +297,8 @@ public class TestATNSerialization extends BaseTest {
 			"6:BASIC 0\n" +
 			"7:BASIC 1\n" +
 			"8:BASIC 1\n" +
-			"rule 0:1 3,-1\n" +
-			"rule 1:3 4,-1\n" +
+			"rule 0:1 1,-1\n" +
+			"rule 1:3 2,-1\n" +
 			"mode 0:0\n" +
 			"0->1 EPSILON 0,0,0\n" +
 			"0->3 EPSILON 0,0,0\n" +
@@ -319,13 +319,13 @@ public class TestATNSerialization extends BaseTest {
 			"lexer grammar L;\n"+
 			"INT : '0'..'9' ;\n");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
 			"3:BASIC 0\n" +
 			"4:BASIC 0\n" +
-			"rule 0:1 3,-1\n" +
+			"rule 0:1 1,-1\n" +
 			"mode 0:0\n" +
 			"0->1 EPSILON 0,0,0\n" +
 			"1->3 EPSILON 0,0,0\n" +
@@ -342,14 +342,14 @@ public class TestATNSerialization extends BaseTest {
 			"lexer grammar L;\n"+
 			"INT : 'a' EOF ;\n");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
 			"3:BASIC 0\n" +
 			"5:BASIC 0\n" +
 			"6:BASIC 0\n" +
-			"rule 0:1 3,-1\n" +
+			"rule 0:1 1,-1\n" +
 			"mode 0:0\n" +
 			"0->1 EPSILON 0,0,0\n" +
 			"1->3 EPSILON 0,0,0\n" +
@@ -367,7 +367,7 @@ public class TestATNSerialization extends BaseTest {
 			"lexer grammar L;\n"+
 			"INT : 'a' (EOF|'\n') ;\n");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
@@ -376,7 +376,7 @@ public class TestATNSerialization extends BaseTest {
 			"7:BASIC 0\n" +
 			"9:BLOCK_START 0\n" +
 			"10:BLOCK_END 0\n" +
-			"rule 0:1 3,-1\n" +
+			"rule 0:1 1,-1\n" +
 			"mode 0:0\n" +
 			"0->1 EPSILON 0,0,0\n" +
 			"1->3 EPSILON 0,0,0\n" +
@@ -398,7 +398,7 @@ public class TestATNSerialization extends BaseTest {
 			"lexer grammar L;\n"+
 			"INT : '0'..'9'+ ;\n");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
@@ -407,7 +407,7 @@ public class TestATNSerialization extends BaseTest {
 			"6:BLOCK_END 0\n" +
 			"7:PLUS_LOOP_BACK 0\n" +
 			"8:LOOP_END 0 7\n" +
-			"rule 0:1 3,-1\n" +
+			"rule 0:1 1,-1\n" +
 			"mode 0:0\n" +
 			"0->1 EPSILON 0,0,0\n" +
 			"1->5 EPSILON 0,0,0\n" +
@@ -431,7 +431,7 @@ public class TestATNSerialization extends BaseTest {
 			"B : 'b' ;\n" +
 			"C : 'c' {c} ;\n");
 		String expecting =
-			"max type 5\n" +
+			"max type 3\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
@@ -447,9 +447,9 @@ public class TestATNSerialization extends BaseTest {
 			"13:BASIC 2\n" +
 			"15:BASIC 2\n" +
 			"16:BASIC 2\n" +
-			"rule 0:1 3,0\n" +
-			"rule 1:3 4,-1\n" +
-			"rule 2:5 5,1\n" +
+			"rule 0:1 1,0\n" +
+			"rule 1:3 2,-1\n" +
+			"rule 2:5 3,1\n" +
 			"mode 0:0\n" +
 			"0->1 EPSILON 0,0,0\n" +
 			"0->3 EPSILON 0,0,0\n" +
@@ -476,13 +476,13 @@ public class TestATNSerialization extends BaseTest {
 			"lexer grammar L;\n"+
 			"ID : ~('a'|'b')\n ;");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
 			"3:BASIC 0\n" +
 			"4:BASIC 0\n" +
-			"rule 0:1 3,-1\n" +
+			"rule 0:1 1,-1\n" +
 			"mode 0:0\n" +
 			"0:'a'..'b'\n" +
 			"0->1 EPSILON 0,0,0\n" +
@@ -500,13 +500,13 @@ public class TestATNSerialization extends BaseTest {
 			"lexer grammar L;\n"+
 			"ID : ('a'|'b'|'e'|'p'..'t')\n ;");
 		String expecting =
-			"max type 3\n" +
+			"max type 1\n" +
 			"0:TOKEN_START -1\n" +
 			"1:RULE_START 0\n" +
 			"2:RULE_STOP 0\n" +
 			"3:BASIC 0\n" +
 			"4:BASIC 0\n" +
-			"rule 0:1 3,-1\n" +
+			"rule 0:1 1,-1\n" +
 			"mode 0:0\n" +
 			"0:'a'..'b', 'e'..'e', 'p'..'t'\n" +
 			"0->1 EPSILON 0,0,0\n" +
@ -524,13 +524,13 @@ public class TestATNSerialization extends BaseTest {
"lexer grammar L;\n"+ "lexer grammar L;\n"+
"ID : ~('a'|'b'|'e'|'p'..'t')\n ;"); "ID : ~('a'|'b'|'e'|'p'..'t')\n ;");
String expecting = String expecting =
"max type 3\n" + "max type 1\n" +
"0:TOKEN_START -1\n" + "0:TOKEN_START -1\n" +
"1:RULE_START 0\n" + "1:RULE_START 0\n" +
"2:RULE_STOP 0\n" + "2:RULE_STOP 0\n" +
"3:BASIC 0\n" + "3:BASIC 0\n" +
"4:BASIC 0\n" + "4:BASIC 0\n" +
"rule 0:1 3,-1\n" + "rule 0:1 1,-1\n" +
"mode 0:0\n" + "mode 0:0\n" +
"0:'a'..'b', 'e'..'e', 'p'..'t'\n" + "0:'a'..'b', 'e'..'e', 'p'..'t'\n" +
"0->1 EPSILON 0,0,0\n" + "0->1 EPSILON 0,0,0\n" +
@ -551,7 +551,7 @@ public class TestATNSerialization extends BaseTest {
"COMMENT : '*/' {skip(); popMode();} ;\n" + "COMMENT : '*/' {skip(); popMode();} ;\n" +
"JUNK : . {more();} ;\n"); "JUNK : . {more();} ;\n");
String expecting = String expecting =
"max type 5\n" + "max type 3\n" +
"0:TOKEN_START -1\n" + "0:TOKEN_START -1\n" +
"1:TOKEN_START -1\n" + "1:TOKEN_START -1\n" +
"2:RULE_START 0\n" + "2:RULE_START 0\n" +
@ -573,9 +573,9 @@ public class TestATNSerialization extends BaseTest {
"19:BASIC 2\n" + "19:BASIC 2\n" +
"21:BASIC 2\n" + "21:BASIC 2\n" +
"22:BASIC 2\n" + "22:BASIC 2\n" +
"rule 0:2 3,-1\n" + "rule 0:2 1,-1\n" +
"rule 1:4 4,0\n" + "rule 1:4 2,0\n" +
"rule 2:6 5,1\n" + "rule 2:6 3,1\n" +
"mode 0:0\n" + "mode 0:0\n" +
"mode 1:1\n" + "mode 1:1\n" +
"0->2 EPSILON 0,0,0\n" + "0->2 EPSILON 0,0,0\n" +
@ -611,14 +611,14 @@ public class TestATNSerialization extends BaseTest {
"lexer grammar L;\n"+ "lexer grammar L;\n"+
"ID : ~('a'|'b') ~('e'|'p'..'t')\n ;"); "ID : ~('a'|'b') ~('e'|'p'..'t')\n ;");
String expecting = String expecting =
"max type 3\n" + "max type 1\n" +
"0:TOKEN_START -1\n" + "0:TOKEN_START -1\n" +
"1:RULE_START 0\n" + "1:RULE_START 0\n" +
"2:RULE_STOP 0\n" + "2:RULE_STOP 0\n" +
"3:BASIC 0\n" + "3:BASIC 0\n" +
"5:BASIC 0\n" + "5:BASIC 0\n" +
"6:BASIC 0\n" + "6:BASIC 0\n" +
"rule 0:1 3,-1\n" + "rule 0:1 1,-1\n" +
"mode 0:0\n" + "mode 0:0\n" +
"0:'a'..'b'\n" + "0:'a'..'b'\n" +
"1:'e'..'e', 'p'..'t'\n" + "1:'e'..'e', 'p'..'t'\n" +
@ -642,7 +642,7 @@ public class TestATNSerialization extends BaseTest {
"C : 'c';\n"+ "C : 'c';\n"+
"D : 'd';\n"); "D : 'd';\n");
String expecting = String expecting =
"max type 6\n" + "max type 4\n" +
"0:TOKEN_START -1\n" + "0:TOKEN_START -1\n" +
"1:TOKEN_START -1\n" + "1:TOKEN_START -1\n" +
"2:RULE_START 0\n" + "2:RULE_START 0\n" +
@ -661,10 +661,10 @@ public class TestATNSerialization extends BaseTest {
"15:BASIC 2\n" + "15:BASIC 2\n" +
"16:BASIC 3\n" + "16:BASIC 3\n" +
"17:BASIC 3\n" + "17:BASIC 3\n" +
"rule 0:2 3,-1\n" + "rule 0:2 1,-1\n" +
"rule 1:4 4,-1\n" + "rule 1:4 2,-1\n" +
"rule 2:6 5,-1\n" + "rule 2:6 3,-1\n" +
"rule 3:8 6,-1\n" + "rule 3:8 4,-1\n" +
"mode 0:0\n" + "mode 0:0\n" +
"mode 1:1\n" + "mode 1:1\n" +
"0->2 EPSILON 0,0,0\n" + "0->2 EPSILON 0,0,0\n" +
@ -699,7 +699,7 @@ public class TestATNSerialization extends BaseTest {
"mode M2;\n" + "mode M2;\n" +
"C : 'c';\n"); "C : 'c';\n");
String expecting = String expecting =
"max type 5\n" + "max type 3\n" +
"0:TOKEN_START -1\n" + "0:TOKEN_START -1\n" +
"1:TOKEN_START -1\n" + "1:TOKEN_START -1\n" +
"2:TOKEN_START -1\n" + "2:TOKEN_START -1\n" +
@ -715,9 +715,9 @@ public class TestATNSerialization extends BaseTest {
"12:BASIC 1\n" + "12:BASIC 1\n" +
"13:BASIC 2\n" + "13:BASIC 2\n" +
"14:BASIC 2\n" + "14:BASIC 2\n" +
"rule 0:3 3,-1\n" + "rule 0:3 1,-1\n" +
"rule 1:5 4,-1\n" + "rule 1:5 2,-1\n" +
"rule 2:7 5,-1\n" + "rule 2:7 3,-1\n" +
"mode 0:0\n" + "mode 0:0\n" +
"mode 1:1\n" + "mode 1:1\n" +
"mode 2:2\n" + "mode 2:2\n" +


@ -125,8 +125,7 @@ public class TestActionTranslation extends BaseTest {
@Test public void testRefToTextAttributeForCurrentRule() throws Exception { @Test public void testRefToTextAttributeForCurrentRule() throws Exception {
String action = "$a.text; $text"; String action = "$a.text; $text";
String expected = String expected =
"(_localctx.a!=null?_input.getText(_localctx.a.start,_localctx.a.stop):" + "_input.getText(_localctx.start, _input.LT(-1)); _input.getText(_localctx.start, _input.LT(-1))";
"null); _input.getText(_localctx.start, _input.LT(-1))";
testActions(attributeTemplate, "init", action, expected); testActions(attributeTemplate, "init", action, expected);
expected = expected =
"_input.getText(_localctx.start, _input.LT(-1)); _input.getText(_localctx.start, _input.LT(-1))"; "_input.getText(_localctx.start, _input.LT(-1)); _input.getText(_localctx.start, _input.LT(-1))";


@ -114,7 +114,7 @@ public class TestCommonTokenStream extends BaseTest {
tokens.LT(i++); // push it past end tokens.LT(i++); // push it past end
tokens.LT(i++); tokens.LT(i++);
String result = tokens.toString(); String result = tokens.getText();
String expecting = "x = 3 * 0 + 2 * 0;"; String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result); assertEquals(expecting, result);
} }
@ -146,7 +146,7 @@ public class TestCommonTokenStream extends BaseTest {
tokens.consume(); tokens.consume();
tokens.LT(1); tokens.LT(1);
String result = tokens.toString(); String result = tokens.getText();
String expecting = "x = 3 * 0 + 2 * 0;"; String expecting = "x = 3 * 0 + 2 * 0;";
assertEquals(expecting, result); assertEquals(expecting, result);
} }
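
The two hunks above swap toString() for getText() when dumping a whole token stream. A minimal usage sketch, assuming a lexer has already been constructed the usual way (fill() is assumed available on the buffered stream):

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill();                          // force-buffer every token up to EOF
    String everything = tokens.getText();   // concatenated text of the buffered tokens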


@ -234,8 +234,8 @@ public class TestCompositeGrammars extends BaseTest {
writeFile(tmpdir, "M.g4", master); writeFile(tmpdir, "M.g4", master);
Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);
String expectedTokenIDToTypeMap = "{EOF=-1, B=3, A=4, C=5, WS=6}"; String expectedTokenIDToTypeMap = "{EOF=-1, B=1, A=2, C=3, WS=4}";
String expectedStringLiteralToTypeMap = "{'c'=5, 'a'=4, 'b'=3}"; String expectedStringLiteralToTypeMap = "{'c'=3, 'a'=2, 'b'=1}";
String expectedTypeToTokenList = "[B, A, C, WS]"; String expectedTypeToTokenList = "[B, A, C, WS]";
assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString());
@ -369,14 +369,14 @@ public class TestCompositeGrammars extends BaseTest {
String slave = String slave =
"parser grammar S;\n" + "parser grammar S;\n" +
"a : b {System.out.println(\"S.a\");} ;\n" + "a : b {System.out.println(\"S.a\");} ;\n" +
"b : B ;\n" ; "b : 'b' ;\n" ;
mkdir(tmpdir); mkdir(tmpdir);
writeFile(tmpdir, "S.g4", slave); writeFile(tmpdir, "S.g4", slave);
String slave2 = String slave2 =
"parser grammar T;\n" + "parser grammar T;\n" +
"tokens { A='x'; }\n" + "tokens { A='x'; }\n" +
"b : B {System.out.println(\"T.b\");} ;\n"; "b : 'b' {System.out.println(\"T.b\");} ;\n";
writeFile(tmpdir, "T.g4", slave2); writeFile(tmpdir, "T.g4", slave2);
String master = String master =
@ -405,9 +405,9 @@ public class TestCompositeGrammars extends BaseTest {
"WS : (' '|'\\n') {skip();} ;\n" ; "WS : (' '|'\\n') {skip();} ;\n" ;
String expecting = String expecting =
"S.A\n" + "S.A\n" +
"[@0,0:0='a',<5>,1:0]\n" + "[@0,0:0='a',<3>,1:0]\n" +
"[@1,1:1='b',<3>,1:1]\n" + "[@1,1:1='b',<1>,1:1]\n" +
"[@2,2:2='c',<6>,1:2]\n" + "[@2,2:2='c',<4>,1:2]\n" +
"[@3,3:2='<EOF>',<-1>,1:3]\n"; "[@3,3:2='<EOF>',<-1>,1:3]\n";
String found = execLexer("M.g4", master, "M", "abc", debug); String found = execLexer("M.g4", master, "M", "abc", debug);
assertEquals(expecting, found); assertEquals(expecting, found);
@ -427,7 +427,7 @@ public class TestCompositeGrammars extends BaseTest {
"WS : (' '|'\\n') {skip();} ;\n" ; "WS : (' '|'\\n') {skip();} ;\n" ;
String found = execLexer("M.g4", master, "M", "ab", debug); String found = execLexer("M.g4", master, "M", "ab", debug);
assertEquals("M.A\n" + assertEquals("M.A\n" +
"[@0,0:1='ab',<3>,1:0]\n" + "[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:1='<EOF>',<-1>,1:2]\n", found); "[@1,2:1='<EOF>',<-1>,1:2]\n", found);
} }
@ -454,7 +454,7 @@ public class TestCompositeGrammars extends BaseTest {
assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size()); assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());
assertEquals("M.A\n" + assertEquals("M.A\n" +
"M.a: [@0,0:2='abc',<3>,1:0]\n", found); "M.a: [@0,0:2='abc',<1>,1:0]\n", found);
} }
// Make sure that M can import S that imports T. // Make sure that M can import S that imports T.
@ -479,7 +479,7 @@ public class TestCompositeGrammars extends BaseTest {
writeFile(tmpdir, "M.g4", master); writeFile(tmpdir, "M.g4", master);
Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);
String expectedTokenIDToTypeMap = "{EOF=-1, M=3}"; // S and T aren't imported; overridden String expectedTokenIDToTypeMap = "{EOF=-1, M=1}"; // S and T aren't imported; overridden
String expectedStringLiteralToTypeMap = "{}"; String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[M]"; String expectedTypeToTokenList = "[M]";
@ -543,7 +543,7 @@ public class TestCompositeGrammars extends BaseTest {
assertEquals("[]", equeue.errors.toString()); assertEquals("[]", equeue.errors.toString());
assertEquals("[]", equeue.warnings.toString()); assertEquals("[]", equeue.warnings.toString());
String expectedTokenIDToTypeMap = "{EOF=-1, M=3, S=4, T=5, A=6, B=7, C=8}"; String expectedTokenIDToTypeMap = "{EOF=-1, M=1, S=2, T=3, A=4, B=5, C=6}";
String expectedStringLiteralToTypeMap = "{}"; String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[M, S, T, A, B, C]"; String expectedTypeToTokenList = "[M, S, T, A, B, C]";
@ -580,7 +580,7 @@ public class TestCompositeGrammars extends BaseTest {
writeFile(tmpdir, "M.g4", master); writeFile(tmpdir, "M.g4", master);
Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue);
String expectedTokenIDToTypeMap = "{EOF=-1, M=3, T=4}"; String expectedTokenIDToTypeMap = "{EOF=-1, M=1, T=2}";
String expectedStringLiteralToTypeMap = "{}"; String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[M, T]"; String expectedTypeToTokenList = "[M, T]";
@ -627,7 +627,7 @@ public class TestCompositeGrammars extends BaseTest {
Grammar g = new Grammar(tmpdir+"/G3.g4", G3str, equeue); Grammar g = new Grammar(tmpdir+"/G3.g4", G3str, equeue);
String expectedTokenIDToTypeMap = "{EOF=-1, T4=3, T3=4}"; String expectedTokenIDToTypeMap = "{EOF=-1, T4=1, T3=2}";
String expectedStringLiteralToTypeMap = "{}"; String expectedStringLiteralToTypeMap = "{}";
String expectedTypeToTokenList = "[T4, T3]"; String expectedTypeToTokenList = "[T4, T3]";
@ -655,7 +655,6 @@ public class TestCompositeGrammars extends BaseTest {
"grammar M;\n" + "grammar M;\n" +
"import S;\n" + "import S;\n" +
"@header{package mypackage;}\n" + "@header{package mypackage;}\n" +
"@lexer::header{package mypackage;}\n" +
"s : a ;\n" + "s : a ;\n" +
"B : 'b' ;" + // defines B from inherited token space "B : 'b' ;" + // defines B from inherited token space
"WS : (' '|'\\n') {skip();} ;\n" ; "WS : (' '|'\\n') {skip();} ;\n" ;


@ -52,7 +52,7 @@ public class TestLexerErrors extends BaseTest {
"A : 'a' 'b' ;\n"; "A : 'a' 'b' ;\n";
String tokens = execLexer("L.g4", grammar, "L", "abx"); String tokens = execLexer("L.g4", grammar, "L", "abx");
String expectingTokens = String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" + "[@0,0:1='ab',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n"; "[@1,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expectingTokens, tokens); assertEquals(expectingTokens, tokens);
String expectingError = "line 1:2 token recognition error at: 'x'\n"; String expectingError = "line 1:2 token recognition error at: 'x'\n";
@ -79,7 +79,7 @@ public class TestLexerErrors extends BaseTest {
"A : 'a' 'b' ;\n"; "A : 'a' 'b' ;\n";
String tokens = execLexer("L.g4", grammar, "L", "abax"); String tokens = execLexer("L.g4", grammar, "L", "abax");
String expectingTokens = String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" + "[@0,0:1='ab',<1>,1:0]\n" +
"[@1,4:3='<EOF>',<-1>,1:4]\n"; "[@1,4:3='<EOF>',<-1>,1:4]\n";
assertEquals(expectingTokens, tokens); assertEquals(expectingTokens, tokens);
String expectingError = "line 1:2 token recognition error at: 'ax'\n"; String expectingError = "line 1:2 token recognition error at: 'ax'\n";
@ -97,8 +97,8 @@ public class TestLexerErrors extends BaseTest {
// and return to previous dfa accept state // and return to previous dfa accept state
String tokens = execLexer("L.g4", grammar, "L", "ababx"); String tokens = execLexer("L.g4", grammar, "L", "ababx");
String expectingTokens = String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" + "[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:3='ab',<3>,1:2]\n" + "[@1,2:3='ab',<1>,1:2]\n" +
"[@2,5:4='<EOF>',<-1>,1:5]\n"; "[@2,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expectingTokens, tokens); assertEquals(expectingTokens, tokens);
String expectingError = "line 1:4 token recognition error at: 'x'\n"; String expectingError = "line 1:4 token recognition error at: 'x'\n";
@ -118,8 +118,8 @@ public class TestLexerErrors extends BaseTest {
// uses the previous accepted in the ATN not DFA // uses the previous accepted in the ATN not DFA
String tokens = execLexer("L.g4", grammar, "L", "ababcx"); String tokens = execLexer("L.g4", grammar, "L", "ababcx");
String expectingTokens = String expectingTokens =
"[@0,0:1='ab',<3>,1:0]\n" + "[@0,0:1='ab',<1>,1:0]\n" +
"[@1,2:4='abc',<4>,1:2]\n" + "[@1,2:4='abc',<2>,1:2]\n" +
"[@2,6:5='<EOF>',<-1>,1:6]\n"; "[@2,6:5='<EOF>',<-1>,1:6]\n";
assertEquals(expectingTokens, tokens); assertEquals(expectingTokens, tokens);
String expectingError = "line 1:5 token recognition error at: 'x'\n"; String expectingError = "line 1:5 token recognition error at: 'x'\n";
@ -157,9 +157,9 @@ public class TestLexerErrors extends BaseTest {
"\n"; "\n";
String result = execLexer("T.g4", grammar, "TLexer", "x : x", false); String result = execLexer("T.g4", grammar, "TLexer", "x : x", false);
String expecting = String expecting =
"[@0,0:0='x',<5>,1:0]\n" + "[@0,0:0='x',<3>,1:0]\n" +
"[@1,2:2=':',<4>,1:2]\n" + "[@1,2:2=':',<2>,1:2]\n" +
"[@2,4:4='x',<5>,1:4]\n" + "[@2,4:4='x',<3>,1:4]\n" +
"[@3,5:4='<EOF>',<-1>,1:5]\n"; "[@3,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expecting, result); assertEquals(expecting, result);
assertEquals("line 1:1 token recognition error at: ' '\n" + assertEquals("line 1:1 token recognition error at: ' '\n" +


@ -9,7 +9,7 @@ public class TestLexerExec extends BaseTest {
"QUOTE : '\"' ;\n"; // make sure this compiles "QUOTE : '\"' ;\n"; // make sure this compiles
String found = execLexer("L.g4", grammar, "L", "\""); String found = execLexer("L.g4", grammar, "L", "\"");
String expecting = String expecting =
"[@0,0:0='\"',<3>,1:0]\n" + "[@0,0:0='\"',<1>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n"; "[@1,1:0='<EOF>',<-1>,1:1]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -22,9 +22,9 @@ public class TestLexerExec extends BaseTest {
"WS : (' '|'\\n') {skip();} ;"; "WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g4", grammar, "L", "34 -21 3"); String found = execLexer("L.g4", grammar, "L", "34 -21 3");
String expecting = String expecting =
"[@0,0:1='34',<4>,1:0]\n" + "[@0,0:1='34',<2>,1:0]\n" +
"[@1,3:5='-21',<3>,1:3]\n" + "[@1,3:5='-21',<1>,1:3]\n" +
"[@2,7:7='3',<4>,1:7]\n" + "[@2,7:7='3',<2>,1:7]\n" +
"[@3,8:7='<EOF>',<-1>,1:8]\n"; // EOF has no length so range is 8:7 not 8:8 "[@3,8:7='<EOF>',<-1>,1:8]\n"; // EOF has no length so range is 8:7 not 8:8
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -38,8 +38,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,3:4='34',<3>,1:3]\n" + "[@1,3:4='34',<1>,1:3]\n" +
"[@2,5:4='<EOF>',<-1>,1:5]\n"; "[@2,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -53,8 +53,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,3:4='34',<3>,1:3]\n" + "[@1,3:4='34',<1>,1:3]\n" +
"[@2,5:4='<EOF>',<-1>,1:5]\n"; "[@2,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -68,8 +68,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,2:4='#10',<3>,1:2]\n" + "[@1,2:4='#10',<1>,1:2]\n" +
"[@2,5:4='<EOF>',<-1>,1:5]\n"; "[@2,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -82,8 +82,8 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "34#"); String found = execLexer("L.g4", grammar, "L", "34#");
String expecting = String expecting =
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,2:2='#',<4>,1:2]\n" + "[@1,2:2='#',<2>,1:2]\n" +
"[@2,3:2='<EOF>',<-1>,1:3]\n"; "[@2,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -97,8 +97,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,2:4='#11',<3>,1:2]\n" + "[@1,2:4='#11',<1>,1:2]\n" +
"[@2,5:4='<EOF>',<-1>,1:5]\n"; "[@2,5:4='<EOF>',<-1>,1:5]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -113,8 +113,8 @@ public class TestLexerExec extends BaseTest {
"ANY : . {more();} ;\n"; "ANY : . {more();} ;\n";
String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\"");
String expecting = String expecting =
"[@0,0:4='\"abc\"',<5>,1:0]\n" + "[@0,0:4='\"abc\"',<3>,1:0]\n" +
"[@1,6:9='\"ab\"',<5>,1:6]\n" + "[@1,6:9='\"ab\"',<3>,1:6]\n" +
"[@2,10:9='<EOF>',<-1>,1:10]\n"; "[@2,10:9='<EOF>',<-1>,1:10]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -129,8 +129,8 @@ public class TestLexerExec extends BaseTest {
"ANY : . -> more ;\n"; "ANY : . -> more ;\n";
String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\"");
String expecting = String expecting =
"[@0,0:4='\"abc\"',<5>,1:0]\n" + "[@0,0:4='\"abc\"',<3>,1:0]\n" +
"[@1,6:9='\"ab\"',<5>,1:6]\n" + "[@1,6:9='\"ab\"',<3>,1:6]\n" +
"[@2,10:9='<EOF>',<-1>,1:10]\n"; "[@2,10:9='<EOF>',<-1>,1:10]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -145,8 +145,8 @@ public class TestLexerExec extends BaseTest {
"ANY : . -> more ;\n"; "ANY : . -> more ;\n";
String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\"");
String expecting = String expecting =
"[@0,0:4='\"abc\"',<5>,1:0]\n" + "[@0,0:4='\"abc\"',<3>,1:0]\n" +
"[@1,6:9='\"ab\"',<5>,1:6]\n" + "[@1,6:9='\"ab\"',<3>,1:6]\n" +
"[@2,10:9='<EOF>',<-1>,1:10]\n"; "[@2,10:9='<EOF>',<-1>,1:10]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -159,13 +159,13 @@ public class TestLexerExec extends BaseTest {
"WS : (' '|'\n')+ ;"; "WS : (' '|'\n')+ ;";
String found = execLexer("L.g4", grammar, "L", "end eend ending a"); String found = execLexer("L.g4", grammar, "L", "end eend ending a");
String expecting = String expecting =
"[@0,0:2='end',<3>,1:0]\n" + "[@0,0:2='end',<1>,1:0]\n" +
"[@1,3:3=' ',<5>,1:3]\n" + "[@1,3:3=' ',<3>,1:3]\n" +
"[@2,4:7='eend',<4>,1:4]\n" + "[@2,4:7='eend',<2>,1:4]\n" +
"[@3,8:8=' ',<5>,1:8]\n" + "[@3,8:8=' ',<3>,1:8]\n" +
"[@4,9:14='ending',<4>,1:9]\n" + "[@4,9:14='ending',<2>,1:9]\n" +
"[@5,15:15=' ',<5>,1:15]\n" + "[@5,15:15=' ',<3>,1:15]\n" +
"[@6,16:16='a',<4>,1:16]\n" + "[@6,16:16='a',<2>,1:16]\n" +
"[@7,17:16='<EOF>',<-1>,1:17]\n"; "[@7,17:16='<EOF>',<-1>,1:17]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -182,19 +182,19 @@ public class TestLexerExec extends BaseTest {
"WS : (' '|'\n')+ ;"; "WS : (' '|'\n')+ ;";
String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l"); String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l");
String expecting = String expecting =
"[@0,0:0='x',<7>,1:0]\n" + "[@0,0:0='x',<5>,1:0]\n" +
"[@1,1:1=' ',<8>,1:1]\n" + "[@1,1:1=' ',<6>,1:1]\n" +
"[@2,2:2='0',<4>,1:2]\n" + "[@2,2:2='0',<2>,1:2]\n" +
"[@3,3:3=' ',<8>,1:3]\n" + "[@3,3:3=' ',<6>,1:3]\n" +
"[@4,4:4='1',<4>,1:4]\n" + "[@4,4:4='1',<2>,1:4]\n" +
"[@5,5:5=' ',<8>,1:5]\n" + "[@5,5:5=' ',<6>,1:5]\n" +
"[@6,6:6='a',<7>,1:6]\n" + "[@6,6:6='a',<5>,1:6]\n" +
"[@7,7:7='.',<6>,1:7]\n" + "[@7,7:7='.',<4>,1:7]\n" +
"[@8,8:8='b',<7>,1:8]\n" + "[@8,8:8='b',<5>,1:8]\n" +
"[@9,9:9=' ',<8>,1:9]\n" + "[@9,9:9=' ',<6>,1:9]\n" +
"[@10,10:10='a',<7>,1:10]\n" + "[@10,10:10='a',<5>,1:10]\n" +
"[@11,11:11='.',<6>,1:11]\n" + "[@11,11:11='.',<4>,1:11]\n" +
"[@12,12:12='l',<7>,1:12]\n" + "[@12,12:12='l',<5>,1:12]\n" +
"[@13,13:12='<EOF>',<-1>,1:13]\n"; "[@13,13:12='<EOF>',<-1>,1:13]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -207,7 +207,7 @@ public class TestLexerExec extends BaseTest {
"A : 'a';\n"; "A : 'a';\n";
String found = execLexer("L.g4", grammar, "L", ""); String found = execLexer("L.g4", grammar, "L", "");
String expecting = String expecting =
"[@0,0:-1='<EOF>',<3>,1:0]\n" + "[@0,0:-1='<EOF>',<1>,1:0]\n" +
"[@1,0:-1='<EOF>',<-1>,1:0]\n"; "[@1,0:-1='<EOF>',<-1>,1:0]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -225,7 +225,7 @@ public class TestLexerExec extends BaseTest {
found = execLexer("L.g4", grammar, "L", "a"); found = execLexer("L.g4", grammar, "L", "a");
expecting = expecting =
"[@0,0:0='a',<3>,1:0]\n" + "[@0,0:0='a',<1>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n"; "[@1,1:0='<EOF>',<-1>,1:1]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -239,8 +239,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,5:6='34',<3>,2:1]\n" + "[@1,5:6='34',<1>,2:1]\n" +
"[@2,7:6='<EOF>',<-1>,2:3]\n"; "[@2,7:6='<EOF>',<-1>,2:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -254,8 +254,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,5:6='34',<3>,2:1]\n" + "[@1,5:6='34',<1>,2:1]\n" +
"[@2,7:6='<EOF>',<-1>,2:3]\n"; "[@2,7:6='<EOF>',<-1>,2:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -268,7 +268,7 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "xaf"); String found = execLexer("L.g4", grammar, "L", "xaf");
String expecting = String expecting =
"I\n" + "I\n" +
"[@0,0:2='xaf',<3>,1:0]\n" + "[@0,0:2='xaf',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n"; "[@1,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -282,8 +282,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"I\n" + "I\n" +
"I\n" + "I\n" +
"[@0,0:0='a',<3>,1:0]\n" + "[@0,0:0='a',<1>,1:0]\n" +
"[@1,2:2='x',<3>,1:2]\n" + "[@1,2:2='x',<1>,1:2]\n" +
"[@2,3:2='<EOF>',<-1>,1:3]\n"; "[@2,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -300,10 +300,10 @@ public class TestLexerExec extends BaseTest {
"I\n" + "I\n" +
"ID\n" + "ID\n" +
"ID\n" + "ID\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,4:5='34',<3>,1:4]\n" + "[@1,4:5='34',<1>,1:4]\n" +
"[@2,7:8='a2',<4>,1:7]\n" + "[@2,7:8='a2',<2>,1:7]\n" +
"[@3,10:12='abc',<4>,1:10]\n" + "[@3,10:12='abc',<2>,1:10]\n" +
"[@4,18:17='<EOF>',<-1>,2:3]\n"; "[@4,18:17='<EOF>',<-1>,2:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -316,7 +316,7 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "00\r\n"); String found = execLexer("L.g4", grammar, "L", "00\r\n");
String expecting = String expecting =
"I\n" + "I\n" +
"[@0,0:1='00',<3>,1:0]\n" + "[@0,0:1='00',<1>,1:0]\n" +
"[@1,4:3='<EOF>',<-1>,2:0]\n"; "[@1,4:3='<EOF>',<-1>,2:0]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -329,7 +329,7 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "34 "); String found = execLexer("L.g4", grammar, "L", "34 ");
String expecting = String expecting =
"I\n" + "I\n" +
"[@0,0:1='34',<3>,1:0]\n" + "[@0,0:1='34',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n"; "[@1,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -343,8 +343,8 @@ public class TestLexerExec extends BaseTest {
String expecting = String expecting =
"DASHBRACK\n" + "DASHBRACK\n" +
"DASHBRACK\n" + "DASHBRACK\n" +
"[@0,0:0='-',<3>,1:0]\n" + "[@0,0:0='-',<1>,1:0]\n" +
"[@1,2:2=']',<3>,1:2]\n" + "[@1,2:2=']',<1>,1:2]\n" +
"[@2,4:3='<EOF>',<-1>,1:4]\n"; "[@2,4:3='<EOF>',<-1>,1:4]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -357,7 +357,7 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "9"); String found = execLexer("L.g4", grammar, "L", "9");
String expecting = String expecting =
"A\n" + "A\n" +
"[@0,0:0='9',<3>,1:0]\n" + "[@0,0:0='9',<1>,1:0]\n" +
"[@1,1:0='<EOF>',<-1>,1:1]\n"; "[@1,1:0='<EOF>',<-1>,1:1]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -370,7 +370,7 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "b\"a"); String found = execLexer("L.g4", grammar, "L", "b\"a");
String expecting = String expecting =
"A\n" + "A\n" +
"[@0,0:2='b\"a',<3>,1:0]\n" + "[@0,0:2='b\"a',<1>,1:0]\n" +
"[@1,3:2='<EOF>',<-1>,1:3]\n"; "[@1,3:2='<EOF>',<-1>,1:3]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@ -383,7 +383,7 @@ public class TestLexerExec extends BaseTest {
String found = execLexer("L.g4", grammar, "L", "b\"\\a"); String found = execLexer("L.g4", grammar, "L", "b\"\\a");
String expecting = String expecting =
"A\n" + "A\n" +
"[@0,0:3='b\"\\a',<3>,1:0]\n" + "[@0,0:3='b\"\\a',<1>,1:0]\n" +
"[@1,4:3='<EOF>',<-1>,1:4]\n"; "[@1,4:3='<EOF>',<-1>,1:4]\n";
assertEquals(expecting, found); assertEquals(expecting, found);
} }
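
For reading the expectations in this file: each dump line has the shape [@index,start:stop='text',<type>,line:col]. As a worked example, [@0,0:1='34',<1>,1:0] is token 0, spanning characters 0..1, with text '34', token type 1, on line 1 at column 0; EOF tokens have no length, so their stop index ends up one less than their start.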


@ -6,6 +6,7 @@ public class TestListeners extends BaseTest {
@Test public void testBasic() throws Exception { @Test public void testBasic() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"@header {import org.antlr.v4.runtime.tree.ParseTree;}\n"+
"@members {\n" + "@members {\n" +
"public static class LeafListener extends TBaseListener {\n" + "public static class LeafListener extends TBaseListener {\n" +
" public void visitTerminal(ParseTree.TerminalNode<Token> node) {\n" + " public void visitTerminal(ParseTree.TerminalNode<Token> node) {\n" +
@ -69,7 +70,7 @@ public class TestListeners extends BaseTest {
result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false);
expecting = "(a abc)\n" + expecting = "(a abc)\n" +
"[@0,0:2='abc',<6>,1:0]\n"; "[@0,0:2='abc',<4>,1:0]\n";
assertEquals(expecting, result); assertEquals(expecting, result);
} }


@ -78,7 +78,7 @@ public class TestParseErrors extends BaseTest {
"grammar T;\n" + "grammar T;\n" +
"a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;";
String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false);
String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<3>,1:1]\n"; String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<1>,1:1]\n";
assertEquals(expecting, result); assertEquals(expecting, result);
} }
@ -97,7 +97,7 @@ public class TestParseErrors extends BaseTest {
"grammar T;\n" + "grammar T;\n" +
"a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;";
String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false);
String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<3>,1:1]\n"; String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<1>,1:1]\n";
assertEquals(expecting, result); assertEquals(expecting, result);
} }


@ -7,13 +7,15 @@ import org.junit.Test;
public class TestScopeParsing extends BaseTest { public class TestScopeParsing extends BaseTest {
String[] argPairs = { String[] argPairs = {
"", "{}", "", "{}",
" ", "{}", " ", "{}",
"int i", "{i=int i}", "int i", "{i=int i}",
"int[] i, int j[]", "{i=int[] i, j=int [] j}", "int[] i, int j[]", "{i=int[] i, j=int [] j}",
"Map<A\\,B>[] i, int j[]", "{i=Map<A,B>[] i, j=int [] j}", "Map<A,B>[] i, int j[]", "{i=Map<A,B>[] i, j=int [] j}",
"Map<A,List<B>>[] i", "{i=Map<A,List<B>>[] i}",
"int i = 34+a[3], int j[] = new int[34]", "int i = 34+a[3], int j[] = new int[34]",
"{i=int i= 34+a[3], j=int [] j= new int[34]}", "{i=int i= 34+a[3], j=int [] j= new int[34]}",
"char *foo32[3] = {1\\,2\\,3}", "{3=char *foo32[] 3= {1,2,3}}", "char *foo32[3] = {1,2,3}", "{3=char *foo32[] 3= {1,2,3}}",
"String[] headers", "{headers=String[] headers}",
// python/ruby style // python/ruby style
"i", "{i=null i}", "i", "{i=null i}",


@ -1,61 +1,111 @@
package org.antlr.v4.test; package org.antlr.v4.test;
import org.junit.*; import org.junit.Test;
public class TestSemPredEvalLexer extends BaseTest { public class TestSemPredEvalLexer extends BaseTest {
@Test public void testDisableRule() throws Exception { @Test public void testDisableRule() throws Exception {
String grammar =
"lexer grammar L;\n"+
"E1 : {false}? 'enum' ;\n" +
"E2 : {true}? 'enum' ;\n" + // winner not E1 or ID
"ID : 'a'..'z'+ ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g4", grammar, "L", "enum abc", true);
String expecting =
"[@0,0:3='enum',<4>,1:0]\n" +
"[@1,5:7='abc',<5>,1:5]\n" +
"[@2,8:7='<EOF>',<-1>,1:8]\n"; // no dfa since preds on left edge
assertEquals(expecting, found);
}
@Test public void testDisableRuleAfterMatch() throws Exception {
String grammar = String grammar =
"lexer grammar L;\n"+ "lexer grammar L;\n"+
"E1 : 'enum' {false}? ;\n" + "E1 : 'enum' {false}? ;\n" +
"E2 : 'enum' {true}? ;\n" + // winner not E1 or ID "E2 : 'enum' {true}? ;\n" + // winner not E1 or ID
"ID : 'a'..'z'+ ;\n"+ "ID : 'a'..'z'+ ;\n"+
"WS : (' '|'\\n') {skip();} ;"; "WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); String found = execLexer("L.g4", grammar, "L", "enum abc", true);
String expecting = String expecting =
"[@0,0:3='enum',<4>,1:0]\n" + "[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<5>,1:5]\n" + "[@1,5:7='abc',<3>,1:5]\n" +
"[@2,9:12='enum',<4>,1:9]\n" + "[@2,8:7='<EOF>',<-1>,1:8]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" + "s0-' '->:s4=>4\n" +
"s0-' '->:s4=>6\n" + "s0-'a'->:s5=>3\n" +
"s0-'a'->:s5=>5\n" + "s0-'e'->:s1=>3\n" +
"s0-'e'->:s1=>5\n" + ":s1=>3-'n'->:s2=>3\n" +
":s1=>5-'n'->:s2=>5\n" + ":s2=>3-'u'->:s3=>3\n" +
":s2=>5-'u'->:s3=>5\n" + ":s5=>3-'b'->:s5=>3\n" +
":s5=>5-'b'->:s5=>5\n" + ":s5=>3-'c'->:s5=>3\n";
":s5=>5-'c'->:s5=>5\n";
// didn't even created DFA 2nd time; old target of 'u' has "pred" flag set
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@Ignore @Test public void testIDvsEnum() throws Exception {
public void testMatchNChar() throws Exception { // can't do locals yet
String grammar = String grammar =
"lexer grammar L;\n"+ "lexer grammar L;\n"+
"B : {int n=0;} ({n<=2}? DIGIT {n++})+ ;\n" + "ENUM : 'enum' {false}? ;\n" +
"fragment DIGIT : '0'..'9' ;\n"+ "ID : 'a'..'z'+ ;\n"+
"WS : (' '|'\\n') {skip();} ;"; "WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g4", grammar, "L", "1234 56", true); String found = execLexer("L.g4", grammar, "L", "enum abc enum", true);
String expecting = String expecting =
"[@0,0:3='enum',<4>,1:0]\n" + "[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<5>,1:5]\n" + "[@1,5:7='abc',<2>,1:5]\n" +
"[@2,8:8='<EOF>',<-1>,1:8]\n"; // no dfa since preds on left edge "[@2,9:12='enum',<2>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s5=>3\n" +
"s0-'a'->:s4=>2\n" +
"s0-'e'->:s1=>2\n" +
":s1=>2-'n'->:s2=>2\n" +
":s2=>2-'u'->:s3=>2\n" +
":s4=>2-'b'->:s4=>2\n" +
":s4=>2-'c'->:s4=>2\n"; // no 'm'-> transition...conflicts with pred
assertEquals(expecting, found); assertEquals(expecting, found);
} }
@Test public void testIDnotEnum() throws Exception {
String grammar =
"lexer grammar L;\n"+
"ENUM : [a-z]+ {false}? ;\n" +
"ID : [a-z]+ ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g4", grammar, "L", "enum abc enum", true);
String expecting =
"[@0,0:3='enum',<2>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<2>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s2=>3\n"; // no DFA for enum/id. all paths lead to pred.
assertEquals(expecting, found);
}
@Test public void testEnumNotID() throws Exception {
String grammar =
"lexer grammar L;\n"+
"ENUM : [a-z]+ {getSpeculativeText().equals(\"enum\")}? ;\n" +
"ID : [a-z]+ ;\n"+
"WS : (' '|'\\n') {skip();} ;";
String found = execLexer("L.g4", grammar, "L", "enum abc enum", true);
String expecting =
"[@0,0:3='enum',<1>,1:0]\n" +
"[@1,5:7='abc',<2>,1:5]\n" +
"[@2,9:12='enum',<1>,1:9]\n" +
"[@3,13:12='<EOF>',<-1>,1:13]\n" +
"s0-' '->:s1=>3\n"; // no DFA for enum/id. all paths lead to pred.
assertEquals(expecting, found);
}
@Test public void testIndent() throws Exception {
String grammar =
"lexer grammar L;\n"+
"ID : [a-z]+ ;\n"+
"INDENT : [ \\t]+ {_tokenStartCharPositionInLine==0}? \n" +
" {System.out.println(\"INDENT\");} ;"+
"NL : '\\n' ;"+
"WS : [ \\t]+ ;";
String found = execLexer("L.g4", grammar, "L", "abc\n def \n", true);
String expecting =
"INDENT\n" + // action output
"[@0,0:2='abc',<1>,1:0]\n" + // ID
"[@1,3:3='\\n',<3>,1:3]\n" + // NL
"[@2,4:5=' ',<2>,2:0]\n" + // INDENT
"[@3,6:8='def',<1>,2:2]\n" + // ID
"[@4,9:10=' ',<4>,2:5]\n" + // WS
"[@5,11:11='\\n',<3>,2:7]\n" +
"[@6,12:11='<EOF>',<-1>,3:8]\n" +
"s0-'\n" +
"'->:s2=>3\n" +
"s0-'a'->:s1=>1\n" +
"s0-'d'->:s1=>1\n" +
":s1=>1-'b'->:s1=>1\n" +
":s1=>1-'c'->:s1=>1\n" +
":s1=>1-'e'->:s1=>1\n" +
":s1=>1-'f'->:s1=>1\n";
assertEquals(expecting, found);
}
} }
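
A condensed sketch of the predicate-placement distinction the tests above probe (the rules are copied from testDisableRule; the comments are mine):

    E1 : {false}? 'enum' ;   // left-edge pred: evaluated before matching, so no DFA is cached
    E2 : 'enum' {false}? ;   // right-edge pred: evaluated after the text has matched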


@ -508,16 +508,28 @@ public class TestSemPredEvalParser extends BaseTest {
assertEquals(expecting, found); assertEquals(expecting, found);
} }
/** if you call a rule as part of FOLLOW with $i, can't execute, but @Test public void testPredTestedEvenWhenUnAmbig() throws Exception {
* what if there is a forced action in that called rule? We should String grammar =
* NOT execute any actions after "grammar T;\n" +
* "\n" +
* a[int i] : e x[$i] ; "@members {boolean enumKeyword = true;}\n" +
* b[int i] : e x[$i] ; "\n" +
* e : ID | ; "primary\n" +
* x[int i] : {{$i=3;}} ID ; " : ID {System.out.println(\"ID \"+$ID.text);}\n" +
* " | {!enumKeyword}? 'enum' {System.out.println(\"enum\");}\n" +
* use global context? " ;\n" +
*/ "\n" +
"ID : [a-z]+ ;\n" +
"\n" +
"WS : [ \\t\\n\\r]+ -> skip ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "primary",
"abc", false);
assertEquals("ID abc\n", found);
execParser("T.g4", grammar, "TParser", "TLexer", "primary",
"enum", false);
assertEquals("line 1:0 no viable alternative at input 'enum'\n", stderrDuringParse);
}
} }
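
Since the @members flag above compiles to an ordinary field on the generated parser, a caller could flip it before invoking the rule. A hypothetical driver sketch (TParser and tokens assumed built as usual):

    TParser parser = new TParser(tokens);
    parser.enumKeyword = false;   // makes the {!enumKeyword}? alternative viable
    parser.primary();             // now 'enum' parses instead of failing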


@ -42,7 +42,7 @@ public class TestSets extends BaseTest {
// from a nonfragment rule does not set the overall token. // from a nonfragment rule does not set the overall token.
String grammar = String grammar =
"grammar P;\n" + "grammar P;\n" +
"a : C {System.out.println(_input);} ;\n" + "a : C {System.out.println(_input.getText());} ;\n" +
"fragment A : '1' | '2';\n" + "fragment A : '1' | '2';\n" +
"fragment B : '3' '4';\n" + "fragment B : '3' '4';\n" +
"C : A | B;\n"; "C : A | B;\n";
@ -72,7 +72,7 @@ public class TestSets extends BaseTest {
@Test public void testParserNotToken() throws Exception { @Test public void testParserNotToken() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : ~'x' 'z' {System.out.println(_input);} ;\n"; "a : ~'x' 'z' {System.out.println(_input.getText());} ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "zz", debug); "a", "zz", debug);
assertEquals("zz\n", found); assertEquals("zz\n", found);
@ -90,7 +90,7 @@ public class TestSets extends BaseTest {
@Test public void testRuleAsSet() throws Exception { @Test public void testRuleAsSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a @after {System.out.println(_input);} : 'a' | 'b' |'c' ;\n"; "a @after {System.out.println(_input.getText());} : 'a' | 'b' |'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "b", debug); "a", "b", debug);
assertEquals("b\n", found); assertEquals("b\n", found);
@ -109,7 +109,7 @@ public class TestSets extends BaseTest {
@Test public void testOptionalSingleElement() throws Exception { @Test public void testOptionalSingleElement() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A? 'c' {System.out.println(_input);} ;\n" + "a : A? 'c' {System.out.println(_input.getText());} ;\n" +
"A : 'b' ;\n"; "A : 'b' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bc", debug); "a", "bc", debug);
@ -119,7 +119,7 @@ public class TestSets extends BaseTest {
@Test public void testOptionalLexerSingleElement() throws Exception { @Test public void testOptionalLexerSingleElement() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" + "a : A {System.out.println(_input.getText());} ;\n" +
"A : 'b'? 'c' ;\n"; "A : 'b'? 'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bc", debug); "a", "bc", debug);
@ -129,7 +129,7 @@ public class TestSets extends BaseTest {
@Test public void testStarLexerSingleElement() throws Exception { @Test public void testStarLexerSingleElement() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" + "a : A {System.out.println(_input.getText());} ;\n" +
"A : 'b'* 'c' ;\n"; "A : 'b'* 'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bbbbc", debug); "a", "bbbbc", debug);
@ -142,7 +142,7 @@ public class TestSets extends BaseTest {
@Test public void testPlusLexerSingleElement() throws Exception { @Test public void testPlusLexerSingleElement() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" + "a : A {System.out.println(_input.getText());} ;\n" +
"A : 'b'+ 'c' ;\n"; "A : 'b'+ 'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "bbbbc", debug); "a", "bbbbc", debug);
@ -152,7 +152,7 @@ public class TestSets extends BaseTest {
@Test public void testOptionalSet() throws Exception { @Test public void testOptionalSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : ('a'|'b')? 'c' {System.out.println(_input);} ;\n"; "a : ('a'|'b')? 'c' {System.out.println(_input.getText());} ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "ac", debug); "a", "ac", debug);
assertEquals("ac\n", found); assertEquals("ac\n", found);
@ -161,7 +161,7 @@ public class TestSets extends BaseTest {
@Test public void testStarSet() throws Exception { @Test public void testStarSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : ('a'|'b')* 'c' {System.out.println(_input);} ;\n"; "a : ('a'|'b')* 'c' {System.out.println(_input.getText());} ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug); "a", "abaac", debug);
assertEquals("abaac\n", found); assertEquals("abaac\n", found);
@ -170,7 +170,7 @@ public class TestSets extends BaseTest {
@Test public void testPlusSet() throws Exception { @Test public void testPlusSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : ('a'|'b')+ 'c' {System.out.println(_input);} ;\n"; "a : ('a'|'b')+ 'c' {System.out.println(_input.getText());} ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug); "a", "abaac", debug);
assertEquals("abaac\n", found); assertEquals("abaac\n", found);
@ -179,7 +179,7 @@ public class TestSets extends BaseTest {
@Test public void testLexerOptionalSet() throws Exception { @Test public void testLexerOptionalSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" + "a : A {System.out.println(_input.getText());} ;\n" +
"A : ('a'|'b')? 'c' ;\n"; "A : ('a'|'b')? 'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "ac", debug); "a", "ac", debug);
@ -189,7 +189,7 @@ public class TestSets extends BaseTest {
@Test public void testLexerStarSet() throws Exception { @Test public void testLexerStarSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" + "a : A {System.out.println(_input.getText());} ;\n" +
"A : ('a'|'b')* 'c' ;\n"; "A : ('a'|'b')* 'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug); "a", "abaac", debug);
@ -199,7 +199,7 @@ public class TestSets extends BaseTest {
@Test public void testLexerPlusSet() throws Exception { @Test public void testLexerPlusSet() throws Exception {
String grammar = String grammar =
"grammar T;\n" + "grammar T;\n" +
"a : A {System.out.println(_input);} ;\n" + "a : A {System.out.println(_input.getText());} ;\n" +
"A : ('a'|'b')+ 'c' ;\n"; "A : ('a'|'b')+ 'c' ;\n";
String found = execParser("T.g4", grammar, "TParser", "TLexer", String found = execParser("T.g4", grammar, "TParser", "TLexer",
"a", "abaac", debug); "a", "abaac", debug);


@ -313,7 +313,7 @@ public class TestTokenStreamRewriter extends BaseTest {
catch (IllegalArgumentException iae) { catch (IllegalArgumentException iae) {
exc = iae; exc = iae;
} }
String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<4>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@2,2:2='c',<5>,1:2]:\"x\">"; String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<2>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@2,2:2='c',<3>,1:2]:\"x\">";
assertNotNull(exc); assertNotNull(exc);
assertEquals(expecting, exc.getMessage()); assertEquals(expecting, exc.getMessage());
} }
@ -468,7 +468,7 @@ public class TestTokenStreamRewriter extends BaseTest {
catch (IllegalArgumentException iae) { catch (IllegalArgumentException iae) {
exc = iae; exc = iae;
} }
String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<5>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"x\">"; String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">";
assertNotNull(exc); assertNotNull(exc);
assertEquals(expecting, exc.getMessage()); assertEquals(expecting, exc.getMessage());
} }
@ -547,7 +547,7 @@ public class TestTokenStreamRewriter extends BaseTest {
catch (IllegalArgumentException iae) { catch (IllegalArgumentException iae) {
exc = iae; exc = iae;
} }
String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<5>,1:3]..[@5,5:5='b',<4>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">"; String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<3>,1:3]..[@5,5:5='b',<2>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">";
assertNotNull(exc); assertNotNull(exc);
assertEquals(expecting, exc.getMessage()); assertEquals(expecting, exc.getMessage());
} }
@ -574,7 +574,7 @@ public class TestTokenStreamRewriter extends BaseTest {
catch (IllegalArgumentException iae) { catch (IllegalArgumentException iae) {
exc = iae; exc = iae;
} }
String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@3,3:3='c',<5>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<5>,1:2]..[@4,4:4='c',<5>,1:4]:\"xyz\">"; String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">";
assertNotNull(exc); assertNotNull(exc);
assertEquals(expecting, exc.getMessage()); assertEquals(expecting, exc.getMessage());
} }
@ -737,7 +737,7 @@ public class TestTokenStreamRewriter extends BaseTest {
catch (IllegalArgumentException iae) { catch (IllegalArgumentException iae) {
exc = iae; exc = iae;
} }
String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<4>,1:1]..[@2,2:2='c',<5>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<3>,1:0]..[@3,3:3='c',<5>,1:3]:\"bar\">"; String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:3='c',<3>,1:3]:\"bar\">";
assertNotNull(exc); assertNotNull(exc);
assertEquals(expecting, exc.getMessage()); assertEquals(expecting, exc.getMessage());
} }
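
A minimal sketch of the rewriter calls whose failure modes these tests pin down (token indices are illustrative):

    TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
    rewriter.replace(2, 4, "x");      // queue: collapse token indices 2..4 into "x"
    rewriter.insertBefore(0, "y");    // queue: prepend before token 0 -- outside the replace, so legal
    String out = rewriter.getText();  // ops apply lazily here; overlapping ops throw IllegalArgumentException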


@ -1,10 +1,14 @@
package org.antlr.v4.test; package org.antlr.v4.test;
import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.*; import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.junit.Test; import org.junit.Test;
import java.util.*; import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.StringTokenizer;
public class TestTokenTypeAssignment extends BaseTest { public class TestTokenTypeAssignment extends BaseTest {
@ -94,6 +98,21 @@ public class TestTokenTypeAssignment extends BaseTest {
assertEquals("[E, 'x']", tokens.toString()); assertEquals("[E, 'x']", tokens.toString());
} }
@Test public void testPredDoesNotHideNameToLiteralMapInLexer() throws Exception {
// 'x' is token and char in lexer rule
Grammar g = new Grammar(
"grammar t;\n" +
"a : 'x' X ; \n" +
"X: 'x' {true}?;\n"); // must match as alias even with pred
assertEquals("{'x'=1}", g.stringLiteralToTypeMap.toString());
assertEquals("{EOF=-1, X=1}", g.tokenNameToTypeMap.toString());
// pushed in lexer from parser
assertEquals("{'x'=1}", g.implicitLexer.stringLiteralToTypeMap.toString());
assertEquals("{EOF=-1, X=1}", g.implicitLexer.tokenNameToTypeMap.toString());
}
@Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception { @Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception {
Grammar g = new Grammar( Grammar g = new Grammar(
"grammar t;\n"+ "grammar t;\n"+
@ -132,7 +151,7 @@ public class TestTokenTypeAssignment extends BaseTest {
String grammar = String grammar =
"grammar P;\n" + "grammar P;\n" +
"tokens { B='}'; }\n"+ "tokens { B='}'; }\n"+
"a : A B {System.out.println(_input);} ;\n"+ "a : A B {System.out.println(_input.getText());} ;\n"+
"A : 'a' ;\n" + "A : 'a' ;\n" +
"B : '}' ;\n"+ "B : '}' ;\n"+
"WS : (' '|'\\n') {skip();} ;"; "WS : (' '|'\\n') {skip();} ;";
@ -147,7 +166,7 @@ public class TestTokenTypeAssignment extends BaseTest {
String grammar = String grammar =
"grammar P;\n" + "grammar P;\n" +
"tokens { B='}'; }\n"+ "tokens { B='}'; }\n"+
"a : A '}' {System.out.println(_input);} ;\n"+ "a : A '}' {System.out.println(_input.getText());} ;\n"+
"A : 'a' ;\n" + "A : 'a' ;\n" +
"B : '}' ;\n"+ "B : '}' ;\n"+
"WS : (' '|'\\n') {skip();} ;"; "WS : (' '|'\\n') {skip();} ;";