got modes sort of working

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6818]
This commit is contained in:
parrt 2010-04-20 15:29:43 -08:00
parent 91f92d9b6c
commit 55899d032c
18 changed files with 787 additions and 573 deletions

View File

@ -355,13 +355,13 @@ public class Tool {
if ( ((GrammarRootAST)t).hasErrors ) return;
GrammarRootAST ast = (GrammarRootAST)t;
Grammar g = new Grammar(this, ast);
Grammar g = createGrammar(ast);
g.fileName = grammarFileNames.get(0);
process(g);
if ( ast!=null && ast.grammarType==ANTLRParser.COMBINED && !ast.hasErrors ) {
lexerAST = extractImplicitLexer(g); // alters ast
if ( lexerAST!=null ) {
Grammar lexerg = new Grammar(this, lexerAST);
LexerGrammar lexerg = new LexerGrammar(this, lexerAST);
lexerg.fileName = grammarFileNames.get(0);
g.implicitLexer = lexerg;
lexerg.implicitLexerOwner = g;
@ -370,6 +370,11 @@ public class Tool {
}
}
/** Grammar factory: pick the Grammar subclass appropriate for the root AST.
 *  Lexer grammars get a LexerGrammar (which tracks modes); everything else
 *  (parser, tree, combined) gets a plain Grammar.
 */
public Grammar createGrammar(GrammarRootAST ast) {
	boolean lexer = ast.grammarType == ANTLRParser.LEXER;
	return lexer ? new LexerGrammar(this, ast) : new Grammar(this, ast);
}
public void process(Grammar g) {
grammars.put(g.name, g);
g.loadImportedGrammars();
@ -390,12 +395,11 @@ public class Tool {
// BUILD NFA FROM AST
NFAFactory factory = new ParserNFAFactory(g);
if ( g.getType()==ANTLRParser.LEXER ) factory = new LexerNFAFactory(g);
if ( g.isLexer() ) factory = new LexerNFAFactory((LexerGrammar)g);
g.nfa = factory.createNFA();
if ( generate_NFA_dot ) generateNFAs(g);
// PERFORM GRAMMAR ANALYSIS ON NFA: BUILD DECISION DFAs
AnalysisPipeline anal = new AnalysisPipeline(g);
anal.process();

View File

@ -2,8 +2,8 @@ package org.antlr.v4.analysis;
import org.antlr.v4.automata.DFA;
import org.antlr.v4.automata.DecisionState;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
public class AnalysisPipeline {
public Grammar g;
@ -18,13 +18,21 @@ public class AnalysisPipeline {
lr.check();
if ( lr.listOfRecursiveCycles.size()>0 ) return; // bail out
if ( g.getType() == ANTLRParser.LEXER ) {
LexerNFAToDFAConverter conv = new LexerNFAToDFAConverter(g);
DFA dfa = conv.createDFA();
g.setLookaheadDFA(0, dfa); // only one decision
// BUILD DFA FOR EACH DECISION
if ( g.isLexer() ) processLexer();
else processParserOrTreeParser();
}
void processLexer() {
LexerGrammar lg = (LexerGrammar)g;
int d = 0;
for (String modeName : lg.modes.keySet()) {
LexerNFAToDFAConverter conv = new LexerNFAToDFAConverter(lg);
DFA dfa = conv.createDFA(modeName);
g.setLookaheadDFA(d, dfa);
d++;
if ( g.tool.minimizeDFA ) {
System.out.println("MINIMIZE");
int before = dfa.stateSet.size();
DFAMinimizer dmin = new DFAMinimizer(dfa);
dfa.minimized = dmin.minimize();
@ -33,10 +41,10 @@ public class AnalysisPipeline {
System.out.println("DFA minimized from "+before+" to "+after+" states");
}
}
return;
}
}
// BUILD DFA FOR EACH DECISION IN NONLEXER
void processParserOrTreeParser() {
for (DecisionState s : g.nfa.decisionToNFAState) {
System.out.println("\nDECISION "+s.decision);

View File

@ -5,6 +5,7 @@ import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.misc.OrderedHashSet;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import java.util.*;
@ -26,13 +27,15 @@ public class LexerNFAToDFAConverter {
public static boolean debug = false;
public LexerNFAToDFAConverter(Grammar g) {
public LexerNFAToDFAConverter(LexerGrammar g) {
this.g = g;
TokensStartState startState = (TokensStartState)g.nfa.states.get(0);
dfa = new DFA(g, startState);
}
public DFA createDFA() {
public DFA createDFA() { return createDFA(LexerGrammar.DEFAULT_MODE_NAME); }
public DFA createDFA(String modeName) {
TokensStartState startState = g.nfa.modeToStartState.get(modeName);
dfa = new DFA(g, startState);
closureBusy = new HashSet<NFAConfig>();
LexerState start = computeStartState();
dfa.startState = start;

View File

@ -1,28 +1,38 @@
package org.antlr.v4.automata;
import org.antlr.v4.codegen.Target;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.GrammarAST;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.TerminalAST;
import org.stringtemplate.v4.misc.Misc;
/** */
import java.util.List;
public class LexerNFAFactory extends ParserNFAFactory {
public LexerNFAFactory(Grammar g) { super(g); }
public LexerNFAFactory(LexerGrammar g) { super(g); }
public NFA createNFA() {
// create s0, start state (must be first)
// implied Tokens rule node
NFAState startState = newState(TokensStartState.class, null);
// BUILD ALL START STATES (ONE PER MODE)
for (String modeName : ((LexerGrammar)g).modes.keySet()) {
// create s0, start state; implied Tokens rule node
TokensStartState startState =
(TokensStartState)newState(TokensStartState.class, null);
nfa.modeToStartState.put(modeName, startState);
}
_createNFA();
// CREATE NFA FOR EACH RULE
_createNFA(g.rules.values());
// LINK START STATE TO EACH TOKEN RULE
for (Rule r : g.rules.values()) {
if ( !r.isFragment() ) {
RuleStartState s = nfa.ruleToStartState.get(r);
epsilon(startState, s);
// LINK MODE START STATE TO EACH TOKEN RULE
for (String modeName : ((LexerGrammar)g).modes.keySet()) {
List<Rule> rules = ((LexerGrammar)g).modes.get(modeName);
TokensStartState startState = nfa.modeToStartState.get(modeName);
for (Rule r : rules) {
if ( !r.isFragment() ) {
RuleStartState s = nfa.ruleToStartState.get(r);
epsilon(startState, s);
}
}
}

View File

@ -23,6 +23,8 @@ public class NFA {
public Map<Rule, RuleStartState> ruleToStartState = new LinkedHashMap<Rule, RuleStartState>();
public Map<Rule, RuleStopState> ruleToStopState = new LinkedHashMap<Rule, RuleStopState>();
public Map<String, TokensStartState> modeToStartState =
new LinkedHashMap<String, TokensStartState>();
int stateNumber = 0;

View File

@ -65,6 +65,9 @@ public class NFASerializer {
}
String getStateString(NFAState s) {
if ( s==null ) {
System.out.println("s==null");
}
int n = s.stateNumber;
String stateStr = "s"+n;
if ( s instanceof StarBlockStartState ) stateStr = "StarBlockStart_"+n;

View File

@ -11,6 +11,7 @@ import org.antlr.v4.parse.NFABuilder;
import org.antlr.v4.tool.*;
import java.lang.reflect.Constructor;
import java.util.Collection;
import java.util.List;
/** NFA construction routines triggered by NFABuilder.g.
@ -25,16 +26,16 @@ public class ParserNFAFactory implements NFAFactory {
public ParserNFAFactory(Grammar g) { this.g = g; nfa = new NFA(g); }
public NFA createNFA() {
_createNFA();
_createNFA(g.rules.values());
addEOFTransitionToStartRules();
return nfa;
}
public void _createNFA() {
public void _createNFA(Collection<Rule> rules) {
createRuleStartAndStopNFAStates();
GrammarASTAdaptor adaptor = new GrammarASTAdaptor();
for (Rule r : g.rules.values()) {
for (Rule r : rules) {
// find rule's block
GrammarAST blk = (GrammarAST)r.ast.getFirstChildWithType(ANTLRParser.BLOCK);
CommonTreeNodeStream nodes = new CommonTreeNodeStream(adaptor,blk);
@ -125,7 +126,7 @@ public class ParserNFAFactory implements NFAFactory {
public Handle not(GrammarAST n, Handle A) {
GrammarAST ast = A.left.ast;
int ttype = 0;
if ( g.getType()==ANTLRParser.LEXER ) {
if ( g.isLexer() ) {
ttype = Target.getCharValueFromGrammarCharLiteral(ast.getText());
}
else {

View File

@ -28,7 +28,6 @@
package org.antlr.v4.misc;
import org.antlr.v4.automata.Label;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.Grammar;
import java.util.ArrayList;
@ -596,7 +595,7 @@ public class IntervalSet implements IntSet {
}
else {
if ( g!=null ) {
if ( g.getType()!= ANTLRParser.LEXER ) {
if ( !g.isLexer() ) {
for (int i=a; i<=b; i++) {
if ( i>a ) buf.append(", ");
buf.append(g.getTokenDisplayName(i));

View File

@ -141,7 +141,7 @@ public class BasicSemanticChecks {
}
void checkMode(Token modeNameToken) {
if ( g.getType()!=ANTLRParser.LEXER ) {
if ( !g.isLexer() ) {
g.tool.errMgr.grammarError(ErrorType.MODE_NOT_IN_LEXER, g.fileName,
modeNameToken, modeNameToken.getText(), g);
}
@ -170,11 +170,11 @@ public class BasicSemanticChecks {
void checkInvalidRuleDef(Token ruleID) {
String fileName = ruleID.getInputStream().getSourceName();
if ( g.getType()==ANTLRParser.LEXER && Character.isLowerCase(ruleID.getText().charAt(0)) ) {
if ( g.isLexer() && Character.isLowerCase(ruleID.getText().charAt(0)) ) {
g.tool.errMgr.grammarError(ErrorType.PARSER_RULES_NOT_ALLOWED,
fileName, ruleID, ruleID.getText());
}
if ( (g.getType()==ANTLRParser.PARSER||g.getType()==ANTLRParser.TREE) &&
if ( (g.isParser()||g.isTreeGrammar()) &&
Character.isUpperCase(ruleID.getText().charAt(0)) )
{
g.tool.errMgr.grammarError(ErrorType.LEXER_RULES_NOT_ALLOWED,
@ -184,7 +184,7 @@ public class BasicSemanticChecks {
void checkInvalidRuleRef(Token ruleID) {
String fileName = ruleID.getInputStream().getSourceName();
if ( g.getType()==ANTLRParser.LEXER && Character.isLowerCase(ruleID.getText().charAt(0)) ) {
if ( g.isLexer() && Character.isLowerCase(ruleID.getText().charAt(0)) ) {
g.tool.errMgr.grammarError(ErrorType.PARSER_RULES_NOT_ALLOWED,
fileName, ruleID, ruleID.getText());
}
@ -198,7 +198,7 @@ public class BasicSemanticChecks {
tokenID,
tokenID.getText());
}
if ( g.getType()!=ANTLRParser.COMBINED ) {
if ( !g.isCombined() ) {
g.tool.errMgr.grammarError(ErrorType.CANNOT_ALIAS_TOKENS,
fileName,
tokenID,
@ -212,7 +212,7 @@ public class BasicSemanticChecks {
*/
void checkTokenArgs(Token tokenID) {
String fileName = tokenID.getInputStream().getSourceName();
if ( g.getType()!=ANTLRParser.LEXER ) {
if ( !g.isLexer() ) {
g.tool.errMgr.grammarError(ErrorType.ARGS_ON_TOKEN_REF,
fileName, tokenID, tokenID.getText());
}
@ -315,7 +315,7 @@ public class BasicSemanticChecks {
Token altStart,
int alt)
{
if ( g.getType()==ANTLRParser.TREE &&
if ( g.isTreeGrammar() &&
options!=null && options.get("output")!=null &&
options.get("output").equals("template") &&
options.get("rewrite")!=null &&
@ -377,7 +377,7 @@ public class BasicSemanticChecks {
if ( options==null ) return;
String fileName = root.token.getInputStream().getSourceName();
String filter = options.get("filter");
if ( g.getType()==ANTLRParser.TREE && filter!=null && filter.equals("true") ) {
if ( g.isTreeGrammar() && filter!=null && filter.equals("true") ) {
// check for conflicting options
// filter => backtrack=true (can't be false)
// filter&&output!=AST => error
@ -424,7 +424,7 @@ public class BasicSemanticChecks {
importID,
g, delegate);
}
if ( g.getType()==ANTLRParser.COMBINED &&
if ( g.isCombined() &&
(delegate.name.equals(g.name+Grammar.getGrammarTypeToFileNameSuffix(ANTLRParser.LEXER))||
delegate.name.equals(g.name+Grammar.getGrammarTypeToFileNameSuffix(ANTLRParser.PARSER))) )
{

View File

@ -72,7 +72,8 @@ import org.stringtemplate.v4.misc.MultiMap;
}
@members {
Rule currentRule = null;
Rule currentRule;
String currentMode = Grammar.DEFAULT_MODE_NAME;
int currentAlt = 1; // 1..n
public List<Rule> rules = new ArrayList<Rule>();
public List<GrammarAST> rulerefs = new ArrayList<GrammarAST>();
@ -95,6 +96,7 @@ topdown
: globalScope
| globalNamedAction
| tokensSection
| mode
| rule
| ruleArg
| ruleReturns
@ -140,6 +142,8 @@ tokensSection
)
;
mode: ^(MODE ID .+) {currentMode = $ID.text;} ;
rule
@init {List<GrammarAST> modifiers = new ArrayList<GrammarAST>();}
: ^( RULE
@ -151,13 +155,14 @@ rule
{
int numAlts = $RULE.getFirstChildWithType(BLOCK).getChildCount();
Rule r = new Rule(g, $name.text, (GrammarASTWithOptions)$RULE, numAlts);
if ( g.isLexer() ) r.mode = currentMode;
if ( modifiers.size()>0 ) r.modifiers = modifiers;
rules.add(r);
currentRule = r;
currentAlt = 1;
}
;
setAlt
: {inContext("RULE BLOCK")}? ( ALT | ALT_REWRITE )
{currentAlt = $start.getChildIndex()+1;}

File diff suppressed because it is too large Load Diff

View File

@ -18,7 +18,7 @@ import org.antlr.v4.parse.ToolANTLRParser;
import java.util.*;
public class Grammar implements AttributeResolver {
public static final Set doNotCopyOptionsToLexer =
public static final Set doNotCopyOptionsToLexer =
new HashSet() {
{
add("output"); add("ASTLabelType"); add("superClass");
@ -71,7 +71,7 @@ public class Grammar implements AttributeResolver {
* like EPSILON. Char/String literals and token types overlap in the same
* space, however.
*/
protected int maxTokenType = Token.MIN_TOKEN_TYPE-1;
int maxTokenType = Token.MIN_TOKEN_TYPE-1;
/** Map token like ID (but not literals like "while") to its token type */
public Map<String, Integer> tokenNameToTypeMap = new LinkedHashMap<String, Integer>();
@ -108,7 +108,7 @@ public class Grammar implements AttributeResolver {
this.tool = tool;
this.ast = ast;
this.name = ((GrammarAST)ast.getChild(0)).getText();
initTokenSymbolTables();
initTokenSymbolTables();
}
/** For testing */
@ -192,7 +192,7 @@ public class Grammar implements AttributeResolver {
GrammarAST root = tool.load(importedGrammarName+".g");
if ( root instanceof GrammarASTErrorNode ) return; // came back as error node
GrammarRootAST ast = (GrammarRootAST)root;
Grammar g = new Grammar(tool, ast);
Grammar g = tool.createGrammar(ast);
g.fileName = importedGrammarName+".g";
g.parent = this;
importedGrammars.add(g);
@ -314,8 +314,7 @@ public class Grammar implements AttributeResolver {
buf.append(name);
qualifiedName = buf.toString();
}
if ( getType()==ANTLRParser.COMBINED ||
(getType()==ANTLRParser.LEXER && implicitLexer!=null) )
if ( isCombined() || (isLexer() && implicitLexer!=null) )
{
suffix = Grammar.getGrammarTypeToFileNameSuffix(getType());
}
@ -356,7 +355,7 @@ public class Grammar implements AttributeResolver {
String tokenName = null;
int index=0;
// inside any target's char range and is lexer grammar?
if ( getType()==ANTLRParser.LEXER &&
if ( isLexer() &&
ttype >= Label.MIN_CHAR_VALUE && ttype <= Label.MAX_CHAR_VALUE )
{
return Target.getANTLRCharLiteralForChar(ttype);
@ -401,7 +400,7 @@ public class Grammar implements AttributeResolver {
/** Return a set of all possible token or char types for this grammar */
public IntSet getTokenTypes() {
if ( getType()==ANTLRParser.LEXER ) {
if ( isLexer() ) {
return getAllCharValues();
}
return IntervalSet.of(Token.MIN_TOKEN_TYPE, getMaxTokenType());
@ -513,6 +512,11 @@ public class Grammar implements AttributeResolver {
return 0;
}
// Convenience predicates over the grammar's type token; callers use these
// instead of comparing getType() against ANTLRParser constants directly.
public boolean isLexer() { return getType()==ANTLRParser.LEXER; }
public boolean isParser() { return getType()==ANTLRParser.PARSER; }
public boolean isTreeGrammar() { return getType()==ANTLRParser.TREE; }
public boolean isCombined() { return getType()==ANTLRParser.COMBINED; }
public String getTypeString() {
if ( ast==null ) return null;
return ANTLRParser.tokenNames[getType()].toLowerCase();

View File

@ -29,12 +29,12 @@ public class LabelElementPair {
}
// now reset if lexer and string
if ( g.getType() == ANTLRParser.LEXER ) {
if ( g.isLexer() ) {
if ( element.getFirstDescendantWithType(ANTLRParser.STRING_LITERAL)!=null ) {
if ( labelOp==ANTLRParser.ASSIGN ) type = LabelType.LEXER_STRING_LABEL;
}
}
else if ( g.getType() == ANTLRParser.TREE ) {
else if ( g.isTreeGrammar() ) {
if ( element.getFirstDescendantWithType(ANTLRParser.WILDCARD)!=null ) {
if ( labelOp==ANTLRParser.ASSIGN ) type = LabelType.WILDCARD_TREE_LABEL;
else type = LabelType.WILDCARD_TREE_LIST_LABEL;

View File

@ -0,0 +1,35 @@
package org.antlr.v4.tool;
import org.antlr.misc.MultiMap;
import org.antlr.runtime.RecognitionException;
import org.antlr.v4.Tool;
/** */
/** A Grammar subclass for lexer grammars.  Adds tracking of lexical modes:
 *  every rule belongs to a mode (DEFAULT_MODE_NAME unless a "mode" section
 *  assigned another one during symbol collection), and {@link #modes} maps
 *  each mode name to the rules defined in it.
 */
public class LexerGrammar extends Grammar {
	/** Mode that rules belong to before any explicit "mode" section. */
	public static final String DEFAULT_MODE_NAME = "DEFAULT_MODE";

	/** Maps a mode name to the list of rules defined in that mode,
	 *  populated as rules are defined via {@link #defineRule}.
	 */
	public MultiMap<String, Rule> modes = new MultiMap<String, Rule>();

	public LexerGrammar(Tool tool, GrammarRootAST ast) {
		super(tool, ast);
	}

	/** For testing: build from grammar text alone. */
	public LexerGrammar(String grammarText) throws RecognitionException {
		super(grammarText);
	}

	public LexerGrammar(String grammarText, ANTLRToolListener listener) throws RecognitionException {
		super(grammarText, listener);
	}

	public LexerGrammar(String fileName, String grammarText, ANTLRToolListener listener) throws RecognitionException {
		super(fileName, grammarText, listener);
	}

	/** Define the rule normally, then also record it under its mode. */
	@Override
	public void defineRule(Rule r) {
		super.defineRule(r);
		modes.map(r.mode, r); // r.mode is set when the rule is created — TODO confirm it is never null here
	}
}

View File

@ -46,9 +46,14 @@ public class Rule implements AttributeResolver {
public AttributeDict args;
public AttributeDict retvals;
public AttributeDict scope; // scope { int i; }
/** A list of scope names used by this rule */
/** A list of scope names used by this rule */
public List<Token> useScopes;
public Grammar g;
public Grammar g;
/** If we're in a lexer grammar, we might be in a mode */
public String mode;
/** Map a name to an action for this rule like @init {...}.
* The code generator will use this to fill holes in the rule template.

View File

@ -37,12 +37,8 @@ import org.antlr.v4.analysis.DFAMinimizer;
import org.antlr.v4.analysis.LexerNFAToDFAConverter;
import org.antlr.v4.analysis.PredictionDFAFactory;
import org.antlr.v4.automata.*;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.AmbiguityMessage;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.Message;
import org.antlr.v4.tool.UnreachableAltsMessage;
import org.antlr.v4.tool.*;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -119,7 +115,7 @@ public abstract class BaseTest {
}
ParserNFAFactory f = new ParserNFAFactory(g);
if ( g.getType()== ANTLRParser.LEXER ) f = new LexerNFAFactory(g);
if ( g.isLexer() ) f = new LexerNFAFactory((LexerGrammar)g);
return f.createNFA();
}
@ -179,13 +175,19 @@ public abstract class BaseTest {
List<Message> checkLexerDFA(String gtext, String expecting)
throws Exception
{
return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting);
}
List<Message> checkLexerDFA(String gtext, String modeName, String expecting)
throws Exception
{
ErrorQueue equeue = new ErrorQueue();
Grammar g = new Grammar(gtext, equeue);
LexerGrammar g = new LexerGrammar(gtext, equeue);
g.nfa = createNFA(g);
LexerNFAToDFAConverter conv = new LexerNFAToDFAConverter(g);
DFA dfa = conv.createDFA();
g.setLookaheadDFA(0, dfa); // only one decision
DFA dfa = conv.createDFA(modeName);
g.setLookaheadDFA(0, dfa); // only one decision to worry about
String result = null;
if ( dfa!=null ) result = dfa.toString();

View File

@ -68,25 +68,20 @@ public class TestLexerDFAConstruction extends BaseTest {
checkLexerDFA(g, expecting);
}
@Test public void testAplusNonGreedy() throws Exception {
@Test public void testMode() throws Exception {
String g =
"lexer grammar t;\n"+
"A : (options {greedy=false;}:'0'..'9')+ '000' ;\n";
"lexer grammar L;\n"+
"A : 'a' ;\n" +
"X : 'x' ;\n" +
"mode FOO;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n";
String expecting =
"\n";
checkLexerDFA(g, expecting);
"s0-'b'->:s1=> B\n" +
"s0-'c'->:s2=> C\n";
checkLexerDFA(g, "FOO", expecting);
}
@Test public void testDotNonGreedy() throws Exception {
String g =
"lexer grammar t;\n"+
"A : (options {greedy=false;}:.)+ '000' ;\n";
String expecting =
"\n";
checkLexerDFA(g, expecting);
}
public void _template() throws Exception {
String g =

View File

@ -2,9 +2,9 @@ package org.antlr.v4.test;
import org.antlr.v4.Tool;
import org.antlr.v4.automata.*;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.Rule;
import org.junit.Test;
@ -56,7 +56,7 @@ public class TestNFAConstruction extends BaseTest {
}
@Test public void testRange() throws Exception {
Grammar g = new Grammar(
LexerGrammar g = new LexerGrammar(
"lexer grammar P;\n"+
"A : 'a'..'c' ;"
);
@ -64,11 +64,11 @@ public class TestNFAConstruction extends BaseTest {
"RuleStart_A_1->s3\n" +
"s3-'a'..'c'->s4\n" +
"s4->RuleStop_A_2\n";
checkRule(g, "A", expecting);
checkTokensRule(g, "A", expecting);
}
@Test public void testRangeOrRange() throws Exception {
Grammar g = new Grammar(
LexerGrammar g = new LexerGrammar(
"lexer grammar P;\n"+
"A : ('a'..'c' 'h' | 'q' 'j'..'l') ;"
);
@ -85,7 +85,7 @@ public class TestNFAConstruction extends BaseTest {
"s6->BlockEnd_12\n" +
"s10->BlockEnd_12\n" +
"BlockEnd_12->RuleStop_A_2\n";
checkRule(g, "A", expecting);
checkTokensRule(g, "A", expecting);
}
@Test public void testStringLiteralInParser() throws Exception {
@ -269,21 +269,6 @@ public class TestNFAConstruction extends BaseTest {
checkRule(g, "a", expecting);
}
@Test public void testAplusNonGreedy() throws Exception {
Grammar g = new Grammar(
"lexer grammar t;\n"+
"A : (options {greedy=false;}:'0'..'9')+ ;\n");
String expecting =
"RuleStart_A_1->PlusBlockStart_5\n" +
"PlusBlockStart_5->s3\n" +
"s3-'0'..'9'->s4\n" +
"s4->LoopBack_6\n" +
"LoopBack_6->BlockEnd_7\n" +
"LoopBack_6->s3\n" +
"BlockEnd_7->RuleStop_A_2\n";
checkRule(g, "A", expecting);
}
@Test public void testAorBorEmptyPlus() throws Exception {
Grammar g = new Grammar(
"parser grammar P;\n"+
@ -871,6 +856,75 @@ public class TestNFAConstruction extends BaseTest {
checkRule(g, "a", expecting);
}
*/
/** Rules A and X appear before any mode section, so they must be reachable
 *  from the DEFAULT_MODE start state; B and C (mode FOO) must not.
 */
@Test public void testDefaultMode() throws Exception {
	String grammarText =
		"lexer grammar L;\n"+
		"A : 'a' ;\n" +
		"X : 'x' ;\n" +
		"mode FOO;\n" +
		"B : 'b' ;\n" +
		"C : 'c' ;\n";
	LexerGrammar g = new LexerGrammar(grammarText);
	String expecting =
		"BlockStart_0->RuleStart_A_2\n" +
		"BlockStart_0->RuleStart_X_4\n" +
		"RuleStart_A_2->s10\n" +
		"RuleStart_X_4->s12\n" +
		"s10-'a'->s11\n" +
		"s12-'x'->s13\n" +
		"s11->RuleStop_A_3\n" +
		"s13->RuleStop_X_5\n";
	checkTokensRule(g, "DEFAULT_MODE", expecting);
}
/** Rules B and C follow "mode FOO;", so they must be reachable from the
 *  FOO start state; A and X (default mode) must not.
 */
@Test public void testMode() throws Exception {
	String grammarText =
		"lexer grammar L;\n"+
		"A : 'a' ;\n" +
		"X : 'x' ;\n" +
		"mode FOO;\n" +
		"B : 'b' ;\n" +
		"C : 'c' ;\n";
	LexerGrammar g = new LexerGrammar(grammarText);
	String expecting =
		"BlockStart_1->RuleStart_B_6\n" +
		"BlockStart_1->RuleStart_C_8\n" +
		"RuleStart_B_6->s14\n" +
		"RuleStart_C_8->s16\n" +
		"s14-'b'->s15\n" +
		"s16-'c'->s17\n" +
		"s15->RuleStop_B_7\n" +
		"s17->RuleStop_C_9\n";
	checkTokensRule(g, "FOO", expecting);
}
/** Run semantic analysis on g, build its lexer NFA, then serialize the NFA
 *  reachable from modeName's start state and compare against expecting.
 */
void checkTokensRule(LexerGrammar g, String modeName, String expecting) {
	if ( g.ast!=null && !g.ast.hasErrors ) {
		System.out.println(g.ast.toStringTree());
		Tool antlr = new Tool();
		SemanticPipeline sem = new SemanticPipeline(g);
		sem.process();
		if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any)
			for (Grammar imp : g.getImportedGrammars()) {
				antlr.process(imp);
			}
		}
	}
	if ( g.modes.get(modeName)==null ) {
		// NOTE(review): an unknown mode name makes the test pass vacuously;
		// consider an assertion failure here instead of a silent return.
		System.err.println("no such mode "+modeName);
		return;
	}
	ParserNFAFactory f = new LexerNFAFactory(g); // g is already a LexerGrammar; cast removed
	NFA nfa = f.createNFA();
	NFAState startState = nfa.modeToStartState.get(modeName);
	NFASerializer serializer = new NFASerializer(g, startState);
	String result = serializer.toString();
	assertEquals(expecting, result);
}
void checkRule(Grammar g, String ruleName, String expecting) {
if ( g.ast!=null && !g.ast.hasErrors ) {
System.out.println(g.ast.toStringTree());
@ -885,7 +939,6 @@ public class TestNFAConstruction extends BaseTest {
}
ParserNFAFactory f = new ParserNFAFactory(g);
if ( g.getType()== ANTLRParser.LEXER ) f = new LexerNFAFactory(g);
NFA nfa = f.createNFA();
Rule r = g.getRule(ruleName);
NFAState startState = nfa.ruleToStartState.get(r);
@ -893,7 +946,6 @@ public class TestNFAConstruction extends BaseTest {
String result = serializer.toString();
//System.out.print(result);
System.out.println("test NFA checkRule: thread name: "+Thread.currentThread().getName());
assertEquals(expecting, result);
}
}