got some unit tests in
[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6787]
parent c2f24b5f8a
commit dcadda638e
Tool.java

@@ -40,6 +40,7 @@ public class Tool {
     public boolean forceAllFilesToOutputDir = false;
     public boolean forceRelativeOutput = false;
     public boolean deleteTempLexer = true;
+    public boolean minimizeDFA = true;
     public boolean verbose = false;
     /** Don't process grammar file if generated files are newer than grammar */
     /**
@@ -57,7 +58,7 @@ public class Tool {
      *
      * @param make
      */
-    public boolean make = false;
+    public boolean make = false;
     public boolean showBanner = true;
 
     /** Exit after showing version or whatever */
@@ -197,9 +198,12 @@ public class Tool {
             else if (args[i].equals("-Xgrtree")) {
                 internalOption_PrintGrammarTree = true; // print grammar tree
             }
-            else if (args[i].equals("-Xdfa")) {
-                internalOption_PrintDFA = true;
-            }
+            else if (args[i].equals("-Xdfa")) {
+                internalOption_PrintDFA = true;
+            }
+            else if (args[i].equals("-Xnominimizedfa")) {
+                minimizeDFA = false;
+            }
             else if (args[i].equals("-Xnoprune")) {
                 //DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
             }
@@ -341,14 +345,16 @@ public class Tool {
         GrammarRootAST ast = (GrammarRootAST)t;
         Grammar g = new Grammar(this, ast);
         g.fileName = grammarFileNames.get(0);
-        process(g);
+        process(g);
         if ( ast!=null && ast.grammarType==ANTLRParser.COMBINED && !ast.hasErrors ) {
             lexerAST = extractImplicitLexer(g); // alters ast
-            Grammar lexerg = new Grammar(this, lexerAST);
-            lexerg.fileName = grammarFileNames.get(0);
-            g.implicitLexer = lexerg;
-            lexerg.implicitLexerOwner = g;
-            process(lexerg);
+            if ( lexerAST!=null ) {
+                Grammar lexerg = new Grammar(this, lexerAST);
+                lexerg.fileName = grammarFileNames.get(0);
+                g.implicitLexer = lexerg;
+                lexerg.implicitLexerOwner = g;
+                process(lexerg);
+            }
         }
     }
 
@@ -361,6 +367,9 @@ public class Tool {
         // MAKE SURE GRAMMAR IS SEMANTICALLY CORRECT (FILL IN GRAMMAR OBJECT)
         SemanticPipeline sem = new SemanticPipeline(g);
         sem.process();
+
+        if ( ErrorManager.getNumErrors()>0 ) return;
+
         if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any)
             for (Grammar imp : g.getImportedGrammars()) {
                 process(imp);
@@ -463,15 +472,23 @@ public class Tool {
                 rulesWeMoved.add(r);
             }
         }
+        int nLexicalRules = rulesWeMoved.size();
         rules.removeAll(rulesWeMoved);
 
         // Will track 'if' from IF : 'if' ; rules to avoid defining new token for 'if'
         Map<String,String> litAliases =
             Grammar.getStringLiteralAliasesFromLexerRules(lexerAST);
 
+        if ( nLexicalRules==0 && (litAliases==null||litAliases.size()==0) &&
+             combinedGrammar.stringLiteralToTypeMap.size()==0 )
+        {
+            // no rules, tokens{}, or 'literals' in grammar
+            return null;
+        }
+
         // add strings from combined grammar (and imported grammars) into to lexer
         for (String lit : combinedGrammar.stringLiteralToTypeMap.keySet()) {
-            if ( litAliases.containsKey(lit) ) continue; // already has rule
+            if ( litAliases!=null && litAliases.containsKey(lit) ) continue; // already has rule
             // create for each literal: (RULE <uniquename> (BLOCK (ALT <lit>))
             //TreeWizard wiz = new TreeWizard(adaptor,ANTLRParser.tokenNames);
             String rname = combinedGrammar.getStringLiteralLexerRuleName(lit);
@@ -682,7 +699,8 @@ public class Tool {
     public static void Xhelp() {
         ErrorManager.info("ANTLR Parser Generator Version " + new Tool().VERSION);
         System.err.println("  -Xgrtree          print the grammar AST");
-        System.err.println("  -Xdfa             print DFA as text ");
+        System.err.println("  -Xdfa             print DFA as text");
+        System.err.println("  -Xnominimizedfa   don't minimize decision DFA");
         System.err.println("  -Xnoprune         test lookahead against EBNF block exit branches");
         System.err.println("  -Xnocollapse      collapse incident edges into DFA states");
         System.err.println("  -Xdbgconversion   dump lots of info during NFA conversion");
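Review note: this commit threads a new tool option end to end: a minimizeDFA field
defaulting to true, a -Xnominimizedfa extended option that clears it, and a matching
Xhelp line. AnalysisPipeline (below) now consults g.tool.minimizeDFA before running
DFAMinimizer. Assuming the usual Tool main entry point and a placeholder grammar
file name, turning minimization off from the command line would look like:

    java org.antlr.v4.Tool -Xnominimizedfa T.g
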
AnalysisPipeline.java

@@ -23,13 +23,15 @@ public class AnalysisPipeline {
         DFA dfa = conv.createDFA();
         g.setLookaheadDFA(0, dfa); // only one decision
 
-        System.out.println("MINIMIZE");
-        int before = dfa.stateSet.size();
-        DFAMinimizer dmin = new DFAMinimizer(dfa);
-        dfa.minimized = dmin.minimize();
-        int after = dfa.stateSet.size();
-        if ( after < before ) {
-            System.out.println("DFA minimized from "+before+" to "+after+" states");
+        if ( g.tool.minimizeDFA ) {
+            System.out.println("MINIMIZE");
+            int before = dfa.stateSet.size();
+            DFAMinimizer dmin = new DFAMinimizer(dfa);
+            dfa.minimized = dmin.minimize();
+            int after = dfa.stateSet.size();
+            if ( after < before ) {
+                System.out.println("DFA minimized from "+before+" to "+after+" states");
+            }
         }
         return;
     }
@@ -51,7 +53,6 @@ public class AnalysisPipeline {
     }
 
     public DFA createDFA(DecisionState s) {
         // TRY STACK LIMITED LL(*) ANALYSIS
         PredictionDFAFactory conv = new PredictionDFAFactory(g, s);
         DFA dfa = conv.createDFA();
-        System.out.print("DFA="+dfa);
@@ -63,13 +64,15 @@ public class AnalysisPipeline {
         conv.issueAmbiguityWarnings();
 
         // MINIMIZE DFA
-        System.out.println("MINIMIZE");
-        int before = dfa.stateSet.size();
-        DFAMinimizer dmin = new DFAMinimizer(dfa);
-        dfa.minimized = dmin.minimize();
-        int after = dfa.stateSet.size();
-        if ( after < before ) {
-            System.out.println("DFA minimized from "+before+" to "+after+" states");
+        if ( g.tool.minimizeDFA ) {
+            System.out.println("MINIMIZE");
+            int before = dfa.stateSet.size();
+            DFAMinimizer dmin = new DFAMinimizer(dfa);
+            dfa.minimized = dmin.minimize();
+            int after = dfa.stateSet.size();
+            if ( after < before ) {
+                System.out.println("DFA minimized from "+before+" to "+after+" states");
+            }
         }
 
         return dfa;
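Review note: both minimization sites (the single-decision lexer path and the
per-decision parser path) get the same g.tool.minimizeDFA guard, so minimization can
be switched off globally; the state-count printout only fires when minimization
actually shrank the DFA. A sketch of driving the flag programmatically; the
AnalysisPipeline constructor and process() call are assumed here, not shown in this
patch:

    Tool tool = new Tool();
    tool.minimizeDFA = false;             // same effect as -Xnominimizedfa
    Grammar g = new Grammar(tool, ast);   // ast: a parsed GrammarRootAST
    AnalysisPipeline anal = new AnalysisPipeline(g); // assumed constructor
    anal.process();                       // DFAMinimizer is now skipped
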
LexerNFAToDFAConverter.java

@@ -3,6 +3,7 @@ package org.antlr.v4.analysis;
 import org.antlr.v4.automata.*;
 import org.antlr.v4.misc.IntervalSet;
 import org.antlr.v4.misc.OrderedHashSet;
+import org.antlr.v4.misc.Utils;
 import org.antlr.v4.tool.Grammar;
 
 import java.util.*;
@@ -47,13 +48,21 @@ public class LexerNFAToDFAConverter {
 
         // walk accept states, informing DFA
         for (LexerState d : accepts) {
+            Set<Integer> nfaAcceptStates = new HashSet<Integer>();
             for (NFAConfig c : d.nfaConfigs) {
                 NFAState s = c.state;
                 if ( s instanceof RuleStopState && !s.rule.isFragment() ) {
                     dfa.defineAcceptState(c.alt, d);
-                    d.matchesRules.add(s.rule);
+                    nfaAcceptStates.add(Utils.integer(s.stateNumber));
                 }
             }
+            List<Integer> sorted = new ArrayList<Integer>();
+            sorted.addAll(nfaAcceptStates);
+            Collections.sort(sorted);
+            for (int i : sorted) {
+                NFAState s = g.nfa.states.get(i);
+                d.matchesRules.add(s.rule);
+            }
         }
 
         closureBusy = null; // wack all that memory used during closure
@@ -105,7 +114,7 @@ public class LexerNFAToDFAConverter {
             return;
         }
 
-        System.out.println("ADD "+t);
+        //System.out.println("ADD "+t);
         work.add(t); // add to work list to continue NFA conversion
         dfa.addState(t); // add state we've never seen before
         if ( t.isAcceptState ) accepts.add(t);
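Review note: the converter now records accept-state NFA state numbers, sorts them,
and only then maps back to rules. Since NFA states are numbered as rules are
elaborated, the sort recovers grammar declaration order, which is what gives
earlier-declared rules (keywords) precedence over later ones (ID). A distilled,
self-contained sketch of the idea, with simplified hypothetical names:

    import java.util.*;

    class RuleOrderSketch {
        // Map accept NFA state numbers back to rule names in declaration order.
        static List<String> rulesInGrammarOrder(Set<Integer> acceptStates,
                                                Map<Integer,String> ruleOfState) {
            List<Integer> sorted = new ArrayList<Integer>(acceptStates);
            Collections.sort(sorted); // lower state number == declared earlier
            List<String> rules = new ArrayList<String>();
            for (int s : sorted) rules.add(ruleOfState.get(s));
            return rules;
        }
        public static void main(String[] args) {
            Map<Integer,String> ruleOf = new HashMap<Integer,String>();
            ruleOf.put(7, "IF");   // IF declared before ID, so lower state number
            ruleOf.put(12, "ID");
            Set<Integer> accepts = new HashSet<Integer>(Arrays.asList(12, 7));
            System.out.println(rulesInGrammarOrder(accepts, ruleOf)); // [IF, ID]
        }
    }
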
PredictionDFAFactory.java

@@ -21,9 +21,6 @@ public class PredictionDFAFactory {
     /** DFA we are creating */
     DFA dfa;
 
-    /** Stack depth max; same as Bermudez's m */
-    int m = 1;
-
     /** A list of DFA states we still need to process during NFA conversion */
     List<DFAState> work = new LinkedList<DFAState>();
 
@@ -81,8 +78,6 @@ public class PredictionDFAFactory {
      */
     Set<NFAConfig> closureBusy;
 
-    //org.antlr.v4.misc.BitSet visited = new org.antlr.v4.misc.BitSet();
-
     Resolver resolver;
 
     public static boolean debug = false;
@@ -181,12 +176,12 @@ public class PredictionDFAFactory {
         // Just return as a valid DFA state
         int alt = t.getUniquelyPredictedAlt();
         if ( alt > 0 ) { // uniquely predicts an alt?
-            System.out.println(t+" predicts "+alt);
+            //System.out.println(t+" predicts "+alt);
             // Define new stop state
             dfa.addAcceptState(alt, t);
         }
         else {
-            System.out.println("ADD "+t);
+            // System.out.println("ADD "+t);
             work.add(t); // unresolved, add to work list to continue NFA conversion
             dfa.addState(t); // add state we've never seen before
         }
@@ -331,7 +326,7 @@ public class PredictionDFAFactory {
     // local follow for invokingRule and global follow for other links
     void ruleStopStateClosure(DFAState d, NFAConfig c, boolean collectPredicates) {
         if ( !c.context.recursed ) {
-            System.out.println("dynamic FOLLOW of "+c.state+" context="+c.context);
+            //System.out.println("dynamic FOLLOW of "+c.state+" context="+c.context);
             if ( c.context.isEmpty() ) {
                 commonClosure(d, c, collectPredicates); // do global FOLLOW
             }
@@ -350,7 +345,7 @@ public class PredictionDFAFactory {
             invokingRule = c.context.returnState.rule;
         }
 
-        System.out.println("FOLLOW of "+c.state+" context="+c.context);
+        //System.out.println("FOLLOW of "+c.state+" context="+c.context);
         // follow all static FOLLOW links
         int n = c.state.getNumberOfTransitions();
         for (int i=0; i<n; i++) {
@@ -388,11 +383,11 @@ public class PredictionDFAFactory {
             // first create a new context and push onto call tree,
             // recording the fact that we are invoking a rule and
             // from which state.
-            System.out.println("nonrecursive invoke of "+t.target+" ret to "+retState+" ctx="+c.context);
+            //System.out.println("nonrecursive invoke of "+t.target+" ret to "+retState+" ctx="+c.context);
             newContext = new NFAContext(c.context, retState);
         }
         else {
-            System.out.println("# recursive invoke of "+t.target+" ret to "+retState+" ctx="+c.context);
+            //System.out.println("# recursive invoke of "+t.target+" ret to "+retState+" ctx="+c.context);
             // don't record recursion, but record we did so we know
             // what to do at end of rule.
             c.context.recursed = true;
@@ -423,7 +418,7 @@ public class PredictionDFAFactory {
         // do not hoist syn preds from other rules; only get if in
         // starting state's rule (i.e., context is empty)
         if ( !labelContext.isSyntacticPredicate() || c.state==altLeftEdge ) {
-            System.out.println("&"+labelContext+" enclosingRule="+c.state.rule);
+            //System.out.println("&"+labelContext+" enclosingRule="+c.state.rule);
             newSemanticContext =
                 SemanticContext.and(c.semanticContext, labelContext);
         }
@@ -488,5 +483,5 @@ public class PredictionDFAFactory {
         return unreachable;
     }
 
-    void issueAmbiguityWarnings() { resolver.issueAmbiguityWarnings(); }
+    public void issueAmbiguityWarnings() { resolver.issueAmbiguityWarnings(); }
 }
Resolver.java

@@ -238,18 +238,18 @@ public class Resolver {
     Set<Integer> alts = getAmbiguousAlts(d);
     List<Integer> sorted = new ArrayList<Integer>(alts);
     Collections.sort(sorted);
-    System.err.println("ambig alts="+sorted);
+    //System.err.println("ambig alts="+sorted);
     List<DFAState> dfaStates = probe.getAnyDFAPathToTarget(d);
-    System.out.print("path =");
+    //System.out.print("path =");
     for (DFAState d2 : dfaStates) {
         System.out.print(" "+d2.stateNumber);
     }
-    System.out.println("");
+    //System.out.println("");
 
     List<IntSet> labels = probe.getEdgeLabels(d);
 
     String input = probe.getInputSequenceDisplay(converter.g, labels);
-    System.out.println("input="+ input);
+    //System.out.println("input="+ input);
 
     LinkedHashMap<Integer,List<Token>> altPaths = new LinkedHashMap<Integer,List<Token>>();
     for (int alt : sorted) {
@@ -257,11 +257,11 @@ public class Resolver {
         for (DFAState d2 : dfaStates) {
             nfaStates.add( d2.getUniqueNFAStates(alt) );
         }
-        System.out.println("NFAConfigs per state: "+nfaStates);
+        //System.out.println("NFAConfigs per state: "+nfaStates);
         List<Token> path =
             probe.getGrammarLocationsForInputSequence(nfaStates, labels);
         altPaths.put(alt, path);
-        System.out.println("path = "+path);
+        //System.out.println("path = "+path);
     }
 
     List<Integer> incompletelyCoveredAlts = converter.statesWithIncompletelyCoveredAlts.get(d);
LexerState.java

@@ -2,23 +2,22 @@ package org.antlr.v4.automata;
 
 import org.antlr.v4.tool.Rule;
 
-import java.util.HashSet;
-import java.util.Set;
+import java.util.ArrayList;
+import java.util.List;
 
 /** Lexer DFA states track just NFAStates not config with stack/alt etc... like
  *  DFA used for prediction.
  */
 public class LexerState extends DFAState {
-    //public OrderedHashSet<NFAState> nfaStates;
 
     /** For ambiguous lexer rules, the accept state matches a set of rules,
-     *  not just one. Means we can't use predictsAlt (an int).
+     *  not just one. Means we can't use predictsAlt (an int). The
+     *  order of rules is order given in grammar. So, gives precedence to
+     *  keywords vs IDs if keywords are first.
      */
-    public Set<Rule> matchesRules = new HashSet<Rule>();
+    public List<Rule> matchesRules = new ArrayList<Rule>();
 
     public LexerState(DFA dfa) {
         super(dfa);
-        //nfaStates = new OrderedHashSet<NFAState>();
     }
 
-    // public Set<NFAState> getUniqueNFAStates() { return nfaStates; }
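Review note: matchesRules changes from Set<Rule> to List<Rule> because insertion
order now carries meaning: LexerNFAToDFAConverter (above) adds rules in grammar
declaration order, so an accept state that matches both a keyword rule and ID can
give the keyword precedence when it is declared first. The keywordvsID test below
exercises exactly this.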
ParserNFAFactory.java

@@ -359,7 +359,7 @@ public class ParserNFAFactory implements NFAFactory {
     public Handle wildcardTree(GrammarAST associatedAST) { return null; }
 
     void epsilon(NFAState a, NFAState b) {
-        a.addTransition(new EpsilonTransition(b));
+        if ( a!=null ) a.addTransition(new EpsilonTransition(b));
     }
 
     /** Define all the rule begin/end NFAStates to solve forward reference
GrammarASTAdaptor.java

@@ -5,6 +5,7 @@ import org.antlr.runtime.tree.CommonTreeAdaptor;
 import org.antlr.v4.tool.GrammarAST;
 import org.antlr.v4.tool.GrammarASTErrorNode;
 import org.antlr.v4.tool.GrammarASTWithOptions;
+import org.antlr.v4.tool.TerminalAST;
 
 public class GrammarASTAdaptor extends CommonTreeAdaptor {
     CharStream input; // where we can find chars ref'd by tokens in tree
@@ -23,6 +24,11 @@ public class GrammarASTAdaptor extends CommonTreeAdaptor {
             // needed by TreeWizard to make RULE tree
             t = new GrammarASTWithOptions(new CommonToken(tokenType, text));
         }
+        else if ( tokenType==ANTLRParser.STRING_LITERAL ) {
+            // implicit lexer construction done with wizard; needs this node type
+            // whereas grammar ANTLRParser.g can use token option to spec node type
+            t = new TerminalAST(new CommonToken(tokenType, text));
+        }
         else {
             t = (GrammarAST)super.create(tokenType, text);
         }
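Review note: the new STRING_LITERAL branch supports Tool.extractImplicitLexer, which
builds the implicit lexer's tree through a TreeWizard; the wizard creates nodes via
this adaptor, so literals must come back as TerminalAST nodes, whereas trees built
from ANTLRParser.g can request the node type with a token option (as the inline
comments note).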
SemanticPipeline.java

@@ -65,7 +65,7 @@ public class SemanticPipeline {
     symcheck.examine(); // side-effect: strip away redef'd rules.
 
     // don't continue if we get symbol errors
-    if ( false ) return;
+    if ( ErrorManager.getNumErrors()>0 ) return;
 
     // STORE RULES/ACTIONS/SCOPES IN GRAMMAR
     for (Rule r : collector.rules) g.defineRule(r);
@@ -77,7 +77,7 @@ public class SemanticPipeline {
     symcheck.checkForQualifiedRuleIssues(g, collector.qualifiedRulerefs);
 
     // don't continue if we get symbol errors
-    if ( false ) return;
+    if ( ErrorManager.getNumErrors()>0 ) return;
 
     // CHECK ATTRIBUTE EXPRESSIONS FOR SEMANTIC VALIDITY
     AttributeChecks.checkAllAttributeExpressions(g);
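Review note: the two `if ( false ) return;` guards were dead code, so the pipeline
previously ran every later phase even after symbol errors; checking
ErrorManager.getNumErrors() restores the early bail-out that the "don't continue if
we get symbol errors" comments describe.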
BaseTest.java

@@ -33,9 +33,10 @@ import org.antlr.runtime.RecognitionException;
 import org.antlr.runtime.Token;
 import org.antlr.runtime.TokenSource;
 import org.antlr.v4.Tool;
-import org.antlr.v4.automata.LexerNFAFactory;
-import org.antlr.v4.automata.NFA;
-import org.antlr.v4.automata.ParserNFAFactory;
+import org.antlr.v4.analysis.DFAMinimizer;
+import org.antlr.v4.analysis.LexerNFAToDFAConverter;
+import org.antlr.v4.analysis.PredictionDFAFactory;
+import org.antlr.v4.automata.*;
 import org.antlr.v4.parse.ANTLRParser;
 import org.antlr.v4.semantics.SemanticPipeline;
 import org.antlr.v4.tool.ANTLRErrorListener;
@@ -121,7 +122,62 @@ public abstract class BaseTest {
         if ( g.getType()== ANTLRParser.LEXER ) f = new LexerNFAFactory(g);
         return f.createNFA();
     }
 
+    public DFA createDFA(Grammar g, DecisionState s) {
+        PredictionDFAFactory conv = new PredictionDFAFactory(g, s);
+        DFA dfa = conv.createDFA();
+        conv.issueAmbiguityWarnings();
+        System.out.print("DFA="+dfa);
+        return dfa;
+    }
+
+    public void minimizeDFA(DFA dfa) {
+        DFAMinimizer dmin = new DFAMinimizer(dfa);
+        dfa.minimized = dmin.minimize();
+    }
+
+    List<Message> checkRuleDFA(String gtext, String ruleName, String expecting)
+        throws Exception
+    {
+        ErrorQueue equeue = new ErrorQueue();
+        ErrorManager.setErrorListener(equeue);
+
+        Grammar g = new Grammar(gtext);
+        NFA nfa = createNFA(g);
+        NFAState s = nfa.ruleToStartState.get(g.getRule(ruleName));
+        if ( s==null ) {
+            System.err.println("no such rule: "+ruleName);
+            return null;
+        }
+        DecisionState blk = (DecisionState)s.transition(0).target;
+
+        DFA dfa = createDFA(g, blk);
+        String result = null;
+        if ( dfa!=null ) result = dfa.toString();
+        assertEquals(expecting, result);
+
+        return equeue.all;
+    }
+
+    List<Message> checkLexerDFA(String gtext, String expecting)
+        throws Exception
+    {
+        ErrorQueue equeue = new ErrorQueue();
+        ErrorManager.setErrorListener(equeue);
+
+        Grammar g = new Grammar(gtext);
+        g.nfa = createNFA(g);
+        LexerNFAToDFAConverter conv = new LexerNFAToDFAConverter(g);
+        DFA dfa = conv.createDFA();
+        g.setLookaheadDFA(0, dfa); // only one decision
+
+        String result = null;
+        if ( dfa!=null ) result = dfa.toString();
+        assertEquals(expecting, result);
+
+        return equeue.all;
+    }
+
     protected boolean compile(String fileName) {
         String compiler = "javac";
         String classpathOption = "-classpath";
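Review note: checkRuleDFA and checkLexerDFA are the assertion helpers the new test
classes below build on: parse the grammar text, build its NFA, convert one decision
(or the whole lexer) to a DFA, and compare dfa.toString() against the expected
transition list, returning any collected error messages. A typical call, in the
style of the tests that follow:

    String g =
        "parser grammar P;\n" +
        "a : A | B ;";
    checkRuleDFA(g, "a",
        "s0-A->:s1=>1\n" +
        "s0-B->:s2=>2\n");
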
TestDFAConstruction.java

@@ -1,20 +1,120 @@
 package org.antlr.v4.test;
 
-import org.antlr.v4.tool.Grammar;
+import org.antlr.v4.tool.Message;
 import org.junit.Test;
 
+import java.util.List;
+
 public class TestDFAConstruction extends BaseTest {
-    @Test
-    public void testA() throws Exception {
-        Grammar g = new Grammar(
-            "parser grammar P;\n"+
-            "a : A;");
-        String expecting =
-            "RuleStart_a_0->s2\n" +
-            "s2-A->s3\n" +
-            "s3->RuleStop_a_1\n" +
-            "RuleStop_a_1-EOF->s4\n";
-        //checkRule(g, "a", expecting);
-    }
-}
+    @Test public void testSimpleLinearApproxDecisionAsDFA() throws Exception {
+        String g =
+            "parser grammar P;\n"+
+            "a : A | B ;";
+        String expecting =
+            "s0-A->:s1=>1\n" +
+            "s0-B->:s2=>2\n";
+        checkRuleDFA(g, "a", expecting);
+    }
+
+    @Test public void testApproxRecur() throws Exception {
+        String g =
+            "parser grammar A;\n" +
+            "a : e X\n" +
+            "  | e Y\n" +
+            "  ;\n" +
+            "e : L e R\n" +
+            "  | I\n" +
+            "  ;";
+        String expecting =
+            "s0-I->s2\n" +
+            "s0-L->s1\n" +
+            "s1-I->s2\n" +
+            "s1-L->s1\n" +
+            "s2-R->s3\n" +
+            "s2-X->:s5=>1\n" +
+            "s2-Y->:s4=>2\n" +
+            "s3-R->s3\n" +
+            "s3-X->:s5=>1\n" +
+            "s3-Y->:s4=>2\n";
+        checkRuleDFA(g, "a", expecting);
+    }
+
+    @Test public void checkNullableRuleAndMultipleCalls() throws Exception {
+        String g =
+            "parser grammar B;\n" +
+            "  \n" +
+            "a : b X\n"+
+            "  | b Y\n"+
+            "  ; \n" +
+            "b : c D\n"+
+            "  | c E\n"+
+            "  ;\n" +
+            "c : C | ;";
+        String expecting =
+            "s0-C->s3\n" +
+            "s0-D->s1\n" +
+            "s0-E->s2\n" +
+            "s1-X->:s5=>1\n" +
+            "s1-Y->:s4=>2\n" +
+            "s2-X->:s5=>1\n" +
+            "s2-Y->:s4=>2\n" +
+            "s3-D->s1\n" +
+            "s3-E->s2\n";
+        checkRuleDFA(g, "a", expecting);
+    }
+
+    @Test public void avoidsGlobalFollowSequence() throws Exception {
+        String g =
+            "parser grammar C;\n" +
+            "a : b X\n" +
+            "  | b Y\n" +
+            "  ; \n" +
+            "b : F\n" +
+            "  |\n" +
+            "  ; \n" +
+            "q : b Q ;";
+        String expecting =
+            "s0-F->s1\n" +
+            "s0-X->:s3=>1\n" +
+            "s0-Y->:s2=>2\n" +
+            "s1-X->:s3=>1\n" +
+            "s1-Y->:s2=>2\n";
+        checkRuleDFA(g, "a", expecting);
+    }
+
+    @Test public void strongLL() throws Exception {
+        String g =
+            "parser grammar D;\n" +
+            "\n" +
+            "s : X a A B\n" +
+            "  | Y a B\n" +
+            "  ;\n" +
+            "a : A | B | ;";
+        // AB predicts 1 and 3 but AB only happens when called from 1st alt for 3rd alt
+        // In that case, 1st alt would be AA not AB. LL(2) but not strong LL(2)
+        // dup rules to reduce to strong LL(2)
+        String expecting =
+            "s0-A->s1\n" +
+            "s0-B->s2\n" +
+            "s1-A->:s3=>1\n" +
+            "s1-B->:s4=>1\n" +
+            "s2-A->:s5=>2\n" +
+            "s2-B->:s6=>2\n" +
+            "s2-EOF->:s7=>3\n";
+        List<Message> msgs = checkRuleDFA(g, "a", expecting);
+        System.out.println(msgs);
+    }
+
+    @Test public void _template() throws Exception {
+        String g =
+            "";
+        String expecting =
+            "";
+        checkRuleDFA(g, "a", expecting);
+    }
+}
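Review note: reading the expected-DFA strings in these tests: "s0-A->s1" is a
transition from state s0 to s1 on token A, and ":sN=>k" marks sN as an accept state
predicting alternative k. So the first test's expectation says: from the start
state, seeing A predicts alt 1 and seeing B predicts alt 2.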
TestLexerDFAConstruction.java (new file)

@@ -0,0 +1,96 @@
+package org.antlr.v4.test;
+
+import org.junit.Test;
+
+/** */
+public class TestLexerDFAConstruction extends BaseTest {
+
+    @Test public void unicode() throws Exception {
+        String g =
+            "lexer grammar L;\n" +
+            "A : '\\u0030'..'\\u8000'+ 'a' ;\n" + // TODO: FAILS; \\u not converted
+            "B : '\\u0020' ;";
+        String expecting =
+            "";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void keywordvsID() throws Exception {
+        String g =
+            "lexer grammar L2;\n" +
+            "IF : 'if' ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : DIGIT+ ;\n" +
+            "public fragment\n" +
+            "DIGIT : '0'..'9' ;";
+        String expecting =
+            ":s1=> INT-{'0'..'9'}->:s1=> INT\n" +
+            ":s2=> ID-{'a'..'z'}->:s2=> ID\n" +
+            ":s3=> ID-'f'->:s4=> ID IF\n" +
+            ":s3=> ID-{'a'..'e', 'g'..'z'}->:s2=> ID\n" +
+            ":s4=> ID IF-{'a'..'z'}->:s2=> ID\n" +
+            "s0-'i'->:s3=> ID\n" +
+            "s0-{'0'..'9'}->:s1=> INT\n" +
+            "s0-{'a'..'h', 'j'..'z'}->:s2=> ID";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void recursiveMatchingTwoAlts() throws Exception {
+        // ambig with ACTION; accept state will try both after matching
+        // since one is recursive
+        String g =
+            "lexer grammar L3;\n" +
+            "SPECIAL : '{{}}' ;\n" +
+            "ACTION : '{' (FOO | 'x')* '}' ;\n" +
+            "fragment\n" +
+            "FOO : ACTION ;\n" +
+            "LCURLY : '{' ;";
+        String expecting =
+            ":s1=> LCURLY-'x'->s4\n" +
+            ":s1=> LCURLY-'{'->s3\n" +
+            ":s1=> LCURLY-'}'->:s2=> ACTION\n" +
+            "s0-'{'->:s1=> LCURLY\n" +
+            "s3-'x'->s6\n" +
+            "s3-'}'->s5\n" +
+            "s4-'x'->s4\n" +
+            "s4-'{'->s7\n" +
+            "s4-'}'->:s2=> ACTION\n" +
+            "s5-'x'->s4\n" +
+            "s5-'{'->s7\n" +
+            "s5-'}'->:s8=> SPECIAL ACTION\n" + // order meaningful here: SPECIAL ACTION
+            "s6-'x'->s6\n" +
+            "s6-'}'->s9\n" +
+            "s7-'x'->s6\n" +
+            "s7-'}'->s9\n" +
+            "s9-'x'->s4\n" +
+            "s9-'{'->s7\n" +
+            "s9-'}'->:s2=> ACTION\n";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void _t3emplate() throws Exception {
+        String g =
+            "";
+        String expecting =
+            "";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void _templ33ate() throws Exception {
+        String g =
+            "";
+        String expecting =
+            "";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void _template() throws Exception {
+        String g =
+            "";
+        String expecting =
+            "";
+        checkLexerDFA(g, expecting);
+    }
+}
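Review note: in these lexer DFAs an accept state lists every rule it can match,
e.g. ":s4=> ID IF" and ":s8=> SPECIAL ACTION", and the ordering is significant; it
is the List<Rule> ordering introduced in LexerState above, which the inline
"order meaningful here: SPECIAL ACTION" comment calls out.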
TestPredicatedDFAConstruction.java (new file)

@@ -0,0 +1,73 @@
+package org.antlr.v4.test;
+
+import org.antlr.v4.tool.Message;
+import org.junit.Test;
+
+import java.util.List;
+
+/** */
+public class TestPredicatedDFAConstruction extends BaseTest {
+    @Test
+    public void TwoAltsOnePred() throws Exception {
+        String g =
+            "parser grammar E;\n" +
+            "a : {p1}? ID\n" +
+            "  | ID\n" +
+            "  ;";
+        String expecting =
+            "s0-ID->s1\n" +
+            "s1-true->:s3=>2\n" +
+            "s1-{p1}?->:s2=>1\n";
+        checkRuleDFA(g, "a", expecting);
+    }
+
+    @Test public void ambigButPredicatedTokens() throws Exception {
+        // accept state matches both; try them in order since at least 1 has pred
+        String g =
+            "lexer grammar L4;\n" +
+            "A : {p1}? 'a' ; \n" +
+            "B : {p2}? 'a' ;";
+        String expecting =
+            "s0-'a'->:s1=> A B\n";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void hoistPredIntoCallingRule() throws Exception {
+        String g =
+            "grammar Q;\n" +
+            "\n" +
+            "prog: stat+ ;\n" +
+            "/** ANTLR pulls predicates from keyIF and keyCALL into\n" +
+            "* decision for this rule.\n" +
+            "*/\n" +
+            "stat: keyIF expr stat\n" +
+            "\t| keyCALL ID ';'\n" +
+            "\t| ';'\n" +
+            "\t;\n" +
+            "/** An ID whose text is \"if\" */\n" +
+            "keyIF : {IF}? ID ;\n" +
+            "/** An ID whose text is \"call\" */\n" +
+            "keyCALL : {CALL}? ID ;\n" +
+            "\n" +
+            "expr : ID;";
+        String expecting =
+            "s0-';'->:s2=>3\n" +
+            "s0-ID->s1\n" +
+            "s1-ID->s3\n" +
+            "s3-';'->s5\n" +
+            "s3-ID->:s4=>1\n" +
+            "s5-{CALL}?->:s7=>2\n" +
+            "s5-{IF}?->:s6=>1\n";
+        List<Message> msgs = checkRuleDFA(g, "stat", expecting);
+        System.err.println(msgs);
+    }
+
+    @Test public void _template() throws Exception {
+        String g =
+            "";
+        String expecting =
+            "";
+        checkRuleDFA(g, "a", expecting);
+    }
+}
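Review note: the predicated DFAs add semantic predicates to edge labels:
"s1-{p1}?->:s2=>1" tests predicate p1 before committing to alt 1, and the
"s1-true->:s3=>2" edge reads as the default taken for the unpredicated
alternative. In ambigButPredicatedTokens the single accept state ":s1=> A B"
keeps both token rules so the predicates can be tried in order at match time,
as that test's comment says.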