Remove print statements; add a LogManager to the runtime (in case we need it there for profiling/testing, etc.)

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 9412]
This commit is contained in:
parrt 2011-11-19 18:53:37 -08:00
parent 01a71c5b02
commit 18e8a853e7
21 changed files with 107 additions and 89 deletions

View File

@ -40,6 +40,8 @@ import org.antlr.v4.parse.ANTLRLexer;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.parse.ToolANTLRParser;
import org.antlr.v4.runtime.misc.LogManager;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.*;
import org.antlr.v4.tool.ast.GrammarAST;
@ -122,6 +124,7 @@ public class Tool {
protected List<String> grammarFiles = new ArrayList<String>();
public ErrorManager errMgr = new ErrorManager(this);
public LogManager logMgr = new LogManager();
List<ANTLRToolListener> listeners =
Collections.synchronizedList(new ArrayList<ANTLRToolListener>());
@ -330,7 +333,7 @@ public class Tool {
/** Try current dir then dir of g then lib dir */
public GrammarRootAST loadImportedGrammar(Grammar g, String fileName) throws IOException {
System.out.println("loadImportedGrammar "+fileName+" from "+g.fileName);
g.tool.log("grammar", "load "+fileName + " from " + g.fileName);
File importedFile = getImportedGrammarFile(g, fileName);
if ( importedFile==null ) {
errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_FILE, fileName, g.fileName);
@ -379,7 +382,8 @@ public class Tool {
if (dot != null) {
writeDOTFile(g, r, dot);
}
} catch (IOException ioe) {
}
catch (IOException ioe) {
errMgr.toolError(ErrorType.CANNOT_WRITE_FILE, ioe);
}
}
@ -522,6 +526,9 @@ public class Tool {
}
}
/**
 * Record {@code msg} under the named component via the tool's LogManager.
 *
 * @param component logical subsystem tag used to group log entries
 *                  (e.g. "grammar", "LL1", "action-translator"); may be null
 * @param msg       the message text to record
 */
public void log(@Nullable String component, String msg) { logMgr.log(component, msg); }
/**
 * Record {@code msg} with no component tag.
 * Delegates to {@link #log(String, String)} with a null component.
 * (The original body called {@code log(msg)}, which resolves back to this
 * same overload and recurses until StackOverflowError.)
 */
public void log(String msg) { log(null, msg); }
public int getNumErrors() { return errMgr.getNumErrors(); }
public void addListener(ANTLRToolListener tl) {

View File

@ -29,11 +29,13 @@
package org.antlr.v4.analysis;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.LL1Analyzer;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.tool.Grammar;
import java.util.*;
import java.util.Arrays;
import java.util.Vector;
public class AnalysisPipeline {
public Grammar g;
@ -56,14 +58,14 @@ public class AnalysisPipeline {
g.decisionLOOK =
new Vector<IntervalSet[]>(g.atn.getNumberOfDecisions()+1);
for (DecisionState s : g.atn.decisionToState) {
System.out.println("\nDECISION "+s.decision+" in rule "+g.getRule(s.ruleIndex).name);
g.tool.log("LL1", "\nDECISION "+s.decision+" in rule "+g.getRule(s.ruleIndex).name);
LL1Analyzer anal = new LL1Analyzer(g.atn);
IntervalSet[] look = anal.getDecisionLookahead(s);
System.out.println("look="+ Arrays.toString(look));
g.tool.log("LL1", "look=" + Arrays.toString(look));
g.decisionLOOK.setSize(s.decision+1);
g.decisionLOOK.set(s.decision, look);
System.out.println("LL(1)? "+disjoint(look));
g.tool.log("LL1", "LL(1)? " + disjoint(look));
}
}
@ -74,7 +76,6 @@ public class AnalysisPipeline {
for (int a=1; a<altLook.length; a++) {
IntervalSet look = altLook[a];
if ( !look.and(combined).isNil() ) {
System.out.println("alt "+a+" not disjoint with "+combined+"; look = "+look);
collision = true;
break;
}

View File

@ -32,7 +32,10 @@ package org.antlr.v4.automata;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.tool.Grammar;
import java.util.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/** An ATN walker that knows how to dump them to serialized strings. */
public class ATNPrinter {
@ -104,9 +107,6 @@ public class ATNPrinter {
}
String getStateString(ATNState s) {
if ( s==null ) {
System.out.println("s==null");
}
int n = s.stateNumber;
String stateStr = "s"+n;
if ( s instanceof StarBlockStartState ) stateStr = "StarBlockStart_"+n;

View File

@ -49,7 +49,6 @@ public class LexerATNFactory extends ParserATNFactory {
public ATN createATN() {
// BUILD ALL START STATES (ONE PER MODE)
System.out.println(((LexerGrammar)g).modes);
Set<String> modes = ((LexerGrammar) g).modes.keySet();
for (String modeName : modes) {
// create s0, start state; implied Tokens rule node

View File

@ -60,7 +60,6 @@ public class TreeParserATNFactory extends ParserATNFactory {
LL1Analyzer analyzer = new LL1Analyzer(atn);
IntervalSet look = analyzer.LOOK(firstChild, RuleContext.EMPTY);
TreePatternAST root = treePatternRootNodes.get(i);
System.out.println(root.toStringTree()+"==nullable? "+look.contains(Token.UP));
if ( look.contains(Token.UP) ) {
// nullable child list if we can see the UP as the next token.

View File

@ -127,7 +127,7 @@ public class ActionTranslator implements ActionSplitterListener {
Token tokenWithinAction = node.token;
ActionTranslator translator = new ActionTranslator(factory, node);
translator.rf = rf;
System.out.println("translate " + action);
factory.getGrammar().tool.log("action-translator", "translate " + action);
ANTLRStringStream in = new ANTLRStringStream(action);
in.setLine(tokenWithinAction.getLine());
in.setCharPositionInLine(tokenWithinAction.getCharPositionInLine());
@ -138,7 +138,7 @@ public class ActionTranslator implements ActionSplitterListener {
}
public void attr(String expr, Token x) {
System.out.println("attr "+x);
gen.g.tool.log("action-translator", "attr "+x);
Attribute a = node.resolver.resolveToAttribute(x.getText(), node);
if ( a!=null ) {
switch ( a.dict.type ) {
@ -169,14 +169,14 @@ public class ActionTranslator implements ActionSplitterListener {
/** $x.y = expr; */
public void setQualifiedAttr(String expr, Token x, Token y, Token rhs) {
System.out.println("setQAttr "+x+"."+y+"="+rhs);
gen.g.tool.log("action-translator", "setQAttr "+x+"."+y+"="+rhs);
// x has to be current rule; just set y attr
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
chunks.add(new SetAttr(y.getText(), rhsChunks));
}
public void qualifiedAttr(String expr, Token x, Token y) {
System.out.println("qattr "+x+"."+y);
gen.g.tool.log("action-translator", "qattr "+x+"."+y);
Attribute a = node.resolver.resolveToAttribute(x.getText(), y.getText(), node);
switch ( a.dict.type ) {
case ARG: chunks.add(new ArgRef(y.getText())); break; // has to be current rule
@ -211,7 +211,7 @@ public class ActionTranslator implements ActionSplitterListener {
}
public void setAttr(String expr, Token x, Token rhs) {
System.out.println("setAttr "+x+" "+rhs);
gen.g.tool.log("action-translator", "setAttr "+x+" "+rhs);
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
SetAttr s = new SetAttr(x.getText(), rhsChunks);
if ( factory.getGrammar().isLexer() ) s = new LexerSetAttr(x.getText(), rhsChunks);
@ -219,13 +219,13 @@ public class ActionTranslator implements ActionSplitterListener {
}
public void nonLocalAttr(String expr, Token x, Token y) {
System.out.println("nonLocalAttr "+x+"::"+y);
gen.g.tool.log("action-translator", "nonLocalAttr "+x+"::"+y);
Rule r = factory.getGrammar().getRule(x.getText());
chunks.add(new NonLocalAttrRef(x.getText(), y.getText(), r.index));
}
public void setNonLocalAttr(String expr, Token x, Token y, Token rhs) {
System.out.println("setNonLocalAttr "+x+"::"+y+"="+rhs);
gen.g.tool.log("action-translator", "setNonLocalAttr "+x+"::"+y+"="+rhs);
Rule r = factory.getGrammar().getRule(x.getText());
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
SetNonLocalAttr s = new SetNonLocalAttr(x.getText(), y.getText(), r.index, rhsChunks);

View File

@ -29,16 +29,20 @@
package org.antlr.v4.codegen;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.Tool;
import org.antlr.v4.codegen.model.OutputModelObject;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.*;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.tool.ErrorType;
import org.antlr.v4.tool.Grammar;
import org.stringtemplate.v4.*;
import java.io.*;
import java.lang.reflect.*;
import java.util.*;
import java.io.IOException;
import java.io.Writer;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.HashMap;
import java.util.Map;
/** General controller for code gen. Can instantiate sub generator(s).
*/
@ -319,7 +323,6 @@ public class CodeGenerator {
code.write(wr);
w.close();
long stop = System.currentTimeMillis();
System.out.println("render time for "+fileName+": "+(int)(stop-start)+"ms");
}
catch (IOException ioe) {
tool.errMgr.toolError(ErrorType.CANNOT_WRITE_FILE,

View File

@ -31,8 +31,10 @@ package org.antlr.v4.codegen;
import org.antlr.v4.codegen.model.RuleFunction;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.*;
import org.antlr.v4.tool.*;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.ast.GrammarAST;
import org.stringtemplate.v4.ST;
@ -102,9 +104,6 @@ public class Target {
// return getTargetCharLiteralFromANTLRCharLiteral(this,name);
}
String name = g.getTokenDisplayName(ttype);
if ( name==null ) {
System.out.println("null token?");
}
// If name is a literal, return the token type instead
if ( name.charAt(0)=='\'' ) {
return String.valueOf(ttype);

View File

@ -49,6 +49,5 @@ public class LL1AltBlock extends LL1Choice {
IntervalSet expecting = IntervalSet.or(altLookSets); // combine alt sets
this.error = getThrowNoViableAlt(factory, blkAST, expecting);
System.out.println(blkAST.toStringTree()+" LL1AltBlock expecting="+expecting);
}
}

View File

@ -55,9 +55,8 @@ public class LL1OptionalBlockSingleAlt extends LL1Choice {
IntervalSet look = altLookSets[1];
IntervalSet followLook = altLookSets[2];
IntervalSet expecting = (IntervalSet)look.or(followLook);
IntervalSet expecting = look.or(followLook);
this.error = getThrowNoViableAlt(factory, blkAST, expecting);
System.out.println(blkAST.toStringTree()+" LL1OptionalBlockSingleAlt expecting="+expecting);
expr = addCodeForLookaheadTempVar(look);
followExpr = factory.getLL1Test(followLook, blkAST);

View File

@ -81,7 +81,6 @@ public class MatchTree extends RuleElement {
ATNState firstChildState = rootNode.downState.transition(0).target;
LL1Analyzer analyzer = new LL1Analyzer(firstChildState.atn);
IntervalSet look = analyzer.LOOK(firstChildState, RuleContext.EMPTY);
System.out.println(rootNode.toStringTree()+"==nullable? "+look.contains(Token.UP));
return look.contains(Token.UP);
}

View File

@ -49,10 +49,9 @@ setAlt
s.add(TOKEN_REF);
List<GrammarAST> nodes = ((GrammarAST)(currentAlt.getChild(1))).getNodesWithType(s);
for (GrammarAST n : nodes) {rewriteElems.add(n.getText());}
System.out.println("stuff in rewrite: "+rewriteElems);
}
)
;
// (BLOCK (ALT (+ (BLOCK (ALT INT) (ALT ID)))))
@ -62,14 +61,14 @@ ebnfBlockSet
}
: ^(ebnfSuffix blockSet) -> ^(ebnfSuffix ^(BLOCK<BlockAST> ^(ALT blockSet)))
;
ebnfSuffix
@after {$tree = (GrammarAST)adaptor.dupNode($start);}
: OPTIONAL
| CLOSURE
| POSITIVE_CLOSURE
;
blockSet
@init {
boolean inLexer = Character.isUpperCase(currentRuleName.charAt(0));
@ -81,7 +80,7 @@ boolean inLexer = Character.isUpperCase(currentRuleName.charAt(0));
^(BLOCK ^(ALT setElement[inLexer]) ( ^(ALT setElement[inLexer]) )+)
-> ^(SET[$BLOCK.token, "SET"] setElement+)
;
setElement[boolean inLexer]
@after {
GrammarTransformPipeline.setGrammarPtr(g, $tree);

View File

@ -29,13 +29,18 @@
package org.antlr.v4.parse;
import org.antlr.runtime.*;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.CommonTreeNodeStream;
import org.antlr.v4.Tool;
import org.antlr.v4.codegen.CodeGenerator;
import org.antlr.v4.tool.*;
import org.antlr.v4.tool.ast.*;
import org.stringtemplate.v4.*;
import org.antlr.v4.tool.AttributeDict;
import org.antlr.v4.tool.ErrorType;
import org.antlr.v4.tool.ast.GrammarAST;
import org.antlr.v4.tool.ast.GrammarASTWithOptions;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
import org.stringtemplate.v4.STGroupFile;
import java.util.*;
@ -233,7 +238,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
ruleST.add("userRetvals", retvals);
fillRetValAssignments(ruleST, "recRuleName");
System.out.println(ruleST.render());
tool.log("left-recursion", ruleST.render());
return ruleST.render();
}
@ -280,7 +285,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
ruleST.add("alts", prefixAlts);
ruleST.add("alts", otherAlts);
ruleST.add("userRetvals", retvals);
System.out.println(ruleST.render());
tool.log("left-recursion", ruleST.render());
return ruleST.render();
}

View File

@ -35,7 +35,8 @@ import org.antlr.v4.Tool;
import org.antlr.v4.tool.ErrorType;
import java.io.*;
import java.util.*;
import java.util.LinkedHashMap;
import java.util.Map;
/** */
public class TokenVocabParser {
@ -103,7 +104,7 @@ public class TokenVocabParser {
}
int tokenType = (int)tokenizer.nval;
token = tokenizer.nextToken();
System.out.println("import "+tokenID+"="+tokenType);
tool.log("grammar", "import "+tokenID+"="+tokenType);
tokens.put(tokenID, tokenType);
maxTokenType = Math.max(maxTokenType,tokenType);
lineNum++;

View File

@ -70,7 +70,6 @@ public class ActionSniffer extends BlankActionSplitterListener {
ActionSplitter splitter = new ActionSplitter(in, this);
// forces eval, triggers listener methods
node.chunks = splitter.getActionTokens();
System.out.println(node.chunks);
}
public void processNested(Token actionToken) {

View File

@ -97,7 +97,6 @@ public class AttributeChecks implements ActionSplitterListener {
ActionSplitter splitter = new ActionSplitter(in, this);
// forces eval, triggers listener methods
node.chunks = splitter.getActionTokens();
System.out.println(node.chunks);
}
// LISTENER METHODS

View File

@ -190,7 +190,7 @@ public class SemanticPipeline {
// DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
for (String s : strings) { g.defineStringLiteral(s); }
System.out.println("tokens="+g.tokenNameToTypeMap);
System.out.println("strings="+g.stringLiteralToTypeMap);
g.tool.log("semantics", "tokens="+g.tokenNameToTypeMap);
g.tool.log("semantics", "strings="+g.stringLiteralToTypeMap);
}
}

View File

@ -315,7 +315,7 @@ public class SymbolChecks {
for (GrammarAST dot : qualifiedRuleRefs) {
GrammarAST grammar = (GrammarAST)dot.getChild(0);
GrammarAST rule = (GrammarAST)dot.getChild(1);
System.out.println(grammar.getText()+"."+rule.getText());
g.tool.log("semantics", grammar.getText()+"."+rule.getText());
Grammar delegate = g.getImportedGrammar(grammar.getText());
if ( delegate==null ) {
errMgr.grammarError(ErrorType.NO_SUCH_GRAMMAR_SCOPE,

View File

@ -104,7 +104,6 @@ public class UseDefAnalyzer {
ActionSplitter splitter = new ActionSplitter(in, listener);
// forces eval, triggers listener methods
splitter.getActionTokens();
System.out.println("action "+actionAST.getText()+" ctx depends="+dependent[0]);
return dependent[0];
}
@ -159,9 +158,9 @@ public class UseDefAnalyzer {
RewriteRefs collector = new RewriteRefs(desiredShallowLevel);
if ( root.getType()==ANTLRParser.RESULT ) collector.visitRewrite(root);
else collector.visitRewriteEBNF(root);
System.out.println("from "+root.toStringTree());
System.out.println("shallow: "+collector.shallow);
System.out.println("deep: "+collector.deep);
// System.out.println("from "+root.toStringTree());
// System.out.println("shallow: "+collector.shallow);
// System.out.println("deep: "+collector.deep);
return deep ? collector.deep : collector.shallow;
}

View File

@ -29,19 +29,28 @@
package org.antlr.v4.tool;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.runtime.tree.*;
import org.antlr.runtime.tree.TreeVisitor;
import org.antlr.runtime.tree.TreeVisitorAction;
import org.antlr.runtime.tree.TreeWizard;
import org.antlr.v4.Tool;
import org.antlr.v4.misc.*;
import org.antlr.v4.parse.*;
import org.antlr.v4.runtime.*;
import org.antlr.v4.misc.CharSupport;
import org.antlr.v4.misc.OrderedHashMap;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.parse.GrammarTreeVisitor;
import org.antlr.v4.parse.TokenVocabParser;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.*;
import org.antlr.v4.runtime.misc.IntSet;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.tool.ast.*;
import java.io.*;
import java.io.File;
import java.io.IOException;
import java.util.*;
public class Grammar implements AttributeResolver {
@ -222,11 +231,11 @@ public class Grammar implements AttributeResolver {
String importedGrammarName = null;
if ( t.getType()==ANTLRParser.ASSIGN ) {
importedGrammarName = t.getChild(1).getText();
System.out.println("import "+ importedGrammarName);
tool.log("grammar", "import "+ importedGrammarName);
}
else if ( t.getType()==ANTLRParser.ID ) {
importedGrammarName = t.getText();
System.out.println("import "+t.getText());
tool.log("grammar", "import " + t.getText());
}
GrammarAST grammarAST = null;
try {
@ -403,7 +412,7 @@ public class Grammar implements AttributeResolver {
I = tokenNameToTypeMap.get(token);
}
int i = (I!=null)?I.intValue(): Token.INVALID_TYPE;
//System.out.println("grammar type "+type+" "+tokenName+"->"+i);
//tool.log("grammar", "grammar type "+type+" "+tokenName+"->"+i);
return i;
}
@ -437,7 +446,7 @@ public class Grammar implements AttributeResolver {
tokenName = String.valueOf(ttype);
}
}
// System.out.println("getTokenDisplayName ttype="+ttype+", name="+tokenName);
// tool.log("grammar", "getTokenDisplayName ttype="+ttype+", name="+tokenName);
return tokenName;
}
@ -518,7 +527,7 @@ public class Grammar implements AttributeResolver {
if ( vocab!=null ) {
TokenVocabParser vparser = new TokenVocabParser(tool, vocab);
Map<String,Integer> tokens = vparser.load();
System.out.println("tokens="+tokens);
tool.log("grammar", "tokens=" + tokens);
for (String t : tokens.keySet()) {
if ( t.charAt(0)=='\'' ) defineStringLiteral(t, tokens.get(t));
else defineTokenName(t, tokens.get(t));
@ -731,7 +740,7 @@ public class Grammar implements AttributeResolver {
Map<String,String> lexerRuleToStringLiteral = new HashMap<String,String>();
for (GrammarASTWithOptions r : ruleNodes) {
//System.out.println(r.toStringTree());
//tool.log("grammar", r.toStringTree());
String ruleName = r.getChild(0).getText();
if ( Character.isUpperCase(ruleName.charAt(0)) ) {
Map nodes = new HashMap();

View File

@ -31,7 +31,9 @@ package org.antlr.v4.tool;
import org.antlr.runtime.*;
import org.antlr.runtime.misc.DoubleKeyMap;
import org.antlr.runtime.tree.*;
import org.antlr.runtime.tree.Tree;
import org.antlr.runtime.tree.TreeVisitor;
import org.antlr.runtime.tree.TreeVisitorAction;
import org.antlr.v4.Tool;
import org.antlr.v4.parse.*;
import org.antlr.v4.tool.ast.*;
@ -51,14 +53,14 @@ public class GrammarTransformPipeline {
public void process() {
GrammarRootAST ast = g.ast;
if ( ast==null ) return;
System.out.println("before: "+ast.toStringTree());
tool.log("grammar", "before: "+ast.toStringTree());
if ( ast.grammarType==ANTLRParser.PARSER || ast.grammarType==ANTLRParser.COMBINED ) {
translateLeftRecursiveRules(ast);
}
reduceBlocksToSets(ast);
System.out.println("after: "+ast.toStringTree());
tool.log("grammar", "after: "+ast.toStringTree());
}
public void reduceBlocksToSets(GrammarRootAST ast) {
@ -86,7 +88,7 @@ public class GrammarTransformPipeline {
GrammarAST ruleAST,
String language)
{
//System.out.println(ruleAST.toStringTree());
//tool.log("grammar", ruleAST.toStringTree());
TokenStream tokens = ast.tokens;
Grammar g = ast.g;
String ruleName = ruleAST.getChild(0).getText();
@ -94,7 +96,7 @@ public class GrammarTransformPipeline {
new LeftRecursiveRuleAnalyzer(tokens, ruleAST, tool, ruleName, language);
boolean isLeftRec = false;
try {
// System.out.println("TESTING ---------------\n"+
// tool.log("grammar", "TESTING ---------------\n"+
// leftRecursiveRuleWalker.text(ruleAST));
isLeftRec = leftRecursiveRuleWalker.rec_rule();
}
@ -116,11 +118,11 @@ public class GrammarTransformPipeline {
rules.add( leftRecursiveRuleWalker.getArtificialOpPrecRule(buildAST) );
rules.add( leftRecursiveRuleWalker.getArtificialPrimaryRule() );
for (String ruleText : rules) {
// System.out.println("created: "+ruleText);
// tool.log("grammar", "created: "+ruleText);
GrammarAST t = parseArtificialRule(g, ruleText);
// insert into grammar tree
RULES.addChild(t);
System.out.println("added: "+t.toStringTree());
tool.log("grammar", "added: "+t.toStringTree());
}
}
@ -197,7 +199,7 @@ public class GrammarTransformPipeline {
// COPY TOKENS
GrammarAST imp_tokensRoot = (GrammarAST)imp.ast.getFirstChildWithType(ANTLRParser.TOKENS);
if ( imp_tokensRoot!=null ) {
System.out.println("imported tokens: "+imp_tokensRoot.getChildren());
rootGrammar.tool.log("grammar", "imported tokens: "+imp_tokensRoot.getChildren());
if ( tokensRoot==null ) {
tokensRoot = (GrammarAST)adaptor.create(ANTLRParser.TOKENS, "TOKENS");
tokensRoot.g = rootGrammar;
@ -216,7 +218,7 @@ public class GrammarTransformPipeline {
DoubleKeyMap<String, String, GrammarAST> namedActions =
new DoubleKeyMap<String, String, GrammarAST>();
System.out.println("imported actions: "+imp_actionRoots);
rootGrammar.tool.log("grammar", "imported actions: "+imp_actionRoots);
for (GrammarAST at : all_actionRoots) {
String scopeName = rootGrammar.getDefaultActionScope();
GrammarAST scope, name, action;
@ -255,7 +257,7 @@ public class GrammarTransformPipeline {
for (String scopeName : namedActions.keySet()) {
for (String name : namedActions.keySet(scopeName)) {
GrammarAST action = namedActions.get(scopeName, name);
System.out.println(action.g.name+" "+scopeName+":"+name+"="+action.getText());
rootGrammar.tool.log("grammar", action.g.name+" "+scopeName+":"+name+"="+action.getText());
if ( action.g != rootGrammar ) {
root.insertChild(1, action.getParent());
}
@ -267,7 +269,7 @@ public class GrammarTransformPipeline {
List<GrammarAST> rules = imp.ast.getNodesWithType(ANTLRParser.RULE);
if ( rules!=null ) {
for (GrammarAST r : rules) {
System.out.println("imported rule: "+r.toStringTree());
rootGrammar.tool.log("grammar", "imported rule: "+r.toStringTree());
String name = r.getChild(0).getText();
boolean rootAlreadyHasRule = rootRuleNames.contains(name);
if ( !rootAlreadyHasRule ) {
@ -283,7 +285,7 @@ public class GrammarTransformPipeline {
optionsRoot.g.fileName, optionsRoot.token, imp.name);
}
}
System.out.println("Grammar: "+rootGrammar.ast.toStringTree());
rootGrammar.tool.log("grammar", "Grammar: "+rootGrammar.ast.toStringTree());
}
/** Build lexer grammar from combined grammar that looks like:
@ -305,7 +307,7 @@ public class GrammarTransformPipeline {
*/
public static GrammarRootAST extractImplicitLexer(Grammar combinedGrammar) {
GrammarRootAST combinedAST = combinedGrammar.ast;
//System.out.println("before="+combinedAST.toStringTree());
//tool.log("grammar", "before="+combinedAST.toStringTree());
GrammarASTAdaptor adaptor = new GrammarASTAdaptor(combinedAST.token.getInputStream());
List<GrammarAST> elements = combinedAST.getChildren();
@ -379,7 +381,7 @@ public class GrammarTransformPipeline {
Set<String> stringLiterals = combinedGrammar.getStringLiterals();
// add strings from combined grammar (and imported grammars) into lexer
// put them first as they are keywords; must resolve ambigs to these rules
// System.out.println("strings from parser: "+stringLiterals);
// tool.log("grammar", "strings from parser: "+stringLiterals);
for (String lit : stringLiterals) {
if ( litAliases!=null && litAliases.containsKey(lit) ) continue; // already has rule
// create for each literal: (RULE <uniquename> (BLOCK (ALT <lit>))
@ -402,10 +404,10 @@ public class GrammarTransformPipeline {
// TODO: take out after stable if slow
lexerAST.sanityCheckParentAndChildIndexes();
combinedAST.sanityCheckParentAndChildIndexes();
// System.out.println(combinedAST.toTokenString());
// tool.log("grammar", combinedAST.toTokenString());
System.out.println("after extract implicit lexer ="+combinedAST.toStringTree());
System.out.println("lexer ="+lexerAST.toStringTree());
combinedGrammar.tool.log("grammar", "after extract implicit lexer ="+combinedAST.toStringTree());
combinedGrammar.tool.log("grammar", "lexer ="+lexerAST.toStringTree());
if ( lexerRulesRoot.getChildCount()==0 ) return null;
return lexerAST;