forked from jasder/antlr

imports get merged into root grammar now.

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 8872]

This commit is contained in: parent 5ffe679514, commit c6904bb21b
@@ -30,6 +30,8 @@
 package org.antlr.v4;

 import org.antlr.runtime.*;
+import org.antlr.runtime.misc.DoubleKeyMap;
+import org.antlr.runtime.tree.*;
 import org.antlr.v4.analysis.AnalysisPipeline;
 import org.antlr.v4.automata.*;
 import org.antlr.v4.codegen.CodeGenPipeline;
@@ -203,21 +205,31 @@ public class Tool {

	public void processGrammarsOnCommandLine() {
		for (String fileName : grammarFiles) {
-			GrammarAST t = load(fileName);
-			if ( t instanceof GrammarASTErrorNode ) return; // came back as error node
+			GrammarAST t = loadGrammar(fileName);
+			if ( t==null || t instanceof GrammarASTErrorNode ) return; // came back as error node
+			if ( ((GrammarRootAST)t).hasErrors ) return;
			GrammarRootAST ast = (GrammarRootAST)t;

-			GrammarTransformPipeline transform = new GrammarTransformPipeline(ast);
-			transform.process();
-
-			Grammar g = createGrammar(ast);
+			final Grammar g = createGrammar(ast);
			g.fileName = fileName;
			process(g);
		}
	}

+	/** To process a grammar, we load all of its imported grammars into
+		subordinate grammar objects. Then we merge the imported rules
+		into the root grammar. If a root grammar is a combined grammar,
+		we have to extract the implicit lexer. Once all this is done, we
+		process the lexer first, if present, and then the parser grammar.
+	 */
	public void process(Grammar g) {
		g.loadImportedGrammars();

+		mergeImportedGrammars(g);
+
+		GrammarTransformPipeline transform = new GrammarTransformPipeline();
+		transform.process(g.ast);
+
		LexerGrammar lexerg = null;
		GrammarRootAST lexerAST = null;
		if ( g.ast!=null && g.ast.grammarType== ANTLRParser.COMBINED &&
@@ -229,10 +241,6 @@ public class Tool {
			lexerg.fileName = g.fileName;
			g.implicitLexer = lexerg;
			lexerg.implicitLexerOwner = g;
-
-//			// copy vocab from combined to implicit lexer
-//			g.importVocab(g.implicitLexerOwner); // TODO: don't need i don't think; done in tool process()
-
			processNonCombinedGrammar(lexerg);
			System.out.println("lexer tokens="+lexerg.tokenNameToTypeMap);
			System.out.println("lexer strings="+lexerg.stringLiteralToTypeMap);
@@ -245,7 +253,6 @@ public class Tool {
	}

	public void processNonCombinedGrammar(Grammar g) {
-		g.loadImportedGrammars();
		if ( g.ast!=null && internalOption_PrintGrammarTree ) System.out.println(g.ast.toStringTree());
		//g.ast.inspect();

@@ -255,12 +262,6 @@ public class Tool {

		if ( errMgr.getNumErrors()>0 ) return;

-		if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any)
-			for (Grammar imp : g.getImportedGrammars()) {
-				processNonCombinedGrammar(imp);
-			}
-		}
-
		// BUILD ATN FROM AST
		ATNFactory factory;
		if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g);
@@ -282,19 +283,55 @@ public class Tool {
		gen.process();
	}

+	/** Given the raw AST of a grammar, create a grammar object
+		associated with the AST. Once we have the grammar object, ensure
+		that all nodes in the tree refer to this grammar. Later, we will
+		use it for error handling and for knowing where a rule comes from.
+	 */
	public Grammar createGrammar(GrammarRootAST ast) {
-		if ( ast.grammarType==ANTLRParser.LEXER ) return new LexerGrammar(this, ast);
-		else return new Grammar(this, ast);
+		final Grammar g;
+		if ( ast.grammarType==ANTLRParser.LEXER ) g = new LexerGrammar(this, ast);
+		else g = new Grammar(this, ast);
+
+		// ensure each node has pointer to surrounding grammar
+		TreeVisitor v = new TreeVisitor(new GrammarASTAdaptor());
+		v.visit(ast, new TreeVisitorAction() {
+			public Object pre(Object t) { ((GrammarAST)t).g = g; return t; }
+			public Object post(Object t) { return t; }
+		});
+		return g;
	}

-	public GrammarAST load(String fileName) {
-		ANTLRFileStream in = null;
+	public GrammarAST loadGrammar(String fileName) {
		try {
-			in = new ANTLRFileStream(fileName);
+			ANTLRFileStream in = new ANTLRFileStream(fileName);
+			GrammarAST t = load(in);
+			return t;
		}
		catch (IOException ioe) {
			errMgr.toolError(ErrorType.CANNOT_OPEN_FILE, ioe, fileName);
		}
		return null;
	}

+	/** Try current dir then dir of g then lib dir */
+	public GrammarAST loadImportedGrammar(Grammar g, String fileName) throws IOException {
+		System.out.println("loadImportedGrammar "+fileName+" from "+g.fileName);
+		File importedFile = new File(fileName);
+		if ( !importedFile.exists() ) {
+			File gfile = new File(g.fileName);
+			String parentDir = gfile.getParent();
+			importedFile = new File(parentDir, fileName);
+			if ( !importedFile.exists() ) { // try in lib dir
+				importedFile = new File(libDirectory, fileName);
+				if ( !importedFile.exists() ) {
+					errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_FILE, g.fileName, fileName);
+					return null;
+				}
+			}
+		}
+		ANTLRFileStream in = new ANTLRFileStream(importedFile.getAbsolutePath());
+		return load(in);
+	}

@@ -304,10 +341,11 @@ public class Tool {

	public GrammarAST load(CharStream in) {
		try {
+			GrammarASTAdaptor adaptor = new GrammarASTAdaptor(in);
			ANTLRLexer lexer = new ANTLRLexer(in);
			CommonTokenStream tokens = new CommonTokenStream(lexer);
			ToolANTLRParser p = new ToolANTLRParser(tokens, this);
-			p.setTreeAdaptor(new GrammarASTAdaptor(in));
+			p.setTreeAdaptor(adaptor);
			ParserRuleReturnScope r = p.grammarSpec();
			GrammarAST root = (GrammarAST) r.getTree();
			if ( root instanceof GrammarRootAST ) {
@@ -322,12 +360,129 @@ public class Tool {
		return null;
	}

+	/** Merge all the rules, token definitions, and named actions from
+		imported grammars into the root grammar tree. Perform:
+
+		(tokens { X (= Y 'y')) + (tokens { Z )  ->  (tokens { X (= Y 'y') Z)
+
+		(@ members {foo}) + (@ members {bar})   ->  (@ members {foobar})
+
+		(RULES (RULE x y)) + (RULES (RULE z))   ->  (RULES (RULE x y z))
+
+		Rules in root prevent same rule from being appended to RULES node.
+
+		The goal is a complete combined grammar so we can ignore subordinate
+		grammars.
+	 */
+	public void mergeImportedGrammars(Grammar rootGrammar) {
+		GrammarAST root = rootGrammar.ast;
+		GrammarASTAdaptor adaptor = new GrammarASTAdaptor(root.token.getInputStream());
+
+		GrammarAST tokensRoot = (GrammarAST)root.getFirstChildWithType(ANTLRParser.TOKENS);
+
+		List<GrammarAST> actionRoots = root.getNodesWithType(ANTLRParser.AT);
+
+		// Compute list of rules in root grammar and ensure we have a RULES node
+		GrammarAST RULES = (GrammarAST)root.getFirstChildWithType(ANTLRParser.RULES);
+		Set<String> rootRuleNames = new HashSet<String>();
+		if ( RULES==null ) { // no rules in root, make RULES node, hook in
+			RULES = (GrammarAST)adaptor.create(ANTLRParser.RULES, "RULES");
+			RULES.g = rootGrammar;
+			root.addChild(RULES);
+		}
+		else {
+			List<GrammarAST> rootRules = root.getNodesWithType(ANTLRParser.RULE);
+			for (GrammarAST r : rootRules) rootRuleNames.add(r.getChild(0).getText());
+		}
+
+		List<Grammar> imports = rootGrammar.getAllImportedGrammars();
+		if ( imports==null ) return;
+
+		for (Grammar imp : imports) {
+			GrammarAST imp_tokensRoot = (GrammarAST)imp.ast.getFirstChildWithType(ANTLRParser.TOKENS);
+			if ( imp_tokensRoot!=null ) {
+				System.out.println("imported tokens: "+imp_tokensRoot.getChildren());
+				if ( tokensRoot==null ) {
+					tokensRoot = (GrammarAST)adaptor.create(ANTLRParser.TOKENS, "TOKENS");
+					tokensRoot.g = rootGrammar;
+					root.insertChild(1, tokensRoot); // ^(GRAMMAR ID TOKENS...)
+				}
+				tokensRoot.addChildren(imp_tokensRoot.getChildren());
+			}
+
+			List<GrammarAST> all_actionRoots = new ArrayList<GrammarAST>();
+			List<GrammarAST> imp_actionRoots = imp.ast.getNodesWithType(ANTLRParser.AT);
+			if ( actionRoots!=null ) all_actionRoots.addAll(actionRoots);
+			all_actionRoots.addAll(imp_actionRoots);
+
+			if ( imp_actionRoots!=null ) {
+				DoubleKeyMap<String, String, GrammarAST> namedActions =
+					new DoubleKeyMap<String, String, GrammarAST>();
+
+				System.out.println("imported actions: "+imp_actionRoots);
+				for (GrammarAST at : all_actionRoots) {
+					String scopeName = rootGrammar.getDefaultActionScope();
+					GrammarAST scope, name, action;
+					if ( at.getChildCount()>2 ) { // must have a scope
+						scope = (GrammarAST)at.getChild(0);
+						scopeName = scope.getText();
+						name = (GrammarAST)at.getChild(1);
+						action = (GrammarAST)at.getChild(2);
+					}
+					else {
+						name = (GrammarAST)at.getChild(0);
+						action = (GrammarAST)at.getChild(1);
+					}
+					GrammarAST prevAction = namedActions.get(scopeName, name.getText());
+					if ( prevAction==null ) {
+						namedActions.put(scopeName, name.getText(), action);
+					}
+					else {
+						if ( prevAction.g == at.g ) {
+							errMgr.grammarError(ErrorType.ACTION_REDEFINITION,
+												at.g.fileName, name.token, name.getText());
+						}
+						else {
+							String s1 = prevAction.getText();
+							s1 = s1.substring(1, s1.length()-1);
+							String s2 = action.getText();
+							s2 = s2.substring(1, s2.length()-1);
+							String combinedAction = "{"+s1 + '\n'+ s2+"}";
+							prevAction.token.setText(combinedAction);
+						}
+					}
+				}
+				// at this point, we have complete list of combined actions,
+				// some of which are already living in root grammar.
+				// Merge in any actions not in root grammar into root's tree.
+				for (String scopeName : namedActions.keySet()) {
+					for (String name : namedActions.keySet(scopeName)) {
+						GrammarAST action = namedActions.get(scopeName, name);
+						System.out.println(action.g.name+" "+scopeName+":"+name+"="+action.getText());
+						if ( action.g != rootGrammar ) {
+							root.insertChild(1, action.getParent());
+						}
+					}
+				}
+			}

+			List<GrammarAST> rules = imp.ast.getNodesWithType(ANTLRParser.RULE);
+			if ( rules!=null ) {
+				for (GrammarAST r : rules) {
+					System.out.println("imported rule: "+r.toStringTree());
+					String name = r.getChild(0).getText();
+					if ( !rootRuleNames.contains(name) ) RULES.addChild(r); // if not overridden
+				}
+			}
+		}
+		System.out.println("Grammar: "+rootGrammar.ast.toStringTree());
+	}
+
	/** Build lexer grammar from combined grammar that looks like:
	 *
	 *  (COMBINED_GRAMMAR A
	 *      (tokens { X (= Y 'y'))
	 *      (OPTIONS (= x 'y'))
	 *      (scope Blort { int x; })
	 *      (@ members {foo})
	 *      (@ lexer header {package jj;})
	 *      (RULES (RULE .+)))
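The named-action merge above implements (@ members {foo}) + (@ members {bar}) -> (@ members {foobar}) by stripping each action's outer braces and rejoining the bodies with a newline. A standalone sketch of just that string step (the action texts here are hypothetical, outside any AST):

    String prev = "{foo}";                               // action already registered for scope::name
    String next = "{bar}";                               // same-named action from an imported grammar
    String s1 = prev.substring(1, prev.length()-1);      // "foo"
    String s2 = next.substring(1, next.length()-1);      // "bar"
    String combinedAction = "{" + s1 + '\n' + s2 + "}";  // "{foo\nbar}"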
@@ -356,7 +511,7 @@ public class Tool {

		// MOVE OPTIONS
		GrammarAST optionsRoot =
-			(GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.OPTIONS);
+			(GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.OPTIONS);
		if ( optionsRoot!=null ) {
			GrammarAST lexerOptionsRoot = (GrammarAST)adaptor.dupNode(optionsRoot);
			lexerAST.addChild(lexerOptionsRoot);

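Taken together, the new Tool entry points above compose as follows; a minimal driver sketch (the grammar file name M.g is hypothetical, error reporting elided):

    Tool antlr = new Tool();
    GrammarAST t = antlr.loadGrammar("M.g");                 // parse the root grammar
    if ( t!=null && !(t instanceof GrammarASTErrorNode) ) {
        Grammar g = antlr.createGrammar((GrammarRootAST)t);  // every node now points back at g
        g.fileName = "M.g";
        antlr.process(g);  // loads imports, merges them into g's tree, then runs the pipelines
    }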
@@ -178,7 +178,7 @@ public class BasicSemanticChecks extends GrammarTreeVisitor {

	@Override
	public void tokenAlias(GrammarAST ID, GrammarAST literal) {
-		checkTokenAlias(ID.token);
+		if ( literal!=null ) checkTokenAlias(ID.token);
	}

	@Override

@@ -29,14 +29,14 @@

 package org.antlr.v4.semantics;

-import org.antlr.v4.parse.*;
+import org.antlr.v4.parse.ANTLRParser;
 import org.antlr.v4.tool.*;

-import java.util.Map;
+import java.util.*;

 /** Do as much semantic checking as we can and fill in grammar
- * with rules, dynamic scopes, actions, and token definitions.
- * The only side effects are in the grammar pass to process().
+ * with rules, actions, and token definitions.
+ * The only side effects are in the grammar passed to process().
 * We consume a bunch of memory here while we build up data structures
 * to perform checking, but all of it goes away after this pipeline object
 * gets garbage collected.
@@ -47,8 +47,15 @@ import java.util.Map;
 * Note that imported grammars bring in token and rule definitions
 * but only the root grammar and any implicitly created lexer grammar
 * get their token definitions filled up. We are treating the
- * imported grammars like includes (the generated code treats them
- * as separate objects, however).
+ * imported grammars like includes.
+ *
+ * The semantic pipeline works on root grammars (those that do the importing,
+ * if any). Upon entry to the semantic pipeline, all imported grammars
+ * should have been loaded into delegate grammar objects with their
+ * ASTs created. The pipeline does the BasicSemanticChecks on the
+ * imported grammar before collecting symbols. We cannot perform the
+ * simple checks such as undefined rule until we have collected all
+ * tokens and rules from the imported grammars into a single collection.
 */
public class SemanticPipeline {
	public Grammar g;
@@ -60,22 +67,6 @@ public class SemanticPipeline {
	public void process() {
		if ( g.ast==null ) return;

-		/*
-		// VALIDATE AST STRUCTURE
-		GrammarASTAdaptor adaptor = new GrammarASTAdaptor();
-		// use buffered node stream as we will look around in stream
-		// to give good error messages.
-		BufferedTreeNodeStream nodes =
-			new BufferedTreeNodeStream(adaptor,g.ast);
-		ASTVerifier walker = new ASTVerifier(nodes);
-		try {walker.grammarSpec();}
-		catch (RecognitionException re) {
-			ErrorManager.fatalInternalError("bad grammar AST structure: "+
-											g.ast.toStringTree(),
-											re);
-		}
-		*/
-
		// DO BASIC / EASY SEMANTIC CHECKS
		BasicSemanticChecks basics = new BasicSemanticChecks(g);
		basics.process();
@@ -85,7 +76,7 @@ public class SemanticPipeline {

		// COLLECT SYMBOLS: RULES, ACTIONS, TERMINALS, ...
		SymbolCollector collector = new SymbolCollector(g);
-		collector.process(); // no side-effects; compute lists
+		collector.process(g.ast);

		// CHECK FOR SYMBOL COLLISIONS
		SymbolChecks symcheck = new SymbolChecks(g, collector);
@@ -97,16 +88,27 @@ public class SemanticPipeline {

		// STORE RULES/ACTIONS/SCOPES IN GRAMMAR
		for (Rule r : collector.rules) g.defineRule(r);
-		for (AttributeDict s : collector.scopes) g.defineScope(s);
-		for (GrammarAST a : collector.actions) g.defineAction(a);
+		for (GrammarAST a : collector.namedActions) {
+			g.defineAction((GrammarAST)a.getParent());
+		}

-		// LINK ALT NODES WITH (outermost) Alternatives
+		// LINK (outermost) ALT NODES WITH Alternatives
		for (Rule r : g.rules.values()) {
			for (int i=1; i<=r.numberOfAlts; i++) {
				r.alt[i].ast.alt = r.alt[i];
			}
		}

+		// ASSIGN TOKEN TYPES
+		g.importTokensFromTokensFile();
+		if ( g.isLexer() ) {
+			assignLexerTokenTypes(g, collector.tokensDefs);
+		}
+		else {
+			assignTokenTypes(g, collector.tokensDefs,
+							 collector.tokenIDRefs, collector.strings);
+		}
+
		// CHECK RULE REFS NOW (that we've defined rules in grammar)
		symcheck.checkRuleArgs(g, collector.rulerefs);
		identifyStartRules(collector);
@@ -118,20 +120,6 @@ public class SemanticPipeline {
		// CHECK ATTRIBUTE EXPRESSIONS FOR SEMANTIC VALIDITY
		AttributeChecks.checkAllAttributeExpressions(g);

-		// ASSIGN TOKEN TYPES
-		String vocab = g.getOption("tokenVocab");
-		if ( vocab!=null ) {
-			TokenVocabParser vparser = new TokenVocabParser(g.tool, vocab);
-			Map<String,Integer> tokens = vparser.load();
-			System.out.println("tokens="+tokens);
-			for (String t : tokens.keySet()) {
-				if ( t.charAt(0)=='\'' ) g.defineStringLiteral(t, tokens.get(t));
-				else g.defineTokenName(t, tokens.get(t));
-			}
-		}
-		if ( g.isLexer() ) assignLexerTokenTypes(g, collector);
-		else assignTokenTypes(g, collector, symcheck);
-
		symcheck.checkForRewriteIssues();

		UseDefAnalyzer.checkRewriteElementsPresentOnLeftSide(g);
@@ -146,9 +134,9 @@ public class SemanticPipeline {
		}
	}

-	void assignLexerTokenTypes(Grammar g, SymbolCollector collector) {
+	void assignLexerTokenTypes(Grammar g, List<GrammarAST> tokensDefs) {
		Grammar G = g.getOutermostGrammar(); // put in root, even if imported
-		for (GrammarAST def : collector.tokensDefs) {
+		for (GrammarAST def : tokensDefs) {
			if ( def.getType()== ANTLRParser.ID ) G.defineTokenName(def.getText());
		}

@@ -167,11 +155,13 @@ public class SemanticPipeline {

	}

-	void assignTokenTypes(Grammar g, SymbolCollector collector, SymbolChecks symcheck) {
+	void assignTokenTypes(Grammar g, List<GrammarAST> tokensDefs,
+						  List<GrammarAST> tokenIDs, Set<String> strings)
+	{
		Grammar G = g.getOutermostGrammar(); // put in root, even if imported

		// DEFINE tokens { X='x'; } ALIASES
-		for (GrammarAST alias : collector.tokensDefs) {
+		for (GrammarAST alias : tokensDefs) {
			if ( alias.getType()== ANTLRParser.ASSIGN ) {
				String name = alias.getChild(0).getText();
				String lit = alias.getChild(1).getText();
@@ -190,10 +180,10 @@ public class SemanticPipeline {
	 */

		// DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
-		for (String id : symcheck.tokenIDs) { G.defineTokenName(id); }
+		for (GrammarAST idAST : tokenIDs) { G.defineTokenName(idAST.getText()); }

		// DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
-		for (String s : collector.strings) { G.defineStringLiteral(s); }
+		for (String s : strings) { G.defineStringLiteral(s); }
		System.out.println("tokens="+G.tokenNameToTypeMap);
		System.out.println("strings="+G.stringLiteralToTypeMap);
	}

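The refactoring above narrows assignTokenTypes/assignLexerTokenTypes to plain collections rather than whole SymbolCollector/SymbolChecks objects. A sketch of what the new parameters carry for a hypothetical parser-grammar fragment:

    // For:   tokens { X='x'; }   r : ID 'while' ;
    // the collector roughly yields:
    //   collector.tokensDefs  = [ (= X 'x') ]   // alias node: defines X and 'x' together
    //   collector.tokenIDRefs = [ ID ]          // -> G.defineTokenName("ID")
    //   collector.strings     = { "'while'" }   // -> G.defineStringLiteral("'while'")
    assignTokenTypes(g, collector.tokensDefs, collector.tokenIDRefs, collector.strings);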
@@ -30,6 +30,7 @@
package org.antlr.v4.semantics;

import org.antlr.runtime.Token;
+import org.antlr.runtime.misc.DoubleKeyMap;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.*;

@@ -47,7 +48,10 @@ public class SymbolChecks {
	Map<String, Rule> nameToRuleMap = new HashMap<String, Rule>();
	Set<String> tokenIDs = new HashSet<String>();
	Set<String> globalScopeNames = new HashSet<String>();
-	Map<String, Set<String>> actionScopeToActionNames = new HashMap<String, Set<String>>();
+//	Map<String, Set<String>> actionScopeToActionNames = new HashMap<String, Set<String>>();
+	DoubleKeyMap<String, String, GrammarAST> namedActions =
+		new DoubleKeyMap<String, String, GrammarAST>();

	public ErrorManager errMgr;

	public SymbolChecks(Grammar g, SymbolCollector collector) {
@@ -71,8 +75,8 @@ public class SymbolChecks {
		// So, call order sensitive
		checkScopeRedefinitions(collector.scopes); // sets globalScopeNames
		//checkForImportedRuleIssues(collector.qualifiedRulerefs);
		// done in sem pipe for now
		checkForRuleConflicts(collector.rules); // sets nameToRuleMap
-		checkActionRedefinitions(collector.actions); // sets actionScopeToActionNames
+		checkTokenAliasRedefinitions(collector.tokensDefs);
		//checkRuleArgs(collector.rulerefs);
		checkForTokenConflicts(collector.tokenIDRefs); // sets tokenIDs
@@ -82,11 +86,13 @@ public class SymbolChecks {

	public void checkForRuleConflicts(List<Rule> rules) {
		if ( rules==null ) return;
-		for (Rule r : collector.rules) {
-			if ( nameToRuleMap.get(r.name)==null ) {
+		for (Rule r : rules) {
+			Rule prevRule = nameToRuleMap.get(r.name);
+			if ( prevRule==null ) {
				nameToRuleMap.put(r.name, r);
			}
-			else {
+			else if ( r.g == prevRule.g ) {
+				// only generate warning if rules in same grammar
				GrammarAST idNode = (GrammarAST)r.ast.getChild(0);
				errMgr.grammarError(ErrorType.RULE_REDEFINITION,
									g.fileName, idNode.token, r.name);
@@ -167,35 +173,6 @@ public class SymbolChecks {
		}
	}

-	public void checkActionRedefinitions(List<GrammarAST> actions) {
-		if ( actions==null ) return;
-		String scope = g.getDefaultActionScope();
-		String name = null;
-		GrammarAST nameNode = null;
-		for (GrammarAST ampersandAST : actions) {
-			nameNode = (GrammarAST)ampersandAST.getChild(0);
-			if ( ampersandAST.getChildCount()==2 ) {
-				name = nameNode.getText();
-			}
-			else {
-				scope = nameNode.getText();
-				name = ampersandAST.getChild(1).getText();
-			}
-			Set<String> scopeActions = actionScopeToActionNames.get(scope);
-			if ( scopeActions==null ) { // init scope
-				scopeActions = new HashSet<String>();
-				actionScopeToActionNames.put(scope, scopeActions);
-			}
-			if ( !scopeActions.contains(name) ) {
-				scopeActions.add(name);
-			}
-			else {
-				errMgr.grammarError(ErrorType.ACTION_REDEFINITION,
-									g.fileName, nameNode.token, name);
-			}
-		}
-	}
-
	/** Make sure a label doesn't conflict with another symbol.
	 *  Labels must not conflict with: rules, tokens, scope names,
	 *  return values, parameters, and rule-scope dynamic attributes

@@ -35,21 +35,32 @@ import org.antlr.v4.tool.*;
import java.util.*;

/** Collects (create) rules, terminals, strings, actions, scopes etc... from AST
- * No side-effects
+ * side-effects: sets resolver field of asts for actions.
 */
public class SymbolCollector extends GrammarTreeVisitor {
-	public Grammar g; // which grammar are we checking
+	/** which grammar are we checking */
+	public Grammar g;

	// stuff to collect
	public List<Rule> rules = new ArrayList<Rule>();
	public List<GrammarAST> rulerefs = new ArrayList<GrammarAST>();
	public List<GrammarAST> qualifiedRulerefs = new ArrayList<GrammarAST>();
	public List<GrammarAST> terminals = new ArrayList<GrammarAST>();
+	public List<GrammarAST> labels = new ArrayList<GrammarAST>();
	public List<GrammarAST> tokenIDRefs = new ArrayList<GrammarAST>();
	public Set<String> strings = new HashSet<String>();
	public List<GrammarAST> tokensDefs = new ArrayList<GrammarAST>();
	public List<AttributeDict> scopes = new ArrayList<AttributeDict>();
	public List<GrammarAST> actions = new ArrayList<GrammarAST>();

+	/** Tracks named actions like @parser::members {...}.
+	 *  Key is scope::name, value is action ast node.
+	 */
+//	public DoubleKeyMap<String,String,GrammarAST> namedActions =
+//		new DoubleKeyMap<String, String, GrammarAST>();
+
+	/** Track action name node in @parser::members {...} or @members {...} */
+	List<GrammarAST> namedActions = new ArrayList<GrammarAST>();
+
	/** All labels, rule references, and token references to right of -> */
	public List<GrammarAST> rewriteElements = new ArrayList<GrammarAST>();

@@ -58,7 +69,7 @@ public class SymbolCollector extends GrammarTreeVisitor {

	public SymbolCollector(Grammar g) { this.g = g; }

-	public void process() { visitGrammar(g.ast); }
+	public void process(GrammarAST ast) { visitGrammar(ast); }

	@Override
	public void globalScopeDef(GrammarAST ID, ActionAST elems) {
@@ -71,7 +82,10 @@ public class SymbolCollector extends GrammarTreeVisitor {

	@Override
	public void globalNamedAction(GrammarAST scope, GrammarAST ID, ActionAST action) {
-		actions.add((GrammarAST)ID.getParent());
+//		String scopeName = g.getDefaultActionScope();
+//		if ( scope!=null ) scopeName = scope.getText();
+//		namedActions.put(scopeName, ID.getText(), action);
+		namedActions.add(ID);
		action.resolver = g;
	}

@@ -160,6 +174,7 @@ public class SymbolCollector extends GrammarTreeVisitor {
	public void label(GrammarAST op, GrammarAST ID, GrammarAST element) {
		LabelElementPair lp = new LabelElementPair(g, ID, element, op.getType());
		currentRule.alt[currentOuterAltNumber].labelDefs.map(ID.getText(), lp);
+		labels.add(ID);
	}

	@Override

@@ -127,6 +127,7 @@ public enum ErrorType {
	TOKEN_ALIAS_REASSIGNMENT("cannot alias <arg>; token name already <if(arg2)>assigned to <arg2><else>defined<endif>", ErrorSeverity.ERROR),
	TOKEN_VOCAB_IN_DELEGATE("tokenVocab option ignored in imported grammar <arg>", ErrorSeverity.ERROR),
	TOKEN_ALIAS_IN_DELEGATE("can't assign string to token name <arg> to string in imported grammar <arg2>", ErrorSeverity.ERROR),
+	CANNOT_FIND_IMPORTED_FILE("can't find or load grammar <arg>", ErrorSeverity.ERROR),
	INVALID_IMPORT("<arg.typeString> grammar <arg.name> cannot import <arg2.typeString> grammar <arg2.name>", ErrorSeverity.ERROR),
	IMPORTED_TOKENS_RULE_EMPTY("", ErrorSeverity.ERROR),
	IMPORT_NAME_CLASH("<arg.typeString> grammar <arg.name> and imported <arg2.typeString> grammar <arg2.name> both generate <arg2.recognizerName>", ErrorSeverity.ERROR),

@@ -42,6 +42,7 @@ import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.*;
import org.antlr.v4.semantics.SymbolCollector;

+import java.io.IOException;
import java.util.*;

public class Grammar implements AttributeResolver {
@@ -139,7 +140,7 @@ public class Grammar implements AttributeResolver {

	/** Tracks all forced actions in all alternatives of all rules.
-	 *  Or if lexer all rules period. Doesn't track sempreds.
+	 *  Or if lexer all actions period. Doesn't track sempreds.
	 *  maps tree node to action index.
	 */
	public LinkedHashMap<ActionAST, Integer> actions = new LinkedHashMap<ActionAST, Integer>();
@@ -191,8 +192,8 @@ public class Grammar implements AttributeResolver {
			this.ast.hasErrors = p.getNumberOfSyntaxErrors()>0;
			this.name = ((GrammarAST)ast.getChild(0)).getText();

-			GrammarTransformPipeline transform = new GrammarTransformPipeline(ast);
-			transform.process();
+			GrammarTransformPipeline transform = new GrammarTransformPipeline();
+			transform.process(ast);
		}
		initTokenSymbolTables();
	}
@@ -223,19 +224,21 @@ public class Grammar implements AttributeResolver {
			else if ( t.getType()==ANTLRParser.ID ) {
				importedGrammarName = t.getText();
				System.out.println("import "+t.getText());
			}
-			try {
-				GrammarAST root = tool.load(importedGrammarName+".g");
-				if ( root instanceof GrammarASTErrorNode ) return; // came back as error node
-				GrammarRootAST ast = (GrammarRootAST)root;
-				Grammar g = tool.createGrammar(ast);
-				g.fileName = importedGrammarName+".g";
-				g.parent = this;
-				importedGrammars.add(g);
-			}
-			catch (Exception e) {
-				System.err.println("can't load grammar "+importedGrammarName);
-			}
+			GrammarAST grammarAST = null;
+			try {
+				grammarAST = tool.loadImportedGrammar(this, importedGrammarName + ".g");
+			}
+			catch (IOException ioe) {
+				tool.errMgr.toolError(ErrorType.CANNOT_FIND_IMPORTED_FILE, ioe, fileName);
+			}
+			// did it come back as error node or missing?
+			if ( grammarAST==null || grammarAST instanceof GrammarASTErrorNode ) return;
+			GrammarRootAST ast = (GrammarRootAST)grammarAST;
+			Grammar g = tool.createGrammar(ast);
+			g.fileName = importedGrammarName+".g";
+			g.parent = this;
+			importedGrammars.add(g);
		}
	}

@@ -272,13 +275,16 @@ public class Grammar implements AttributeResolver {
	public Rule getRule(String name) {
		Rule r = rules.get(name);
		if ( r!=null ) return r;
+		return null;
+		/*
		List<Grammar> imports = getAllImportedGrammars();
		if ( imports==null ) return null;
		for (Grammar g : imports) {
-			r = g.rules.get(name);
+			r = g.getRule(name); // recursively walk up hierarchy
			if ( r!=null ) return r;
		}
		return null;
+		*/
	}

	public Rule getRule(int index) { return indexToRule.get(index); }
@@ -503,6 +509,19 @@ public class Grammar implements AttributeResolver {
		return maxTokenType;
	}

+	public void importTokensFromTokensFile() {
+		String vocab = getOption("tokenVocab");
+		if ( vocab!=null ) {
+			TokenVocabParser vparser = new TokenVocabParser(tool, vocab);
+			Map<String,Integer> tokens = vparser.load();
+			System.out.println("tokens="+tokens);
+			for (String t : tokens.keySet()) {
+				if ( t.charAt(0)=='\'' ) defineStringLiteral(t, tokens.get(t));
+				else defineTokenName(t, tokens.get(t));
+			}
+		}
+	}
+
	public void importVocab(Grammar importG) {
		for (String tokenName: importG.tokenNameToTypeMap.keySet()) {
			defineTokenName(tokenName, importG.tokenNameToTypeMap.get(tokenName));
@@ -689,9 +708,10 @@ public class Grammar implements AttributeResolver {
	}

	public Set<String> getStringLiterals() {
+		// TODO: super inefficient way to get these.
		GrammarASTAdaptor adaptor = new GrammarASTAdaptor();
		SymbolCollector collector = new SymbolCollector(this);
-		collector.process(); // no side-effects; find strings
+		collector.process(ast); // no side-effects; find strings
		return collector.strings;
	}

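The new importTokensFromTokensFile() above routes each vocabulary entry by its first character; a sketch with a hand-built map of the shape TokenVocabParser.load() returns (the entries are hypothetical):

    Map<String,Integer> tokens = new HashMap<String,Integer>();
    tokens.put("ID", 4);        // named token   -> defineTokenName("ID", 4)
    tokens.put("'while'", 35);  // quoted string -> defineStringLiteral("'while'", 35)
    for (String t : tokens.keySet()) {
        if ( t.charAt(0)=='\'' ) g.defineStringLiteral(t, tokens.get(t));
        else g.defineTokenName(t, tokens.get(t));
    }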
@@ -38,6 +38,10 @@ import org.antlr.v4.runtime.misc.IntervalSet;
import java.util.*;

public class GrammarAST extends CommonTree {
+	/** For error msgs, nice to know which grammar this AST lives in */
+	// TODO: try to remove
+	public Grammar g;
+
	/** If we build an ATN, we make AST node point at left edge of ATN construct */
	public ATNState atnState;

@@ -33,13 +33,13 @@ import org.antlr.v4.parse.*;

/** Handle left-recursion and block-set transforms */
public class GrammarTransformPipeline {
-	public GrammarAST ast;
+	//public GrammarAST ast;

-	public GrammarTransformPipeline(GrammarAST ast) {
-		this.ast = ast;
+	public GrammarTransformPipeline() {
+//		this.ast = ast;
	}

-	public void process() {
+	public void process(GrammarAST ast) {
		if ( ast==null ) return;

		org.antlr.runtime.tree.CommonTreeNodeStream nodes =

@@ -80,6 +80,7 @@ public class Rule implements AttributeResolver {
	/** A list of scope names used by this rule */
	public List<Token> useScopes;

+	/** In which grammar does this rule live? */
	public Grammar g;

	/** If we're in a lexer grammar, we might be in a mode */
@@ -262,7 +263,15 @@ public class Rule implements AttributeResolver {
		return false;
	}

-	@Override
+	@Override
+	public int hashCode() { return name.hashCode(); }
+
+	@Override
+	public boolean equals(Object obj) {
+		return this==obj || name.equals(((Rule)obj).name);
+	}
+
+	@Override
	public String toString() {
		StringBuilder buf = new StringBuilder();
		buf.append("Rule{name="+name);

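The new hashCode/equals make Rule identity purely name-based, which matches the merge semantics: a root rule and an imported rule with the same name count as one definition. An illustration (rootA and importedA are hypothetical Rule objects both named "a"):

    Set<Rule> defined = new HashSet<Rule>();
    defined.add(rootA);
    defined.add(importedA);      // no-op: equals()/hashCode() compare only the name
    assert defined.size() == 1;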
@@ -29,17 +29,24 @@

package org.antlr.v4.test;

+import org.antlr.v4.Tool;
import org.antlr.v4.tool.Grammar;
import org.junit.Test;

public class TestCompositeGrammars extends BaseTest {
	protected boolean debug = false;
-/*
-	@Test
-	public void testWildcardStillWorks() throws Exception {
-		org.antlr.test.ErrorQueue equeue = new org.antlr.test.ErrorQueue();
-		ErrorManager.setErrorListener(equeue);

+	@Test public void testWildcardStillWorks() throws Exception {
		String grammar =
			"parser grammar S;\n" +
			"a : B . C ;\n"; // not qualified ID
+		Grammar g = new Grammar(grammar);
+
+		ErrorQueue equeue = new ErrorQueue();
+		Tool antlr = new Tool();
+		antlr.addListener(equeue);
+		antlr.process(g);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
	}

@@ -60,6 +67,7 @@ public class TestCompositeGrammars extends BaseTest {
		assertEquals("S.a\n", found);
	}

+/*
	@Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
		// must generate something like:
		// public int a(int x) throws RecognitionException { return gS.a(x); }