forked from jasder/antlr

commit 49d7d03238 (parent 0e38fd9c47)

    fixed up import symbols and tokens{} section in imports

    [git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6718]
en.stg (tool error message templates)

@@ -112,8 +112,8 @@ UNDEFINED_RULE_REF(arg) ::=
 	"reference to undefined rule: <arg>"
 LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(arg) ::=
 	"literal has no associated lexer rule: <arg>"
-CANNOT_ALIAS_TOKENS_IN_LEXER(arg) ::=
-	"literals are illegal in lexer tokens{} section: <arg>"
+CANNOT_ALIAS_TOKENS(arg) ::=
+	"can't assign string value to token name <arg> in non-combined grammar"
 ATTRIBUTE_REF_NOT_IN_RULE(arg,arg2) ::=
 	"reference to attribute outside of a rule: <arg><if(arg2)>.<arg2><endif>"
 UNKNOWN_ATTRIBUTE_IN_SCOPE(arg,arg2) ::=

@@ -224,6 +224,8 @@ TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
 	"cannot alias <arg>; token name already <if(arg2)>assigned to <arg2><else>defined<endif>"
 TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
 	"tokenVocab option ignored in imported grammar <arg>"
+TOKEN_ALIAS_IN_DELEGATE(arg,arg2) ::=
+	"can't assign string to token name <arg> to string in imported grammar <arg2>"
 INVALID_IMPORT(arg,arg2) ::=
 	"<arg.typeString> grammar <arg.name> cannot import <arg2.typeString> grammar <arg2.name>"
 IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
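These messages are StringTemplate definitions; each error name maps to a template whose <arg>/<arg2> expressions are filled in when the error is reported. A standalone sketch of how one of the new messages renders, written against the StringTemplate 3 API (the attribute value WHILE is invented for illustration):

    import org.antlr.stringtemplate.StringTemplate;
    import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;

    public class MessageTemplateDemo {
        public static void main(String[] args) {
            // angle-bracket lexer because the templates use <arg>, not $arg$
            StringTemplate st = new StringTemplate(
                "can't assign string value to token name <arg> in non-combined grammar",
                AngleBracketTemplateLexer.class);
            st.setAttribute("arg", "WHILE"); // hypothetical token name
            System.out.println(st.toString());
            // prints: can't assign string value to token name WHILE in non-combined grammar
        }
    }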
Tool.java

@@ -1,6 +1,7 @@
 package org.antlr.v4;
 
 import org.antlr.runtime.*;
+import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.v4.parse.ANTLRLexer;
 import org.antlr.v4.parse.ANTLRParser;
 import org.antlr.v4.parse.GrammarASTAdaptor;

@@ -71,7 +72,7 @@ public class Tool {
         Tool antlr = new Tool(args);
 
         if (!exitNow) {
-            antlr.process();
+            antlr.processGrammarsOnCommandLine();
             if (ErrorManager.getNumErrors() > 0) {
                 antlr.exit(1);
             }

@@ -322,20 +323,17 @@ public class Tool {
         return null;
     }
 
-    public void process() {
-        // testing parser
+    public void processGrammarsOnCommandLine() {
         GrammarAST t = load(grammarFileNames.get(0));
         GrammarRootAST lexerAST = null;
         if ( t instanceof GrammarASTErrorNode ) return; // came back as error node
         GrammarRootAST ast = (GrammarRootAST)t;
-        if ( ast.grammarType==ANTLRParser.COMBINED ) {
-            lexerAST = extractImplicitLexer(ast); // alters ast
-        }
         Grammar g = new Grammar(this, ast);
         g.fileName = grammarFileNames.get(0);
         process(g);
-        if ( lexerAST!=null ) {
+        if ( ast.grammarType==ANTLRParser.COMBINED ) {
             // todo: don't process if errors in parser
+            lexerAST = extractImplicitLexer(g); // alters ast
             Grammar lexerg = new Grammar(this, lexerAST);
             lexerg.fileName = grammarFileNames.get(0);
             g.implicitLexer = lexerg;

@@ -351,8 +349,13 @@ public class Tool {
         //g.ast.inspect();
         SemanticPipeline sem = new SemanticPipeline();
         sem.process(g);
 
-        // todo: add strings we collected to lexer?
+        // process imported grammars (if any)
+        if ( g.getImportedGrammars()!=null ) {
+            for (Grammar imp : g.getImportedGrammars()) {
+                process(imp);
+            }
+        }
     }
 
     // TODO: Move to ast manipulation class?

@@ -375,7 +378,8 @@ public class Tool {
      * in combined AST. Careful: nodes are shared between
      * trees after this call.
      */
-    public GrammarRootAST extractImplicitLexer(GrammarRootAST combinedAST) {
+    public GrammarRootAST extractImplicitLexer(Grammar combinedGrammar) {
+        GrammarRootAST combinedAST = combinedGrammar.ast;
         //System.out.println("before="+combinedAST.toStringTree());
         GrammarASTAdaptor adaptor = new GrammarASTAdaptor(combinedAST.token.getInputStream());
         List<GrammarAST> elements = combinedAST.getChildren();

@@ -417,7 +421,10 @@ public class Tool {
             (GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.RULES);
         if ( combinedRulesRoot==null ) return lexerAST;
 
+        TreeWizard wiz = new TreeWizard(adaptor,ANTLRParser.tokenNames);
+
         // MOVE lexer rules
+
         GrammarAST lexerRulesRoot =
             (GrammarAST)adaptor.create(ANTLRParser.RULES, "RULES");
         lexerAST.addChild(lexerRulesRoot);

@@ -432,8 +439,23 @@ public class Tool {
         }
         rules.removeAll(rulesWeMoved);
 
+        // Will track 'if' from IF : 'if' ; rules to avoid defining new token for 'if'
+        Map<String,String> litAliases =
+            Grammar.getStringLiteralAliasesFromLexerRules(lexerAST);
+
+        // add strings from combined grammar (and imported grammars) into to lexer
+        for (String lit : combinedGrammar.stringLiteralToTypeMap.keySet()) {
+            if ( litAliases.containsKey(lit) ) continue; // already has rule
+            // create for each literal: (RULE <uniquename> (BLOCK (ALT <lit>))
+            //TreeWizard wiz = new TreeWizard(adaptor,ANTLRParser.tokenNames);
+            String rname = combinedGrammar.getStringLiteralLexerRuleName(lit);
+            GrammarAST litRule = (GrammarAST)
+                wiz.create("(RULE ID["+rname+"] (BLOCK (ALT STRING_LITERAL["+lit+"])))");
+            lexerRulesRoot.addChild(litRule);
+        }
+
         //System.out.println("after ="+combinedAST.toStringTree());
-        //System.out.println("lexer ="+lexerAST.toStringTree());
+        System.out.println("lexer ="+lexerAST.toStringTree());
         return lexerAST;
     }
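The loop added at the end of extractImplicitLexer manufactures one lexer rule per string literal via a TreeWizard pattern. A minimal, self-contained sketch of that pattern mechanism against the ANTLR 3 runtime (this token vocabulary is invented for the demo, not ANTLRParser's real tokenNames):

    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeWizard;

    public class TreeWizardDemo {
        public static void main(String[] args) {
            // Array indices double as token types; slots 0-3 are reserved by convention.
            String[] tokenNames = { "<invalid>", "<EOR>", "<DOWN>", "<UP>",
                                    "RULE", "BLOCK", "ALT", "ID", "STRING_LITERAL" };
            TreeWizard wiz = new TreeWizard(new CommonTreeAdaptor(), tokenNames);
            // Same shape as the synthesized literal rule above; ID[T__0] sets node text.
            CommonTree t = (CommonTree)wiz.create(
                "(RULE ID[T__0] (BLOCK (ALT STRING_LITERAL['while'])))");
            System.out.println(t.toStringTree());
            // prints: (RULE T__0 (BLOCK (ALT 'while')))
        }
    }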
Label.java

@@ -1,5 +1,7 @@
 package org.antlr.v4.analysis;
 
+import org.antlr.runtime.Token;
+
 public class Label implements Comparable, Cloneable {
     public static final int INVALID = -7;
 

@@ -35,7 +37,37 @@ public class Label implements Comparable, Cloneable {
 
     public static final int EOF = -1;
 
+    /** We have labels like EPSILON that are below 0; it's hard to
+     *  store them in an array with negative index so use this
+     *  constant as an index shift when accessing arrays based upon
+     *  token type. If real token type is i, then array index would be
+     *  NUM_FAUX_LABELS + i.
+     */
+    public static final int NUM_FAUX_LABELS = -INVALID;
+
+    /** Anything at this value or larger can be considered a simple atom int
+     *  for easy comparison during analysis only; faux labels are not used
+     *  during parse time for real token types or char values.
+     */
+    public static final int MIN_ATOM_VALUE = EOT;
+
+    // public static final int MIN_CHAR_VALUE = '\u0000';
+    // public static final int MAX_CHAR_VALUE = '\uFFFE';
+
+    /** End of rule token type; imaginary token type used only for
+     *  local, partial FOLLOW sets to indicate that the local FOLLOW
+     *  hit the end of rule. During error recovery, the local FOLLOW
+     *  of a token reference may go beyond the end of the rule and have
+     *  to use FOLLOW(rule). I have to just shift the token types to 2..n
+     *  rather than 1..n to accommodate this imaginary token in my bitsets.
+     *  If I didn't use a bitset implementation for runtime sets, I wouldn't
+     *  need this. EOF is another candidate for a run time token type for
+     *  parsers. Follow sets are not computed for lexers so we do not have
+     *  this issue.
+     */
+    public static final int EOR_TOKEN_TYPE = Token.EOR_TOKEN_TYPE;
+
     public int compareTo(Object o) {
-        return 0;
+        return 0; // TODO: impl
     }
 }
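The new NUM_FAUX_LABELS constant is just an index shift so the negative bookkeeping labels can live in ordinary arrays. A self-contained sketch of the arithmetic (only INVALID = -7 comes from the code above; the other values are illustrative):

    public class FauxLabelIndexDemo {
        static final int INVALID = -7;
        static final int NUM_FAUX_LABELS = -INVALID; // 7
        static final int EOF = -1;

        public static void main(String[] args) {
            String[] names = new String[NUM_FAUX_LABELS + 10];
            names[NUM_FAUX_LABELS + INVALID] = "<INVALID>"; // lands at index 0
            names[NUM_FAUX_LABELS + EOF]     = "EOF";       // lands at index 6
            names[NUM_FAUX_LABELS + 4]       = "ID";        // real token type 4 -> index 11
            System.out.println(names[6]); // EOF
        }
    }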
GrammarASTAdaptor.java

@@ -4,6 +4,7 @@ import org.antlr.runtime.*;
 import org.antlr.runtime.tree.CommonTreeAdaptor;
 import org.antlr.v4.tool.GrammarAST;
 import org.antlr.v4.tool.GrammarASTErrorNode;
+import org.antlr.v4.tool.GrammarASTWithOptions;
 
 public class GrammarASTAdaptor extends CommonTreeAdaptor {
     CharStream input; // where we can find chars ref'd by tokens in tree

@@ -17,7 +18,14 @@ public class GrammarASTAdaptor extends CommonTreeAdaptor {
     @Override
     /** Make sure even imaginary nodes know the input stream */
     public Object create(int tokenType, String text) {
-        GrammarAST t = (GrammarAST)super.create(tokenType, text);
+        GrammarAST t = null;
+        if ( tokenType==ANTLRParser.RULE ) {
+            // needed by TreeWizard to make RULE tree
+            t = new GrammarASTWithOptions(new CommonToken(tokenType, text));
+        }
+        else {
+            t = (GrammarAST)super.create(tokenType, text);
+        }
         ((CommonToken)t.token).setInputStream(input);
         return t;
     }

@@ -31,62 +39,4 @@ public class GrammarASTAdaptor extends CommonTreeAdaptor {
     {
         return new GrammarASTErrorNode(input, start, stop, e);
     }
-    /*
-
-    public Object nil() { return delegate. }
-
-    public boolean isNil(Object tree) {
-        return false;
-    }
-
-    public void addChild(Object t, Object child) {
-    }
-
-    public Object becomeRoot(Object newRoot, Object oldRoot) { return delegate. }
-
-    public Object rulePostProcessing(Object root) { return delegate. }
-
-    public int getUniqueID(Object node) { return delegate. }
-
-    public Object becomeRoot(Token newRoot, Object oldRoot) { return delegate. }
-
-    public Object create(int tokenType, Token fromToken) { return delegate. }
-
-    public Object create(int tokenType, Token fromToken, String text) { return delegate. }
-
-    public int getType(Object t) { return delegate. }
-
-    public void setType(Object t, int type) { return delegate. }
-
-    public String getText(Object t) { return delegate. }
-
-    public void setText(Object t, String text) { return delegate. }
-
-    public Token getToken(Object t) { return delegate. }
-
-    public void setTokenBoundaries(Object t, Token startToken, Token stopToken) { return delegate. }
-
-    public int getTokenStartIndex(Object t) { return delegate. }
-
-    public int getTokenStopIndex(Object t) { return delegate. }
-
-    public Object getChild(Object t, int i) { return delegate. }
-
-    public void setChild(Object t, int i, Object child) { return delegate. }
-
-    public Object deleteChild(Object t, int i) { return delegate. }
-
-    public int getChildCount(Object t) { return delegate. }
-
-    public Object getParent(Object t){ return delegate. }
-
-    public void setParent(Object t, Object parent){ return delegate. }
-    public int getChildIndex(Object t) { return delegate. }
-
-    public void setChildIndex(Object t, int index) { delegate.setChildIndex(t,index); }
-
-    public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
-        delegate.replaceChildren(parent, startChildIndex, stopChildIndex, t);
-    }
-    */
 }
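The create override above special-cases RULE so that trees built generically, for instance by TreeWizard from a text pattern, still get the richer GrammarASTWithOptions node. The same idea in a self-contained form (RuleNode and the type constant are stand-ins, not tool classes):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;

    public class AdaptorOverrideDemo {
        static final int RULE = 4; // hypothetical token type

        // Minimal analogue of the commit's override: give RULE nodes a richer subclass.
        static class MyAdaptor extends CommonTreeAdaptor {
            @Override
            public Object create(int tokenType, String text) {
                if (tokenType == RULE) {
                    return new RuleNode(new CommonToken(tokenType, text));
                }
                return super.create(tokenType, text);
            }
        }
        static class RuleNode extends CommonTree {
            RuleNode(CommonToken t) { super(t); }
        }

        public static void main(String[] args) {
            Object n = new MyAdaptor().create(RULE, "RULE");
            System.out.println(n.getClass().getSimpleName()); // RuleNode
        }
    }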
BasicSemanticChecks.java

@@ -14,7 +14,7 @@ import java.util.*;
  * FILE_AND_GRAMMAR_NAME_DIFFER
  * LEXER_RULES_NOT_ALLOWED
  * PARSER_RULES_NOT_ALLOWED
- * CANNOT_ALIAS_TOKENS_IN_LEXER
+ * CANNOT_ALIAS_TOKENS
  * ARGS_ON_TOKEN_REF
  * ILLEGAL_OPTION
  * REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION

@@ -101,7 +101,6 @@ public class BasicSemanticChecks {
         new MultiMap<Integer,Integer>() {
             {
                 map(ANTLRParser.LEXER, ANTLRParser.LEXER);
-                map(ANTLRParser.LEXER, ANTLRParser.PARSER);
                 map(ANTLRParser.LEXER, ANTLRParser.COMBINED);
 
                 map(ANTLRParser.PARSER, ANTLRParser.PARSER);

@@ -109,8 +108,7 @@ public class BasicSemanticChecks {
 
                 map(ANTLRParser.TREE, ANTLRParser.TREE);
 
-                // TODO: allow COMBINED
-                // map(ANTLRParser.GRAMMAR, ANTLRParser.GRAMMAR);
+                map(ANTLRParser.COMBINED, ANTLRParser.COMBINED);
             }
         };
 

@@ -170,7 +168,6 @@ public class BasicSemanticChecks {
         }
     }
 
-    // todo: get filename from stream via token?
    protected static void checkInvalidRuleRef(int gtype, Token ruleID) {
         String fileName = ruleID.getInputStream().getSourceName();
         if ( gtype==ANTLRParser.LEXER && Character.isLowerCase(ruleID.getText().charAt(0)) ) {

@@ -187,8 +184,8 @@ public class BasicSemanticChecks {
                                       tokenID,
                                       tokenID.getText());
         }
-        if ( gtype==ANTLRParser.LEXER ) {
-            ErrorManager.grammarError(ErrorType.CANNOT_ALIAS_TOKENS_IN_LEXER,
+        if ( gtype!=ANTLRParser.COMBINED ) {
+            ErrorManager.grammarError(ErrorType.CANNOT_ALIAS_TOKENS,
                                       fileName,
                                       tokenID,
                                       tokenID.getText());
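validImports above is a MultiMap built with an instance-initializer block; this commit drops the LEXER/PARSER pairing and turns on COMBINED/COMBINED. A standalone sketch of the same table idea (MiniMultiMap is a stand-in for the tool's MultiMap, and string keys replace the ANTLRParser type constants; the pairs simply mirror the map(...) calls after this commit):

    import java.util.*;

    // Stand-in for the tool's MultiMap: one key, many values.
    class MiniMultiMap<K,V> extends HashMap<K,List<V>> {
        public void map(K key, V value) {
            List<V> vals = get(key);
            if (vals == null) { vals = new ArrayList<V>(); put(key, vals); }
            vals.add(value);
        }
    }

    public class ValidImportsDemo {
        public static void main(String[] args) {
            // Same double-brace initializer idiom as BasicSemanticChecks.validImports.
            MiniMultiMap<String,String> validImports = new MiniMultiMap<String,String>() {{
                map("LEXER", "LEXER");
                map("LEXER", "COMBINED");
                map("PARSER", "PARSER");
                map("TREE", "TREE");
                map("COMBINED", "COMBINED"); // newly allowed by this commit
            }};
            System.out.println(validImports.get("LEXER")); // [LEXER, COMBINED]
        }
    }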
BasicSemanticTriggers.java (generated)

@@ -1,4 +1,4 @@
-// $ANTLR ${project.version} ${buildNumber} BasicSemanticTriggers.g 2010-02-19 17:45:09
+// $ANTLR ${project.version} ${buildNumber} BasicSemanticTriggers.g 2010-02-22 16:10:22
 
 /*
  [The "BSD license"]
CollectSymbols.g

@@ -79,7 +79,7 @@ public List<GrammarAST> rulerefs = new ArrayList<GrammarAST>();
 public List<GrammarAST> qualifiedRulerefs = new ArrayList<GrammarAST>();
 public List<GrammarAST> terminals = new ArrayList<GrammarAST>();
 public List<GrammarAST> tokenIDRefs = new ArrayList<GrammarAST>();
-public List<GrammarAST> strings = new ArrayList<GrammarAST>();
+public Set<String> strings = new HashSet<String>();
 public List<GrammarAST> tokensDefs = new ArrayList<GrammarAST>();
 public List<AttributeDict> scopes = new ArrayList<AttributeDict>();
 public List<GrammarAST> actions = new ArrayList<GrammarAST>();

@@ -134,7 +134,7 @@ tokensSection
     : {inContext("TOKENS")}?
       ( ^(ASSIGN t=ID STRING_LITERAL)
         {terminals.add($t); tokenIDRefs.add($t);
-         tokensDefs.add($ASSIGN); strings.add($STRING_LITERAL);}
+         tokensDefs.add($ASSIGN); strings.add($STRING_LITERAL.text);}
       | t=ID
         {terminals.add($t); tokenIDRefs.add($t); tokensDefs.add($t);}
       )

@@ -149,7 +149,7 @@ rule: ^( RULE name=ID .+)
        currentAlt = 1;
        }
     ;
 
 setAlt
     : {inContext("RULE BLOCK")}? ( ALT | ALT_REWRITE )
       {currentAlt = $start.getChildIndex()+1;}

@@ -244,7 +244,7 @@ terminal
     : {!inContext("TOKENS ASSIGN")}? STRING_LITERAL
       {
       terminals.add($start);
-      strings.add($STRING_LITERAL);
+      strings.add($STRING_LITERAL.text);
       if ( currentRule!=null ) {
           currentRule.alt[currentAlt].tokenRefs.map($STRING_LITERAL.text, $STRING_LITERAL);
       }
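In both the grammar actions above and the regenerated tree filter below, strings changes from a List of AST nodes to a Set of literal texts, so each distinct literal is recorded once no matter how many alternatives mention it. The effect in isolation:

    import java.util.*;

    public class StringSetDemo {
        public static void main(String[] args) {
            // A List of AST nodes held one entry per occurrence; a Set of the
            // literal text keeps one entry per distinct literal.
            Set<String> strings = new HashSet<String>();
            strings.add("'while'");
            strings.add("';'");
            strings.add("'while'"); // duplicate occurrence, no new entry
            System.out.println(strings.size()); // 2
        }
    }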
CollectSymbols.java (generated)

@@ -1,4 +1,4 @@
-// $ANTLR ${project.version} ${buildNumber} CollectSymbols.g 2010-02-19 17:45:09
+// $ANTLR ${project.version} ${buildNumber} CollectSymbols.g 2010-02-22 16:10:22
 
 /*
  [The "BSD license"]

@@ -34,7 +34,9 @@ import org.antlr.v4.parse.ScopeParser;
 import org.antlr.v4.tool.*;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 /** Collects rules, terminals, strings, actions, scopes etc... from AST
  * Side-effects: None
  */

@@ -166,7 +168,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
 public List<GrammarAST> qualifiedRulerefs = new ArrayList<GrammarAST>();
 public List<GrammarAST> terminals = new ArrayList<GrammarAST>();
 public List<GrammarAST> tokenIDRefs = new ArrayList<GrammarAST>();
-public List<GrammarAST> strings = new ArrayList<GrammarAST>();
+public Set<String> strings = new HashSet<String>();
 public List<GrammarAST> tokensDefs = new ArrayList<GrammarAST>();
 public List<AttributeDict> scopes = new ArrayList<AttributeDict>();
 public List<GrammarAST> actions = new ArrayList<GrammarAST>();

@@ -553,7 +555,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             match(input, Token.UP, null); if (state.failed) return ;
             if ( state.backtracking==1 ) {
                 terminals.add(t); tokenIDRefs.add(t);
-                tokensDefs.add(ASSIGN5); strings.add(STRING_LITERAL6);
+                tokensDefs.add(ASSIGN5); strings.add((STRING_LITERAL6!=null?STRING_LITERAL6.getText():null));
             }
 
         }

@@ -711,7 +713,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         // CollectSymbols.g:159:2: ( RULE )
         // CollectSymbols.g:159:4: RULE
         {
-            match(input,RULE,FOLLOW_RULE_in_finishRule402); if (state.failed) return ;
+            match(input,RULE,FOLLOW_RULE_in_finishRule403); if (state.failed) return ;
             if ( state.backtracking==1 ) {
                 currentRule = null;
             }

@@ -744,11 +746,11 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             if (state.backtracking>0) {state.failed=true; return ;}
             throw new FailedPredicateException(input, "ruleNamedAction", "inContext(\"RULE\")");
         }
-        match(input,AT,FOLLOW_AT_in_ruleNamedAction418); if (state.failed) return ;
+        match(input,AT,FOLLOW_AT_in_ruleNamedAction419); if (state.failed) return ;
 
         match(input, Token.DOWN, null); if (state.failed) return ;
-        ID8=(GrammarAST)match(input,ID,FOLLOW_ID_in_ruleNamedAction420); if (state.failed) return ;
-        ACTION9=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_ruleNamedAction422); if (state.failed) return ;
+        ID8=(GrammarAST)match(input,ID,FOLLOW_ID_in_ruleNamedAction421); if (state.failed) return ;
+        ACTION9=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_ruleNamedAction423); if (state.failed) return ;
 
         match(input, Token.UP, null); if (state.failed) return ;
         if ( state.backtracking==1 ) {

@@ -786,7 +788,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             if (state.backtracking>0) {state.failed=true; return ;}
             throw new FailedPredicateException(input, "ruleAction", "inContext(\"RULE ...\")&&!inContext(\"SCOPE\")&&\n\t\t !inContext(\"CATCH\")&&!inContext(\"FINALLY\")&&!inContext(\"AT\")");
         }
-        ACTION10=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_ruleAction442); if (state.failed) return ;
+        ACTION10=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_ruleAction443); if (state.failed) return ;
         if ( state.backtracking==1 ) {
 
             currentRule.alt[currentAlt].actions.add((ActionAST)ACTION10);

@@ -817,11 +819,11 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         // CollectSymbols.g:181:2: ( ^( CATCH ARG_ACTION ACTION ) )
         // CollectSymbols.g:181:4: ^( CATCH ARG_ACTION ACTION )
         {
-            match(input,CATCH,FOLLOW_CATCH_in_exceptionHandler458); if (state.failed) return ;
+            match(input,CATCH,FOLLOW_CATCH_in_exceptionHandler459); if (state.failed) return ;
 
             match(input, Token.DOWN, null); if (state.failed) return ;
-            match(input,ARG_ACTION,FOLLOW_ARG_ACTION_in_exceptionHandler460); if (state.failed) return ;
-            ACTION11=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_exceptionHandler462); if (state.failed) return ;
+            match(input,ARG_ACTION,FOLLOW_ARG_ACTION_in_exceptionHandler461); if (state.failed) return ;
+            ACTION11=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_exceptionHandler463); if (state.failed) return ;
 
             match(input, Token.UP, null); if (state.failed) return ;
             if ( state.backtracking==1 ) {

@@ -854,10 +856,10 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         // CollectSymbols.g:189:2: ( ^( FINALLY ACTION ) )
         // CollectSymbols.g:189:4: ^( FINALLY ACTION )
         {
-            match(input,FINALLY,FOLLOW_FINALLY_in_finallyClause479); if (state.failed) return ;
+            match(input,FINALLY,FOLLOW_FINALLY_in_finallyClause480); if (state.failed) return ;
 
             match(input, Token.DOWN, null); if (state.failed) return ;
-            ACTION12=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_finallyClause481); if (state.failed) return ;
+            ACTION12=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_finallyClause482); if (state.failed) return ;
 
             match(input, Token.UP, null); if (state.failed) return ;
             if ( state.backtracking==1 ) {

@@ -894,7 +896,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             if (state.backtracking>0) {state.failed=true; return ;}
             throw new FailedPredicateException(input, "ruleArg", "inContext(\"RULE\")");
         }
-        ARG_ACTION13=(GrammarAST)match(input,ARG_ACTION,FOLLOW_ARG_ACTION_in_ruleArg501); if (state.failed) return ;
+        ARG_ACTION13=(GrammarAST)match(input,ARG_ACTION,FOLLOW_ARG_ACTION_in_ruleArg502); if (state.failed) return ;
         if ( state.backtracking==1 ) {
 
             currentRule.args = ScopeParser.parseTypeList((ARG_ACTION13!=null?ARG_ACTION13.getText():null));

@@ -925,10 +927,10 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         // CollectSymbols.g:205:2: ( ^( RETURNS ARG_ACTION ) )
         // CollectSymbols.g:205:4: ^( RETURNS ARG_ACTION )
         {
-            match(input,RETURNS,FOLLOW_RETURNS_in_ruleReturns518); if (state.failed) return ;
+            match(input,RETURNS,FOLLOW_RETURNS_in_ruleReturns519); if (state.failed) return ;
 
             match(input, Token.DOWN, null); if (state.failed) return ;
-            ARG_ACTION14=(GrammarAST)match(input,ARG_ACTION,FOLLOW_ARG_ACTION_in_ruleReturns520); if (state.failed) return ;
+            ARG_ACTION14=(GrammarAST)match(input,ARG_ACTION,FOLLOW_ARG_ACTION_in_ruleReturns521); if (state.failed) return ;
 
             match(input, Token.UP, null); if (state.failed) return ;
             if ( state.backtracking==1 ) {

@@ -1010,10 +1012,10 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             case 1 :
                 // CollectSymbols.g:214:5: ^( SCOPE ACTION )
                 {
-                    match(input,SCOPE,FOLLOW_SCOPE_in_ruleScopeSpec543); if (state.failed) return ;
+                    match(input,SCOPE,FOLLOW_SCOPE_in_ruleScopeSpec544); if (state.failed) return ;
 
                     match(input, Token.DOWN, null); if (state.failed) return ;
-                    ACTION15=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_ruleScopeSpec545); if (state.failed) return ;
+                    ACTION15=(GrammarAST)match(input,ACTION,FOLLOW_ACTION_in_ruleScopeSpec546); if (state.failed) return ;
 
                     match(input, Token.UP, null); if (state.failed) return ;
                     if ( state.backtracking==1 ) {

@@ -1029,7 +1031,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             case 2 :
                 // CollectSymbols.g:220:5: ^( SCOPE (ids+= ID )+ )
                 {
-                    match(input,SCOPE,FOLLOW_SCOPE_in_ruleScopeSpec558); if (state.failed) return ;
+                    match(input,SCOPE,FOLLOW_SCOPE_in_ruleScopeSpec559); if (state.failed) return ;
 
                     match(input, Token.DOWN, null); if (state.failed) return ;
                     // CollectSymbols.g:220:16: (ids+= ID )+

@@ -1048,7 +1050,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
                     case 1 :
                         // CollectSymbols.g:220:16: ids+= ID
                         {
-                            ids=(GrammarAST)match(input,ID,FOLLOW_ID_in_ruleScopeSpec562); if (state.failed) return ;
+                            ids=(GrammarAST)match(input,ID,FOLLOW_ID_in_ruleScopeSpec563); if (state.failed) return ;
                             if (list_ids==null) list_ids=new ArrayList();
                             list_ids.add(ids);
 

@@ -1176,10 +1178,10 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             case 1 :
                 // CollectSymbols.g:238:5: ^( ASSIGN id= ID e= . )
                 {
-                    match(input,ASSIGN,FOLLOW_ASSIGN_in_labeledElement626); if (state.failed) return retval;
+                    match(input,ASSIGN,FOLLOW_ASSIGN_in_labeledElement627); if (state.failed) return retval;
 
                     match(input, Token.DOWN, null); if (state.failed) return retval;
-                    id=(GrammarAST)match(input,ID,FOLLOW_ID_in_labeledElement630); if (state.failed) return retval;
+                    id=(GrammarAST)match(input,ID,FOLLOW_ID_in_labeledElement631); if (state.failed) return retval;
                     e=(GrammarAST)input.LT(1);
                     matchAny(input); if (state.failed) return retval;
 

@@ -1190,10 +1192,10 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             case 2 :
                 // CollectSymbols.g:239:5: ^( PLUS_ASSIGN id= ID e= . )
                 {
-                    match(input,PLUS_ASSIGN,FOLLOW_PLUS_ASSIGN_in_labeledElement642); if (state.failed) return retval;
+                    match(input,PLUS_ASSIGN,FOLLOW_PLUS_ASSIGN_in_labeledElement643); if (state.failed) return retval;
 
                     match(input, Token.DOWN, null); if (state.failed) return retval;
-                    id=(GrammarAST)match(input,ID,FOLLOW_ID_in_labeledElement646); if (state.failed) return retval;
+                    id=(GrammarAST)match(input,ID,FOLLOW_ID_in_labeledElement647); if (state.failed) return retval;
                     e=(GrammarAST)input.LT(1);
                     matchAny(input); if (state.failed) return retval;
 

@@ -1263,11 +1265,11 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             if (state.backtracking>0) {state.failed=true; return retval;}
             throw new FailedPredicateException(input, "terminal", "!inContext(\"TOKENS ASSIGN\")");
         }
-        STRING_LITERAL16=(GrammarAST)match(input,STRING_LITERAL,FOLLOW_STRING_LITERAL_in_terminal672); if (state.failed) return retval;
+        STRING_LITERAL16=(GrammarAST)match(input,STRING_LITERAL,FOLLOW_STRING_LITERAL_in_terminal673); if (state.failed) return retval;
         if ( state.backtracking==1 ) {
 
             terminals.add(((GrammarAST)retval.start));
-            strings.add(STRING_LITERAL16);
+            strings.add((STRING_LITERAL16!=null?STRING_LITERAL16.getText():null));
             if ( currentRule!=null ) {
                 currentRule.alt[currentAlt].tokenRefs.map((STRING_LITERAL16!=null?STRING_LITERAL16.getText():null), STRING_LITERAL16);
             }

@@ -1279,7 +1281,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         case 2 :
             // CollectSymbols.g:252:7: TOKEN_REF
             {
-                TOKEN_REF17=(GrammarAST)match(input,TOKEN_REF,FOLLOW_TOKEN_REF_in_terminal687); if (state.failed) return retval;
+                TOKEN_REF17=(GrammarAST)match(input,TOKEN_REF,FOLLOW_TOKEN_REF_in_terminal688); if (state.failed) return retval;
                 if ( state.backtracking==1 ) {
 
                     terminals.add(TOKEN_REF17);

@@ -1351,7 +1353,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
             if (state.backtracking>0) {state.failed=true; return ;}
             throw new FailedPredicateException(input, "ruleref", "inContext(\"DOT ...\")");
         }
-        r=(GrammarAST)match(input,RULE_REF,FOLLOW_RULE_REF_in_ruleref724); if (state.failed) return ;
+        r=(GrammarAST)match(input,RULE_REF,FOLLOW_RULE_REF_in_ruleref725); if (state.failed) return ;
         if ( state.backtracking==1 ) {
             qualifiedRulerefs.add((GrammarAST)r.getParent());
         }

@@ -1361,7 +1363,7 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         case 2 :
             // CollectSymbols.g:266:8: r= RULE_REF
             {
-                r=(GrammarAST)match(input,RULE_REF,FOLLOW_RULE_REF_in_ruleref737); if (state.failed) return ;
+                r=(GrammarAST)match(input,RULE_REF,FOLLOW_RULE_REF_in_ruleref738); if (state.failed) return ;
 
             }
             break;

@@ -1600,32 +1602,32 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
     public static final BitSet FOLLOW_ID_in_tokensSection328 = new BitSet(new long[]{0x0000000000000002L});
     public static final BitSet FOLLOW_RULE_in_rule350 = new BitSet(new long[]{0x0000000000000004L});
     public static final BitSet FOLLOW_ID_in_rule354 = new BitSet(new long[]{0xFFFFFFFFFFFFFFF0L,0x0000003FFFFFFFFFL});
-    public static final BitSet FOLLOW_set_in_setAlt378 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_RULE_in_finishRule402 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_AT_in_ruleNamedAction418 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ID_in_ruleNamedAction420 = new BitSet(new long[]{0x0000000000010000L});
-    public static final BitSet FOLLOW_ACTION_in_ruleNamedAction422 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_ACTION_in_ruleAction442 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_CATCH_in_exceptionHandler458 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ARG_ACTION_in_exceptionHandler460 = new BitSet(new long[]{0x0000000000010000L});
-    public static final BitSet FOLLOW_ACTION_in_exceptionHandler462 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_FINALLY_in_finallyClause479 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ACTION_in_finallyClause481 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_ARG_ACTION_in_ruleArg501 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_RETURNS_in_ruleReturns518 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ARG_ACTION_in_ruleReturns520 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_SCOPE_in_ruleScopeSpec543 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ACTION_in_ruleScopeSpec545 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_SCOPE_in_ruleScopeSpec558 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ID_in_ruleScopeSpec562 = new BitSet(new long[]{0x0000000000000008L,0x0000000000400000L});
-    public static final BitSet FOLLOW_set_in_rewriteElement590 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_ASSIGN_in_labeledElement626 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ID_in_labeledElement630 = new BitSet(new long[]{0xFFFFFFFFFFFFFFF0L,0x0000003FFFFFFFFFL});
-    public static final BitSet FOLLOW_PLUS_ASSIGN_in_labeledElement642 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ID_in_labeledElement646 = new BitSet(new long[]{0xFFFFFFFFFFFFFFF0L,0x0000003FFFFFFFFFL});
-    public static final BitSet FOLLOW_STRING_LITERAL_in_terminal672 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_TOKEN_REF_in_terminal687 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_RULE_REF_in_ruleref724 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_RULE_REF_in_ruleref737 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_set_in_setAlt379 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_RULE_in_finishRule403 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_AT_in_ruleNamedAction419 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ID_in_ruleNamedAction421 = new BitSet(new long[]{0x0000000000010000L});
+    public static final BitSet FOLLOW_ACTION_in_ruleNamedAction423 = new BitSet(new long[]{0x0000000000000008L});
+    public static final BitSet FOLLOW_ACTION_in_ruleAction443 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_CATCH_in_exceptionHandler459 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ARG_ACTION_in_exceptionHandler461 = new BitSet(new long[]{0x0000000000010000L});
+    public static final BitSet FOLLOW_ACTION_in_exceptionHandler463 = new BitSet(new long[]{0x0000000000000008L});
+    public static final BitSet FOLLOW_FINALLY_in_finallyClause480 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ACTION_in_finallyClause482 = new BitSet(new long[]{0x0000000000000008L});
+    public static final BitSet FOLLOW_ARG_ACTION_in_ruleArg502 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_RETURNS_in_ruleReturns519 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ARG_ACTION_in_ruleReturns521 = new BitSet(new long[]{0x0000000000000008L});
+    public static final BitSet FOLLOW_SCOPE_in_ruleScopeSpec544 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ACTION_in_ruleScopeSpec546 = new BitSet(new long[]{0x0000000000000008L});
+    public static final BitSet FOLLOW_SCOPE_in_ruleScopeSpec559 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ID_in_ruleScopeSpec563 = new BitSet(new long[]{0x0000000000000008L,0x0000000000400000L});
+    public static final BitSet FOLLOW_set_in_rewriteElement591 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_ASSIGN_in_labeledElement627 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ID_in_labeledElement631 = new BitSet(new long[]{0xFFFFFFFFFFFFFFF0L,0x0000003FFFFFFFFFL});
+    public static final BitSet FOLLOW_PLUS_ASSIGN_in_labeledElement643 = new BitSet(new long[]{0x0000000000000004L});
+    public static final BitSet FOLLOW_ID_in_labeledElement647 = new BitSet(new long[]{0xFFFFFFFFFFFFFFF0L,0x0000003FFFFFFFFFL});
+    public static final BitSet FOLLOW_STRING_LITERAL_in_terminal673 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_TOKEN_REF_in_terminal688 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_RULE_REF_in_ruleref725 = new BitSet(new long[]{0x0000000000000002L});
+    public static final BitSet FOLLOW_RULE_REF_in_ruleref738 = new BitSet(new long[]{0x0000000000000002L});
 
 }
SemanticPipeline.java

@@ -9,6 +9,7 @@ import org.antlr.v4.parse.GrammarASTAdaptor;
 import org.antlr.v4.tool.*;
 
 import java.util.List;
+import java.util.Map;
 
 /** */
 public class SemanticPipeline {

@@ -37,14 +38,6 @@ public class SemanticPipeline {
         // don't continue if we get errors in this basic check
         if ( false ) return;
 
-        // TODO: can i move to Tool.process? why recurse here?
-        // NOW DO BASIC / EASY SEMANTIC CHECKS FOR DELEGATES (IF ANY)
-        if ( g.getImportedGrammars()!=null ) {
-            for (Grammar d : g.getImportedGrammars()) {
-                process(d);
-            }
-        }
-
         // COLLECT SYMBOLS: RULES, ACTIONS, TERMINALS, ...
         nodes.reset();
         CollectSymbols collector = new CollectSymbols(nodes,g);

@@ -62,62 +55,57 @@ public class SemanticPipeline {
         for (AttributeDict s : collector.scopes) g.defineScope(s);
         for (GrammarAST a : collector.actions) g.defineAction(a);
 
-        // CHECK RULE REFS NOW
-        checkRuleArgs(g, collector.rulerefs);
-        checkForQualifiedRuleIssues(g, collector.qualifiedRulerefs);
+        // CHECK RULE REFS NOW (that we've defined rules in grammar)
+        symcheck.checkRuleArgs(g, collector.rulerefs);
+        symcheck.checkForQualifiedRuleIssues(g, collector.qualifiedRulerefs);
 
+        // don't continue if we get symbol errors
+        if ( false ) return;
+
         // CHECK ATTRIBUTE EXPRESSIONS FOR SEMANTIC VALIDITY
         AttributeChecks.checkAllAttributeExpressions(g);
 
         // ASSIGN TOKEN TYPES
-        //for (GrammarAST a : collector.strings) g.defineAction(a);
-        //for (String id : symcheck.tokenIDs) g.defineAction(a);
+        assignTokenTypes(g, collector, symcheck);
 
         // TODO: move to a use-def or deadcode eliminator
         checkRewriteElementsPresentOnLeftSide(g, collector.rules);
     }
 
-    public void checkRuleArgs(Grammar g, List<GrammarAST> rulerefs) {
-        if ( rulerefs==null ) return;
-        for (GrammarAST ref : rulerefs) {
-            String ruleName = ref.getText();
-            Rule r = g.getRule(ruleName);
-            if ( r==null && !ref.hasAncestor(ANTLRParser.DOT)) {
-                // only give error for unqualified rule refs now
-                ErrorManager.grammarError(ErrorType.UNDEFINED_RULE_REF,
-                                          g.fileName, ref.token, ruleName);
-            }
-            GrammarAST arg = (GrammarAST)ref.getChild(0);
-            if ( arg!=null && r.args==null ) {
-                ErrorManager.grammarError(ErrorType.RULE_HAS_NO_ARGS,
-                                          g.fileName, ref.token, ruleName);
-            }
-            else if ( arg==null && (r!=null&&r.args!=null) ) {
-                ErrorManager.grammarError(ErrorType.MISSING_RULE_ARGS,
-                                          g.fileName, ref.token, ruleName);
-            }
-        }
-    }
-
-    public void checkForQualifiedRuleIssues(Grammar g, List<GrammarAST> qualifiedRuleRefs) {
-        for (GrammarAST dot : qualifiedRuleRefs) {
-            GrammarAST grammar = (GrammarAST)dot.getChild(0);
-            GrammarAST rule = (GrammarAST)dot.getChild(1);
-            System.out.println(grammar.getText()+"."+rule.getText());
-            Grammar delegate = g.getImportedGrammar(grammar.getText());
-            if ( delegate==null ) {
-                ErrorManager.grammarError(ErrorType.NO_SUCH_GRAMMAR_SCOPE,
-                                          g.fileName, grammar.token, grammar.getText(),
-                                          rule.getText());
-            }
-            else {
-                if ( g.getRule(grammar.getText(), rule.getText())==null ) {
-                    ErrorManager.grammarError(ErrorType.NO_SUCH_RULE_IN_SCOPE,
-                                              g.fileName, rule.token, grammar.getText(),
-                                              rule.getText());
-                }
-            }
-        }
-    }
+    public void assignTokenTypes(Grammar g, CollectSymbols collector, SymbolChecks symcheck) {
+        if ( g.implicitLexerOwner!=null ) {
+            // copy vocab from combined to implicit lexer
+            g.importVocab(g.implicitLexerOwner);
+            System.out.println("tokens="+g.tokenNameToTypeMap);
+            System.out.println("strings="+g.stringLiteralToTypeMap);
+        }
+        else {
+            Grammar G = g.getOutermostGrammar(); // put in root, even if imported
+
+            // DEFINE tokens { X='x'; } ALIASES
+            for (GrammarAST alias : collector.tokensDefs) {
+                if ( alias.getType()== ANTLRParser.ASSIGN ) {
+                    String name = alias.getChild(0).getText();
+                    String lit = alias.getChild(1).getText();
+                    G.defineTokenAlias(name, lit);
+                }
+            }
+
+            // DEFINE TOKEN TYPES FOR X : 'x' ; RULES
+            Map<String,String> litAliases = Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
+            if ( litAliases!=null ) {
+                for (String lit : litAliases.keySet()) {
+                    G.defineTokenAlias(litAliases.get(lit), lit);
+                }
+            }
+
+            // DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
+            for (String id : symcheck.tokenIDs) { G.defineTokenName(id); }
+
+            // DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
+            for (String s : collector.strings) { G.defineStringLiteral(s); }
+            System.out.println("tokens="+G.tokenNameToTypeMap);
+            System.out.println("strings="+G.stringLiteralToTypeMap);
+        }
+    }

@@ -138,5 +126,4 @@ public class SemanticPipeline {
         }
     }
-    }
 
 }
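The new assignTokenTypes defines types in a fixed order: tokens{} aliases, then literal aliases from lexer rules, then named token refs, then any remaining string literals. A toy model of that bookkeeping (hypothetical and heavily simplified; the real numbering lives in Grammar):

    import java.util.*;

    public class TokenTypeAssignDemo {
        static int next = 4; // the runtime's minimum real token type is 4
        static Map<String,Integer> tokens = new LinkedHashMap<String,Integer>();
        static Map<String,Integer> strings = new LinkedHashMap<String,Integer>();

        static int define(String name) {
            Integer t = tokens.get(name);
            if (t == null) { t = next++; tokens.put(name, t); }
            return t;
        }
        static void defineAlias(String name, String lit) {
            strings.put(lit, define(name)); // alias: name and literal share one type
        }
        static void defineStringLiteral(String lit) {
            if (!strings.containsKey(lit)) strings.put(lit, next++);
        }

        public static void main(String[] args) {
            defineAlias("WHILE", "'while'");  // tokens { WHILE='while'; }
            define("ID");                     // token ref
            defineStringLiteral("'while'");   // already aliased: no new type
            defineStringLiteral("';'");       // fresh literal: new type
            System.out.println("tokens=" + tokens);   // {WHILE=4, ID=5}
            System.out.println("strings=" + strings); // {'while'=4, ';'=6}
        }
    }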
SymbolChecks.java

@@ -92,6 +92,10 @@ public class SymbolChecks {
             GrammarAST idNode = a;
             if ( a.getType()== ANTLRParser.ASSIGN ) {
                 idNode = (GrammarAST)a.getChild(0);
+                if ( g!=g.getOutermostGrammar() ) {
+                    ErrorManager.grammarError(ErrorType.TOKEN_ALIAS_IN_DELEGATE,
+                                              g.fileName, idNode.token, idNode.getText(), g.name);
+                }
             }
             GrammarAST prev = aliasTokenNames.get(idNode.getText());
             if ( prev==null ) {

@@ -261,4 +265,50 @@ public class SymbolChecks {
             }
         }
     }
+
+    // CAN ONLY CALL THE TWO NEXT METHODS AFTER GRAMMAR HAS RULE DEFS (see semanticpipeline)
+
+    public void checkRuleArgs(Grammar g, List<GrammarAST> rulerefs) {
+        if ( rulerefs==null ) return;
+        for (GrammarAST ref : rulerefs) {
+            String ruleName = ref.getText();
+            Rule r = g.getRule(ruleName);
+            if ( r==null && !ref.hasAncestor(ANTLRParser.DOT)) {
+                // only give error for unqualified rule refs now
+                ErrorManager.grammarError(ErrorType.UNDEFINED_RULE_REF,
+                                          g.fileName, ref.token, ruleName);
+            }
+            GrammarAST arg = (GrammarAST)ref.getChild(0);
+            if ( arg!=null && r.args==null ) {
+                ErrorManager.grammarError(ErrorType.RULE_HAS_NO_ARGS,
+                                          g.fileName, ref.token, ruleName);
+
+            }
+            else if ( arg==null && (r!=null&&r.args!=null) ) {
+                ErrorManager.grammarError(ErrorType.MISSING_RULE_ARGS,
+                                          g.fileName, ref.token, ruleName);
+            }
+        }
+    }
+
+    public void checkForQualifiedRuleIssues(Grammar g, List<GrammarAST> qualifiedRuleRefs) {
+        for (GrammarAST dot : qualifiedRuleRefs) {
+            GrammarAST grammar = (GrammarAST)dot.getChild(0);
+            GrammarAST rule = (GrammarAST)dot.getChild(1);
+            System.out.println(grammar.getText()+"."+rule.getText());
+            Grammar delegate = g.getImportedGrammar(grammar.getText());
+            if ( delegate==null ) {
+                ErrorManager.grammarError(ErrorType.NO_SUCH_GRAMMAR_SCOPE,
+                                          g.fileName, grammar.token, grammar.getText(),
+                                          rule.getText());
+            }
+            else {
+                if ( g.getRule(grammar.getText(), rule.getText())==null ) {
+                    ErrorManager.grammarError(ErrorType.NO_SUCH_RULE_IN_SCOPE,
+                                              g.fileName, rule.token, grammar.getText(),
+                                              rule.getText());
+                }
+            }
+        }
+    }
 }
ErrorType.java

@@ -72,7 +72,7 @@ public enum ErrorType {
     NO_TOKEN_DEFINITION(ErrorSeverity.ERROR, true, true),
     UNDEFINED_RULE_REF(ErrorSeverity.ERROR, true, true),
     LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(ErrorSeverity.ERROR, true, true),
-    CANNOT_ALIAS_TOKENS_IN_LEXER(ErrorSeverity.ERROR, true, true),
+    CANNOT_ALIAS_TOKENS(ErrorSeverity.ERROR, true, true),
     TOKEN_NAMES_MUST_START_UPPER(ErrorSeverity.ERROR, true, true),
     ATTRIBUTE_REF_NOT_IN_RULE(ErrorSeverity.ERROR, true, true),
     INVALID_RULE_SCOPE_ATTRIBUTE_REF(ErrorSeverity.ERROR, true, true),

@@ -123,6 +123,7 @@ public enum ErrorType {
     TOKEN_ALIAS_CONFLICT(ErrorSeverity.ERROR, true, true),
     TOKEN_ALIAS_REASSIGNMENT(ErrorSeverity.ERROR, true, true),
     TOKEN_VOCAB_IN_DELEGATE(ErrorSeverity.ERROR, true, true),
+    TOKEN_ALIAS_IN_DELEGATE(ErrorSeverity.ERROR, true, true),
     INVALID_IMPORT(ErrorSeverity.ERROR, true, true),
     IMPORTED_TOKENS_RULE_EMPTY(ErrorSeverity.ERROR, true, true),
     IMPORT_NAME_CLASH(ErrorSeverity.ERROR, true, true),
@ -1,9 +1,7 @@
|
||||||
package org.antlr.v4.tool;
|
package org.antlr.v4.tool;
|
||||||
|
|
||||||
import org.antlr.runtime.ANTLRStringStream;
|
import org.antlr.runtime.*;
|
||||||
import org.antlr.runtime.CommonTokenStream;
|
import org.antlr.runtime.tree.TreeWizard;
|
||||||
import org.antlr.runtime.ParserRuleReturnScope;
|
|
||||||
import org.antlr.runtime.RecognitionException;
|
|
||||||
import org.antlr.v4.Tool;
|
import org.antlr.v4.Tool;
|
||||||
import org.antlr.v4.analysis.Label;
|
import org.antlr.v4.analysis.Label;
|
||||||
import org.antlr.v4.parse.ANTLRLexer;
|
import org.antlr.v4.parse.ANTLRLexer;
|
||||||
|
@@ -49,14 +47,29 @@ public class Grammar implements AttributeResolver {
     public List<Grammar> importedGrammars;
     public Map<String, Rule> rules = new LinkedHashMap<String, Rule>();
 
+    /** Token names and literal tokens like "void" are uniquely indexed,
+     * with -1 implying EOF. Characters are different; they go from
+     * -1 (EOF) to \uFFFE. For example, 0 could be a binary byte you
+     * want to lex. Labels of DFA/NFA transitions can be both tokens
+     * and characters. I use negative numbers for bookkeeping labels
+     * like EPSILON. Char/String literals and token types overlap in the same
+     * space, however.
+     */
+    protected int maxTokenType = Token.MIN_TOKEN_TYPE-1;
+
     /** Map token like ID (but not literals like "while") to its token type */
-    public Map<String, Integer> tokenNameToTypeMap = new HashMap<String, Integer>();
+    public Map<String, Integer> tokenNameToTypeMap = new LinkedHashMap<String, Integer>();
 
     /** Map token literals like "while" to its token type. It may be that
      * WHILE="while"=35, in which case both tokenIDToTypeMap and this
      * field will have entries both mapped to 35.
      */
-    public Map<String, Integer> stringLiteralToTypeMap = new HashMap<String, Integer>();
+    public Map<String, Integer> stringLiteralToTypeMap = new LinkedHashMap<String, Integer>();
 
+    /** Map a token type to its token name.
+     * Must subtract MIN_TOKEN_TYPE from index.
+     */
+    public Vector<String> typeToTokenList = new Vector<String>();
+
     /** Map a name to an action.
      * The code generator will use this to fill holes in the output files.
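Side note on the map change above: swapping HashMap for LinkedHashMap presumably makes token-table iteration deterministic, so token types come back in grammar-declaration order rather than hash order. A minimal, self-contained sketch of the property being relied on, with invented token names and type numbers:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class TokenMapOrderSketch {
    public static void main(String[] args) {
        // LinkedHashMap iterates in insertion order, so a dump of the
        // token table is stable across runs and JVM versions.
        Map<String, Integer> ordered = new LinkedHashMap<String, Integer>();
        ordered.put("WHILE", 4);   // hypothetical token types
        ordered.put("ID", 5);
        ordered.put("INT", 6);
        System.out.println(ordered);   // always {WHILE=4, ID=5, INT=6}

        // A plain HashMap makes no ordering promise; the same entries
        // may print in any order.
        Map<String, Integer> unordered = new HashMap<String, Integer>(ordered);
        System.out.println(unordered);
    }
}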
@@ -75,6 +88,7 @@ public class Grammar implements AttributeResolver {
         this.tool = tool;
         this.ast = ast;
         this.name = ((GrammarAST)ast.getChild(0)).getText();
+        initTokenSymbolTables();
     }
 
     /** For testing */
@@ -92,8 +106,35 @@ public class Grammar implements AttributeResolver {
         this.ast = (GrammarRootAST)r.getTree();
         this.name = ((GrammarAST)ast.getChild(0)).getText();
         }
+        initTokenSymbolTables();
     }
 
+    protected void initTokenSymbolTables() {
+        // the faux token types take first NUM_FAUX_LABELS positions
+        // then we must have room for the predefined runtime token types
+        // like DOWN/UP used for tree parsing.
+        typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Token.MIN_TOKEN_TYPE-1);
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, "<INVALID>");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, "<EOT>");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, "<SEMPRED>");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Token.DOWN-1, "DOWN");
+        typeToTokenList.set(Label.NUM_FAUX_LABELS+Token.UP-1, "UP");
+        tokenNameToTypeMap.put("<INVALID>", Label.INVALID);
+        tokenNameToTypeMap.put("<ACTION>", Label.ACTION);
+        tokenNameToTypeMap.put("<EPSILON>", Label.EPSILON);
+        tokenNameToTypeMap.put("<SEMPRED>", Label.SEMPRED);
+        tokenNameToTypeMap.put("<SET>", Label.SET);
+        tokenNameToTypeMap.put("<EOT>", Label.EOT);
+        tokenNameToTypeMap.put("EOF", Label.EOF);
+        tokenNameToTypeMap.put("<EOR>", Label.EOR_TOKEN_TYPE);
+        tokenNameToTypeMap.put("DOWN", Token.DOWN);
+        tokenNameToTypeMap.put("UP", Token.UP);
+    }
+
     public void loadImportedGrammars() {
         if ( ast==null ) return;
         GrammarAST i = (GrammarAST)ast.getFirstChildWithType(ANTLRParser.IMPORT);
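The initialization above packs the negative bookkeeping labels and the predefined DOWN/UP types into the low slots of typeToTokenList, so a token of type t lands at index Label.NUM_FAUX_LABELS+t-1. A standalone sketch of that offset arithmetic; the constant values below are assumptions for illustration, not the real Label/Token values:

import java.util.Vector;

public class TokenIndexSketch {
    // Assumed values for illustration only; the real constants live in
    // org.antlr.v4.analysis.Label and org.antlr.runtime.Token.
    static final int NUM_FAUX_LABELS = 7;
    static final int MIN_TOKEN_TYPE = 4;   // DOWN/UP sit below this

    public static void main(String[] args) {
        Vector<String> typeToTokenList = new Vector<String>();
        // Reserve slots for faux labels plus the predefined runtime types.
        typeToTokenList.setSize(NUM_FAUX_LABELS + MIN_TOKEN_TYPE - 1);

        int ttype = MIN_TOKEN_TYPE;        // first user-defined token type
        int index = NUM_FAUX_LABELS + ttype - 1;
        if (index >= typeToTokenList.size()) {
            typeToTokenList.setSize(index + 1);   // grow on demand
        }
        typeToTokenList.set(index, "WHILE");

        // Looking the name back up applies the same offset.
        System.out.println(typeToTokenList.get(NUM_FAUX_LABELS + ttype - 1)); // WHILE
    }
}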
@@ -115,6 +156,7 @@ public class Grammar implements AttributeResolver {
         if ( root instanceof GrammarASTErrorNode ) return; // came back as error node
         GrammarRootAST ast = (GrammarRootAST)root;
         Grammar g = new Grammar(tool, ast);
+        g.fileName = importedGrammarName+".g";
         g.parent = this;
         importedGrammars.add(g);
     }
@@ -239,6 +281,11 @@ public class Grammar implements AttributeResolver {
         return qualifiedName+suffix;
     }
 
+    public String getStringLiteralLexerRuleName(String lit) {
+        int ttype = getTokenType(lit);
+        return "T__"+ttype;
+    }
+
     /** Return grammar directly imported by this grammar */
     public Grammar getImportedGrammar(String name) {
         for (Grammar g : importedGrammars) {
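getStringLiteralLexerRuleName implies that a literal's synthetic lexer rule is named from its token type alone. A trivial sketch of the scheme, with an invented type number:

public class LiteralRuleNameSketch {
    public static void main(String[] args) {
        int ttype = 35;                    // hypothetical type for 'while'
        String ruleName = "T__" + ttype;   // same scheme as above
        System.out.println(ruleName);      // prints T__35
    }
}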
@@ -247,19 +294,69 @@ public class Grammar implements AttributeResolver {
         return null;
     }
 
-    public int getTokenType(String tokenName) {
+    public int getTokenType(String token) {
         Integer I = null;
-        if ( tokenName.charAt(0)=='\'') {
-            I = stringLiteralToTypeMap.get(tokenName);
+        if ( token.charAt(0)=='\'') {
+            I = stringLiteralToTypeMap.get(token);
         }
         else { // must be a label like ID
-            I = tokenNameToTypeMap.get(tokenName);
+            I = tokenNameToTypeMap.get(token);
         }
         int i = (I!=null)?I.intValue(): Label.INVALID;
         //System.out.println("grammar type "+type+" "+tokenName+"->"+i);
         return i;
     }
 
+    /** Return a new unique integer in the token type space */
+    public int getNewTokenType() {
+        maxTokenType++;
+        return maxTokenType;
+    }
+
+    public void importVocab(Grammar g) {
+        this.tokenNameToTypeMap.putAll( g.tokenNameToTypeMap );
+        this.stringLiteralToTypeMap.putAll( g.stringLiteralToTypeMap );
+        this.typeToTokenList.addAll( g.typeToTokenList );
+    }
+
+    public int defineTokenName(String name) {
+        Integer prev = tokenNameToTypeMap.get(name);
+        if ( prev!=null ) return prev;
+        int ttype = getNewTokenType();
+        tokenNameToTypeMap.put(name, ttype);
+        setTokenForType(ttype, name);
+        return ttype;
+    }
+
+    public int defineStringLiteral(String lit) {
+        if ( !stringLiteralToTypeMap.containsKey(lit) ) {
+            int ttype = getNewTokenType();
+            stringLiteralToTypeMap.put(lit, ttype);
+            setTokenForType(ttype, lit);
+            return ttype;
+        }
+        return Token.INVALID_TOKEN_TYPE;
+    }
+
+    public int defineTokenAlias(String name, String lit) {
+        int ttype = defineTokenName(name);
+        stringLiteralToTypeMap.put(lit, ttype);
+        setTokenForType(ttype, name);
+        return ttype;
+    }
+
+    public void setTokenForType(int ttype, String text) {
+        int index = Label.NUM_FAUX_LABELS+ttype-1;
+        if ( index>=typeToTokenList.size() ) {
+            typeToTokenList.setSize(index+1);
+        }
+        String prevToken = (String)typeToTokenList.get(index);
+        if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
+            // only record if nothing there before or if thing before was a literal
+            typeToTokenList.set(index, text);
+        }
+    }
+
     // no isolated attr at grammar action level
     public Attribute resolveToAttribute(String x, ActionAST node) {
         return null;
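Read together, defineTokenName/defineTokenAlias mean an alias like WHILE='while' allocates one token type shared by both maps, while setTokenForType keeps a real token name from being displaced by a literal. A simplified standalone mimic of that flow (not the tool's API; typeToTokenList bookkeeping omitted, token names invented):

import java.util.LinkedHashMap;
import java.util.Map;

public class TokenAliasSketch {
    static int maxTokenType = 3;   // pretend Token.MIN_TOKEN_TYPE-1
    static Map<String, Integer> tokenNameToTypeMap = new LinkedHashMap<String, Integer>();
    static Map<String, Integer> stringLiteralToTypeMap = new LinkedHashMap<String, Integer>();

    static int defineTokenName(String name) {
        Integer prev = tokenNameToTypeMap.get(name);
        if (prev != null) return prev;            // already defined: reuse type
        tokenNameToTypeMap.put(name, ++maxTokenType);
        return maxTokenType;
    }

    static int defineTokenAlias(String name, String lit) {
        int ttype = defineTokenName(name);        // name and literal share a type
        stringLiteralToTypeMap.put(lit, ttype);
        return ttype;
    }

    public static void main(String[] args) {
        int a = defineTokenName("WHILE");
        int b = defineTokenAlias("WHILE", "'while'");
        System.out.println(a == b);                               // true
        System.out.println(stringLiteralToTypeMap.get("'while'")); // same type
    }
}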
@@ -324,6 +421,33 @@ public class Grammar implements AttributeResolver {
             default :
                 return "<invalid>";
         }
     }
 
+    public static Map<String,String> getStringLiteralAliasesFromLexerRules(GrammarRootAST ast) {
+        GrammarAST combinedRulesRoot =
+            (GrammarAST)ast.getFirstChildWithType(ANTLRParser.RULES);
+        if ( combinedRulesRoot==null ) return null;
+
+        List<GrammarASTWithOptions> ruleNodes = combinedRulesRoot.getChildren();
+        if ( ruleNodes==null || ruleNodes.size()==0 ) return null;
+        GrammarASTAdaptor adaptor = new GrammarASTAdaptor(ruleNodes.get(0).token.getInputStream());
+        TreeWizard wiz = new TreeWizard(adaptor,ANTLRParser.tokenNames);
+        Map<String,String> lexerRuleToStringLiteral = new HashMap<String,String>();
+
+        for (GrammarASTWithOptions r : ruleNodes) {
+            String ruleName = r.getChild(0).getText();
+            if ( Character.isUpperCase(ruleName.charAt(0)) ) {
+                Map nodes = new HashMap();
+                boolean isLitRule =
+                    wiz.parse(r, "(RULE %name:ID (BLOCK (ALT %lit:STRING_LITERAL)))", nodes);
+                if ( isLitRule ) {
+                    GrammarAST litNode = (GrammarAST)nodes.get("lit");
+                    GrammarAST nameNode = (GrammarAST)nodes.get("name");
+                    lexerRuleToStringLiteral.put(litNode.getText(), nameNode.getText());
+                }
+            }
+        }
+        return lexerRuleToStringLiteral;
+    }
+
 }
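The TreeWizard pattern above picks out lexer rules whose whole body is a single string literal (RULE → BLOCK → one-literal ALT). A self-contained sketch of the same parse(...) call against a hand-built tree; the token vocabulary is invented, and the runtime's CommonTreeAdaptor stands in for GrammarASTAdaptor:

import java.util.HashMap;
import java.util.Map;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeAdaptor;
import org.antlr.runtime.tree.TreeWizard;

public class TreeWizardSketch {
    public static void main(String[] args) {
        // Hypothetical token vocabulary; array index = token type.
        String[] tokenNames = { "<invalid>", "<EOR>", "<DOWN>", "<UP>",
                                "RULE", "BLOCK", "ALT", "ID", "STRING_LITERAL" };
        TreeWizard wiz = new TreeWizard(new CommonTreeAdaptor(), tokenNames);

        // Build the shape the method above matches: a lexer rule whose
        // single alternative is one string literal.
        Object t = wiz.create("(RULE ID[WHILE] (BLOCK (ALT STRING_LITERAL['while'])))");

        Map labels = new HashMap();
        boolean matched =
            wiz.parse(t, "(RULE %name:ID (BLOCK (ALT %lit:STRING_LITERAL)))", labels);
        System.out.println(matched);   // true
        if (matched) {
            System.out.println(((CommonTree)labels.get("name")).getText()); // WHILE
            System.out.println(((CommonTree)labels.get("lit")).getText());  // 'while'
        }
    }
}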
@@ -167,7 +167,7 @@ public abstract class BaseTest {
         ErrorManager.setErrorListener(equeue);
         */
         Tool antlr = newTool(optionsA);
-        antlr.process();
+        antlr.processGrammarsOnCommandLine();
         ANTLRErrorListener listener = ErrorManager.getErrorListener();
         if ( listener instanceof ErrorQueue ) {
             ErrorQueue equeue = (ErrorQueue)listener;