adding new files

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 8659]
This commit is contained in:
parrt 2011-06-17 16:05:00 -08:00
parent 2ddeb7c769
commit 061fff09f9
94 changed files with 9629 additions and 173 deletions

View File

@ -27,7 +27,6 @@
*/
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.misc.LABitSet;
/** A parser for TokenStreams. "parser grammars" result in a subclass
* of this.
@ -53,8 +52,7 @@ public class Parser extends BaseRecognizer {
}
protected Object getMissingSymbol(RecognitionException e,
int expectedTokenType,
LABitSet follow)
int expectedTokenType)
{
String tokenText = null;
if ( expectedTokenType== Token.EOF ) tokenText = "<missing EOF>";

View File

@ -1,7 +1,6 @@
package org.antlr.v4.runtime.atn;
import org.antlr.v4.automata.ATNSerializer;
import org.antlr.v4.misc.*;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.tool.*;
@ -85,22 +84,4 @@ public class ATN {
public int getNumberOfDecisions() {
return decisionToATNState.size();
}
/** Used by Java target to encode short/int array as chars in string. */
public String getSerializedAsString() {
// wrap the serialized integer list as a char[] and then as a String
return new String(Utils.toCharArray(getSerialized()));
}
/** Serialize this ATN into a list of integers via ATNSerializer. */
public List<Integer> getSerialized() {
return new ATNSerializer(this).serialize();
}
/** Serialize this ATN and encode the result as a char array. */
public char[] getSerializedAsChars() {
    // Reuse getSerialized() instead of re-running the serializer inline,
    // matching how getSerializedAsString() obtains the data.
    return Utils.toCharArray(getSerialized());
}
/** Serialize this ATN, then decode it back into a human-readable dump. */
public String getDecoded() {
return new ATNSerializer(this).decode(Utils.toCharArray(getSerialized()));
}
}

View File

@ -2,8 +2,7 @@ package org.antlr.v4.runtime.atn;
import org.antlr.v4.misc.*;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.dfa.*;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.runtime.dfa.DFAState;
import java.util.*;
@ -138,6 +137,7 @@ public abstract class ATNInterpreter {
return s;
}
/*
public static void dump(DFA dfa, Grammar g) {
DOTGenerator dot = new DOTGenerator(g);
String output = dot.getDOT(dfa, false);
@ -147,4 +147,5 @@ public abstract class ATNInterpreter {
public static void dump(DFA dfa) {
dump(dfa, null);
}
*/
}

View File

@ -22,10 +22,6 @@ public class ActionTransition extends Transition {
return true; // we are to be ignored by analysis 'cept for predicates
}
// Action transitions carry no ordering; all compare as equal.
public int compareTo(Object o) {
return 0;
}
public String toString() {
if ( actionAST!=null ) return "{"+actionAST.getText()+"}";
return "action_"+ruleIndex+":"+actionIndex;

View File

@ -19,28 +19,6 @@ public class AtomTransition extends Transition {
public IntervalSet label() { return IntervalSet.of(label); }
public int hashCode() { return label; }
/** Equal when the other transition matches exactly the same token type.
 *  A SetTransition is compared by label set; any other type is unequal.
 *  NOTE(review): cross-type equality with SetTransition breaks symmetry
 *  unless SetTransition.equals mirrors this logic — confirm.
 */
public boolean equals(Object o) {
    if ( this == o ) return true; // equals if same object
    if ( o==null ) return false;
    if ( o.getClass() == SetTransition.class ) {
        // compare the underlying label sets; the old code compared an
        // IntervalSet against the SetTransition object itself (always false)
        return IntervalSet.of(label).equals(((SetTransition)o).label());
    }
    if ( !(o instanceof AtomTransition) ) return false; // avoid ClassCastException
    // old code used != here, returning true for DIFFERENT labels (inverted)
    return label==((AtomTransition)o).label;
}
// public boolean intersect(Label other) {
// if ( other.getClass() == AtomTransition.class ) {
// return label==((AtomTransition)other).label;
// }
// return ((SetLabel)other).label.member(this.label);
// }
/** Order atom transitions by token type. */
public int compareTo(Object o) {
    // Integer.compare avoids the overflow pitfall of subtraction-based comparators.
    return Integer.compare(this.label, ((AtomTransition)o).label);
}
@Override
public String toString(Grammar g) {
if (g!=null ) return g.getTokenDisplayName(label);

View File

@ -7,10 +7,6 @@ public class EpsilonTransition extends Transition {
public boolean isEpsilon() { return true; }
// Epsilon transitions have no natural order; all compare as equal.
public int compareTo(Object o) {
return 0;
}
@Override
public String toString(Grammar g) {
return "epsilon";

View File

@ -1,8 +1,7 @@
package org.antlr.v4.runtime.atn;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.GrammarAST;
import org.antlr.v4.tool.*;
public class NotSetTransition extends SetTransition {
public NotSetTransition(GrammarAST ast, IntervalSet label, ATNState target) {

View File

@ -1,8 +1,6 @@
package org.antlr.v4.runtime.atn;
import org.antlr.v4.analysis.SemanticContext;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.GrammarAST;
import org.antlr.v4.tool.*;
/** TODO: this is old comment:
* A tree of semantic predicates from the grammar AST if label==SEMPRED.
@ -14,12 +12,10 @@ public class PredicateTransition extends Transition {
public int ruleIndex;
public int predIndex;
public GrammarAST predAST;
public SemanticContext semanticContext;
/** Build a predicate transition whose semantic context wraps the predicate AST node. */
public PredicateTransition(GrammarAST predicateASTNode, ATNState target) {
super(target);
this.predAST = predicateASTNode;
this.semanticContext = new SemanticContext.Predicate(predicateASTNode);
}
public PredicateTransition(ATNState target, int ruleIndex, int predIndex) {
@ -30,29 +26,7 @@ public class PredicateTransition extends Transition {
public boolean isEpsilon() { return true; }
// Predicate transitions are unordered; constant 0 makes them all compare equal.
public int compareTo(Object o) {
return 0;
}
// Hash solely on the semantic context, consistent with equals().
public int hashCode() {
return semanticContext.hashCode();
}
/** Two predicate transitions are equal iff their semantic contexts are equal. */
public boolean equals(Object o) {
    // identical reference => equal; instanceof also rejects null
    if ( this == o ) return true;
    if ( !(o instanceof PredicateTransition) ) return false;
    PredicateTransition that = (PredicateTransition)o;
    return semanticContext.equals(that.semanticContext);
}
/** Prefer the semantic context's text, then the predicate AST text,
 *  falling back to the rule/predicate indexes.
 */
public String toString() {
if ( semanticContext!=null ) return semanticContext.toString();
if ( predAST!=null ) return predAST.getText();
return "pred-"+ruleIndex+":"+predIndex;
}

View File

@ -1,7 +1,6 @@
package org.antlr.v4.runtime.atn;
import org.antlr.v4.misc.CharSupport;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.misc.*;
public class RangeTransition extends Transition {
public int from;
@ -15,10 +14,6 @@ public class RangeTransition extends Transition {
super(target);
}
// Range transitions are unordered; all compare as equal.
public int compareTo(Object o) {
return 0;
}
@Override
public IntervalSet label() { return IntervalSet.of(from,to); }

View File

@ -34,8 +34,4 @@ public class RuleTransition extends Transition {
}
public boolean isEpsilon() { return true; }
// Rule transitions are unordered; all compare as equal.
public int compareTo(Object o) {
return 0;
}
}

View File

@ -22,28 +22,6 @@ public class SetTransition extends Transition {
public IntervalSet label() { return label; }
// Set transitions are unordered; all compare as equal.
public int compareTo(Object o) {
return 0;
}
// public boolean intersect(Label other) {
// if ( other.getClass() == SetTransition.class ) {
// return label.and(((SetTransition)other).label).isNil();
// }
// return label.member(((AtomTransition)other).label);
// }
public int hashCode() { return label.hashCode(); }
/** Equal when the other transition matches the same label set; an
 *  AtomTransition is compared as its single-element set.
 *  NOTE(review): cross-type equality with AtomTransition breaks symmetry
 *  unless AtomTransition.equals mirrors this logic — confirm.
 */
public boolean equals(Object o) {
    if ( this == o ) return true; // equals if same object
    if ( o==null ) return false;
    if ( o.getClass() == AtomTransition.class ) {
        // compare against the atom's single-element set directly; the old
        // code reassigned o to an IntervalSet and then cast that IntervalSet
        // to SetTransition, which always threw ClassCastException
        return this.label.equals(IntervalSet.of(((AtomTransition)o).label));
    }
    if ( !(o instanceof SetTransition) ) return false; // avoid ClassCastException
    return this.label.equals(((SetTransition)o).label);
}
// Render the label set using the grammar's token display names.
public String toString(Grammar g) {
return label.toString(g);
}

View File

@ -1,41 +0,0 @@
package org.antlr.v4.runtime.atn;
/** ATN simulation thread state: an ATN address plus the predicted
 *  alternative and the rule-invocation context that got us there.
 */
public class ThreadState {
    public int addr;
    public int alt; // or speculatively matched token type for lexers
    public ATNStack context;
    public int inputIndex = -1; // char (or token?) index from 0
    public int inputMarker = -1; // accept states track input markers in case we need to rewind

    public ThreadState(int addr, int alt, ATNStack context) {
        this.addr = addr;
        this.alt = alt;
        this.context = context;
    }

    /** Copy constructor.
     *  NOTE(review): inputMarker is deliberately(?) not copied — confirm.
     */
    public ThreadState(ThreadState t) {
        this.addr = t.addr;
        this.alt = t.alt;
        this.context = t.context;
        this.inputIndex = t.inputIndex;
    }

    /** Equal when addr, alt, and context all match. */
    public boolean equals(Object o) {
        if ( this==o ) return true;
        // instanceof also rejects null; the old code cast unconditionally
        // and threw ClassCastException for any non-ThreadState argument
        if ( !(o instanceof ThreadState) ) return false;
        ThreadState other = (ThreadState)o;
        return this.addr==other.addr &&
               this.alt==other.alt &&
               this.context.equals(other.context);
    }

    // Weaker than equals (ignores alt) but consistent: equal states hash equally.
    public int hashCode() { return addr + context.hashCode(); }

    public String toString() {
        if ( context.parent==null ) {
            return "("+addr+","+alt+")";
        }
        return "("+addr+","+alt+","+context+")";
    }
}

View File

@ -3,8 +3,7 @@ package org.antlr.v4.runtime.atn;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.Grammar;
import java.util.HashMap;
import java.util.Map;
import java.util.*;
/** An ATN transition between any two ATN states. Subclasses define
* atom, set, epsilon, action, predicate, rule transitions.
@ -18,7 +17,7 @@ import java.util.Map;
* the states. We'll use the term Edge for the DFA to distinguish them from
* ATN transitions.
*/
public abstract class Transition implements Comparable {
public abstract class Transition {
// constants for serialization
public static final int EPSILON = 1;
public static final int RANGE = 2;

View File

@ -4,9 +4,6 @@ import org.antlr.v4.tool.Grammar;
public class WildcardTransition extends Transition {
public WildcardTransition(ATNState target) { super(target); }
// Wildcard transitions are unordered; all compare as equal.
public int compareTo(Object o) {
return 0;
}
@Override
public String toString(Grammar g) {

View File

@ -2,7 +2,11 @@ package org.antlr.v4;
import org.antlr.runtime.*;
import org.antlr.tool.DOTGenerator;
import org.antlr.v4.analysis.AnalysisPipeline;
import org.antlr.v4.automata.*;
import org.antlr.v4.codegen.CodeGenPipeline;
import org.antlr.v4.parse.*;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.*;
import java.io.IOException;
@ -65,6 +69,10 @@ public class Tool {
new Option("launch_ST_inspector", "-dbgST", "launch StringTemplate visualizer on generated code"),
};
// The internal options are for my use on the command line during dev
public static boolean internalOption_PrintGrammarTree = false;
public static boolean internalOption_ShowATNConfigsInDFA = false;
public final String[] args;
protected List<String> grammarFiles = new ArrayList<String>();

View File

@ -0,0 +1,56 @@
package org.antlr.v4.analysis;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.tool.Grammar;
import java.util.*;
/** Grammar analysis pipeline: detects left recursion, then computes
 *  per-decision lookahead sets for parsers/tree parsers.
 */
public class AnalysisPipeline {
public Grammar g;
public AnalysisPipeline(Grammar g) {
this.g = g;
}
/** Run the pipeline; bails out without lookahead analysis if any
 *  left-recursive cycle is found.
 */
public void process() {
// LEFT-RECURSION CHECK
LeftRecursionDetector lr = new LeftRecursionDetector(g.atn);
lr.check();
if ( lr.listOfRecursiveCycles.size()>0 ) return; // bail out
// BUILD DFA FOR EACH DECISION
if ( !g.isLexer() ) processParserOrTreeParser();
}
/** Compute and record the LOOK set for every decision state in the ATN.
 *  NOTE(review): the System.out.println calls look like leftover debug
 *  output — consider removing or routing through a logger.
 */
void processParserOrTreeParser() {
g.decisionLOOK =
new Vector<IntervalSet[]>(g.atn.getNumberOfDecisions()+1);
for (DecisionState s : g.atn.decisionToATNState) {
System.out.println("\nDECISION "+s.decision+" in rule "+s.rule.name);
LL1Analyzer anal = new LL1Analyzer(g.atn);
IntervalSet[] look = anal.getDecisionLookahead(s);
System.out.println("look="+ Arrays.toString(look));
g.decisionLOOK.setSize(s.decision+1);
g.decisionLOOK.set(s.decision, look);
System.out.println("LL(1)? "+disjoint(look));
}
}
/** Return true if the per-alternative lookahead sets are pairwise disjoint,
 *  i.e., the decision is LL(1); false upon the first collision.
 *  Element 0 of altLook is unused — alternatives are numbered from 1.
 *  (The previous comment, "Return lookahead depth ... or return 0",
 *  did not match the boolean return type.)
 */
public static boolean disjoint(IntervalSet[] altLook) {
boolean collision = false;
IntervalSet combined = new IntervalSet();
for (int a=1; a<altLook.length; a++) {
IntervalSet look = altLook[a];
if ( !look.and(combined).isNil() ) {
System.out.println("alt "+a+" not disjoint with "+combined+"; look = "+look);
collision = true;
break;
}
combined.addAll(look);
}
return !collision;
}
}

View File

@ -215,4 +215,21 @@ public class ATNSerializer {
if ( atn.g!=null ) return atn.g.getTokenDisplayName(t);
return String.valueOf(t);
}
/** Used by Java target to encode short/int array as chars in string. */
public static String getSerializedAsString(ATN atn) {
// wrap the serialized integer list as a char[] and then as a String
return new String(Utils.toCharArray(getSerialized(atn)));
}
/** Serialize the given ATN into a list of integers. */
public static List<Integer> getSerialized(ATN atn) {
return new ATNSerializer(atn).serialize();
}
/** Serialize the given ATN and encode the result as a char array. */
public static char[] getSerializedAsChars(ATN atn) {
    // Reuse getSerialized(atn) rather than re-running the serializer inline,
    // for consistency with getSerializedAsString(atn).
    return Utils.toCharArray(getSerialized(atn));
}
/** Serialize the given ATN, then decode it back into a human-readable dump. */
public static String getDecoded(ATN atn) {
return new ATNSerializer(atn).decode(Utils.toCharArray(getSerialized(atn)));
}
}

View File

@ -1,7 +1,7 @@
package org.antlr.v4.automata;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;
import org.antlr.v4.misc.*;
import org.antlr.v4.parse.*;

View File

@ -0,0 +1,254 @@
package org.antlr.v4.codegen;
import org.antlr.runtime.*;
import org.antlr.v4.codegen.model.RuleFunction;
import org.antlr.v4.codegen.model.actions.*;
import org.antlr.v4.parse.*;
import org.antlr.v4.tool.*;
import java.lang.reflect.Constructor;
import java.util.*;
/** Translates the text of a grammar action (e.g. {$x.y = $ID.text;}) into
 *  a list of ActionChunk output-model objects by listening to the events
 *  fired by ActionSplitter over the action text.
 */
public class ActionTranslator implements ActionSplitterListener {
// Maps $rule.prop names to the model class that renders the reference.
public static final Map<String, Class> rulePropToModelMap = new HashMap<String, Class>() {{
put("start", RulePropertyRef_start.class);
put("stop", RulePropertyRef_stop.class);
put("tree", RulePropertyRef_tree.class);
put("text", RulePropertyRef_text.class);
put("st", RulePropertyRef_st.class);
}};
// Maps $token.prop names to the model class that renders the reference.
public static final Map<String, Class> tokenPropToModelMap = new HashMap<String, Class>() {{
put("text", TokenPropertyRef_text.class);
put("type", TokenPropertyRef_type.class);
put("line", TokenPropertyRef_line.class);
put("index", TokenPropertyRef_index.class);
put("pos", TokenPropertyRef_pos.class);
put("channel", TokenPropertyRef_channel.class);
put("tree", TokenPropertyRef_tree.class);
put("int", TokenPropertyRef_int.class);
}};
ActionAST node; // the action AST node being translated
RuleFunction rf; // enclosing rule function model, if any
List<ActionChunk> chunks = new ArrayList<ActionChunk>(); // accumulated output
OutputModelFactory factory;
public ActionTranslator(OutputModelFactory factory, ActionAST node) {
this.factory = factory;
this.node = node;
}
/** Translate the action token's text, stripping one outer {...} pair if present. */
public static List<ActionChunk> translateAction(OutputModelFactory factory,
RuleFunction rf,
Token tokenWithinAction,
ActionAST node)
{
String action = tokenWithinAction.getText();
int firstCurly = action.indexOf('{');
int lastCurly = action.lastIndexOf('}');
if ( firstCurly>=0 && lastCurly>=0 ) {
action = action.substring(firstCurly+1, lastCurly); // trim {...}
}
return translateActionChunk(factory, rf, action, node);
}
/** Drive ActionSplitter over the action text; the splitter calls back
 *  into this listener, which fills in the chunk list.
 */
public static List<ActionChunk> translateActionChunk(OutputModelFactory factory,
RuleFunction rf,
String action,
ActionAST node)
{
Token tokenWithinAction = node.token;
ActionTranslator translator = new ActionTranslator(factory, node);
translator.rf = rf;
// NOTE(review): debug output — consider removing
System.out.println("translate "+action);
ANTLRStringStream in = new ANTLRStringStream(action);
in.setLine(tokenWithinAction.getLine());
in.setCharPositionInLine(tokenWithinAction.getCharPositionInLine());
ActionSplitter trigger = new ActionSplitter(in, translator);
// forces eval, triggers listener methods
trigger.getActionTokens();
return translator.chunks;
}
/** Handle an unqualified $x reference: attribute, token label, list
 *  label, or dynamic scope.
 *  NOTE(review): when an ARG/RET attribute matches, control still falls
 *  through to the token/list/scope checks below — confirm the cases are
 *  mutually exclusive, otherwise chunks could get duplicate entries.
 */
public void attr(String expr, Token x) {
System.out.println("attr "+x);
Attribute a = node.resolver.resolveToAttribute(x.getText(), node);
if ( a!=null ) {
switch ( a.dict.type ) {
case ARG: chunks.add(new ArgRef(x.getText())); break;
case RET: chunks.add(new RetValueRef(x.getText())); break;
// case PREDEFINED_RULE: chunks.add(new RetValueRef(x.getText())); break;
// case PREDEFINED_TREE_RULE: chunks.add(new RetValueRef(x.getText())); break;
}
}
if ( node.resolver.resolvesToToken(x.getText(), node) ) {
chunks.add(new TokenRef(getTokenLabel(x.getText()))); // $label
return;
}
if ( node.resolver.resolvesToListLabel(x.getText(), node) ) {
return; // $ids for ids+=ID etc...
}
if ( node.resolver.resolveToDynamicScope(x.getText(), node)!=null ) {
chunks.add(new DynScopeRef(getDynamicScopeName(x.getText()))); // $S for scope S is ok
return;
}
// switch ( a.dict.type ) {
// case ARG: chunks.add(new ArgRef(x.getText())); break;
// case RET: chunks.add(new RetValueRef(x.getText())); break;
// case PREDEFINED_RULE: chunks.add(new RetValueRef(x.getText())); break;
// case PREDEFINED_LEXER_RULE: chunks.add(new RetValueRef(x.getText())); break;
// case PREDEFINED_TREE_RULE: chunks.add(new RetValueRef(x.getText())); break;
// case GLOBAL_SCOPE: chunks.add(new RetValueRef(x.getText())); break;
// case RULE_SCOPE: chunks.add(new RetValueRef(x.getText())); break;
// case TOKEN: chunks.add(new TokenRef(x.getText())); break;
// }
}
/** $x.y = expr; */
public void setQualifiedAttr(String expr, Token x, Token y, Token rhs) {
System.out.println("setQAttr "+x+"."+y+"="+rhs);
// x has to be current rule; just set y attr
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
chunks.add(new SetAttr(y.getText(), rhsChunks));
}
/** Handle a qualified $x.y read.
 *  NOTE(review): unlike attr(), the resolution result is not null-checked;
 *  a failed resolution will NPE at a.dict.type — confirm callers guarantee
 *  that x.y always resolves here.
 */
public void qualifiedAttr(String expr, Token x, Token y) {
System.out.println("qattr "+x+"."+y);
Attribute a = node.resolver.resolveToAttribute(x.getText(), y.getText(), node);
switch ( a.dict.type ) {
case ARG: chunks.add(new ArgRef(y.getText())); break; // has to be current rule
case RET:
// $currentRule.y reads the local return value; otherwise go through a rule label
if ( factory.currentRule.size()>0 && factory.currentRule.peek().name.equals(x.getText()) ) {
chunks.add(new RetValueRef(y.getText())); break;
}
else {
chunks.add(new QRetValueRef(getRuleLabel(x.getText()), y.getText())); break;
}
case PREDEFINED_RULE: chunks.add(getRulePropertyRef(x, y)); break;
case TOKEN: chunks.add(getTokenPropertyRef(x, y)); break;
// case PREDEFINED_LEXER_RULE: chunks.add(new RetValueRef(x.getText())); break;
// case PREDEFINED_TREE_RULE: chunks.add(new RetValueRef(x.getText())); break;
}
}
/** $x = expr; for an unqualified attribute. */
public void setAttr(String expr, Token x, Token rhs) {
System.out.println("setAttr "+x+" "+rhs);
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
chunks.add(new SetAttr(x.getText(), rhsChunks));
}
/** $Scope::attr read. */
public void dynamicScopeAttr(String expr, Token x, Token y) {
System.out.println("scoped "+x+"."+y);
// we assume valid, just gen code
chunks.add(new DynScopeAttrRef(getDynamicScopeName(x.getText()), y.getText()));
}
/** $Scope::attr = expr; */
public void setDynamicScopeAttr(String expr, Token x, Token y, Token rhs) {
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
chunks.add(new SetDynScopeAttr(getDynamicScopeName(x.getText()), y.getText(), rhsChunks));
}
/** $Scope[-i]::attr read. */
public void dynamicNegativeIndexedScopeAttr(String expr, Token x, Token y, Token index) {
List<ActionChunk> indexChunks = translateActionChunk(factory,rf,index.getText(),node);
chunks.add(new DynScopeAttrRef_negIndex(getDynamicScopeName(x.getText()), y.getText(), indexChunks));
}
/** $Scope[-i]::attr = expr; */
public void setDynamicNegativeIndexedScopeAttr(String expr, Token x, Token y, Token index, Token rhs) {
List<ActionChunk> indexChunks = translateActionChunk(factory,rf,index.getText(),node);
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
chunks.add(new SetDynScopeAttr_negIndex(getDynamicScopeName(x.getText()), y.getText(), indexChunks, rhsChunks));
}
/** $Scope[i]::attr read. */
public void dynamicAbsoluteIndexedScopeAttr(String expr, Token x, Token y, Token index) {
List<ActionChunk> indexChunks = translateActionChunk(factory,rf,index.getText(),node);
chunks.add(new DynScopeAttrRef_index(getDynamicScopeName(x.getText()), y.getText(), indexChunks));
}
/** $Scope[i]::attr = expr; */
public void setDynamicAbsoluteIndexedScopeAttr(String expr, Token x, Token y, Token index, Token rhs) {
List<ActionChunk> indexChunks = translateActionChunk(factory,rf,index.getText(),node);
List<ActionChunk> rhsChunks = translateActionChunk(factory,rf,rhs.getText(),node);
chunks.add(new SetDynScopeAttr_index(getDynamicScopeName(x.getText()), y.getText(), indexChunks, rhsChunks));
}
// The template-related splitter events below are intentionally ignored
// for now; no chunks are produced for them.
public void templateInstance(String expr) {
}
public void indirectTemplateInstance(String expr) {
}
public void setExprAttribute(String expr) {
}
public void setSTAttribute(String expr) {
}
public void templateExpr(String expr) {
}
public void unknownSyntax(Token t) {
}
/** Literal action text between attribute references passes straight through. */
public void text(String text) {
chunks.add(new ActionText(text));
}
/** Reflectively build the TokenPropertyRef_* model object for $x.y.
 *  NOTE(review): returns null on reflection failure (after reporting an
 *  internal error); qualifiedAttr() adds the result to chunks unchecked.
 */
TokenPropertyRef getTokenPropertyRef(Token x, Token y) {
try {
Class c = tokenPropToModelMap.get(y.getText());
Constructor ctor = c.getConstructor(new Class[] {String.class});
TokenPropertyRef ref =
(TokenPropertyRef)ctor.newInstance(getRuleLabel(x.getText()));
return ref;
}
catch (Exception e) {
factory.g.tool.errMgr.toolError(ErrorType.INTERNAL_ERROR, e);
}
return null;
}
/** Reflectively build the RulePropertyRef_* model object for $x.y.
 *  NOTE(review): same nullable-return caveat as getTokenPropertyRef.
 */
RulePropertyRef getRulePropertyRef(Token x, Token y) {
try {
Class c = rulePropToModelMap.get(y.getText());
Constructor ctor = c.getConstructor(new Class[] {String.class});
RulePropertyRef ref =
(RulePropertyRef)ctor.newInstance(getRuleLabel(x.getText()));
return ref;
}
catch (Exception e) {
factory.g.tool.errMgr.toolError(ErrorType.INTERNAL_ERROR, e);
}
return null;
}
/** Use x itself if it is an explicit label, else the target's implicit token label. */
public String getTokenLabel(String x) {
if ( node.resolver.resolvesToLabel(x, node) ) return x;
return factory.gen.target.getImplicitTokenLabel(x);
}
/** Use x itself if it is an explicit label, else the target's implicit rule label. */
public String getRuleLabel(String x) {
if ( node.resolver.resolvesToLabel(x, node) ) return x;
return factory.gen.target.getImplicitRuleLabel(x);
}
/** Map scope name x to the target's struct name: rule scope if x names a
 *  rule in the grammar, otherwise a global scope.
 */
public String getDynamicScopeName(String x) {
String scope;
if ( factory.g.getRule(x)==null ) {
scope = factory.gen.target.getGlobalDynamicScopeStructName(x);
}
else {
scope = factory.gen.target.getRuleDynamicScopeStructName(x);
}
return scope;
}
// public String getTokenLabel(String x, ActionAST node) {
// Alternative alt = node.resolver.
// Rule r = node.ATNState.rule;
// if ( r.tokenRefs.get(x)!=null ) return true;
// LabelElementPair anyLabelDef = getAnyLabelDef(x);
// if ( anyLabelDef!=null && anyLabelDef.type== LabelType.TOKEN_LABEL ) return true;
// return false;
// }
}

View File

@ -0,0 +1,180 @@
package org.antlr.v4.codegen;
import org.antlr.v4.codegen.model.OutputModelObject;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.*;
import org.stringtemplate.v4.*;
import java.io.*;
import java.util.*;
/** General controller for code gen. Can instantiate sub generator(s).
 *  Loads the language Target and its StringTemplate group, builds the
 *  output model, walks it into templates, and writes the result plus
 *  the .tokens vocab file.
*/
public class CodeGenerator {
public static boolean LAUNCH_ST_INSPECTOR = false; // pop up the ST inspector on generate()
public static final String TEMPLATE_ROOT = "org/antlr/v4/tool/templates/codegen";
public static final String VOCAB_FILE_EXTENSION = ".tokens";
// ST pattern rendering "name=type" lines for tokens and string literals
public final static String vocabFilePattern =
"<tokens.keys:{t | <t>=<tokens.(t)>\n}>" +
"<literals.keys:{t | <t>=<literals.(t)>\n}>";
public Grammar g;
public Target target; // language-specific code-gen behavior
public STGroup templates; // templates for the chosen language
public int lineWidth = 72; // wrap width for the auto-indent writer
public CodeGenerator(Grammar g) {
this.g = g;
String language = g.getOption("language", "Java");
loadLanguageTarget(language);
loadTemplates(language);
}
/** Reflectively load org.antlr.v4.codegen.<language>Target, falling back
 *  to the generic Target if the class does not exist.
 */
void loadLanguageTarget(String language) {
String targetName = "org.antlr.v4.codegen."+language+"Target";
try {
Class c = Class.forName(targetName);
target = (Target)c.newInstance();
}
catch (ClassNotFoundException cnfe) {
target = new Target(); // use default
}
catch (InstantiationException ie) {
g.tool.errMgr.toolError(ErrorType.CANNOT_CREATE_TARGET_GENERATOR,
targetName,
ie);
}
// NOTE(review): variable is named cnfe but catches IllegalAccessException
catch (IllegalAccessException cnfe) {
g.tool.errMgr.toolError(ErrorType.CANNOT_CREATE_TARGET_GENERATOR,
targetName,
cnfe);
}
}
/** Load <language>.stg from TEMPLATE_ROOT and register the int renderer.
 *  NOTE(review): a template-load failure is reported as
 *  CANNOT_CREATE_TARGET_GENERATOR — confirm that is the intended error type.
 */
public void loadTemplates(String language) {
try {
templates = new STGroupFile(TEMPLATE_ROOT+"/"+language+"/"+language+".stg");
templates.registerRenderer(Integer.class, new NumberRenderer());
}
catch (IllegalArgumentException iae) {
g.tool.errMgr.toolError(ErrorType.CANNOT_CREATE_TARGET_GENERATOR,
language);
}
// if ( EMIT_TEMPLATE_DELIMITERS ) {
// templates.emitDebugStartStopStrings(true);
// templates.doNotEmitDebugStringsForTemplate("codeFileExtension");
// templates.doNotEmitDebugStringsForTemplate("headerFileExtension");
// }
}
/** Build the output model (parser or lexer factory based on grammar type)
 *  and walk it into a rendered template.
 */
public ST generate() {
OutputModelFactory factory;
if ( g.isParser() || g.isCombined() || g.isTreeGrammar() ) {
factory = new ParserFactory(this);
}
else {
factory = new LexerFactory(this);
}
OutputModelWalker walker = new OutputModelWalker(g.tool, templates);
OutputModelObject outputModel = factory.buildOutputModel();
ST st = walker.walk(outputModel);
if (CodeGenerator.LAUNCH_ST_INSPECTOR) {
st.inspect();
//if ( templates.isDefined("headerFile") ) headerFileST.inspect();
}
return st;
}
/** Generate a token vocab file with all the token names/types. For example:
 * ID=7
 * FOR=8
 * 'for'=8
 *
 * This is independent of the target language; used by antlr internally
 */
ST getTokenVocabOutput() {
ST vocabFileST = new ST(vocabFilePattern);
Map<String,Integer> tokens = new HashMap<String,Integer>();
// make constants for the token names
for (String t : g.tokenNameToTypeMap.keySet()) {
int tokenType = g.tokenNameToTypeMap.get(t);
if ( tokenType>=Token.MIN_TOKEN_TYPE ) {
tokens.put(t, Utils.integer(tokenType));
}
}
vocabFileST.add("tokens", tokens);
// now dump the strings
Map<String,Integer> literals = new HashMap<String,Integer>();
for (String literal : g.stringLiteralToTypeMap.keySet()) {
int tokenType = g.stringLiteralToTypeMap.get(literal);
if ( tokenType>=Token.MIN_TOKEN_TYPE ) {
literals.put(literal, Utils.integer(tokenType));
}
}
vocabFileST.add("literals", literals);
return vocabFileST;
}
/** Write the recognizer, optional header file, and the .tokens vocab file;
 *  I/O failures are reported through the tool's error manager.
 */
public void write(ST outputFileST) {
// WRITE FILES
try {
target.genRecognizerFile(this,g,outputFileST);
if ( templates.isDefined("headerFile") ) {
ST extST = templates.getInstanceOf("headerFileExtension");
// NOTE(review): headerFileST is always null here — confirm
// genRecognizerHeaderFile builds its own template
ST headerFileST = null;
target.genRecognizerHeaderFile(this,g,headerFileST,extST.render(lineWidth));
}
// write out the vocab interchange file; used by antlr,
// does not change per target
ST tokenVocabSerialization = getTokenVocabOutput();
String vocabFileName = getVocabFileName();
if ( vocabFileName!=null ) {
write(tokenVocabSerialization, vocabFileName);
}
}
catch (IOException ioe) {
g.tool.errMgr.toolError(ErrorType.CANNOT_WRITE_FILE,
getVocabFileName(),
ioe);
}
}
/** Render code into fileName via an auto-indenting writer.
 *  NOTE(review): the render-time println looks like debug output.
 */
public void write(ST code, String fileName) throws IOException {
long start = System.currentTimeMillis();
Writer w = g.tool.getOutputFile(g, fileName);
STWriter wr = new AutoIndentWriter(w);
wr.setLineWidth(lineWidth);
code.write(wr);
w.close();
long stop = System.currentTimeMillis();
System.out.println("render time for "+fileName+": "+(int)(stop-start)+"ms");
}
/** Generate TParser.java and TLexer.java from T.g if combined, else
 * just use T.java as output regardless of type.
 */
public String getRecognizerFileName() {
ST extST = templates.getInstanceOf("codeFileExtension");
String recognizerName = g.getRecognizerName();
return recognizerName+extST.render();
}
/** What is the name of the vocab file generated for this grammar?
 * Returns null if no .tokens file should be generated.
 */
public String getVocabFileName() {
// if ( g.isBuiltFromString() ) {
// return null;
// }
return g.name+VOCAB_FILE_EXTENSION;
}
}

View File

@ -0,0 +1,167 @@
package org.antlr.v4.codegen;
import org.antlr.v4.analysis.AnalysisPipeline;
import org.antlr.v4.codegen.model.*;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.tool.*;
import java.util.*;
/** Create output objects within rule functions: code blocks, matches,
 *  choices, and the bit sets they test. Subclasses specialize for
 *  parser vs lexer generation.
 */
public abstract class OutputModelFactory {
public Grammar g;
public CodeGenerator gen;
// Context ptrs
public OutputModelObject file; // root
public Stack<RuleFunction> currentRule = new Stack<RuleFunction>(); // enclosing rule(s)
public Alternative currentAlt; // alternative being generated
protected OutputModelFactory(CodeGenerator gen) {
this.gen = gen;
this.g = gen.g;
}
public abstract OutputModelObject buildOutputModel();
public CodeBlock epsilon() { return new CodeBlock(this); }
public CodeBlock alternative(List<SrcOp> elems) { return new CodeBlock(this, elems); }
public SrcOp action(GrammarAST ast) { return new Action(this, ast); }
public SrcOp forcedAction(GrammarAST ast) { return new ForcedAction(this, ast); }
public SrcOp sempred(GrammarAST ast) { return new SemPred(this, ast); }
public abstract List<SrcOp> ruleRef(GrammarAST ID, GrammarAST label, GrammarAST args);
public abstract List<SrcOp> tokenRef(GrammarAST ID, GrammarAST label, GrammarAST args);
public abstract List<SrcOp> stringRef(GrammarAST ID, GrammarAST label);
/** Pick an LL(1) switch-style block if the decision's lookahead sets are
 *  disjoint, else a general adaptive (LL(*)) block.
 */
public Choice getChoiceBlock(BlockAST blkAST, List<CodeBlock> alts) {
int decision = ((DecisionState)blkAST.atnState).decision;
if ( AnalysisPipeline.disjoint(g.decisionLOOK.get(decision)) ) {
return getLL1ChoiceBlock(blkAST, alts);
}
else {
return getLLStarChoiceBlock(blkAST, alts);
}
}
/** Same LL(1)-vs-LL(*) selection for EBNF subblocks; the decision number
 *  lives on a different ATN state depending on the subblock operator.
 */
public Choice getEBNFBlock(GrammarAST ebnfRoot, List<CodeBlock> alts) {
int decision;
if ( ebnfRoot.getType()==ANTLRParser.POSITIVE_CLOSURE ) {
decision = ((PlusBlockStartState)ebnfRoot.atnState).loopBackState.decision;
}
else if ( ebnfRoot.getType()==ANTLRParser.CLOSURE ) {
decision = ((BlockStartState)ebnfRoot.atnState).decision;
}
else {
decision = ((DecisionState)ebnfRoot.atnState).decision;
}
if ( AnalysisPipeline.disjoint(g.decisionLOOK.get(decision)) ) {
return getLL1EBNFBlock(ebnfRoot, alts);
}
else {
return getLLStarEBNFBlock(ebnfRoot, alts);
}
}
public Choice getLL1ChoiceBlock(BlockAST blkAST, List<CodeBlock> alts) {
return new LL1AltBlock(this, blkAST, alts);
}
public Choice getLLStarChoiceBlock(BlockAST blkAST, List<CodeBlock> alts) {
return new AltBlock(this, blkAST, alts);
}
/** Map ?, *, + subblocks to their LL(1) model classes; single-alt cases
 *  get the simpler model.
 *  NOTE(review): returns null for any other ebnf type — confirm callers
 *  only pass optional/closure/positive-closure roots.
 */
public Choice getLL1EBNFBlock(GrammarAST ebnfRoot, List<CodeBlock> alts) {
int ebnf = 0;
if ( ebnfRoot!=null ) ebnf = ebnfRoot.getType();
Choice c = null;
switch ( ebnf ) {
case ANTLRParser.OPTIONAL :
if ( alts.size()==1 ) c = new LL1OptionalBlockSingleAlt(this, ebnfRoot, alts);
else c = new LL1OptionalBlock(this, ebnfRoot, alts);
break;
case ANTLRParser.CLOSURE :
if ( alts.size()==1 ) c = new LL1StarBlockSingleAlt(this, ebnfRoot, alts);
else c = new LL1StarBlock(this, ebnfRoot, alts);
break;
case ANTLRParser.POSITIVE_CLOSURE :
if ( alts.size()==1 ) c = new LL1PlusBlockSingleAlt(this, ebnfRoot, alts);
else c = new LL1PlusBlock(this, ebnfRoot, alts);
break;
}
return c;
}
/** Map ?, *, + subblocks to their adaptive (LL(*)) model classes.
 *  NOTE(review): same null-on-unknown-type caveat as getLL1EBNFBlock.
 */
public Choice getLLStarEBNFBlock(GrammarAST ebnfRoot, List<CodeBlock> alts) {
int ebnf = 0;
if ( ebnfRoot!=null ) ebnf = ebnfRoot.getType();
Choice c = null;
switch ( ebnf ) {
case ANTLRParser.OPTIONAL :
c = new OptionalBlock(this, ebnfRoot, alts);
break;
case ANTLRParser.CLOSURE :
c = new StarBlock(this, ebnfRoot, alts);
break;
case ANTLRParser.POSITIVE_CLOSURE :
c = new PlusBlock(this, ebnfRoot, alts);
break;
}
return c;
}
public abstract void defineBitSet(BitSetDecl b);
/** Build the inline lookahead test for an LL(1) decision. */
public SrcOp getLL1Test(IntervalSet look, GrammarAST blkAST) {
return new TestSetInline(this, blkAST, look);
// OutputModelObject expr;
// if ( look.size() < gen.target.getInlineTestsVsBitsetThreshold() ) {
// expr = new TestSetInline(this, blkAST, look);
// }
// else {
// expr = new TestSet(this, blkAST, look);
// }
// return expr;
}
// public DFADecl defineDFA(GrammarAST ast, DFA dfa) {
// return null;
//// DFADef d = new DFADef(name, dfa);
//// outputModel.dfaDefs.add(d);
// }
//
/** Declare a FOLLOW_<element>_in_<rule>_<tokenIndex> bit set for error recovery. */
public BitSetDecl createFollowBitSet(GrammarAST ast, IntervalSet set) {
String inRuleName = ast.atnState.rule.name;
String elementName = ast.getText(); // assume rule ref
if ( ast.getType() == ANTLRParser.STRING_LITERAL ) {
elementName = gen.target.getTokenTypeAsTargetLabel(g, g.stringLiteralToTypeMap.get(elementName));
}
String name = "FOLLOW_"+elementName+"_in_"+inRuleName+"_"+ast.token.getTokenIndex();
BitSetDecl b = new BitSetDecl(this, name, set);
return b;
}
/** Declare an EXPECTING_in_<rule>_<position>_<decision> bit set. */
public BitSetDecl createExpectingBitSet(GrammarAST ast, int decision, IntervalSet set, String position) {
String inRuleName = ast.atnState.rule.name;
String name = "EXPECTING_in_"+inRuleName+"_"+position+"_"+decision;
//System.out.println("!!!!!!!! create "+name);
BitSetDecl b = new BitSetDecl(this, name, set);
return b;
}
/** Declare a LOOK_in_<rule>_<tokenIndex> bit set for a lookahead test. */
public BitSetDecl createTestBitSet(GrammarAST ast, IntervalSet set) {
String inRuleName = ast.atnState.rule.name;
String name = "LOOK_in_"+inRuleName+"_"+ast.token.getTokenIndex();
BitSetDecl b = new BitSetDecl(this, name, set);
return b;
}
}

View File

@ -0,0 +1,58 @@
package org.antlr.v4.codegen;
import org.antlr.v4.codegen.model.*;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.*;
import java.util.List;
/** Builds the parser output model: the root ParserFile plus per-element
 *  ops (rule invocation, token match), wrapping += labeled elements with
 *  an AddToList op.
 */
public class ParserFactory extends OutputModelFactory {
    public ParserFactory(CodeGenerator gen) {
        super(gen);
    }

    /** Root of the parser output model is the generated parser file. */
    @Override
    public OutputModelObject buildOutputModel() {
        return new ParserFile(this, gen.getRecognizerFileName());
    }

    /** Create an InvokeRule op; for ids+=r style labels also emit an
     *  AddToList op that appends the result to the list label.
     */
    @Override
    public List<SrcOp> ruleRef(GrammarAST ID, GrammarAST label, GrammarAST args) {
        InvokeRule invokeOp = new InvokeRule(this, ID, label);
        AddToList listOp = null;
        if ( label!=null && label.parent.getType()==ANTLRParser.PLUS_ASSIGN ) {
            listOp = new AddToList(this, gen.target.getListLabel(label.getText()), invokeOp);
        }
        return Utils.list(invokeOp, listOp);
    }

    /** Create a MatchToken op; for ids+=ID style labels also emit an
     *  AddToList op that appends the matched token to the list label.
     */
    @Override
    public List<SrcOp> tokenRef(GrammarAST ID, GrammarAST label, GrammarAST args) {
        MatchToken matchOp = new MatchToken(this, (TerminalAST) ID, label);
        AddToList listOp = null;
        if ( label!=null && label.parent.getType()==ANTLRParser.PLUS_ASSIGN ) {
            listOp = new AddToList(this, gen.target.getListLabel(label.getText()), matchOp);
        }
        return Utils.list(matchOp, listOp);
    }

    /** String literals generate the same code as token references. */
    @Override
    public List<SrcOp> stringRef(GrammarAST ID, GrammarAST label) {
        return tokenRef(ID, label, null);
    }

    /** Bit sets are not registered anywhere yet; intentionally a no-op. */
    @Override
    public void defineBitSet(BitSetDecl b) {
        // ((ParserFile)file).defineBitSet(b);
    }
}

View File

@ -0,0 +1,12 @@
package org.antlr.v4.codegen;
import org.antlr.v4.codegen.model.Decl;
/** Declaration of a rule-context variable: {@code name} is the variable,
 *  {@code ctxName} its context struct type name.
 */
public class RuleContextDecl extends Decl {
    public String ctxName;

    public RuleContextDecl(OutputModelFactory factory, String name, String ctxName) {
        super(factory, name);
        this.ctxName = ctxName;
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,29 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.*;
import org.antlr.v4.codegen.model.actions.ActionChunk;
import org.antlr.v4.tool.*;
import java.util.List;
/** A grammar action element; the action text is translated into chunks. */
public class Action extends RuleElement {
    public List<ActionChunk> chunks;

    public Action(OutputModelFactory factory, GrammarAST ast) {
        super(factory,ast);
        // translate within the rule currently being built, if any
        RuleFunction enclosingRule = null;
        if ( factory.currentRule.size()>0 ) enclosingRule = factory.currentRule.peek();
        chunks = ActionTranslator.translateAction(factory, enclosingRule, ast.token, (ActionAST)ast);
    }
}

View File

@ -0,0 +1,15 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
/** Append the result of a labeled op to a list variable. */
public class AddToList extends SrcOp {
    public String listName;              // target list variable name
    public LabeledOp opWithResultToAdd;  // op whose result gets appended

    public AddToList(OutputModelFactory factory, String listName, LabeledOp opWithResultToAdd) {
        super(factory);
        this.listName = listName;
        this.opWithResultToAdd = opWithResultToAdd;
    }
}

View File

@ -0,0 +1,20 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.runtime.atn.BlockStartState;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** A block of alternatives; throws no-viable-alt when no alt matches. */
public class AltBlock extends Choice {
    public ThrowNoViableAlt error;

    public AltBlock(OutputModelFactory factory,
                    GrammarAST blkOrEbnfRootAST,
                    List<CodeBlock> alts)
    {
        super(factory, blkOrEbnfRootAST, alts);
        decision = ((BlockStartState)blkOrEbnfRootAST.atnState).decision;
        // no expecting set is computed here; the error op carries null
        this.error = new ThrowNoViableAlt(factory, blkOrEbnfRootAST, null);
    }
}

View File

@ -0,0 +1,6 @@
package org.antlr.v4.codegen.model;
/** Capture the next token into the named variable. */
public class CaptureNextToken extends SrcOp {
    public String varName;

    public CaptureNextToken(String varName) {
        this.varName = varName;
    }
}

View File

@ -0,0 +1,7 @@
package org.antlr.v4.codegen.model;
/** Capture the next token's type into the named variable. */
public class CaptureNextTokenType extends SrcOp {
    public String varName;

    public CaptureNextTokenType(String varName) {
        this.varName = varName;
    }
}

View File

@ -0,0 +1,58 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
import java.util.*;
/** Base class for constructs that choose among alternatives.
 *
 *  Design note (from the original author): the hierarchy under SrcOp is deep
 *  (e.g. LL1StarBlock is an LL1Loop is a Choice), which makes long super
 *  constructor chains unwieldy; construction logic is therefore in-lined in
 *  subclasses, with the hierarchy used mainly to share data fields.
 */
public abstract class Choice extends RuleElement {
    public int decision = -1;
    public List<CodeBlock> alts;
    public List<SrcOp> preamble;

    public Choice(OutputModelFactory factory,
                  GrammarAST blkOrEbnfRootAST,
                  List<CodeBlock> alts)
    {
        super(factory, blkOrEbnfRootAST);
        this.alts = alts;
    }

    /** Lazily create the preamble list and append {@code op}. */
    public void addPreambleOp(SrcOp op) {
        if ( preamble==null ) preamble = new ArrayList<SrcOp>();
        preamble.add(op);
    }

    /** Convert per-alt lookahead sets (indices 1..n) into arrays of
     *  target-language token labels.
     */
    public List<String[]> getAltLookaheadAsStringLists(IntervalSet[] altLookSets) {
        List<String[]> altLook = new ArrayList<String[]>();
        for (int alt=1; alt<altLookSets.length; alt++) {
            IntervalSet lookSet = altLookSets[alt];
            altLook.add(factory.gen.target.getTokenTypesAsTargetLabels(factory.g, lookSet.toArray()));
        }
        return altLook;
    }

    /** Build the LL(1) test for {@code look}; when the test is inlined,
     *  declare a token-type temp var in the current rule and capture the
     *  next token type in this choice's preamble.
     */
    public SrcOp addCodeForLookaheadTempVar(IntervalSet look) {
        SrcOp expr = factory.getLL1Test(look, ast);
        if ( expr instanceof TestSetInline) {
            TestSetInline inline = (TestSetInline)expr;
            factory.currentRule.peek().addDecl(new TokenTypeDecl(factory, inline.varName));
            addPreambleOp(new CaptureNextTokenType(inline.varName));
        }
        return expr;
    }
}

View File

@ -0,0 +1,27 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import java.util.*;
/** A sequence of SrcOps forming one generated code block. */
public class CodeBlock extends SrcOp {
public List<SrcOp> ops;
// NOTE(review): assigns the factory field directly instead of calling
// super(factory) like the other constructors; confirm OutputModelObject's
// factory constructor has no additional effects before unifying.
public CodeBlock(OutputModelFactory factory) { this.factory = factory; }
public CodeBlock(OutputModelFactory factory, List<SrcOp> ops) {
super(factory);
this.ops = ops;
}
/** Convenience: a block holding a single op. */
public CodeBlock(OutputModelFactory factory, final SrcOp elem) {
this(factory, new ArrayList<SrcOp>() {{ add(elem); }});
}
// @Override
// public List<String> getChildren() {
// final List<String> sup = super.getChildren();
// return new ArrayList<String>() {{ if ( sup!=null ) addAll(sup); add("ops"); }};
// }
}

View File

@ -0,0 +1,29 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
/** A variable declaration in generated code; identity is the variable name. */
public class Decl extends SrcOp {
    public String name;
    public String decl; // whole declaration text if copied from an action

    public Decl(OutputModelFactory factory, String name, String decl) {
        this(factory, name);
        this.decl = decl;
    }

    public Decl(OutputModelFactory factory, String name) {
        super(factory);
        this.name = name;
    }

    @Override
    public int hashCode() {
        return name.hashCode();
    }

    /** Decls are equal iff they have the same name.
     *  BUG FIX: previously threw ClassCastException for non-Decl arguments and
     *  NullPointerException for null, violating the equals contract — unsafe
     *  when Decls are stored in hashed collections.
     */
    @Override
    public boolean equals(Object obj) {
        if ( this==obj ) return true;
        if ( !(obj instanceof Decl) ) return false; // also handles null
        return name.equals(((Decl)obj).name);
    }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;
/** Marker subtype of Action; the name suggests the generator always emits
 *  these actions — confirm against template usage.
 */
public class ForcedAction extends Action {
    public ForcedAction(OutputModelFactory factory, GrammarAST ast) {
        super(factory, ast);
    }
}

View File

@ -0,0 +1,25 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** An LL(1) alternative block: (A | B | C). */
public class LL1AltBlock extends LL1Choice {
    public LL1AltBlock(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlock> alts) {
        super(factory, blkAST, alts);
        this.decision = ((DecisionState)blkAST.atnState).decision;

        // lookahead for each alt 1..n
        IntervalSet[] altLookSets = factory.g.decisionLOOK.get(decision);
        altLook = getAltLookaheadAsStringLists(altLookSets);

        IntervalSet expecting = IntervalSet.or(altLookSets); // combine alt sets
        this.error = new ThrowNoViableAlt(factory, blkAST, expecting);
        // BUG FIX: removed leftover debug System.out.println of the expecting set
    }
}

View File

@ -0,0 +1,36 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
import java.util.*;
/** Base for LL(1) loop constructs; adds per-iteration ops and a sync point. */
public abstract class LL1Loop extends Choice {
    public OutputModelObject loopExpr;
    public List<SrcOp> iteration;
    public Sync sync;

    public LL1Loop(OutputModelFactory factory,
                   GrammarAST blkAST,
                   List<CodeBlock> alts)
    {
        super(factory, blkAST, alts);
    }

    /** Lazily create the iteration op list and append {@code op}. */
    public void addIterationOp(SrcOp op) {
        if ( iteration==null ) iteration = new ArrayList<SrcOp>();
        iteration.add(op);
    }

    /** Like addCodeForLookaheadTempVar, but also re-captures the next token
     *  type at the bottom of each loop iteration.
     */
    public SrcOp addCodeForLoopLookaheadTempVar(IntervalSet look) {
        SrcOp expr = addCodeForLookaheadTempVar(look);
        if ( expr instanceof TestSetInline ) {
            String lookaheadVar = ((TestSetInline)expr).varName;
            addIterationOp(new CaptureNextTokenType(lookaheadVar));
        }
        return expr;
    }
}

View File

@ -0,0 +1,36 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** An LL(1) optional block with a single alternative: (A B C)? */
public class LL1OptionalBlockSingleAlt extends LL1Choice {
    public OutputModelObject expr;       // test for entering the optional alt
    public OutputModelObject followExpr; // test for what follows the block

    public LL1OptionalBlockSingleAlt(OutputModelFactory factory,
                                     GrammarAST blkAST,
                                     List<CodeBlock> alts)
    {
        super(factory, blkAST, alts);
        this.decision = ((DecisionState)blkAST.atnState).decision;

        // altLookSets[1] = enter the alt, altLookSets[2] = skip it (follow)
        IntervalSet[] altLookSets = factory.g.decisionLOOK.get(decision);
        altLook = getAltLookaheadAsStringLists(altLookSets);
        IntervalSet look = altLookSets[1];
        IntervalSet followLook = altLookSets[2];

        IntervalSet expecting = (IntervalSet)look.or(followLook);
        this.error = new ThrowNoViableAlt(factory, blkAST, expecting);
        // BUG FIX: removed leftover debug System.out.println of the expecting set

        expr = addCodeForLookaheadTempVar(look);
        followExpr = factory.getLL1Test(followLook, blkAST);
    }
}

View File

@ -0,0 +1,37 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.StarBlockStartState;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** An LL(1) (...)* loop with multiple alternatives. */
public class LL1StarBlock extends LL1Loop {
    /** Token names for each alt 0..n-1 */
    public List<String[]> altLook;
    public String loopLabel;
    public String[] exitLook;

    public LL1StarBlock(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlock> alts) {
        super(factory, blkAST, alts);
        StarBlockStartState starState = (StarBlockStartState)blkAST.atnState;
        this.decision = starState.decision;

        // lookahead for each alt 1..n; the final set is the loop-exit branch
        IntervalSet[] altLookSets = factory.g.decisionLOOK.get(decision);
        IntervalSet exitSet = altLookSets[altLookSets.length-1];
        IntervalSet[] altsOnly = new IntervalSet[altLookSets.length-1];
        System.arraycopy(altLookSets, 0, altsOnly, 0, altLookSets.length-1); // drop exit alt
        altLook = getAltLookaheadAsStringLists(altsOnly);

        loopLabel = factory.gen.target.getLoopLabel(blkAST);
        this.exitLook =
            factory.gen.target.getTokenTypesAsTargetLabels(factory.g, exitSet.toArray());
    }
}

View File

@ -0,0 +1,27 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.StarBlockStartState;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** An LL(1) (...)* loop with exactly one alternative. */
public class LL1StarBlockSingleAlt extends LL1Loop {
    public LL1StarBlockSingleAlt(OutputModelFactory factory, GrammarAST starRoot, List<CodeBlock> alts) {
        super(factory, starRoot, alts);
        this.decision = ((StarBlockStartState)starRoot.atnState).decision;

        // altLookSets[1] = stay in the loop, altLookSets[2] = exit it
        IntervalSet[] altLookSets = factory.g.decisionLOOK.get(decision);
        IntervalSet enterLook = altLookSets[1];
        IntervalSet exitLook = altLookSets[2];
        loopExpr = addCodeForLoopLookaheadTempVar(enterLook);

        IntervalSet enterExpecting = (IntervalSet)exitLook.or(enterLook);
        this.sync = new Sync(factory, starRoot, enterExpecting, decision, "iter");
    }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model;
import java.util.List;
/** An op that defines one or more labels whose values other ops may reference. */
public interface LabeledOp {
    List<String> getLabels();
}

View File

@ -0,0 +1,46 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.*;
import java.util.*;
/** Output model for a generated lexer. */
public class Lexer extends OutputModelObject {
    public String name;
    public Map<String,Integer> tokens;
    public LexerFile file;
    public String[] tokenNames;
    public Set<String> ruleNames;
    public Collection<String> modes;
    public LinkedHashMap<Integer, Action> actions;
    public LinkedHashMap<Integer, Action> sempreds;
    public SerializedATN atn;

    public Lexer(OutputModelFactory factory, LexerFile file) {
        this.factory = factory;
        this.file = file; // who contains us?
        name = factory.g.getRecognizerName();

        // keep only real (positive) token types
        tokens = new LinkedHashMap<String,Integer>();
        LexerGrammar lg = (LexerGrammar)factory.g;
        atn = new SerializedATN(factory, lg.atn);
        modes = lg.modes.keySet();
        for (String tokenName : factory.g.tokenNameToTypeMap.keySet()) {
            Integer ttype = factory.g.tokenNameToTypeMap.get(tokenName);
            if ( ttype>0 ) tokens.put(tokenName, ttype);
        }
        tokenNames = factory.g.getTokenDisplayNames();
        ruleNames = factory.g.rules.keySet();

        // map predicate/action index -> model object
        sempreds = new LinkedHashMap<Integer, Action>();
        for (PredAST p : factory.g.sempreds.keySet()) {
            sempreds.put(factory.g.sempreds.get(p), new Action(factory, p));
        }
        actions = new LinkedHashMap<Integer, Action>();
        for (ActionAST a : factory.g.actions.keySet()) {
            actions.put(factory.g.actions.get(a), new Action(factory, a));
        }
    }
}

View File

@ -0,0 +1,46 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.*;
import java.util.*;
/** Match a single token type; declares label variables as needed. */
public class MatchToken extends RuleElement implements LabeledOp {
    public String name;
    public BitSetDecl follow;
    public List<String> labels = new ArrayList<String>();

    public MatchToken(OutputModelFactory factory, TerminalAST ast, GrammarAST labelAST) {
        super(factory, ast);
        int ttype = factory.g.getTokenType(ast.getText());
        name = factory.gen.target.getTokenTypeAsTargetLabel(factory.g, ttype);

        if ( labelAST!=null ) {
            String label = labelAST.getText();
            labels.add(label);
            factory.currentRule.peek().addDecl(new TokenDecl(factory, label));
            // x+=T also needs the list variable itself declared
            if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
                String listName = factory.gen.target.getListLabel(label);
                factory.currentRule.peek().addDecl(new TokenListDecl(factory, listName));
            }
        }

        // if an action refers to the token by name rather than label,
        // define an implicit label for it
        if ( factory.currentAlt.tokenRefsInActions.containsKey(ast.getText()) ) {
            String implicitLabel = factory.gen.target.getImplicitTokenLabel(ast.getText());
            labels.add(implicitLabel);
            factory.currentRule.peek().addDecl(new TokenDecl(factory, implicitLabel));
        }
    }

    public List<String> getLabels() { return labels; }
}

View File

@ -0,0 +1,16 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** An optional subrule block: (...)? */
public class OptionalBlock extends AltBlock {
    public OptionalBlock(OutputModelFactory factory,
                         GrammarAST questionAST,
                         List<CodeBlock> alts)
    {
        super(factory, questionAST, alts);
    }
}

View File

@ -0,0 +1,63 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.*;
import java.util.*;
/** Output model for a generated parser. */
public class Parser extends OutputModelObject {
    public String name;
    public Map<String,Integer> tokens;
    public String[] tokenNames;
    public Set<String> ruleNames;
    public List<RuleFunction> funcs = new ArrayList<RuleFunction>();
    public List<DynamicScopeStruct> scopes = new ArrayList<DynamicScopeStruct>();
    public ParserFile file;
    public SerializedATN atn;
    public LinkedHashMap<Integer, ForcedAction> actions;
    public LinkedHashMap<Integer, Action> sempreds;

    public Parser(OutputModelFactory factory, ParserFile file) {
        this.factory = factory;
        this.file = file; // who contains us?
        name = factory.g.getRecognizerName();

        // keep only real (positive) token types
        tokens = new LinkedHashMap<String,Integer>();
        for (String tokenName : factory.g.tokenNameToTypeMap.keySet()) {
            Integer ttype = factory.g.tokenNameToTypeMap.get(tokenName);
            if ( ttype>0 ) tokens.put(tokenName, ttype);
        }
        tokenNames = factory.g.getTokenDisplayNames();
        ruleNames = factory.g.rules.keySet();

        // one struct per global dynamic scope, one function per rule
        for (AttributeDict d : factory.g.scopes.values()) {
            scopes.add( new DynamicScopeStruct(factory, d.name, d.attributes.values()) );
        }
        for (Rule r : factory.g.rules.values()) {
            funcs.add( new RuleFunction(factory, r) );
        }

        atn = new SerializedATN(factory, factory.g.atn);

        // map predicate/action index -> model object
        sempreds = new LinkedHashMap<Integer, Action>();
        for (PredAST p : factory.g.sempreds.keySet()) {
            sempreds.put(factory.g.sempreds.get(p), new Action(factory, p));
        }
        actions = new LinkedHashMap<Integer, ForcedAction>();
        for (ActionAST a : factory.g.actions.keySet()) {
            actions.put(factory.g.actions.get(a), new ForcedAction(factory, a));
        }
    }
}

View File

@ -0,0 +1,35 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;
import java.util.*;
/** Root model object for a generated parser file. */
public class ParserFile extends OutputModelObject {
    public String fileName;
    public Parser parser;
    public String TokenLabelType;
    public String ASTLabelType;
    public Map<String, Action> namedActions;

    public ParserFile(OutputModelFactory factory, String fileName) {
        super(factory);
        this.fileName = fileName;
        factory.file = this; // register before building children
        TokenLabelType = factory.gen.g.getOption("TokenLabelType");
        ASTLabelType = factory.gen.g.getOption("ASTLabelType");

        namedActions = new HashMap<String, Action>();
        for (String actionName : factory.gen.g.namedActions.keySet()) {
            GrammarAST actionAST = factory.gen.g.namedActions.get(actionName);
            namedActions.put(actionName, new Action(factory, actionAST));
        }

        parser = new Parser(factory, this);
    }
}

View File

@ -0,0 +1,14 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;
/** Superclass of rule elements (action, token ref, rule ref, ...). */
public class RuleElement extends SrcOp {
    /** ATN state number associated with this element, when one exists. */
    public int stateNumber;

    public RuleElement(OutputModelFactory factory, GrammarAST ast) {
        super(factory, ast);
        if ( ast.atnState!=null ) stateNumber = ast.atnState.stateNumber;
    }
}

View File

@ -0,0 +1,118 @@
package org.antlr.v4.codegen.model;
import org.antlr.runtime.tree.CommonTreeNodeStream;
import org.antlr.v4.codegen.*;
import org.antlr.v4.misc.*;
import org.antlr.v4.parse.*;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.tool.*;
import java.util.*;
/** Output model for one generated rule function (one per grammar rule). */
public class RuleFunction extends OutputModelObject {
    public String name;
    public List<String> modifiers;
    public String ctxType;
    public List<String> globalScopesUsed;
    public Collection<String> ruleLabels;
    public Collection<String> tokenLabels;
    public List<String> elementsReferencedInRewrite;
    public List<String> exceptions;
    public Action finallyAction;
    public Map<String, Action> namedActions;
    public ATNState startState;
    public StructDecl context;
    public DynamicScopeStruct scope;
    public int index;
    public Collection<Attribute> args = null;
    public OrderedHashSet<Decl> decls;
    public SrcOp code;

    public RuleFunction(OutputModelFactory factory) {
        super(factory);
    }

    public RuleFunction(OutputModelFactory factory, Rule r) {
        super(factory);
        this.name = r.name;
        // BUG FIX: modifiers were first built with a hand-written loop and then
        // immediately overwritten by Utils.nodesToStrings(r.modifiers); the dead
        // loop is removed and only the effective assignment kept.
        modifiers = Utils.nodesToStrings(r.modifiers);
        ctxType = factory.gen.target.getRuleFunctionContextStructName(r);
        index = r.index;

        // args and return values share one context struct; only args become
        // constructor attributes
        List<Attribute> argsAndReturnValues = new ArrayList<Attribute>();
        List<Attribute> ctorAttrs = new ArrayList<Attribute>();
        if ( r.args!=null ) {
            argsAndReturnValues.addAll(r.args.attributes.values());
            args = r.args.attributes.values();
            ctorAttrs.addAll(args);
        }
        if ( r.retvals!=null ) {
            argsAndReturnValues.addAll(r.retvals.attributes.values());
        }
        if ( r.scope!=null ) {
            scope = new DynamicScopeStruct(factory, factory.gen.target.getRuleDynamicScopeStructName(r.name),
                                           r.scope.attributes.values());
        }
        globalScopesUsed = Utils.apply(r.useScopes, "getText");
        if ( argsAndReturnValues.size()>0 ) {
            context = new StructDecl(factory, factory.gen.target.getRuleFunctionContextStructName(r),
                                     argsAndReturnValues);
            context.ctorAttrs = ctorAttrs;
        }
        ruleLabels = r.getLabelNames();
        tokenLabels = r.getTokenRefs();
        exceptions = Utils.nodesToStrings(r.exceptionActions);
        if ( r.finallyAction!=null ) finallyAction = new Action(factory, r.finallyAction);

        namedActions = new HashMap<String, Action>();
        for (String actionName : r.namedActions.keySet()) {
            GrammarAST ast = r.namedActions.get(actionName);
            namedActions.put(actionName, new Action(factory, ast));
        }

        startState = factory.g.atn.ruleToStartState.get(r);

        // walk this rule's block to generate the instruction objects
        factory.currentRule.push(this);
        GrammarASTAdaptor adaptor = new GrammarASTAdaptor(r.ast.token.getInputStream());
        GrammarAST blk = (GrammarAST)r.ast.getFirstChildWithType(ANTLRParser.BLOCK);
        CommonTreeNodeStream nodes = new CommonTreeNodeStream(adaptor,blk);
        SourceGenTriggers genTriggers = new SourceGenTriggers(nodes, factory);
        try {
            code = genTriggers.block(null,null); // GEN Instr OBJECTS
        }
        catch (Exception e){
            // best-effort: report and continue with code==null
            e.printStackTrace(System.err);
        }
        factory.currentRule.pop();
    }

    /** Lazily create the decl set and add {@code d}. */
    public void addDecl(Decl d) {
        if ( decls==null ) decls = new OrderedHashSet<Decl>();
        decls.add(d);
    }
}

View File

@ -0,0 +1,9 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;
/** A semantic predicate action. */
public class SemPred extends Action {
    public SemPred(OutputModelFactory factory, GrammarAST ast) {
        super(factory,ast);
    }
}

View File

@ -0,0 +1,17 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;
/** Base class for all generated-source operations. */
public abstract class SrcOp extends OutputModelObject {
    /** Used to create unique var names etc... */
    public int uniqueID;

    public SrcOp() {}

    public SrcOp(OutputModelFactory factory) { super(factory); }

    public SrcOp(OutputModelFactory factory, GrammarAST ast) {
        super(factory,ast);
        // the grammar token index is unique per element, so reuse it as the ID
        uniqueID = ast.token.getTokenIndex();
    }
}

View File

@ -0,0 +1,22 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.runtime.atn.BlockStartState;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
/** A (...)* loop (general, non-LL(1) form). */
public class StarBlock extends Loop {
    public String loopLabel;

    public StarBlock(OutputModelFactory factory,
                     GrammarAST blkOrEbnfRootAST,
                     List<CodeBlock> alts)
    {
        super(factory, blkOrEbnfRootAST, alts);
        loopLabel = factory.gen.target.getLoopLabel(blkOrEbnfRootAST);
        decision = ((BlockStartState)blkOrEbnfRootAST.atnState).decision;
        exitAlt = alts.size()+1; // exit branch is one past the last real alt
    }
}

View File

@ -0,0 +1,27 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.Attribute;
import java.util.*;
/** A struct declaration holding a collection of attribute declarations. */
public class StructDecl extends Decl {
    public List<Decl> attrs = new ArrayList<Decl>();
    public Collection<Attribute> ctorAttrs; // attrs also passed to the ctor

    public StructDecl(OutputModelFactory factory, String name, Collection<Attribute> attrList) {
        super(factory, name);
        for (Attribute attr : attrList) {
            attrs.add(new AttributeDecl(factory, attr.name, attr.decl));
        }
    }
}

View File

@ -0,0 +1,22 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
/** A resynchronization point for a decision; {@code expecting} names the
 *  bit set of token types used to sync.
 */
public class Sync extends SrcOp {
    public int decision;
    public BitSetDecl expecting;

    public Sync(OutputModelFactory factory,
                GrammarAST blkOrEbnfRootAST,
                IntervalSet expecting,
                int decision,
                String position)
    {
        super(factory, blkOrEbnfRootAST);
        this.decision = decision;
        // declare the bit set and register it with the factory
        this.expecting = factory.createExpectingBitSet(ast, decision, expecting, position);
        factory.defineBitSet(this.expecting);
    }
}

View File

@ -0,0 +1,15 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
/** Test the lookahead against a declared bit set. */
public class TestSet extends RuleElement {
    public BitSetDecl set;

    public TestSet(OutputModelFactory factory, GrammarAST blkAST, IntervalSet set) {
        super(factory, blkAST);
        // declare the bit set and register it with the factory
        this.set = factory.createTestBitSet(blkAST, set);
        factory.defineBitSet(this.set);
    }
}

View File

@ -0,0 +1,21 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
/** Inline set-membership test against the next token type (var {@code _la}). */
public class TestSetInline extends SrcOp {
    public String varName;
    public String[] ttypes;

    public TestSetInline(OutputModelFactory factory, GrammarAST ast, IntervalSet set) {
        super(factory, ast);
        this.ttypes = factory.gen.target.getTokenTypesAsTargetLabels(factory.g, set.toArray());
        this.varName = "_la";
    }
}

View File

@ -0,0 +1,12 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
/** Throw an early-exit exception when a (...)+ loop matches nothing. */
public class ThrowEarlyExitException extends ThrowRecognitionException {
    public ThrowEarlyExitException(OutputModelFactory factory, GrammarAST ast, IntervalSet expecting) {
        super(factory, ast, expecting);
    }
}

View File

@ -0,0 +1,14 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
/** Throw a no-viable-alternative exception for a failed decision. */
public class ThrowNoViableAlt extends ThrowRecognitionException {
    public ThrowNoViableAlt(OutputModelFactory factory, GrammarAST blkOrEbnfRootAST,
                            IntervalSet expecting)
    {
        super(factory, blkOrEbnfRootAST, expecting);
    }
}

View File

@ -0,0 +1,24 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.GrammarAST;
/** Throw a recognition exception at runtime; records grammar location info. */
public class ThrowRecognitionException extends SrcOp {
    public int decision;
    public String grammarFile;
    public int grammarLine;
    public int grammarCharPosInLine;
    public BitSetDecl expecting;

    public ThrowRecognitionException(OutputModelFactory factory, GrammarAST ast, IntervalSet expecting) {
        super(factory, ast);
        //this.decision = ((BlockStartState)ast.ATNState).decision;
        // BUG FIX: the char position was previously assigned into grammarLine,
        // clobbering the line number and leaving grammarCharPosInLine at 0.
        grammarLine = ast.getLine();
        grammarCharPosInLine = ast.getCharPositionInLine();
        grammarFile = factory.g.fileName;
        //this.expecting = factory.createExpectingBitSet(ast, decision, expecting, "error");
        // NOTE(review): this.expecting is never assigned (creation is commented
        // out), so defineBitSet receives null — confirm intended.
        factory.defineBitSet(this.expecting);
    }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
/** Declaration of a token-valued variable. */
public class TokenDecl extends Decl {
    public TokenDecl(OutputModelFactory factory, String varName) {
        super(factory, varName);
    }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
/** Declaration of a token-list variable (for += labels). */
public class TokenListDecl extends Decl {
    public TokenListDecl(OutputModelFactory factory, String varName) {
        super(factory, varName);
    }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;
/** Action chunk: literal text emitted verbatim. */
public class ActionText extends ActionChunk {
    public String text;

    public ActionText(String text) { this.text = text; }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;
/** Action chunk: reference to the attribute named {@code name}
 *  (presumably a rule argument, per the class name).
 */
public class ArgRef extends ActionChunk {
    public String name;

    public ArgRef(String name) { this.name = name; }
}

View File

@ -0,0 +1,12 @@
package org.antlr.v4.codegen.model.actions;
/** Action chunk: reference to attribute {@code attr} of dynamic scope {@code scope}. */
public class DynScopeAttrRef extends ActionChunk {
    public String scope;
    public String attr;

    public DynScopeAttrRef(String scope, String attr) {
        this.scope = scope;
        this.attr = attr;
    }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;
/** Action chunk: reference to the dynamic scope named {@code scope}. */
public class DynScopeRef extends ActionChunk {
    public String scope;

    public DynScopeRef(String scope) { this.scope = scope; }
}

View File

@ -0,0 +1,11 @@
package org.antlr.v4.codegen.model.actions;
/** Action chunk: reference to the return value named {@code name}. */
public class RetValueRef extends ActionChunk {
    public String name;

    public RetValueRef(String name) { this.name = name; }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;
/** Action chunk: reference to a property of the rule labeled {@code label}. */
public class RulePropertyRef extends ActionChunk {
    public String label;

    public RulePropertyRef(String label) { this.label = label; }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;
/** Rule property reference: {@code st}. */
public class RulePropertyRef_st extends RulePropertyRef {
    public RulePropertyRef_st(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;
/** Rule property reference: {@code start}. */
public class RulePropertyRef_start extends RulePropertyRef {
    public RulePropertyRef_start(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;
/** Rule property reference: {@code stop}. */
public class RulePropertyRef_stop extends RulePropertyRef {
    public RulePropertyRef_stop(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;
/** Rule property reference: {@code text}. */
public class RulePropertyRef_text extends RulePropertyRef {
    public RulePropertyRef_text(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;
/** Rule property reference: {@code tree}. */
public class RulePropertyRef_tree extends RulePropertyRef {
    public RulePropertyRef_tree(String label) { super(label); }
}

View File

@ -0,0 +1,23 @@
package org.antlr.v4.codegen.model.actions;
import java.util.List;
/** Action chunk: assign attribute {@code name} from the translated RHS chunks. */
public class SetAttr extends ActionChunk {
    public String name;
    public List<ActionChunk> rhsChunks;

    public SetAttr(String name, List<ActionChunk> rhsChunks) {
        this.name = name;
        this.rhsChunks = rhsChunks;
    }
}

View File

@ -0,0 +1,25 @@
package org.antlr.v4.codegen.model.actions;
import java.util.List;
/** Action chunk: assign attribute {@code attr} of dynamic scope {@code scope}
 *  from the translated RHS chunks.
 */
public class SetDynScopeAttr extends ActionChunk {
    public String scope;
    public String attr;
    public List<ActionChunk> rhsChunks;

    public SetDynScopeAttr(String scope, String attr, List<ActionChunk> rhsChunks) {
        this.scope = scope;
        this.attr = attr;
        this.rhsChunks = rhsChunks;
    }
}

View File

@ -0,0 +1,21 @@
package org.antlr.v4.codegen.model.actions;
import java.util.List;
/** Like SetDynScopeAttr but with an index expression selecting the scope frame. */
public class SetDynScopeAttr_index extends SetDynScopeAttr {
    public List<ActionChunk> indexChunks;

    public SetDynScopeAttr_index(String scope, String attr, List<ActionChunk> indexChunks, List<ActionChunk> rhsChunks) {
        super(scope, attr, rhsChunks);
        this.indexChunks = indexChunks;
    }
}

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;

/**
 * Base chunk for token-property references; the concrete subclasses
 * (see TokenPropertyRef_int/text/tree/type) each select one property.
 */
public class TokenPropertyRef extends ActionChunk {
	/** Label (or token name) the property is read from. */
	public String label;

	public TokenPropertyRef(String label) { this.label = label; }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/**
 * Chunk for a token-property reference selecting the {@code int} value
 * (by name, presumably {@code $t.int} — confirm against the code-gen templates).
 */
public class TokenPropertyRef_int extends TokenPropertyRef {
	public TokenPropertyRef_int(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/**
 * Chunk for a token-property reference selecting the {@code text} value
 * (by name, presumably {@code $t.text} — confirm against the code-gen templates).
 */
public class TokenPropertyRef_text extends TokenPropertyRef {
	public TokenPropertyRef_text(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/**
 * Chunk for a token-property reference selecting the {@code tree} value
 * (by name, presumably {@code $t.tree} — confirm against the code-gen templates).
 */
public class TokenPropertyRef_tree extends TokenPropertyRef {
	public TokenPropertyRef_tree(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/**
 * Chunk for a token-property reference selecting the {@code type} value
 * (by name, presumably {@code $t.type} — confirm against the code-gen templates).
 */
public class TokenPropertyRef_type extends TokenPropertyRef {
	public TokenPropertyRef_type(String label) { super(label); }
}

View File

@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.ast;
import org.antlr.v4.codegen.model.SrcOp;
/** AST-construction model op; by name it makes some result the new tree root
 *  (presumably the `^` operator) — TODO confirm against the code generator.
 */
public class MakeRoot extends SrcOp {
	// op whose result is the node/tree to incorporate; semantics of "add"
	// not visible here — see the template keyed by this class name
	public SrcOp opWithResultToAdd;
}

View File

@ -0,0 +1,5 @@
package org.antlr.v4.codegen.model;
/** Marker output-model object (empty). The lowercase class name breaks Java
 *  naming conventions but presumably must match a template name — confirm
 *  before renaming.
 */
public class dbg extends OutputModelObject {
}

View File

@ -118,7 +118,7 @@ public class TokenVocabParser {
*/
public File getImportedVocabFile() {
File f = new File(tool.getLibraryDirectory(),
File f = new File(tool.libDirectory,
File.separator +
vocabName +
CodeGenerator.VOCAB_FILE_EXTENSION);
@ -131,12 +131,7 @@ public class TokenVocabParser {
// files are generated (in the base, not relative to the input
// location.)
//
if (tool.haveOutputDir) {
f = new File(tool.getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
}
else {
f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
}
f = new File(tool.outputDirectory, vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
return f;
}
}

View File

@ -0,0 +1,52 @@
package org.antlr.v4.semantics;
import org.antlr.runtime.*;
import org.antlr.v4.parse.ActionSplitter;
import org.antlr.v4.tool.*;
import java.util.List;
/** Find token and rule refs inside an action; side-effect: update Alternatives
 *  (maps the referenced token/rule name to the action node that mentions it).
 */
public class ActionSniffer extends BlankActionSplitterListener {
	public Grammar g;
	public Rule r;          // null if action outside of rule
	public Alternative alt; // null if action outside of alt; could be in rule
	public ActionAST node;
	public Token actionToken; // token within action
	public ErrorManager errMgr;

	public ActionSniffer(Grammar g, Rule r, Alternative alt, ActionAST node, Token actionToken) {
		this.g = g;
		this.r = r;
		this.alt = alt;
		this.node = node;
		this.actionToken = actionToken;
		this.errMgr = g.tool.errMgr;
	}

	/** Re-lex the action text at its original line/column and run it through
	 *  ActionSplitter, which fires this listener's callbacks; also caches the
	 *  resulting chunk list on the action node.
	 */
	public void examineAction() {
		//System.out.println("examine "+actionToken);
		ANTLRStringStream in = new ANTLRStringStream(actionToken.getText());
		in.setLine(actionToken.getLine());
		in.setCharPositionInLine(actionToken.getCharPositionInLine());
		ActionSplitter splitter = new ActionSplitter(in, this);
		// forces eval, triggers listener methods
		node.chunks = splitter.getActionTokens();
		// removed leftover debug println of node.chunks
	}

	/** Record a $x reference when x names a token or rule used in this alt. */
	public void attr(String expr, Token x) {
		// BUG FIX: original dereferenced alt (alt.tokenRefs.get) BEFORE its
		// null check, so an action outside an alternative caused an NPE.
		if ( alt==null ) return;
		List<TerminalAST> xRefs = alt.tokenRefs.get(x.getText());
		if ( xRefs!=null ) {
			alt.tokenRefsInActions.map(x.getText(), node);
		}
		List<GrammarAST> rRefs = alt.ruleRefs.get(x.getText());
		if ( rRefs!=null ) {
			alt.ruleRefsInActions.map(x.getText(), node);
		}
	}

	/** $x.y is recorded exactly like $x; only the root name matters here. */
	public void qualifiedAttr(String expr, Token x, Token y) {
		attr(expr, x);
	}
}

View File

@ -0,0 +1,219 @@
package org.antlr.v4.semantics;
import org.antlr.runtime.*;
import org.antlr.v4.parse.*;
import org.antlr.v4.tool.*;
import java.util.List;
/** Trigger checks for various kinds of attribute expressions.
* no side-effects.
*/
public class AttributeChecks implements ActionSplitterListener {
	public Grammar g;
	public Rule r;          // null if action outside of rule
	public Alternative alt; // null if action outside of alt; could be in rule
	public ActionAST node;
	public Token actionToken; // token within action
	public ErrorManager errMgr;

	public AttributeChecks(Grammar g, Rule r, Alternative alt, ActionAST node, Token actionToken) {
		this.g = g;
		this.r = r;
		this.alt = alt;
		this.node = node;
		this.actionToken = actionToken;
		this.errMgr = g.tool.errMgr;
	}

	/** Walk every action in the grammar — grammar-level named actions, rule
	 *  named actions, per-alternative actions, exception actions and finally
	 *  actions — building a checker for each and examining it.
	 */
	public static void checkAllAttributeExpressions(Grammar g) {
		for (ActionAST act : g.namedActions.values()) {
			AttributeChecks checker = new AttributeChecks(g, null, null, act, act.token);
			checker.examineAction();
		}

		for (Rule r : g.rules.values()) {
			for (ActionAST a : r.namedActions.values()) {
				AttributeChecks checker = new AttributeChecks(g, r, null, a, a.token);
				checker.examineAction();
			}
			for (int i=1; i<=r.numberOfAlts; i++) { // alts are 1..n
				Alternative alt = r.alt[i];
				for (ActionAST a : alt.actions) {
					AttributeChecks checker =
						new AttributeChecks(g, r, alt, a, a.token);
					checker.examineAction();
				}
			}
			for (ActionAST a : r.exceptionActions) {
				AttributeChecks checker = new AttributeChecks(g, r, null, a, a.token);
				checker.examineAction();
			}
			if ( r.finallyAction!=null ) {
				AttributeChecks checker =
					new AttributeChecks(g, r, null, r.finallyAction, r.finallyAction.token);
				checker.examineAction();
			}
		}
	}

	/** Re-lex the action text at its original position; ActionSplitter fires
	 *  this listener's callbacks, which perform the checks.
	 */
	public void examineAction() {
		//System.out.println("examine "+actionToken);
		ANTLRStringStream in = new ANTLRStringStream(actionToken.getText());
		in.setLine(actionToken.getLine());
		in.setCharPositionInLine(actionToken.getCharPositionInLine());
		ActionSplitter splitter = new ActionSplitter(in, this);
		// forces eval, triggers listener methods
		node.chunks = splitter.getActionTokens();
		System.out.println(node.chunks); // NOTE(review): debug output left enabled — consider removing
	}

	// LISTENER METHODS

	// $x.y = rhs — check the reference, then recursively check the rhs text
	public void setQualifiedAttr(String expr, Token x, Token y, Token rhs) {
		qualifiedAttr(expr, x, y);
		new AttributeChecks(g, r, alt, node, rhs).examineAction();
	}

	// $x.y
	// Error cascade: unknown rule attribute / invalid parameter ref when x is
	// a rule ref; unknown simple attribute when x resolves to nothing; else
	// x is a known dict but y is not in it.
	public void qualifiedAttr(String expr, Token x, Token y) {
		if ( node.resolver.resolveToAttribute(x.getText(), y.getText(), node)==null ) {
			Rule rref = isolatedRuleRef(x.getText());
			if ( rref!=null ) {
				//if ( r.name.equals(x.getText()) ) return; // $a.x in rule a is ok
				if ( rref.args!=null && rref.args.get(y.getText())!=null ) {
					// referencing a rule's parameter from outside is illegal
					g.tool.errMgr.grammarError(ErrorType.INVALID_RULE_PARAMETER_REF,
											   g.fileName, y, y.getText(), expr);
				}
				else {
					errMgr.grammarError(ErrorType.UNKNOWN_RULE_ATTRIBUTE,
										g.fileName, y, y.getText(), rref.name, expr);
				}
			}
			else if ( !node.resolver.resolvesToAttributeDict(x.getText(), node) ) {
				errMgr.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE,
									g.fileName, x, x.getText(), expr);
			}
			else {
				errMgr.grammarError(ErrorType.UNKNOWN_ATTRIBUTE_IN_SCOPE,
									g.fileName, y, y.getText(), expr);
			}
		}
	}

	// $x = rhs — x must be a known attribute; rhs is checked recursively
	public void setAttr(String expr, Token x, Token rhs) {
		if ( node.resolver.resolveToAttribute(x.getText(), node)==null ) {
			errMgr.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE,
								g.fileName, x, x.getText(), expr);
		}
		new AttributeChecks(g, r, alt, node, rhs).examineAction();
	}

	// $x — legal if it is an attribute, a dynamic scope, a token ref/label,
	// or a list label; an isolated rule ref gets its own error
	public void attr(String expr, Token x) {
		if ( node.resolver.resolveToAttribute(x.getText(), node)==null ) {
			if ( node.resolver.resolveToDynamicScope(x.getText(), node)!=null ) {
				return; // $S for scope S is ok
			}
			if ( node.resolver.resolvesToToken(x.getText(), node) ) {
				return; // $ID for token ref or label of token
			}
			if ( node.resolver.resolvesToListLabel(x.getText(), node) ) {
				return; // $ids for ids+=ID etc...
			}
			if ( isolatedRuleRef(x.getText())!=null ) {
				errMgr.grammarError(ErrorType.ISOLATED_RULE_REF,
									g.fileName, x, x.getText(), expr);
				return;
			}
			errMgr.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE,
								g.fileName, x, x.getText(), expr);
		}
	}

	// $x::y = rhs
	public void setDynamicScopeAttr(String expr, Token x, Token y, Token rhs) {
		//System.out.println("SET "+x+" :: "+y);
		dynamicScopeAttr(expr, x, y);
		new AttributeChecks(g, r, alt, node, rhs).examineAction();
	}

	// $x::y — x must name a dynamic scope and y an attribute within it
	public void dynamicScopeAttr(String expr, Token x, Token y) {
		//System.out.println(x+" :: "+y);
		AttributeDict s = node.resolver.resolveToDynamicScope(x.getText(), node);
		if ( s==null ) {
			errMgr.grammarError(ErrorType.UNKNOWN_DYNAMIC_SCOPE,
								g.fileName, x, x.getText(), expr);
			return;
		}
		Attribute a = s.get(y.getText());
		if ( a==null ) {
			errMgr.grammarError(ErrorType.UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE,
								g.fileName, y, x.getText(), y.getText(), expr);
		}
	}

	// $x[-i]::y = rhs — also check the index expression text
	public void setDynamicNegativeIndexedScopeAttr(String expr, Token x, Token y,
												   Token index, Token rhs) {
		setDynamicScopeAttr(expr, x, y, rhs);
		new AttributeChecks(g, r, alt, node, index).examineAction();
	}

	// $x[-i]::y
	public void dynamicNegativeIndexedScopeAttr(String expr, Token x, Token y,
												Token index) {
		dynamicScopeAttr(expr, x, y);
		new AttributeChecks(g, r, alt, node, index).examineAction();
	}

	// $x[i]::y = rhs
	public void setDynamicAbsoluteIndexedScopeAttr(String expr, Token x, Token y,
												   Token index, Token rhs) {
		setDynamicScopeAttr(expr, x, y, rhs);
		new AttributeChecks(g, r, alt, node, index).examineAction();
	}

	// $x[i]::y
	public void dynamicAbsoluteIndexedScopeAttr(String expr, Token x, Token y,
												Token index) {
		dynamicScopeAttr(expr, x, y);
		new AttributeChecks(g, r, alt, node, index).examineAction();
	}

	public void unknownSyntax(Token t) {
		errMgr.grammarError(ErrorType.INVALID_TEMPLATE_ACTION,
							g.fileName, t, t.getText());
	}

	public void text(String text) { }

	// don't care
	public void templateInstance(String expr) {   }
	public void indirectTemplateInstance(String expr) {   }
	public void setExprAttribute(String expr) {   }
	public void setSTAttribute(String expr) {  }
	public void templateExpr(String expr) {  }

	// SUPPORT

	/** Resolve x to a Rule when it is a "naked" rule reference: the current
	 *  rule itself, a rule label, or a rule ref in the current alternative.
	 *  Returns null for grammar-level resolvers or when x is not a rule.
	 *  NOTE(review): assumes r is non-null whenever resolver is not a Grammar
	 *  — confirm callers guarantee this.
	 */
	public Rule isolatedRuleRef(String x) {
		if ( node.resolver instanceof Grammar ) return null;

		if ( x.equals(r.name) ) return r;
		List<LabelElementPair> labels = null;
		if ( node.resolver instanceof Rule ) {
			labels = r.getLabelDefs().get(x);
		}
		else if ( node.resolver instanceof Alternative ) {
			labels = ((Alternative)node.resolver).labelDefs.get(x);
		}
		if ( labels!=null ) {  // it's a label ref. is it a rule label?
			LabelElementPair anyLabelDef = labels.get(0);
			if ( anyLabelDef.type==LabelType.RULE_LABEL ) {
				return g.getRule(anyLabelDef.element.getText());
			}
		}
		if ( node.resolver instanceof Alternative ) {
			if ( ((Alternative)node.resolver).ruleRefs.get(x)!=null ) {
				return g.getRule(x);
			}
		}
		return null;
	}
}

View File

@ -0,0 +1,442 @@
package org.antlr.v4.semantics;
import org.antlr.runtime.Token;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.*;
import org.stringtemplate.v4.misc.MultiMap;
import java.io.File;
import java.util.*;
/** No side-effects; BasicSemanticTriggers.g invokes check rules for these:
*
* FILE_AND_GRAMMAR_NAME_DIFFER
* LEXER_RULES_NOT_ALLOWED
* PARSER_RULES_NOT_ALLOWED
* CANNOT_ALIAS_TOKENS
* ARGS_ON_TOKEN_REF
* ILLEGAL_OPTION
* REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION
* NO_RULES
* REWRITE_FOR_MULTI_ELEMENT_ALT
* HETERO_ILLEGAL_IN_REWRITE_ALT
* AST_OP_WITH_NON_AST_OUTPUT_OPTION
* AST_OP_IN_ALT_WITH_REWRITE
* CONFLICTING_OPTION_IN_TREE_FILTER
* WILDCARD_AS_ROOT
* INVALID_IMPORT
* TOKEN_VOCAB_IN_DELEGATE
* IMPORT_NAME_CLASH
* REPEATED_PREQUEL
* TOKEN_NAMES_MUST_START_UPPER
*
* TODO: 1 action per lex rule
*/
public class BasicSemanticChecks {
	// NOTE: option sets were raw HashSet with double-brace init (anonymous
	// class per constant); now typed Set<String> built from Arrays.asList.

	/** Options legal at the grammar level of a lexer grammar. */
	public static final Set<String> legalLexerOptions =
		new HashSet<String>(Arrays.asList(
			"language", "tokenVocab",
			"TokenLabelType",
			"superClass",
			"filter",
			"k",
			"backtrack",
			"memoize"));

	/** Options legal at the grammar level of a parser/combined grammar. */
	public static final Set<String> legalParserOptions =
		new HashSet<String>(Arrays.asList(
			"language", "tokenVocab",
			"output", "rewrite", "ASTLabelType",
			"TokenLabelType",
			"superClass",
			"k",
			"backtrack",
			"memoize"));

	/** Options legal at the grammar level of a tree grammar. */
	public static final Set<String> legalTreeParserOptions =
		new HashSet<String>(Arrays.asList(
			"language", "tokenVocab",
			"output", "rewrite", "ASTLabelType",
			"TokenLabelType",
			"superClass",
			"k",
			"backtrack",
			"memoize",
			"filter"));

	/** Options legal on a rule. */
	public static final Set<String> legalRuleOptions =
		new HashSet<String>(Arrays.asList(
			"k", "greedy", "memoize",
			"backtrack", "strategy"));

	/** Options legal on a subrule block. */
	public static final Set<String> legalBlockOptions =
		new HashSet<String>(Arrays.asList("k", "greedy", "backtrack", "memoize"));

	/** Legal options for terminal refs like ID&lt;node=MyVarNode&gt; */
	public static final Set<String> legalTokenOptions =
		new HashSet<String>(Arrays.asList(
			TerminalAST.defaultTokenOption,
			"associativity"));

	/** Set of valid imports.  E.g., can only import a tree parser into
	 *  another tree parser.  Maps delegate to set of delegator grammar types.
	 *  validDelegations.get(LEXER) gives list of the kinds of delegators
	 *  that can import lexers.
	 */
	public static MultiMap<Integer,Integer> validImportTypes =
		new MultiMap<Integer,Integer>() {
			{
				map(ANTLRParser.LEXER, ANTLRParser.LEXER);
				map(ANTLRParser.LEXER, ANTLRParser.COMBINED);

				map(ANTLRParser.PARSER, ANTLRParser.PARSER);
				map(ANTLRParser.PARSER, ANTLRParser.COMBINED);

				map(ANTLRParser.TREE, ANTLRParser.TREE);

				map(ANTLRParser.COMBINED, ANTLRParser.COMBINED);
			}
		};

	public Grammar g;
	public ErrorManager errMgr;

	public BasicSemanticChecks(Grammar g) {
		this.g = g;
		this.errMgr = g.tool.errMgr;
	}

	/** FILE_AND_GRAMMAR_NAME_DIFFER: grammar name must match its file name.
	 *  Only checked when implicitLexer is set (presumably combined grammars
	 *  only — confirm).
	 */
	void checkGrammarName(Token nameToken) {
		if ( g.implicitLexer==null ) return;
		String fullyQualifiedName = nameToken.getInputStream().getSourceName();
		File f = new File(fullyQualifiedName);
		String fileName = f.getName();
		if ( !Utils.stripFileExtension(fileName).equals(nameToken.getText()) ) {
			g.tool.errMgr.grammarError(ErrorType.FILE_AND_GRAMMAR_NAME_DIFFER,
									   fileName, nameToken, nameToken.getText(), fileName);
		}
	}

	/** NO_RULES: a grammar must define at least one rule. */
	void checkNumRules(GrammarAST rulesNode) {
		if ( rulesNode.getChildCount()==0 ) {
			GrammarAST root = (GrammarAST)rulesNode.getParent();
			GrammarAST IDNode = (GrammarAST)root.getChild(0);
			g.tool.errMgr.grammarError(ErrorType.NO_RULES, g.fileName,
									   null, IDNode.getText(), g);
		}
	}

	/** MODE_NOT_IN_LEXER: mode declarations only make sense in lexer grammars. */
	void checkMode(Token modeNameToken) {
		if ( !g.isLexer() ) {
			g.tool.errMgr.grammarError(ErrorType.MODE_NOT_IN_LEXER, g.fileName,
									   modeNameToken, modeNameToken.getText(), g);
		}
	}

	/** REPEATED_PREQUEL: at most one options{}, import, and tokens{} section each. */
	void checkNumPrequels(List<GrammarAST> options,
						  List<GrammarAST> imports,
						  List<GrammarAST> tokens)
	{
		List<Token> secondOptionTokens = new ArrayList<Token>();
		if ( options!=null && options.size()>1 ) {
			secondOptionTokens.add(options.get(1).token);
		}
		if ( imports!=null && imports.size()>1 ) {
			secondOptionTokens.add(imports.get(1).token);
		}
		if ( tokens!=null && tokens.size()>1 ) {
			secondOptionTokens.add(tokens.get(1).token);
		}
		for (Token t : secondOptionTokens) {
			String fileName = t.getInputStream().getSourceName();
			g.tool.errMgr.grammarError(ErrorType.REPEATED_PREQUEL,
									   fileName, t);
		}
	}

	/** PARSER_RULES_NOT_ALLOWED / LEXER_RULES_NOT_ALLOWED: rule case must
	 *  match the grammar kind (lowercase = parser rule, uppercase = token rule).
	 */
	void checkInvalidRuleDef(Token ruleID) {
		String fileName = null;
		if ( ruleID.getInputStream()!=null ) {
			fileName = ruleID.getInputStream().getSourceName();
		}
		if ( g.isLexer() && Character.isLowerCase(ruleID.getText().charAt(0)) ) {
			g.tool.errMgr.grammarError(ErrorType.PARSER_RULES_NOT_ALLOWED,
									   fileName, ruleID, ruleID.getText());
		}
		if ( (g.isParser()||g.isTreeGrammar()) &&
			 Character.isUpperCase(ruleID.getText().charAt(0)) )
		{
			g.tool.errMgr.grammarError(ErrorType.LEXER_RULES_NOT_ALLOWED,
									   fileName, ruleID, ruleID.getText());
		}
	}

	/** PARSER_RULES_NOT_ALLOWED: references to parser rules inside a lexer. */
	void checkInvalidRuleRef(Token ruleID) {
		String fileName = ruleID.getInputStream().getSourceName();
		if ( g.isLexer() && Character.isLowerCase(ruleID.getText().charAt(0)) ) {
			g.tool.errMgr.grammarError(ErrorType.PARSER_RULES_NOT_ALLOWED,
									   fileName, ruleID, ruleID.getText());
		}
	}

	/** TOKEN_NAMES_MUST_START_UPPER / CANNOT_ALIAS_TOKENS: tokens{} aliases
	 *  must be uppercase and are only legal in combined grammars.
	 */
	void checkTokenAlias(Token tokenID) {
		String fileName = tokenID.getInputStream().getSourceName();
		if ( Character.isLowerCase(tokenID.getText().charAt(0)) ) {
			g.tool.errMgr.grammarError(ErrorType.TOKEN_NAMES_MUST_START_UPPER,
									   fileName,
									   tokenID,
									   tokenID.getText());
		}
		if ( !g.isCombined() ) {
			g.tool.errMgr.grammarError(ErrorType.CANNOT_ALIAS_TOKENS,
									   fileName,
									   tokenID,
									   tokenID.getText());
		}
	}

	/** At this point, we can only rule out obvious problems like ID[3]
	 *  in parser.  Might be illegal too in later stage when we see ID
	 *  isn't a fragment.
	 */
	void checkTokenArgs(Token tokenID) {
		String fileName = tokenID.getInputStream().getSourceName();
		if ( !g.isLexer() ) {
			g.tool.errMgr.grammarError(ErrorType.ARGS_ON_TOKEN_REF,
									   fileName, tokenID, tokenID.getText());
		}
	}

	/** Check option is appropriate for grammar, rule, subrule.
	 *  @return true when the option is legal in this position.
	 */
	boolean checkOptions(GrammarAST parent,
						 Token optionID, String value)
	{
		boolean ok = true;
		if ( optionID.getText().equals("tokenVocab") &&
			 g.parent!=null ) // only allow tokenVocab option in root grammar
		{
			g.tool.errMgr.grammarWarning(ErrorType.TOKEN_VOCAB_IN_DELEGATE,
										 g.fileName,
										 optionID,
										 g.name);
			ok = false;
		}

		if ( parent.getType()==ANTLRParser.BLOCK ) {
			if ( !legalBlockOptions.contains(optionID.getText()) ) { // block
				g.tool.errMgr.grammarError(ErrorType.ILLEGAL_OPTION,
										   g.fileName,
										   optionID,
										   optionID.getText());
				ok = false;
			}
		}
		else if ( parent.getType()==ANTLRParser.RULE ) {
			if ( !legalRuleOptions.contains(optionID.getText()) ) { // rule
				g.tool.errMgr.grammarError(ErrorType.ILLEGAL_OPTION,
										   g.fileName,
										   optionID,
										   optionID.getText());
				ok = false;
			}
		}
		else if ( parent.getType()==ANTLRParser.GRAMMAR &&
				  !legalGrammarOption(optionID.getText()) ) { // grammar
			g.tool.errMgr.grammarError(ErrorType.ILLEGAL_OPTION,
									   g.fileName,
									   optionID,
									   optionID.getText());
			ok = false;
		}

		return ok;
	}

	/** Check option is appropriate for token; parent is ELEMENT_OPTIONS.
	 *  @return true when the option is legal here.
	 */
	boolean checkTokenOptions(GrammarAST parent,
							  Token optionID, String value)
	{
		String fileName = optionID.getInputStream().getSourceName();
		// don't care about ID<ASTNodeName> options
		if ( value!=null && !legalTokenOptions.contains(optionID.getText()) ) {
			g.tool.errMgr.grammarError(ErrorType.ILLEGAL_OPTION,
									   fileName,
									   optionID,
									   optionID.getText());
			return false;
		}
		// example (ALT_REWRITE (ALT (ID (ELEMENT_OPTIONS Foo))) (-> (ALT ID))
		if ( parent.hasAncestor(ANTLRParser.ALT_REWRITE) ) {
			g.tool.errMgr.grammarError(ErrorType.HETERO_ILLEGAL_IN_REWRITE_ALT,
									   fileName,
									   optionID);
		}
		// TODO: extra checks depending on terminal kind?
		switch ( parent.getType() ) {
			case ANTLRParser.TOKEN_REF :
			case ANTLRParser.STRING_LITERAL :
			case ANTLRParser.WILDCARD :
		}
		return true;
	}

	/** Is this option legal at the grammar level for this grammar's kind? */
	boolean legalGrammarOption(String key) {
		switch ( g.getType() ) {
			case ANTLRParser.LEXER :
				return legalLexerOptions.contains(key);
			case ANTLRParser.PARSER :
				return legalParserOptions.contains(key);
			case ANTLRParser.TREE :
				return legalTreeParserOptions.contains(key);
			default :
				return legalParserOptions.contains(key);
		}
	}

	/** Rules in tree grammar that use -> rewrites and are spitting out
	 *  templates via output=template and then use rewrite=true must only
	 *  use -> on alts that are simple nodes or trees or single rule refs
	 *  that match either nodes or trees.
	 */
	void checkRewriteForMultiRootAltInTreeGrammar(
		Map<String, String> options,
		Token altStart,
		int alt)
	{
		if ( g.isTreeGrammar() &&
			 options!=null && options.get("output")!=null &&
			 options.get("output").equals("template") &&
			 options.get("rewrite")!=null &&
			 options.get("rewrite").equals("true") )
		{
			String fileName = altStart.getInputStream().getSourceName();
			g.tool.errMgr.grammarWarning(ErrorType.REWRITE_FOR_MULTI_ELEMENT_ALT,
										 fileName,
										 altStart,
										 alt);
		}
	}

	/** AST_OP_WITH_NON_AST_OUTPUT_OPTION / REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION /
	 *  AST_OP_IN_ALT_WITH_REWRITE: validate ^/! operators against grammar options.
	 */
	void checkASTOps(Map<String, String> options,
					 GrammarAST op,
					 GrammarAST elementRoot)
	{
		RuleAST rule = (RuleAST)op.getAncestor(ANTLRParser.RULE);
		String ruleName = rule.getChild(0).getText();
		String fileName = elementRoot.token.getInputStream().getSourceName();
		// BUG FIX: original was options.get("output").equals("AST"), which
		// threw NPE when options existed but "output" was unset; compare
		// constant-first so a null lookup is just "not AST".
		if ( options==null || !"AST".equals(options.get("output")) ) {
			g.tool.errMgr.grammarWarning(ErrorType.AST_OP_WITH_NON_AST_OUTPUT_OPTION,
										 fileName,
										 elementRoot.token,
										 op.getText());
		}
		if ( options!=null && options.get("output")==null ) {
			g.tool.errMgr.grammarWarning(ErrorType.REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
										 fileName,
										 elementRoot.token,
										 ruleName);
		}
		if ( op.hasAncestor(ANTLRParser.ALT_REWRITE) ) {
			GrammarAST rew = (GrammarAST)op.getAncestor(ANTLRParser.ALT_REWRITE);
			int altNum = rew.getChildIndex() + 1; // alts are 1..n
			g.tool.errMgr.grammarWarning(ErrorType.AST_OP_IN_ALT_WITH_REWRITE,
										 fileName,
										 elementRoot.token,
										 ruleName,
										 altNum);
		}
	}

	/** REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION: -> used without an output option. */
	void checkRewriteOk(Map<String, String> options, GrammarAST elementRoot) {
		RuleAST rule = (RuleAST)elementRoot.getAncestor(ANTLRParser.RULE);
		String ruleName = rule.getChild(0).getText();
		String fileName = elementRoot.token.getInputStream().getSourceName();
		if ( options!=null && options.get("output")==null ) {
			g.tool.errMgr.grammarWarning(ErrorType.REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
										 fileName,
										 elementRoot.token,
										 ruleName);
		}
	}

	/** CONFLICTING_OPTION_IN_TREE_FILTER: with filter=true in a tree grammar,
	 *  backtrack must be true, output must be AST, and rewrite must be true.
	 */
	void checkTreeFilterOptions(GrammarRootAST root,
								Map<String, String> options)
	{
		if ( options==null ) return;
		String fileName = root.token.getInputStream().getSourceName();
		String filter = options.get("filter");
		if ( g.isTreeGrammar() && filter!=null && filter.equals("true") ) {
			// check for conflicting options
			// filter => backtrack=true (can't be false)
			// filter&&output!=AST => error
			// filter&&output=AST => rewrite=true
			// any deviation from valid option set is an error
			String backtrack = options.get("backtrack");
			String output = options.get("output");
			String rewrite = options.get("rewrite");
			if ( backtrack!=null && !backtrack.equals("true") ) { // was backtrack.toString(): redundant on a String
				g.tool.errMgr.grammarError(ErrorType.CONFLICTING_OPTION_IN_TREE_FILTER,
										   fileName,
										   root.token,
										   "backtrack", backtrack);
			}
			if ( output!=null && !output.equals("AST") ) {
				g.tool.errMgr.grammarError(ErrorType.CONFLICTING_OPTION_IN_TREE_FILTER,
										   fileName,
										   root.token,
										   "output", output);
			}
			else if ( rewrite!=null && !rewrite.equals("true") ) { // && AST output
				g.tool.errMgr.grammarError(ErrorType.CONFLICTING_OPTION_IN_TREE_FILTER,
										   fileName,
										   root.token,
										   "rewrite", rewrite);
			}
		}
	}

	/** WILDCARD_AS_ROOT: a wildcard may not be a tree root. */
	void checkWildcardRoot(Token wild) {
		String fileName = wild.getInputStream().getSourceName();
		g.tool.errMgr.grammarError(ErrorType.WILDCARD_AS_ROOT,
								   fileName,
								   wild);
	}

	/** INVALID_IMPORT / IMPORT_NAME_CLASH: delegate kind must be importable
	 *  into this grammar kind, and its name must not collide with the names
	 *  a combined grammar generates for its own lexer/parser.
	 */
	void checkImport(Token importID) {
		Grammar delegate = g.getImportedGrammar(importID.getText());
		if ( delegate==null ) return;
		List<Integer> validDelegators = validImportTypes.get(delegate.getType());
		if ( validDelegators!=null && !validDelegators.contains(g.getType()) ) {
			g.tool.errMgr.grammarError(ErrorType.INVALID_IMPORT,
									   g.fileName,
									   importID,
									   g, delegate);
		}
		if ( g.isCombined() &&
			 (delegate.name.equals(g.name+Grammar.getGrammarTypeToFileNameSuffix(ANTLRParser.LEXER))||
			  delegate.name.equals(g.name+Grammar.getGrammarTypeToFileNameSuffix(ANTLRParser.PARSER))) )
		{
			g.tool.errMgr.grammarError(ErrorType.IMPORT_NAME_CLASH,
									   g.fileName,
									   importID,
									   g, delegate);
		}
	}
}

View File

@ -0,0 +1,240 @@
/*
[The "BSD license"]
Copyright (c) 2010 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** Triggers for the basic semantics of the input. Side-effects:
* Set token, block, rule options in the tree. Load field option
* with grammar options. Only legal options are set.
*/
tree grammar BasicSemanticTriggers;
options {
	language     = Java;
	tokenVocab   = ANTLRParser;
	ASTLabelType = GrammarAST;
	filter       = true; // filter mode: only topdown/bottomup patterns fire
	//superClass = 'org.antlr.v4.runtime.tree.TreeFilter';
}

// Include the copyright in this source and also the generated source
@header {
/*
 [The "BSD license"]
 Copyright (c) 2010 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
     derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.semantics;
import org.antlr.v4.tool.*;
}

@members {
// TODO: SHOULD we fix up grammar AST to remove errors?  Like kill refs to bad rules?
// that is, rewrite tree? maybe all passes are filters until code gen, which needs
// tree grammar. 'course we won't try codegen if errors.
public String name;
GrammarASTWithOptions root;
Grammar g; // which grammar are we checking
BasicSemanticChecks checker;
public BasicSemanticTriggers(TreeNodeStream input, Grammar g) {
	this(input);
	this.g = g;
	checker = new BasicSemanticChecks(g);
}
}

topdown // do these on way down so options and such are set first
	:	grammarSpec
	|	rules
	|	mode
	|	option
	|	rule
	|	tokenAlias
	|	rewrite
	;

bottomup // do these "inside to outside" of expressions.
	:	multiElementAltInTreeGrammar
	|	astOps
	|	ruleref
	|	tokenRefWithArgs
	|	elementOption
	|	checkGrammarOptions // do after we see everything
	|	wildcardRoot
	;

// Capture the grammar name and root node; name check fires here.
grammarSpec
	:	^(	GRAMMAR ID DOC_COMMENT?
			{
			name = $ID.text;
			checker.checkGrammarName($ID.token);
			root = (GrammarRootAST)$start;
			}
			prequelConstructs ^(RULES .*)
		)
	;

checkGrammarOptions // when we get back to root
	:	GRAMMAR
		{checker.checkTreeFilterOptions((GrammarRootAST)$GRAMMAR,
										root.getOptions());}
	;

/*
grammarType
@init {gtype = $start.getType(); root = (GrammarASTWithOptions)$start;}
	:	LEXER_GRAMMAR | PARSER_GRAMMAR | TREE_GRAMMAR | COMBINED_GRAMMAR
	;
*/

// Collect all prequel sections; checker flags repeats (REPEATED_PREQUEL).
prequelConstructs
	:	(	^(o+=OPTIONS .+)
		|	^(i+=IMPORT delegateGrammar+)
		|	^(t+=TOKENS .+)
		)*
		{checker.checkNumPrequels($o, $i, $t);}
	;

delegateGrammar
	:	(	^(ASSIGN ID id=ID)
		|	id=ID
		)
		{checker.checkImport($id.token);}
	;

rules : RULES {checker.checkNumRules($RULES);} ;

mode : ^(MODE ID .*) {checker.checkMode($ID.token);} ;

option // TODO: put in grammar, or rule, or block
	:	{inContext("OPTIONS")}? ^(ASSIGN o=ID optionValue)
		{
		GrammarAST parent = (GrammarAST)$start.getParent();   // OPTION
		GrammarAST parentWithOptionKind = (GrammarAST)parent.getParent();
		boolean ok = checker.checkOptions(parentWithOptionKind,
										  $ID.token, $optionValue.v);
		// store options into XXX_GRAMMAR, RULE, BLOCK nodes
		if ( ok ) {
			((GrammarASTWithOptions)parentWithOptionKind).setOption($o.text, $optionValue.v);
		}
		}
	;

optionValue returns [String v]
@init {$v = $start.token.getText();}
	:	ID
	|	STRING_LITERAL
	|	INT
	|	STAR
	;

rule:	^( RULE r=ID .*) {checker.checkInvalidRuleDef($r.token);}
	;

ruleref
	:	RULE_REF {checker.checkInvalidRuleRef($RULE_REF.token);}
	;

tokenAlias
	:	{inContext("TOKENS")}? ^(ASSIGN ID STRING_LITERAL)
		{checker.checkTokenAlias($ID.token);}
	;

tokenRefWithArgs
	:	{!inContext("RESULT ...")}? // if not on right side of ->
		^(TOKEN_REF ARG_ACTION)
		{checker.checkTokenArgs($TOKEN_REF.token);}
	;

elementOption
	:	{!inContext("RESULT ...")}? // not on right side of ->
		^(	ELEMENT_OPTIONS
			(	^(ASSIGN o=ID value=ID)
			|	^(ASSIGN o=ID value=STRING_LITERAL)
			|	o=ID
			)
		)
		{
		boolean ok = checker.checkTokenOptions((GrammarAST)$o.getParent(),
											   $o.token, $value.text);
		if ( ok ) {
			if ( value!=null ) {
				TerminalAST terminal = (TerminalAST)$start.getParent();
				terminal.setOption($o.text, $value.text);
			}
			else {
				// bare ID<Foo> form: Foo is the default (node-type) option
				TerminalAST terminal = (TerminalAST)$start.getParent();
				terminal.setOption(TerminalAST.defaultTokenOption, $o.text);
			}
		}
		}
	;

// (ALT_REWRITE (ALT A B) ^( ALT ^( A B ) ) or ( ALT A )
multiElementAltInTreeGrammar
	:	{inContext("ALT_REWRITE")}?
		^(	ALT ~(SEMPRED|ACTION) ~(SEMPRED|ACTION)+ ) // > 1 element at outer level
		{
		int altNum = $start.getParent().getChildIndex() + 1; // alts are 1..n
		GrammarAST firstNode = (GrammarAST)$start.getChild(0);
		checker.checkRewriteForMultiRootAltInTreeGrammar(root.getOptions(),
														 firstNode.token,
														 altNum);
		}
	;

// Check stuff like (^ A) (! r)
astOps
	:	^(ROOT el=.) {checker.checkASTOps(root.getOptions(), $start, $el);}
	|	^(BANG el=.) {checker.checkASTOps(root.getOptions(), $start, $el);}
	;

rewrite
	:	(RESULT|ST_RESULT)
		{checker.checkRewriteOk(root.getOptions(),$start);}
	;

wildcardRoot
	:	^(TREE_BEGIN WILDCARD .*)
		{checker.checkWildcardRoot($WILDCARD.token);}
	;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,99 @@
SEMPRED=4
FORCED_ACTION=5
DOC_COMMENT=6
SRC=7
NLCHARS=8
COMMENT=9
DOUBLE_QUOTE_STRING_LITERAL=10
DOUBLE_ANGLE_STRING_LITERAL=11
ACTION_STRING_LITERAL=12
ACTION_CHAR_LITERAL=13
ARG_ACTION=14
NESTED_ACTION=15
ACTION=16
ACTION_ESC=17
WSNLCHARS=18
OPTIONS=19
TOKENS=20
SCOPE=21
IMPORT=22
FRAGMENT=23
LEXER=24
PARSER=25
TREE=26
GRAMMAR=27
PROTECTED=28
PUBLIC=29
PRIVATE=30
RETURNS=31
THROWS=32
CATCH=33
FINALLY=34
TEMPLATE=35
MODE=36
COLON=37
COLONCOLON=38
COMMA=39
SEMI=40
LPAREN=41
RPAREN=42
IMPLIES=43
LT=44
GT=45
ASSIGN=46
QUESTION=47
BANG=48
STAR=49
PLUS=50
PLUS_ASSIGN=51
OR=52
ROOT=53
DOLLAR=54
DOT=55
RANGE=56
ETC=57
RARROW=58
TREE_BEGIN=59
AT=60
NOT=61
RBRACE=62
TOKEN_REF=63
RULE_REF=64
INT=65
WSCHARS=66
ESC_SEQ=67
STRING_LITERAL=68
HEX_DIGIT=69
UNICODE_ESC=70
WS=71
ERRCHAR=72
RULE=73
RULES=74
RULEMODIFIERS=75
RULEACTIONS=76
BLOCK=77
REWRITE_BLOCK=78
OPTIONAL=79
CLOSURE=80
POSITIVE_CLOSURE=81
SYNPRED=82
CHAR_RANGE=83
EPSILON=84
ALT=85
ALTLIST=86
ID=87
ARG=88
ARGLIST=89
RET=90
COMBINED=91
INITACTION=92
LABEL=93
GATED_SEMPRED=94
SYN_SEMPRED=95
BACKTRACK_SEMPRED=96
WILDCARD=97
LIST=98
ELEMENT_OPTIONS=99
ST_RESULT=100
RESULT=101
ALT_REWRITE=102

View File

@ -0,0 +1,57 @@
package org.antlr.v4.semantics;

import org.antlr.runtime.Token;
import org.antlr.v4.parse.ActionSplitterListener;

/**
 * Adapter: a do-nothing implementation of every ActionSplitterListener
 * callback, so subclasses can override only the events they care about.
 */
public class BlankActionSplitterListener implements ActionSplitterListener {
	public void setQualifiedAttr(String expr, Token x, Token y, Token rhs) { }
	public void qualifiedAttr(String expr, Token x, Token y) { }
	public void setAttr(String expr, Token x, Token rhs) { }
	public void attr(String expr, Token x) { }

	public void setDynamicScopeAttr(String expr, Token x, Token y, Token rhs) { }
	public void dynamicScopeAttr(String expr, Token x, Token y) { }
	public void setDynamicNegativeIndexedScopeAttr(String expr, Token x, Token y, Token index, Token rhs) { }
	public void dynamicNegativeIndexedScopeAttr(String expr, Token x, Token y, Token index) { }
	public void setDynamicAbsoluteIndexedScopeAttr(String expr, Token x, Token y, Token index, Token rhs) { }
	public void dynamicAbsoluteIndexedScopeAttr(String expr, Token x, Token y, Token index) { }

	public void templateInstance(String expr) { }
	public void indirectTemplateInstance(String expr) { }
	public void setExprAttribute(String expr) { }
	public void setSTAttribute(String expr) { }
	public void templateExpr(String expr) { }

	public void unknownSyntax(Token t) { }
	public void text(String text) { }
}

View File

@ -0,0 +1,310 @@
/*
[The "BSD license"]
Copyright (c) 2010 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** Collects rules, terminals, strings, actions, scopes etc... from AST.
 *  No side-effects on the grammar object itself: results accumulate in the
 *  public lists declared in @members and are harvested by SemanticPipeline.
 */
tree grammar CollectSymbols;
options {
	language = Java;
	tokenVocab = ANTLRParser;
	ASTLabelType = GrammarAST;
	filter = true;
	//superClass = 'org.antlr.v4.runtime.tree.TreeFilter';
}

// Include the copyright in this source and also the generated source
@header {
/*
 [The "BSD license"]
 Copyright (c) 2010 Terence Parr
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
     derived from this software without specific prior written permission.
 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.semantics;
import org.antlr.v4.tool.*;
import org.antlr.v4.parse.*;
import java.util.Set;
import java.util.HashSet;
import org.stringtemplate.v4.misc.MultiMap;
}

@members {
// Walk state: which rule/mode/alternative the walker is currently inside.
Rule currentRule;
String currentMode = LexerGrammar.DEFAULT_MODE_NAME;
int currentAlt = 1; // 1..n

// Harvested symbol lists; read by SemanticPipeline and SymbolChecks.
public List<Rule> rules = new ArrayList<Rule>();
public List<GrammarAST> rulerefs = new ArrayList<GrammarAST>();
public List<GrammarAST> qualifiedRulerefs = new ArrayList<GrammarAST>();
public List<GrammarAST> terminals = new ArrayList<GrammarAST>();
public List<GrammarAST> tokenIDRefs = new ArrayList<GrammarAST>();
public Set<String> strings = new HashSet<String>();
public List<GrammarAST> tokensDefs = new ArrayList<GrammarAST>();
public List<AttributeDict> scopes = new ArrayList<AttributeDict>();
public List<GrammarAST> actions = new ArrayList<GrammarAST>();
Grammar g; // which grammar are we checking
public CollectSymbols(TreeNodeStream input, Grammar g) {
	this(input);
	this.g = g;
}
}

// Patterns matched on the way DOWN the tree (filter-mode entry point).
topdown
//@init {System.out.println("topdown: "+((Tree)input.LT(1)).getText());}
	:	globalScope
	|	globalNamedAction
	|	tokensSection
	|	mode
	|	rule
	|	ruleArg
	|	ruleReturns
	|	ruleNamedAction
	|	ruleScopeSpec
	|	ruleref
	|	rewriteElement // make sure we check this before terminal etc...
					   // want to match rewrite stuff all here
	|	terminal
	|	labeledElement
	|	setAlt
	|	ruleAction
	|	sempred
	|	finallyClause
	|	exceptionHandler
	;

// Patterns matched on the way back UP; clears per-rule walk state.
bottomup
	:	finishRule
	;

// scope Name { ... } at the grammar level -> global dynamic scope
globalScope
	:	{inContext("GRAMMAR")}? ^(SCOPE ID ACTION)
		{
		AttributeDict s = ScopeParser.parseDynamicScope($ACTION.text);
		s.type = AttributeDict.DictType.GLOBAL_SCOPE;
		s.name = $ID.text;
		s.ast = $ACTION;
		scopes.add(s);
		}
	;

// @header { ... } / @lexer::members { ... } etc. at the grammar level
globalNamedAction
	:	{inContext("GRAMMAR")}? ^(AT ID? ID ACTION)
		{actions.add($AT); ((ActionAST)$ACTION).resolver = g;}
	;

// entries inside tokens { X; Y='y'; }
tokensSection
	:	{inContext("TOKENS")}?
		(	^(ASSIGN t=ID STRING_LITERAL)
			{terminals.add($t); tokenIDRefs.add($t);
			 tokensDefs.add($ASSIGN); strings.add($STRING_LITERAL.text);}
		|	t=ID
			{terminals.add($t); tokenIDRefs.add($t); tokensDefs.add($t);}
		)
	;

// lexer "mode X;" sections; remembered so rules know their mode
mode:	^(MODE ID .+) {currentMode = $ID.text;} ;

// a rule definition; creates the Rule object and resets walk state
rule
@init {List<GrammarAST> modifiers = new ArrayList<GrammarAST>();}
	:   ^(	RULE
			name=ID (options {greedy=false;}:.)*
			(^(RULEMODIFIERS (m=. {modifiers.add($m);})+))?
			(^(AT ID ACTION))*
			^(BLOCK .+)
			.*
		 )
		{
		int numAlts = $RULE.getFirstChildWithType(BLOCK).getChildCount();
		Rule r = new Rule(g, $name.text, (RuleAST)$RULE, numAlts);
		if ( g.isLexer() ) r.mode = currentMode;
		if ( modifiers.size()>0 ) r.modifiers = modifiers;
		rules.add(r);
		currentRule = r;
		currentAlt = 1;
		}
	;

// track which alternative (1..n) of the current rule we are inside
setAlt
	:	{inContext("RULE BLOCK")}? ( ALT | ALT_REWRITE )
		{
		currentAlt = $start.getChildIndex()+1;
		currentRule.alt[currentAlt].ast = (AltAST)$start;
		}
	;

// bottom-up: leaving a rule subtree; drop the currentRule pointer
finishRule
	:	RULE {currentRule = null;}
	;

// @init { ... } / @after { ... } attached to a rule
ruleNamedAction
	:	{inContext("RULE")}? ^(AT ID ACTION)
		{
		currentRule.namedActions.put($ID.text,(ActionAST)$ACTION);
		((ActionAST)$ACTION).resolver = currentRule;
		}
	;

// inline {...} action in an alternative (but not scope/catch/finally/@ actions)
ruleAction
	:	{inContext("RULE ...")&&!inContext("SCOPE")&&
		 !inContext("CATCH")&&!inContext("FINALLY")&&!inContext("AT")}?
		ACTION
		{
		currentRule.defineActionInAlt(currentAlt, (ActionAST)$ACTION);
		((ActionAST)$ACTION).resolver = currentRule.alt[currentAlt];
		}
	|	FORCED_ACTION
		{
		currentRule.defineActionInAlt(currentAlt, (ActionAST)$FORCED_ACTION);
		((ActionAST)$FORCED_ACTION).resolver = currentRule.alt[currentAlt];
		}
	;

// {...}? semantic predicate inside an alternative
sempred
	:	{inContext("RULE ...")}?
		SEMPRED
		{
		currentRule.definePredicateInAlt(currentAlt, (PredAST)$SEMPRED);
		((PredAST)$SEMPRED).resolver = currentRule.alt[currentAlt];
		}
	;

// catch [ArgType e] { ... } handler on a rule
exceptionHandler
	:	^(CATCH ARG_ACTION ACTION)
		{
		currentRule.exceptionActions.add((ActionAST)$ACTION);
		((ActionAST)$ACTION).resolver = currentRule;
		}
	;

// finally { ... } clause on a rule
finallyClause
	:	^(FINALLY ACTION)
		{
		currentRule.finallyAction = (ActionAST)$ACTION;
		((ActionAST)$ACTION).resolver = currentRule;
		}
	;

// rule parameters: r[int x, String y]
ruleArg
	:	{inContext("RULE")}? ARG_ACTION
		{
		currentRule.args = ScopeParser.parseTypeList($ARG_ACTION.text);
		currentRule.args.type = AttributeDict.DictType.ARG;
		currentRule.args.ast = $ARG_ACTION;
		}
	;

// returns [ ... ] clause
ruleReturns
	:	^(RETURNS ARG_ACTION)
		{
		currentRule.retvals = ScopeParser.parseTypeList($ARG_ACTION.text);
		currentRule.retvals.type = AttributeDict.DictType.RET;
		currentRule.retvals.ast = $ARG_ACTION;
		}
	;

// rule-level scope { ... } definition or "scope A, B;" uses
ruleScopeSpec
	:	{inContext("RULE")}?
		(	^(SCOPE ACTION)
			{
			currentRule.scope = ScopeParser.parseDynamicScope($ACTION.text);
			currentRule.scope.type = AttributeDict.DictType.RULE_SCOPE;
			currentRule.scope.name = currentRule.name;
			currentRule.scope.ast = $ACTION;
			}
		|	^(SCOPE ids+=ID+) {currentRule.useScopes = $ids;}
		)
	;

// any element referenced on the right side of a -> rewrite
rewriteElement
//@init {System.out.println("rewriteElement: "+((Tree)input.LT(1)).getText());}
	:
		{inContext("RESULT ...")}? (TOKEN_REF|RULE_REF|STRING_LITERAL|LABEL)
		{currentRule.alt[currentAlt].rewriteElements.add($start);}
	;

// x=element or x+=element; record the label in the current alternative
labeledElement
@after {
LabelElementPair lp = new LabelElementPair(g, $id, $e, $start.getType());
//currentRule.labelDefs.map($id.text, lp);
currentRule.alt[currentAlt].labelDefs.map($id.text, lp);
}
	:	{inContext("RULE ...")}?
		(	^(ASSIGN id=ID e=.)
		|	^(PLUS_ASSIGN id=ID e=.)
		)
	;

// 'literal' and TOKEN references (outside tokens{} aliasing)
terminal
	:	{!inContext("TOKENS ASSIGN")}? STRING_LITERAL
		{
		terminals.add($start);
		strings.add($STRING_LITERAL.text);
		if ( currentRule!=null ) {
			currentRule.alt[currentAlt].tokenRefs.map($STRING_LITERAL.text, (TerminalAST)$STRING_LITERAL);
		}
		}
	|	TOKEN_REF
		{
		terminals.add($TOKEN_REF);
		tokenIDRefs.add($TOKEN_REF);
		if ( currentRule!=null ) {
			currentRule.alt[currentAlt].tokenRefs.map($TOKEN_REF.text, (TerminalAST)$TOKEN_REF);
		}
		}
	;

// rule references; qualified refs (g.r) are additionally remembered
ruleref
//@init {System.out.println("ruleref: "+((Tree)input.LT(1)).getText());}
	:	(	{inContext("DOT ...")}?
			r=RULE_REF {qualifiedRulerefs.add((GrammarAST)$r.getParent());}
		|	r=RULE_REF
		)
		{
		rulerefs.add($r);
		if ( currentRule!=null ) {
			currentRule.alt[currentAlt].ruleRefs.map($r.text, $r);
		}
		}
	;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,99 @@
SEMPRED=4
FORCED_ACTION=5
DOC_COMMENT=6
SRC=7
NLCHARS=8
COMMENT=9
DOUBLE_QUOTE_STRING_LITERAL=10
DOUBLE_ANGLE_STRING_LITERAL=11
ACTION_STRING_LITERAL=12
ACTION_CHAR_LITERAL=13
ARG_ACTION=14
NESTED_ACTION=15
ACTION=16
ACTION_ESC=17
WSNLCHARS=18
OPTIONS=19
TOKENS=20
SCOPE=21
IMPORT=22
FRAGMENT=23
LEXER=24
PARSER=25
TREE=26
GRAMMAR=27
PROTECTED=28
PUBLIC=29
PRIVATE=30
RETURNS=31
THROWS=32
CATCH=33
FINALLY=34
TEMPLATE=35
MODE=36
COLON=37
COLONCOLON=38
COMMA=39
SEMI=40
LPAREN=41
RPAREN=42
IMPLIES=43
LT=44
GT=45
ASSIGN=46
QUESTION=47
BANG=48
STAR=49
PLUS=50
PLUS_ASSIGN=51
OR=52
ROOT=53
DOLLAR=54
DOT=55
RANGE=56
ETC=57
RARROW=58
TREE_BEGIN=59
AT=60
NOT=61
RBRACE=62
TOKEN_REF=63
RULE_REF=64
INT=65
WSCHARS=66
ESC_SEQ=67
STRING_LITERAL=68
HEX_DIGIT=69
UNICODE_ESC=70
WS=71
ERRCHAR=72
RULE=73
RULES=74
RULEMODIFIERS=75
RULEACTIONS=76
BLOCK=77
REWRITE_BLOCK=78
OPTIONAL=79
CLOSURE=80
POSITIVE_CLOSURE=81
SYNPRED=82
CHAR_RANGE=83
EPSILON=84
ALT=85
ALTLIST=86
ID=87
ARG=88
ARGLIST=89
RET=90
COMBINED=91
INITACTION=92
LABEL=93
GATED_SEMPRED=94
SYN_SEMPRED=95
BACKTRACK_SEMPRED=96
WILDCARD=97
LIST=98
ELEMENT_OPTIONS=99
ST_RESULT=100
RESULT=101
ALT_REWRITE=102

View File

@ -0,0 +1,175 @@
package org.antlr.v4.semantics;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.tree.BufferedTreeNodeStream;
import org.antlr.v4.parse.*;
import org.antlr.v4.tool.*;
import java.util.Map;
/** Do as much semantic checking as we can and fill in grammar
* with rules, dynamic scopes, actions, and token definitions.
* The only side effects are in the grammar pass to process().
* We consume a bunch of memory here while we build up data structures
* to perform checking, but all of it goes away after this pipeline object
* gets garbage collected.
*
* After this pipeline finishes, we can be sure that the grammar
* is syntactically correct and that it's semantically correct enough for us
* to attempt grammar analysis. We have assigned all token types.
* Note that imported grammars bring in token and rule definitions
* but only the root grammar and any implicitly created lexer grammar
* get their token definitions filled up. We are treating the
* imported grammars like includes (the generated code treats them
* as separate objects, however).
*/
public class SemanticPipeline {
public Grammar g;
public SemanticPipeline(Grammar g) {
this.g = g;
}
public void process() {
if ( g.ast==null ) return;
// VALIDATE AST STRUCTURE
GrammarASTAdaptor adaptor = new GrammarASTAdaptor();
// use buffered node stream as we will look around in stream
// to give good error messages.
BufferedTreeNodeStream nodes =
new BufferedTreeNodeStream(adaptor,g.ast);
ASTVerifier walker = new ASTVerifier(nodes);
try {walker.grammarSpec();}
catch (RecognitionException re) {
ErrorManager.fatalInternalError("bad grammar AST structure", re);
}
// DO BASIC / EASY SEMANTIC CHECKS
nodes.reset();
BasicSemanticTriggers basics = new BasicSemanticTriggers(nodes,g);
basics.downup(g.ast);
// don't continue if we get errors in this basic check
if ( false ) return;
// COLLECT SYMBOLS: RULES, ACTIONS, TERMINALS, ...
nodes.reset();
CollectSymbols collector = new CollectSymbols(nodes,g);
collector.downup(g.ast); // no side-effects; compute lists
// CHECK FOR SYMBOL COLLISIONS
SymbolChecks symcheck = new SymbolChecks(g, collector);
symcheck.examine(); // side-effect: strip away redef'd rules.
// don't continue if we get symbol errors
//if ( ErrorManager.getNumErrors()>0 ) return;
// hmm...we don't get missing arg errors and such if we bail out here
// STORE RULES/ACTIONS/SCOPES IN GRAMMAR
for (Rule r : collector.rules) g.defineRule(r);
for (AttributeDict s : collector.scopes) g.defineScope(s);
for (GrammarAST a : collector.actions) g.defineAction(a);
// LINK ALT NODES WITH Alternatives
for (Rule r : g.rules.values()) {
for (int i=1; i<=r.numberOfAlts; i++) {
r.alt[i].ast.alt = r.alt[i];
}
}
// CHECK RULE REFS NOW (that we've defined rules in grammar)
symcheck.checkRuleArgs(g, collector.rulerefs);
identifyStartRules(collector);
symcheck.checkForQualifiedRuleIssues(g, collector.qualifiedRulerefs);
// don't continue if we got symbol errors
if ( g.tool.getNumErrors()>0 ) return;
// CHECK ATTRIBUTE EXPRESSIONS FOR SEMANTIC VALIDITY
AttributeChecks.checkAllAttributeExpressions(g);
// ASSIGN TOKEN TYPES
String vocab = g.getOption("tokenVocab");
if ( vocab!=null ) {
TokenVocabParser vparser = new TokenVocabParser(g.tool, vocab);
Map<String,Integer> tokens = vparser.load();
System.out.println("tokens="+tokens);
for (String t : tokens.keySet()) {
if ( t.charAt(0)=='\'' ) g.defineStringLiteral(t, tokens.get(t));
else g.defineTokenName(t, tokens.get(t));
}
}
if ( g.isLexer() ) assignLexerTokenTypes(g, collector);
else assignTokenTypes(g, collector, symcheck);
UseDefAnalyzer.checkRewriteElementsPresentOnLeftSide(g);
UseDefAnalyzer.trackTokenRuleRefsInActions(g);
}
void identifyStartRules(CollectSymbols collector) {
for (GrammarAST ref : collector.rulerefs) {
String ruleName = ref.getText();
Rule r = g.getRule(ruleName);
if ( r!=null ) r.isStartRule = false;
}
}
void assignLexerTokenTypes(Grammar g, CollectSymbols collector) {
Grammar G = g.getOutermostGrammar(); // put in root, even if imported
for (GrammarAST def : collector.tokensDefs) {
if ( def.getType()== ANTLRParser.ID ) G.defineTokenName(def.getText());
}
// DEFINE TOKEN TYPES FOR NONFRAGMENT RULES
for (Rule r : g.rules.values()) {
if ( !r.isFragment() ) G.defineTokenName(r.name);
}
// FOR ALL X : 'xxx'; RULES, DEFINE 'xxx' AS TYPE X
Map<String,String> litAliases = Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
if ( litAliases!=null ) {
for (String lit : litAliases.keySet()) {
G.defineTokenAlias(litAliases.get(lit), lit);
}
}
}
void assignTokenTypes(Grammar g, CollectSymbols collector, SymbolChecks symcheck) {
if ( g.implicitLexerOwner!=null ) {
// copy vocab from combined to implicit lexer
g.importVocab(g.implicitLexerOwner);
System.out.println("tokens="+g.tokenNameToTypeMap);
System.out.println("strings="+g.stringLiteralToTypeMap);
}
else {
Grammar G = g.getOutermostGrammar(); // put in root, even if imported
// DEFINE tokens { X='x'; } ALIASES
for (GrammarAST alias : collector.tokensDefs) {
if ( alias.getType()== ANTLRParser.ASSIGN ) {
String name = alias.getChild(0).getText();
String lit = alias.getChild(1).getText();
G.defineTokenAlias(name, lit);
}
}
// DEFINE TOKEN TYPES FOR X : 'x' ; RULES
Map<String,String> litAliases = Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
if ( litAliases!=null ) {
for (String lit : litAliases.keySet()) {
G.defineTokenAlias(litAliases.get(lit), lit);
}
}
// DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
for (String id : symcheck.tokenIDs) { G.defineTokenName(id); }
// DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
for (String s : collector.strings) { G.defineStringLiteral(s); }
// System.out.println("tokens="+G.tokenNameToTypeMap);
// System.out.println("strings="+G.stringLiteralToTypeMap);
}
}
}

View File

@ -0,0 +1,316 @@
package org.antlr.v4.semantics;
import org.antlr.runtime.Token;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.*;
import java.util.*;
/** Check for symbol problems; no side-effects. Inefficient to walk rules
 *  and such multiple times, but I like isolating all error checking outside
 *  of code that actually defines symbols etc...
 *
 *  Side-effect: strip away redef'd rules.
 */
public class SymbolChecks {
	Grammar g;                // grammar under inspection
	CollectSymbols collector; // symbol lists gathered by the CollectSymbols walk
	// rule name -> first definition seen; used to detect redefinitions
	Map<String, Rule> nameToRuleMap = new HashMap<String, Rule>();
	// token names referenced; filled by checkForTokenConflicts()
	Set<String> tokenIDs = new HashSet<String>();
	// global dynamic-scope names; filled by checkScopeRedefinitions()
	Set<String> globalScopeNames = new HashSet<String>();
	// action scope (parser/lexer/...) -> named actions seen in that scope
	Map<String, Set<String>> actionScopeToActionNames = new HashMap<String, Set<String>>();
	public ErrorManager errMgr;

	public SymbolChecks(Grammar g, CollectSymbols collector) {
		this.g = g;
		this.collector = collector;
		this.errMgr = g.tool.errMgr;
		/*
		System.out.println("rules="+collector.rules);
		System.out.println("rulerefs="+collector.rulerefs);
		System.out.println("tokenIDRefs="+collector.tokenIDRefs);
		System.out.println("terminals="+collector.terminals);
		System.out.println("strings="+collector.strings);
		System.out.println("tokensDef="+collector.tokensDefs);
		System.out.println("actions="+collector.actions);
		System.out.println("scopes="+collector.scopes);
		*/
	}

	/** Run all symbol checks. Methods mutate this object's bookkeeping
	 *  fields (but nothing outside it), so the call order matters.
	 */
	public void examine() {
		// methods affect fields, but no side-effects outside this object
		// So, call order sensitive
		checkScopeRedefinitions(collector.scopes); // sets globalScopeNames
		//checkForImportedRuleIssues(collector.qualifiedRulerefs);
		checkForRuleConflicts(collector.rules); // sets nameToRuleMap
		checkActionRedefinitions(collector.actions); // sets actionScopeToActionNames
		checkTokenAliasRedefinitions(collector.tokensDefs);
		//checkRuleArgs(collector.rulerefs);
		checkForTokenConflicts(collector.tokenIDRefs); // sets tokenIDs
		checkForLabelConflicts(collector.rules);
		//checkRewriteElementsPresentOnLeftSide(collector.rules); // move to after token type assignment
	}
	/** Report rules defined more than once and rules whose name collides
	 *  with a global dynamic scope. Side-effect: fills nameToRuleMap.
	 */
	public void checkForRuleConflicts(List<Rule> rules) {
		if ( rules==null ) return;
		for (Rule r : collector.rules) {
			if ( nameToRuleMap.get(r.name)==null ) {
				nameToRuleMap.put(r.name, r);
			}
			else {
				GrammarAST idNode = (GrammarAST)r.ast.getChild(0);
				errMgr.grammarError(ErrorType.RULE_REDEFINITION,
									g.fileName, idNode.token, r.name);
			}
			if ( globalScopeNames.contains(r.name) ) {
				GrammarAST idNode = (GrammarAST)r.ast.getChild(0);
				errMgr.grammarError(ErrorType.SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE,
									g.fileName, idNode.token, r.name);
			}
		}
	}

	/** Report duplicate global scope names. Side-effect: fills globalScopeNames. */
	public void checkScopeRedefinitions(List<AttributeDict> dicts) {
		if ( dicts ==null ) return;
		for (int i=0; i< dicts.size(); i++) {
			AttributeDict s = dicts.get(i);
			//GrammarAST idNode = (GrammarAST)s.getChild(0);
			if ( !globalScopeNames.contains(s.getName()) ) {
				globalScopeNames.add(s.getName());
			}
			else {
				Token idNode = ((GrammarAST) s.ast.getParent().getChild(0)).token;
				errMgr.grammarError(ErrorType.SCOPE_REDEFINITION,
									g.fileName, idNode, s.getName());
			}
		}
	}

	/** Report tokens aliased more than once in tokens{...} sections, and
	 *  aliases appearing in imported (delegate) grammars.
	 */
	public void checkTokenAliasRedefinitions(List<GrammarAST> aliases) {
		if ( aliases==null ) return;
		Map<String, GrammarAST> aliasTokenNames = new HashMap<String, GrammarAST>();
		for (int i=0; i< aliases.size(); i++) {
			GrammarAST a = aliases.get(i);
			GrammarAST idNode = a;
			if ( a.getType()== ANTLRParser.ASSIGN ) {
				// X='literal' form; the ID child names the token
				idNode = (GrammarAST)a.getChild(0);
				if ( g!=g.getOutermostGrammar() ) {
					errMgr.grammarError(ErrorType.TOKEN_ALIAS_IN_DELEGATE,
										g.fileName, idNode.token, idNode.getText(), g.name);
				}
			}
			GrammarAST prev = aliasTokenNames.get(idNode.getText());
			if ( prev==null ) {
				aliasTokenNames.put(idNode.getText(), a);
			}
			else {
				GrammarAST value = (GrammarAST)prev.getChild(1);
				String valueText = null;
				if ( value!=null ) valueText = value.getText();
				errMgr.grammarError(ErrorType.TOKEN_ALIAS_REASSIGNMENT,
									g.fileName, idNode.token, idNode.getText(), valueText);
			}
		}
	}

	/** Report token names that collide with global scope names.
	 *  Side-effect: fills tokenIDs.
	 */
	public void checkForTokenConflicts(List<GrammarAST> tokenIDRefs) {
		for (GrammarAST a : tokenIDRefs) {
			Token t = a.token;
			String ID = t.getText();
			tokenIDs.add(ID);
			if ( globalScopeNames.contains(t.getText()) ) {
				errMgr.grammarError(ErrorType.SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE,
									g.fileName, t, ID);
			}
		}
	}
	/** Report @scope::name actions defined more than once in the same scope.
	 *  Side-effect: fills actionScopeToActionNames.
	 */
	public void checkActionRedefinitions(List<GrammarAST> actions) {
		if ( actions==null ) return;
		String scope = g.getDefaultActionScope();
		String name = null;
		GrammarAST nameNode = null;
		for (GrammarAST ampersandAST : actions) {
			nameNode = (GrammarAST)ampersandAST.getChild(0);
			if ( ampersandAST.getChildCount()==2 ) {
				// @name { ... } -> default scope
				name = nameNode.getText();
			}
			else {
				// @scope::name { ... }
				scope = nameNode.getText();
				name = ampersandAST.getChild(1).getText();
			}
			Set<String> scopeActions = actionScopeToActionNames.get(scope);
			if ( scopeActions==null ) { // init scope
				scopeActions = new HashSet<String>();
				actionScopeToActionNames.put(scope, scopeActions);
			}
			if ( !scopeActions.contains(name) ) {
				scopeActions.add(name);
			}
			else {
				errMgr.grammarError(ErrorType.ACTION_REDEFINITION,
									g.fileName, nameNode.token, name);
			}
		}
	}

	/** Make sure a label doesn't conflict with another symbol.
	 *  Labels must not conflict with: rules, tokens, scope names,
	 *  return values, parameters, and rule-scope dynamic attributes
	 *  defined in surrounding rule. Also they must have same type
	 *  for repeated defs.
	 */
	public void checkForLabelConflicts(List<Rule> rules) {
		for (Rule r : rules) {
			checkForRuleArgumentAndReturnValueConflicts(r);
			checkForRuleScopeAttributeConflict(r);
			// label namespace is per-rule, across all alternatives
			Map<String, LabelElementPair> labelNameSpace =
				new HashMap<String, LabelElementPair>();
			for (int i=1; i<=r.numberOfAlts; i++) {
				Alternative a = r.alt[i];
				for (List<LabelElementPair> pairs : a.labelDefs.values() ) {
					for (LabelElementPair p : pairs) {
						checkForLabelConflict(r, p.label);
						String name = p.label.getText();
						LabelElementPair prev = labelNameSpace.get(name);
						if ( prev==null ) labelNameSpace.put(name, p);
						else checkForTypeMismatch(prev, p);
					}
				}
			}
		}
	}

	/** A label was defined twice; that is fine only if both defs have the
	 *  same label type (token vs rule, list vs plain, ...).
	 */
	void checkForTypeMismatch(LabelElementPair prevLabelPair,
							  LabelElementPair labelPair)
	{
		// label already defined; if same type, no problem
		if ( prevLabelPair.type != labelPair.type ) {
			String typeMismatchExpr = labelPair.type+"!="+prevLabelPair.type;
			errMgr.grammarError(
				ErrorType.LABEL_TYPE_CONFLICT,
				g.fileName,
				labelPair.label.token,
				labelPair.label.getText(),
				typeMismatchExpr);
		}
	}
	/** Report a single label that collides with a scope, rule, token,
	 *  rule-scope attribute, or rule arg/retval in the surrounding rule r.
	 */
	public void checkForLabelConflict(Rule r, GrammarAST labelID) {
		ErrorType etype = ErrorType.INVALID;
		Object arg2 = null;
		String name = labelID.getText();
		if ( globalScopeNames.contains(name) ) {
			etype = ErrorType.SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
		}
		else if ( nameToRuleMap.containsKey(name) ) {
			etype = ErrorType.LABEL_CONFLICTS_WITH_RULE;
		}
		else if ( tokenIDs.contains(name) ) {
			etype = ErrorType.LABEL_CONFLICTS_WITH_TOKEN;
		}
		else if ( r.scope !=null && r.scope.get(name)!=null ) {
			etype = ErrorType.LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
			arg2 = r.name;
		}
		else if ( (r.retvals!=null&&r.retvals.get(name)!=null) ||
				  (r.args!=null&&r.args.get(name)!=null) )
		{
			etype = ErrorType.LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
			arg2 = r.name;
		}
		if ( etype!=ErrorType.INVALID ) {
			errMgr.grammarError(etype,g.fileName,labelID.token,name,arg2);
		}
	}

	/** Report names used as both an argument and a return value of rule r. */
	public void checkForRuleArgumentAndReturnValueConflicts(Rule r) {
		if ( r.retvals!=null ) {
			Set conflictingKeys = r.retvals.intersection(r.args);
			if (conflictingKeys!=null) {
				for (Iterator it = conflictingKeys.iterator(); it.hasNext();) {
					String key = (String) it.next();
					errMgr.grammarError(
						ErrorType.ARG_RETVAL_CONFLICT,
						g.fileName,
						((GrammarAST)r.ast.getChild(0)).token,
						key,
						r.name);
				}
			}
		}
	}

	/** Check for collision of a rule-scope dynamic attribute with:
	 *  arg, return value, rule name itself. Labels are checked elsewhere.
	 */
	public void checkForRuleScopeAttributeConflict(Rule r) {
		if ( r.scope ==null ) return;
		for (Attribute a : r.scope.attributes.values()) {
			ErrorType msgID = ErrorType.INVALID;
			Object arg2 = null;
			String attrName = a.name;
			if ( r.name.equals(attrName) ) {
				msgID = ErrorType.ATTRIBUTE_CONFLICTS_WITH_RULE;
				arg2 = r.name;
			}
			else if ( (r.retvals!=null&&r.retvals.get(attrName)!=null) ||
					  (r.args!=null&&r.args.get(attrName)!=null) )
			{
				msgID = ErrorType.ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
				arg2 = r.name;
			}
			if ( msgID!=ErrorType.INVALID ) {
				errMgr.grammarError(msgID,g.fileName,
									r.scope.ast.token,
									attrName,arg2);
			}
		}
	}
// CAN ONLY CALL THE TWO NEXT METHODS AFTER GRAMMAR HAS RULE DEFS (see semanticpipeline)
public void checkRuleArgs(Grammar g, List<GrammarAST> rulerefs) {
if ( rulerefs==null ) return;
for (GrammarAST ref : rulerefs) {
String ruleName = ref.getText();
Rule r = g.getRule(ruleName);
if ( r==null && !ref.hasAncestor(ANTLRParser.DOT)) {
// only give error for unqualified rule refs now
errMgr.grammarError(ErrorType.UNDEFINED_RULE_REF,
g.fileName, ref.token, ruleName);
}
GrammarAST arg = (GrammarAST)ref.getChild(0);
if ( arg!=null && r.args==null ) {
errMgr.grammarError(ErrorType.RULE_HAS_NO_ARGS,
g.fileName, ref.token, ruleName);
}
else if ( arg==null && (r!=null&&r.args!=null) ) {
errMgr.grammarError(ErrorType.MISSING_RULE_ARGS,
g.fileName, ref.token, ruleName);
}
}
}
	/** For each qualified ref grammar.rule, verify the named delegate
	 *  grammar is imported and defines that rule. Emits
	 *  NO_SUCH_GRAMMAR_SCOPE / NO_SUCH_RULE_IN_SCOPE.
	 */
	public void checkForQualifiedRuleIssues(Grammar g, List<GrammarAST> qualifiedRuleRefs) {
		for (GrammarAST dot : qualifiedRuleRefs) {
			GrammarAST grammar = (GrammarAST)dot.getChild(0);
			GrammarAST rule = (GrammarAST)dot.getChild(1);
			System.out.println(grammar.getText()+"."+rule.getText()); // NOTE(review): debug output
			Grammar delegate = g.getImportedGrammar(grammar.getText());
			if ( delegate==null ) {
				errMgr.grammarError(ErrorType.NO_SUCH_GRAMMAR_SCOPE,
									g.fileName, grammar.token, grammar.getText(),
									rule.getText());
			}
			else {
				if ( g.getRule(grammar.getText(), rule.getText())==null ) {
					errMgr.grammarError(ErrorType.NO_SUCH_RULE_IN_SCOPE,
										g.fileName, rule.token, grammar.getText(),
										rule.getText());
				}
			}
		}
	}
}

View File

@ -0,0 +1,69 @@
package org.antlr.v4.semantics;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.*;
import java.util.*;
/** Look for errors and deadcode stuff: use/def analysis over rules,
 *  rewrite elements, and actions. All methods are static and stateless.
 */
public class UseDefAnalyzer {
	/** Every element named on the right of a -> rewrite must appear on the
	 *  left side of the alternative (as a rule ref, token, label, or $r for
	 *  the enclosing rule r); otherwise emit REWRITE_ELEMENT_NOT_PRESENT_ON_LHS.
	 */
	public static void checkRewriteElementsPresentOnLeftSide(Grammar g) {
		for (Rule r : g.rules.values()) {
			for (int a=1; a<=r.numberOfAlts; a++) {
				Alternative alt = r.alt[a];
				for (GrammarAST e : alt.rewriteElements) {
					if ( !(alt.ruleRefs.containsKey(e.getText()) ||
						   g.getTokenType(e.getText())!= Token.INVALID_TYPE ||
						   alt.labelDefs.containsKey(e.getText()) ||
						   e.getText().equals(r.name)) ) // $r ok in rule r
					{
						g.tool.errMgr.grammarError(ErrorType.REWRITE_ELEMENT_NOT_PRESENT_ON_LHS,
												   g.fileName, e.token, e.getText());
					}
				}
			}
		}
	}

	// side-effect: updates Alternative with refs in actions
	public static void trackTokenRuleRefsInActions(Grammar g) {
		for (Rule r : g.rules.values()) {
			for (int i=1; i<=r.numberOfAlts; i++) {
				Alternative alt = r.alt[i];
				for (ActionAST a : alt.actions) {
					// ActionSniffer walks the action text for $token/$rule refs
					ActionSniffer sniffer = new ActionSniffer(g, r, alt, a, a.token);
					sniffer.examineAction();
				}
			}
		}
	}

	/** Find all rules reachable from r directly or indirectly for all r in g */
	public static Map<Rule, Set<Rule>> getRuleDependencies(Grammar g) {
		return getRuleDependencies(g, g.rules.values());
	}

	/** Same, restricted to the rules of one lexer mode. */
	public static Map<Rule, Set<Rule>> getRuleDependencies(LexerGrammar g, String modeName) {
		return getRuleDependencies(g, g.modes.get(modeName));
	}

	/** Build rule -> set-of-called-rules from TOKEN_REF nodes in each rule's
	 *  AST. NOTE(review): only TOKEN_REF is scanned, so this appears intended
	 *  for lexer rules (where token refs are rule calls) — confirm before
	 *  using on parser grammars.
	 */
	public static Map<Rule, Set<Rule>> getRuleDependencies(Grammar g, Collection<Rule> rules) {
		Map<Rule, Set<Rule>> dependencies = new HashMap<Rule, Set<Rule>>();
		for (Rule r : rules) {
			List<GrammarAST> tokenRefs = r.ast.getNodesWithType(ANTLRParser.TOKEN_REF);
			for (GrammarAST tref : tokenRefs) {
				Set<Rule> calls = dependencies.get(r);
				if ( calls==null ) {
					calls = new HashSet<Rule>();
					dependencies.put(r, calls);
				}
				calls.add(g.getRule(tref.getText()));
			}
		}
		return dependencies;
	}
}

View File

@ -1,9 +1,8 @@
package org.antlr.v4.tool;
import org.antlr.misc.MultiMap;
import org.antlr.runtime.RecognitionException;
import org.antlr.tool.Rule;
import org.antlr.v4.Tool;
import org.stringtemplate.v4.misc.MultiMap;
/** */
public class LexerGrammar extends Grammar {