adding new files
[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 8660]
parent 061fff09f9, commit 1a43396d3b
@@ -0,0 +1,3 @@
<?xml version="1.0" encoding="UTF-8"?>
<project />
@@ -0,0 +1,73 @@
<?xml version="1.0" encoding="UTF-8"?>
<java version="1.6.0_15" class="java.beans.XMLDecoder">
  <object class="com.jformdesigner.model.FormModel">
    <void property="contentType">
      <string>form/swing</string>
    </void>
    <void property="root">
      <object class="com.jformdesigner.model.FormRoot">
        <void method="add">
          <object class="com.jformdesigner.model.FormWindow">
            <string>javax.swing.JFrame</string>
            <object class="com.jformdesigner.model.FormLayoutManager">
              <class>java.awt.GridLayout</class>
              <void method="setProperty">
                <string>columns</string>
                <int>1</int>
              </void>
            </object>
            <void method="setProperty">
              <string>title</string>
              <string>ANTLR AST Viewer</string>
            </void>
            <void method="add">
              <object class="com.jformdesigner.model.FormContainer">
                <string>javax.swing.JScrollPane</string>
                <object class="com.jformdesigner.model.FormLayoutManager">
                  <class>javax.swing.JScrollPane</class>
                </object>
                <void property="name">
                  <string>scrollPane1</string>
                </void>
                <void method="add">
                  <object class="com.jformdesigner.model.FormComponent">
                    <string>javax.swing.JTree</string>
                    <void property="name">
                      <string>tree</string>
                    </void>
                    <void method="auxiliary">
                      <void method="setProperty">
                        <string>JavaCodeGenerator.variableModifiers</string>
                        <int>1</int>
                      </void>
                    </void>
                  </object>
                </void>
              </object>
            </void>
            <void property="name">
              <string>this</string>
            </void>
          </object>
          <object class="com.jformdesigner.model.FormLayoutConstraints">
            <null/>
            <void method="setProperty">
              <string>location</string>
              <object class="java.awt.Point">
                <int>0</int>
                <int>0</int>
              </object>
            </void>
            <void method="setProperty">
              <string>size</string>
              <object class="java.awt.Dimension">
                <int>400</int>
                <int>300</int>
              </object>
            </void>
          </object>
        </void>
      </object>
    </void>
  </object>
</java>
@@ -0,0 +1,112 @@
package org.antlr.v4.analysis;

import org.antlr.v4.misc.OrderedHashSet;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.tool.Rule;

import java.util.*;

public class LeftRecursionDetector {
    public ATN atn;

    /** Holds a list of cycles (sets of rule names). */
    public List<Set<Rule>> listOfRecursiveCycles = new ArrayList<Set<Rule>>();

    /** Which rule start states have we visited while looking for a single
     *  left-recursion check?
     */
    Set<RuleStartState> rulesVisitedPerRuleCheck = new HashSet<RuleStartState>();

    public LeftRecursionDetector(ATN atn) { this.atn = atn; }

    public void check() {
        for (RuleStartState start : atn.ruleToStartState.values()) {
            //System.out.print("check "+start.rule.name);
            rulesVisitedPerRuleCheck.clear();
            rulesVisitedPerRuleCheck.add(start);
            //FASerializer ser = new FASerializer(atn.g, start);
            //System.out.print(":\n"+ser+"\n");

            check(start.rule, start, new HashSet<ATNState>());
        }
        //System.out.println("cycles="+listOfRecursiveCycles);
        if ( listOfRecursiveCycles.size()>0 ) {
            atn.g.tool.errMgr.leftRecursionCycles(atn.g.fileName, listOfRecursiveCycles);
        }
    }

    /** From state s, look for any transition to a rule that is currently
     *  being traced. When tracing r, visitedPerRuleCheck has r
     *  initially. If you reach a rule stop state, return but notify the
     *  invoking rule that the called rule is nullable. This implies that
     *  invoking rule must look at follow transition for that invoking state.
     *
     *  The visitedStates tracks visited states within a single rule so
     *  we can avoid epsilon-loop-induced infinite recursion here. Keep
     *  filling the cycles in listOfRecursiveCycles and also, as a
     *  side-effect, set leftRecursiveRules.
     */
    public boolean check(Rule enclosingRule, ATNState s, Set<ATNState> visitedStates) {
        if ( s instanceof RuleStopState ) return true;
        if ( visitedStates.contains(s) ) return false;
        visitedStates.add(s);

        //System.out.println("visit "+s);
        int n = s.getNumberOfTransitions();
        boolean stateReachesStopState = false;
        for (int i=0; i<n; i++) {
            Transition t = s.transition(i);
            if ( t instanceof RuleTransition ) {
                RuleTransition rt = (RuleTransition) t;
                Rule r = rt.rule;
                if ( rulesVisitedPerRuleCheck.contains((RuleStartState)t.target) ) {
                    addRulesToCycle(enclosingRule, r);
                }
                else {
                    // must visit if not already visited; mark target, pop when done
                    rulesVisitedPerRuleCheck.add((RuleStartState)t.target);
                    // send new visitedStates set per rule invocation
                    boolean nullable = check(r, t.target, new HashSet<ATNState>());
                    // we're back from visiting that rule
                    rulesVisitedPerRuleCheck.remove((RuleStartState)t.target);
                    if ( nullable ) {
                        stateReachesStopState |= check(enclosingRule, rt.followState, visitedStates);
                    }
                }
            }
            else if ( t.isEpsilon() ) {
                stateReachesStopState |= check(enclosingRule, t.target, visitedStates);
            }
            // else ignore non-epsilon transitions
        }
        return stateReachesStopState;
    }

    /** enclosingRule calls targetRule. Find the cycle containing
     *  the target and add the caller. Find the cycle containing the caller
     *  and add the target. If no cycles contain either, then create a new
     *  cycle.
     */
    protected void addRulesToCycle(Rule enclosingRule, Rule targetRule) {
        //System.err.println("left-recursion to "+targetRule.name+" from "+enclosingRule.name);
        boolean foundCycle = false;
        for (int i = 0; i < listOfRecursiveCycles.size(); i++) {
            Set<Rule> rulesInCycle = listOfRecursiveCycles.get(i);
            // ensure both rules are in same cycle
            if ( rulesInCycle.contains(targetRule) ) {
                rulesInCycle.add(enclosingRule);
                foundCycle = true;
            }
            if ( rulesInCycle.contains(enclosingRule) ) {
                rulesInCycle.add(targetRule);
                foundCycle = true;
            }
        }
        if ( !foundCycle ) {
            Set<Rule> cycle = new OrderedHashSet<Rule>();
            cycle.add(targetRule);
            cycle.add(enclosingRule);
            listOfRecursiveCycles.add(cycle);
        }
    }
}
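To make the cycle bookkeeping concrete, here is a minimal standalone sketch of the same merge logic as addRulesToCycle, run on plain rule names instead of Rule objects. The class and names are illustrative only, not part of this commit.

    import java.util.*;

    class CycleMergeSketch {
        static List<Set<String>> cycles = new ArrayList<Set<String>>();

        // Same shape as addRulesToCycle: caller invokes target left-recursively.
        static void addRulesToCycle(String caller, String target) {
            boolean found = false;
            for (Set<String> cycle : cycles) {
                if ( cycle.contains(target) ) { cycle.add(caller); found = true; }
                if ( cycle.contains(caller) ) { cycle.add(target); found = true; }
            }
            if ( !found ) cycles.add(new LinkedHashSet<String>(Arrays.asList(target, caller)));
        }

        public static void main(String[] args) {
            addRulesToCycle("a", "b");  // a -> b creates cycle {b, a}
            addRulesToCycle("b", "a");  // b -> a merges into the existing cycle
            System.out.println(cycles); // prints [[b, a]]
        }
    }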
@@ -0,0 +1,46 @@
package org.antlr.v4.codegen;

import org.antlr.v4.tool.Grammar;
import org.stringtemplate.v4.ST;

public class CodeGenPipeline {
    Grammar g;
    public CodeGenPipeline(Grammar g) {
        this.g = g;
    }
    public void process() {
        CodeGenerator gen = new CodeGenerator(g);

//        for (Rule r : g.rules.values()) {
//            for (int i=1; i<=r.numberOfAlts; i++) {
//                Alternative alt = r.alt[i];
//                for (String ref : alt.tokenRefs.keySet()) {
//                    if ( alt.tokenRefsInActions.get(ref)!=null ) {
//                        String label = gen.target.getImplicitTokenLabel(ast);
//                        alt.implicitTokenLabels.put(, label);
//                    }
//                }
//            }
//        }

        ST outputFileST = gen.generate();
        gen.write(outputFileST);

//        if ( g.isLexer() ) processLexer();
//        else if ( g.isParser() ) processParser();
    }

    void processParser() {
        CodeGenerator gen = new CodeGenerator(g);
        ST outputFileST = gen.generate();
        gen.write(outputFileST);
    }

    void processLexer() {
        CodeGenerator gen = new CodeGenerator(g);
        ST outputFileST = gen.generate();
        gen.write(outputFileST);
    }
}
@@ -0,0 +1,115 @@
package org.antlr.v4.codegen;

import org.antlr.v4.codegen.model.*;
import org.antlr.v4.tool.*;
import org.stringtemplate.v4.ST;

import java.util.*;

/** */
public class LexerFactory extends OutputModelFactory {
    public LexerFactory(CodeGenerator gen) {
        super(gen);
    }

    @Override
    public OutputModelObject buildOutputModel() {
        return new LexerFile(this, gen.getRecognizerFileName());
    }

    public ST build() {
        LexerGrammar lg = (LexerGrammar)gen.g;
        ST fileST = gen.templates.getInstanceOf("LexerFile");
        ST lexerST = gen.templates.getInstanceOf("Lexer");
        lexerST.add("lexerName", gen.g.getRecognizerName());
        lexerST.add("modes", lg.modes.keySet());
        fileST.add("fileName", gen.getRecognizerFileName());
        fileST.add("lexer", lexerST);

        SerializedATN atn = new SerializedATN(this, lg.atn);

        for (String modeName : lg.modes.keySet()) { // for each mode

//            injectDFAs(lg, lexerST, modeName);
//            LexerCompiler comp = new LexerCompiler(lg);
//            CompiledATN atn = comp.compileMode(modeName);
//            injectPDAs(atn, lexerST, modeName);
        }

        LinkedHashMap<String,Integer> tokens = new LinkedHashMap<String,Integer>();
        for (String t : gen.g.tokenNameToTypeMap.keySet()) {
            Integer ttype = gen.g.tokenNameToTypeMap.get(t);
            if ( ttype>0 ) tokens.put(t, ttype);
        }
        lexerST.add("tokens", tokens);
        lexerST.add("namedActions", gen.g.namedActions);

        return fileST;
    }

//    void injectDFAs(LexerGrammar lg, ST lexerST, String modeName) {
//        System.out.println("inject dfa for "+modeName);
//        DFA dfa = lg.modeToDFA.get(modeName);
//        ST dfaST = gen.templates.getInstanceOf("DFA");
//        dfaST.add("name", modeName);
//        CompiledDFA obj = new CompiledDFA(dfa);
//        dfaST.add("model", obj);
////        ST actionST = gen.templates.getInstanceOf("actionMethod");
////        actionST.add("name", modeName);
////        actionST.add("actions", obj.actions);
////        lexerST.add("actions", actionST);
//        lexerST.add("dfas", dfaST);
//    }

    /*
    void injectPDAs(CompiledATN atn, ST lexerST, String modeName) {
        ST pdaST = gen.templates.getInstanceOf("ATN");
        for (Rule r : atn.ruleActions.keySet()) {
            Set<Token> actionTokens = atn.ruleActions.keySet(r);
            ST actionST = gen.templates.getInstanceOf("actionMethod");
            actionST.add("name", r.name);
            for (Token t : actionTokens) {
                actionST.add("actions", Misc.strip(t.getText(),1));
                actionST.add("ruleIndex", r.index);
            }
            pdaST.add("actions", actionST);
            lexerST.add("actions", actionST);
        }
        for (Rule r : atn.ruleSempreds.keySet()) {
            Set<Token> sempredTokens = atn.ruleSempreds.keySet(r);
            ST sempredST = gen.templates.getInstanceOf("sempredMethod");
            sempredST.add("name", r.name);
            sempredST.add("ruleIndex", r.index);
            for (Token t : sempredTokens) {
                sempredST.add("preds", t.getText());
            }
            pdaST.add("sempreds", sempredST);
            lexerST.add("sempreds", sempredST);
        }
        pdaST.add("name", modeName);
        pdaST.add("model", atn);
        lexerST.add("atns", pdaST);
    }
    */

    // lexers don't do anything with rules etc...

    @Override
    public List<SrcOp> ruleRef(GrammarAST ID, GrammarAST label, GrammarAST args) {
        return null;
    }

    @Override
    public List<SrcOp> tokenRef(GrammarAST ID, GrammarAST label, GrammarAST args) {
        return null;
    }

    @Override
    public List<SrcOp> stringRef(GrammarAST ID, GrammarAST label) {
        return null;
    }

    @Override
    public void defineBitSet(BitSetDecl b) {
    }
}
@@ -0,0 +1,117 @@
package org.antlr.v4.codegen;

import org.antlr.v4.Tool;
import org.antlr.v4.codegen.model.OutputModelObject;
import org.antlr.v4.tool.ErrorType;
import org.stringtemplate.v4.*;
import org.stringtemplate.v4.compiler.FormalArgument;

import java.lang.reflect.Field;
import java.util.*;

/** Convert an output model tree to template hierarchy by walking
 *  the output model. Each output model object has a corresponding template
 *  of the same name. An output model object can have nested objects.
 *  We identify those nested objects by the list of arguments in the template
 *  definition. For example, here is the definition of the parser template:
 *
 *  Parser(parser, scopes, funcs) ::= <<...>>
 *
 *  The first template argument is always the output model object from which
 *  this walker will create the template. Any other arguments identify
 *  the field names within the output model object of nested model objects.
 *  So, in this case, template Parser is saying that output model object
 *  Parser has two fields the walker should chase, called scopes and funcs.
 *
 *  This simple mechanism means we don't have to include code in every
 *  output model object that says how to create the corresponding template.
 */
public class OutputModelWalker {
    Tool tool;
    STGroup templates;

    public OutputModelWalker(Tool tool, STGroup templates) {
        this.tool = tool;
        this.templates = templates;
    }

    public ST walk(OutputModelObject omo) {
        // CREATE TEMPLATE FOR THIS OUTPUT OBJECT
        String templateName = omo.getClass().getSimpleName();
        if ( templateName == null ) {
            tool.errMgr.toolError(ErrorType.NO_MODEL_TO_TEMPLATE_MAPPING, omo.getClass().getSimpleName());
            return new ST("["+templateName+" invalid]");
        }
        ST st = templates.getInstanceOf(templateName);
        if ( st == null ) {
            tool.errMgr.toolError(ErrorType.CODE_GEN_TEMPLATES_INCOMPLETE, templateName);
            return new ST("["+templateName+" invalid]");
        }
        if ( st.impl.formalArguments == null ) {
            tool.errMgr.toolError(ErrorType.CODE_TEMPLATE_ARG_ISSUE, templateName, "<none>");
            return st;
        }

        Map<String,FormalArgument> formalArgs = st.impl.formalArguments;
        Set<String> argNames = formalArgs.keySet();
        Iterator<String> arg_it = argNames.iterator();

        // PASS IN OUTPUT MODEL OBJECT TO TEMPLATE
        String modelArgName = arg_it.next(); // ordered so this is first arg
        st.add(modelArgName, omo);

        // COMPUTE STs FOR EACH NESTED MODEL OBJECT NAMED AS ARG BY TEMPLATE
        while ( arg_it.hasNext() ) {
            String fieldName = arg_it.next();
            if ( fieldName.equals("actions") ) {
                System.out.println("computing ST for field "+fieldName+" of "+omo.getClass());
            }
            try {
                Field fi = omo.getClass().getField(fieldName);
                Object o = fi.get(omo);
                if ( o instanceof OutputModelObject ) { // SINGLE MODEL OBJECT?
                    OutputModelObject nestedOmo = (OutputModelObject)o;
                    ST nestedST = walk(nestedOmo);
                    st.add(fieldName, nestedST);
                }
                else if ( o instanceof Collection || o instanceof OutputModelObject[] ) {
                    // LIST OF MODEL OBJECTS?
                    if ( o instanceof OutputModelObject[] ) {
                        o = Arrays.asList((OutputModelObject[])o);
                    }
                    Collection<? extends OutputModelObject> nestedOmos = (Collection)o;
                    for (OutputModelObject nestedOmo : nestedOmos) {
                        if ( nestedOmo==null ) {
                            System.out.println("collection has nulls: "+nestedOmos);
                        }
                        ST nestedST = walk(nestedOmo);
                        st.add(fieldName, nestedST);
                    }
                }
                else if ( o instanceof Map ) {
                    Map<Object, OutputModelObject> nestedOmoMap = (Map<Object, OutputModelObject>)o;
                    Map<Object, ST> m = new HashMap<Object, ST>();
                    for (Object key : nestedOmoMap.keySet()) {
                        ST nestedST = walk(nestedOmoMap.get(key));
                        m.put(key, nestedST);
                    }
                    st.add(fieldName, m);
                }
                else if ( o!=null ) {
                    tool.errMgr.toolError(ErrorType.CODE_TEMPLATE_ARG_ISSUE, templateName, fieldName);
                }
            }
            catch (NoSuchFieldException nsfe) {
                tool.errMgr.toolError(ErrorType.CODE_TEMPLATE_ARG_ISSUE, templateName, nsfe.getMessage());
            }
            catch (IllegalAccessException iae) {
                tool.errMgr.toolError(ErrorType.CODE_TEMPLATE_ARG_ISSUE, templateName, fieldName);
            }
        }
        //st.impl.dump();
        return st;
    }

}
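As a concrete instance of the naming convention described in the class comment, take the LexerFile model object added later in this commit (public fields fileName, lexer, namedActions). Its matching template lives in the .stg files, which are not part of this diff, so the signature sketched below is an assumption:

    LexerFile(file, lexer, namedActions) ::= <<
    ... <file.fileName> ... <lexer> ...
    >>

walk() passes the LexerFile object itself through the first argument (file), then reflectively reads the public lexer and namedActions fields, walks each nested model object, and adds the resulting sub-templates under those argument names.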
@@ -0,0 +1,131 @@
tree grammar SourceGenTriggers;
options {
    language     = Java;
    tokenVocab   = ANTLRParser;
    ASTLabelType = GrammarAST;
}

@header {
package org.antlr.v4.codegen;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.codegen.src.*;
import org.antlr.v4.tool.*;
import java.util.Collections;
import java.util.Map;
import java.util.HashMap;
}

@members {
    // TODO: identical grammar to ATNBytecodeTriggers; would be nice to combine
    public OutputModelFactory factory;
    public SourceGenTriggers(TreeNodeStream input, OutputModelFactory factory) {
        this(input);
        this.factory = factory;
    }
}

block[GrammarAST label, GrammarAST ebnfRoot] returns [SrcOp omo]
    :   ^( blk=BLOCK (^(OPTIONS .+))?
            {List<CodeBlock> alts = new ArrayList<CodeBlock>();}
            ( alternative {alts.add($alternative.omo);} )+
        )
        {
        if ( alts.size()==1 && ebnfRoot==null) return alts.get(0);
        if ( ebnfRoot==null ) {
            $omo = factory.getChoiceBlock((BlockAST)$blk, alts);
        }
        else {
            $omo = factory.getEBNFBlock($ebnfRoot, alts);
        }
        }
    ;

alternative returns [CodeBlock omo]
@init {
    List<SrcOp> elems = new ArrayList<SrcOp>();
    if ( ((AltAST)$start).alt!=null ) factory.currentAlt = ((AltAST)$start).alt;
}
    :   ^(ALT_REWRITE a=alternative .)
    |   ^(ALT EPSILON) {$omo = factory.epsilon();}
    |   ^( ALT ( element {elems.addAll($element.omos);} )+ ) {$omo = factory.alternative(elems);}
    ;

element returns [List<SrcOp> omos]
    :   labeledElement {$omos = $labeledElement.omos;}
    |   atom[null] {$omos = $atom.omos;}
    |   ebnf {$omos = Utils.list($ebnf.omo);}
    |   ACTION {$omos = Utils.list(factory.action($ACTION));}
    |   FORCED_ACTION {$omos = Utils.list(factory.forcedAction($FORCED_ACTION));}
    |   SEMPRED {$omos = Utils.list(factory.sempred($SEMPRED));}
    |   GATED_SEMPRED
    |   treeSpec
    ;

labeledElement returns [List<SrcOp> omos]
    :   ^(ASSIGN ID atom[$ID] ) {$omos = $atom.omos;}
    |   ^(ASSIGN ID block[$ID,null]) {$omos = Utils.list($block.omo);}
    |   ^(PLUS_ASSIGN ID atom[$ID]) {$omos = $atom.omos;}
    |   ^(PLUS_ASSIGN ID block[$ID,null]) {$omos = Utils.list($block.omo);}
    ;

treeSpec returns [SrcOp omo]
    :   ^(TREE_BEGIN (e=element )+)
    ;

ebnf returns [SrcOp omo]
    :   ^(astBlockSuffix block[null,null])
    |   ^(OPTIONAL block[null,$OPTIONAL]) {$omo = $block.omo;}
    |   ^(CLOSURE block[null,$CLOSURE]) {$omo = $block.omo;}
    |   ^(POSITIVE_CLOSURE block[null,$POSITIVE_CLOSURE]) {$omo = $block.omo;}
    |   block[null, null] {$omo = $block.omo;}
    ;

astBlockSuffix
    :   ROOT
    |   IMPLIES
    |   BANG
    ;

// TODO: combine ROOT/BANG into one then just make new op ref'ing return value of atom/terminal...
// TODO: same for NOT
atom[GrammarAST label] returns [List<SrcOp> omos]
    :   ^(ROOT range[label])
    |   ^(BANG range[label]) {$omos = $range.omos;}
    |   ^(ROOT notSet[label])
    |   ^(BANG notSet[label]) {$omos = $notSet.omos;}
    |   notSet[label]
    |   range[label] {$omos = $range.omos;}
    |   ^(DOT ID terminal[label])
    |   ^(DOT ID ruleref[label])
    |   ^(WILDCARD .)
    |   WILDCARD
    |   terminal[label] {$omos = $terminal.omos;}
    |   ruleref[label] {$omos = $ruleref.omos;}
    ;

notSet[GrammarAST label] returns [List<SrcOp> omos]
    :   ^(NOT terminal[label])
    |   ^(NOT block[label,null])
    ;

ruleref[GrammarAST label] returns [List<SrcOp> omos]
    :   ^(ROOT ^(RULE_REF ARG_ACTION?))
    |   ^(BANG ^(RULE_REF ARG_ACTION?)) {$omos = factory.ruleRef($RULE_REF, $label, $ARG_ACTION);}
    |   ^(RULE_REF ARG_ACTION?) {$omos = factory.ruleRef($RULE_REF, $label, $ARG_ACTION);}
    ;

range[GrammarAST label] returns [List<SrcOp> omos]
    :   ^(RANGE a=STRING_LITERAL b=STRING_LITERAL)
    ;

terminal[GrammarAST label] returns [List<SrcOp> omos]
    :   ^(STRING_LITERAL .) {$omos = factory.stringRef($STRING_LITERAL, $label);}
    |   STRING_LITERAL {$omos = factory.stringRef($STRING_LITERAL, $label);}
    |   ^(TOKEN_REF ARG_ACTION .) {$omos = factory.tokenRef($TOKEN_REF, $label, $ARG_ACTION);}
    |   ^(TOKEN_REF .) {$omos = factory.tokenRef($TOKEN_REF, $label, null);}
    |   TOKEN_REF {$omos = factory.tokenRef($TOKEN_REF, $label, null);}
    |   ^(ROOT terminal[label])
    |   ^(BANG terminal[label])
    ;
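A minimal driver sketch for the generated tree walker, assuming the usual ANTLR v3 tree-parser conventions (a CommonTreeNodeStream over the grammar AST, rule methods that throw RecognitionException, a single declared return value returned directly). The exact generated signatures are assumptions, not part of this commit.

    // Sketch only: generate source ops for one BLOCK subtree of a rule.
    static SrcOp genBlock(GrammarAST blockAST, OutputModelFactory factory) throws RecognitionException {
        CommonTreeNodeStream nodes = new CommonTreeNodeStream(blockAST); // from org.antlr.runtime.tree
        SourceGenTriggers walker = new SourceGenTriggers(nodes, factory);
        return walker.block(null, null); // no label, not an EBNF root
    }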
@@ -0,0 +1,176 @@
package org.antlr.v4.codegen;

import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.tool.*;
import org.stringtemplate.v4.ST;

import java.io.IOException;

/** */
public class Target {
    /** For pure strings of Java 16-bit unicode char, how can we display
     *  it in the target language as a literal? Useful for dumping
     *  predicates and such that may refer to chars that need to be escaped
     *  when represented as strings. Also, templates need to be escaped so
     *  that the target language can hold them as a string.
     *
     *  I have defined (via the constructor) the set of typical escapes,
     *  but your Target subclass is free to alter the translated chars or
     *  add more definitions. This is nonstatic so each target can have
     *  a different set in memory at the same time.
     */
    protected String[] targetCharValueEscape = new String[255];

    public Target() {
        targetCharValueEscape['\n'] = "\\n";
        targetCharValueEscape['\r'] = "\\r";
        targetCharValueEscape['\t'] = "\\t";
        targetCharValueEscape['\b'] = "\\b";
        targetCharValueEscape['\f'] = "\\f";
        targetCharValueEscape['\\'] = "\\\\";
        targetCharValueEscape['\''] = "\\'";
        targetCharValueEscape['"'] = "\\\"";
    }

    protected void genRecognizerFile(CodeGenerator generator,
                                     Grammar g,
                                     ST outputFileST)
        throws IOException
    {
        String fileName = generator.getRecognizerFileName();
        generator.write(outputFileST, fileName);
    }

    protected void genRecognizerHeaderFile(CodeGenerator generator,
                                           Grammar g,
                                           ST headerFileST,
                                           String extName) // e.g., ".h"
        throws IOException
    {
        // no header file by default
    }

    /** Get a meaningful name for a token type useful during code generation.
     *  Literals without associated names are converted to the string equivalent
     *  of their integer values. Used to generate x==ID and x==34 type comparisons
     *  etc... Essentially we are looking for the most obvious way to refer
     *  to a token type in the generated code. If in the lexer, return the
     *  char literal translated to the target language. For example, ttype=10
     *  will yield '\n' from the getTokenDisplayName method. That must
     *  be converted to the target language's literals. For most C-derived
     *  languages no translation is needed.
     */
    public String getTokenTypeAsTargetLabel(Grammar g, int ttype) {
        if ( g.getType() == ANTLRParser.LEXER ) {
//            String name = g.getTokenDisplayName(ttype);
//            return getTargetCharLiteralFromANTLRCharLiteral(this,name);
        }
        String name = g.getTokenDisplayName(ttype);
        if ( name==null ) {
            System.out.println("null token?");
        }
        // If name is a literal, return the token type instead
        if ( name.charAt(0)=='\'' ) {
            return String.valueOf(ttype);
        }
        return name;
    }

    public String[] getTokenTypesAsTargetLabels(Grammar g, int[] ttypes) {
        String[] labels = new String[ttypes.length];
        for (int i=0; i<ttypes.length; i++) {
            labels[i] = getTokenTypeAsTargetLabel(g, ttypes[i]);
        }
        return labels;
    }

    /** Convert from an ANTLR char literal found in a grammar file to
     *  an equivalent char literal in the target language. For most
     *  languages, this means leaving 'x' as 'x'. Actually, we need
     *  to escape '\u000A' so that it doesn't get converted to \n by
     *  the compiler. Convert the literal to the char value and then
     *  to an appropriate target char literal.
     *
     *  Expect single quotes around the incoming literal.
     *  TODO: unused and should call CharSupport.getANTLRCharLiteralForChar anyway
     */
    public String getTargetCharLiteralCharValue(int c) {
        StringBuffer buf = new StringBuffer();
        buf.append('\'');
        if ( c< Grammar.MIN_CHAR_VALUE ) return "'\u0000'";
        if ( c<targetCharValueEscape.length &&
             targetCharValueEscape[c]!=null )
        {
            buf.append(targetCharValueEscape[c]);
        }
        else if ( Character.UnicodeBlock.of((char)c)==
                  Character.UnicodeBlock.BASIC_LATIN &&
                  !Character.isISOControl((char)c) )
        {
            // normal char
            buf.append((char)c);
        }
        else {
            // must be something unprintable...use \\uXXXX
            // turn on the bit above max "\\uFFFF" value so that we pad with zeros
            // then only take last 4 digits
            String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
            buf.append("\\u");
            buf.append(hex);
        }

        buf.append('\'');
        return buf.toString();
    }

    /** Convert long to 0xNNNNNNNNNNNNNNNN by default for spitting out
     *  with bitsets. I.e., convert bytes to hex string.
     */
    public String getTarget64BitStringFromValue(long word) {
        int numHexDigits = 8*2;
        StringBuffer buf = new StringBuffer(numHexDigits+2);
        buf.append("0x");
        String digits = Long.toHexString(word);
        digits = digits.toUpperCase();
        int padding = numHexDigits - digits.length();
        // pad left with zeros
        for (int i=1; i<=padding; i++) {
            buf.append('0');
        }
        buf.append(digits);
        return buf.toString();
    }

    /** Assume 16-bit char */
    public String encodeIntAsCharEscape(int v) {
        if ( v>=0 && v<=127 ) {
            String oct = Integer.toOctalString(v);
            if ( oct.length()<3 ) oct = '0'+oct;
            return "\\"+ oct;
        }
        String hex = Integer.toHexString(v|0x10000).substring(1,5);
        return "\\u"+hex;
    }

    public String getLoopLabel(GrammarAST ast) {
        return "loop"+ ast.token.getTokenIndex();
    }

    public String getLoopCounter(GrammarAST ast) {
        return "cnt"+ ast.token.getTokenIndex();
    }

    public String getListLabel(String label) { return label+"_list"; }
    public String getRuleFunctionContextStructName(Rule r) {
        if ( r.args==null && r.retvals==null ) return "ParserRuleContext";
        return r.name+"_ctx";
    }
    public String getRuleDynamicScopeStructName(String ruleName) { return ruleName+"_scope"; }
    public String getGlobalDynamicScopeStructName(String scopeName) { return scopeName; }

    // should be same for all refs to same token like $ID within single rule function
    public String getImplicitTokenLabel(String tokenName) { return "_t"+tokenName; }
    public String getImplicitRuleLabel(String ruleName) { return "_r"+ruleName; }

    public int getInlineTestsVsBitsetThreshold() { return 20; }
}
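A few worked values for the escaping helpers above, derived by reading the methods (illustrative, not output captured from the tool):

    Target t = new Target();
    t.encodeIntAsCharEscape(10);            // "\012"   (0..127: zero-padded octal escape)
    t.encodeIntAsCharEscape(0x3BB);         // "\u03bb" (everything else: 4-digit unicode escape)
    t.getTarget64BitStringFromValue(255L);  // "0x00000000000000FF" (left-padded to 16 hex digits)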
@@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;

/** */
public class AttributeDecl extends Decl {
    public AttributeDecl(OutputModelFactory factory, String name, String decl) {
        super(factory, name, decl);
    }
}

@@ -0,0 +1,24 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.misc.LABitSet;

import java.util.*;

/** */
public class BitSetDecl extends Decl {
    public LABitSet fset; // runtime bitset
    public List<String> hexWords;
    public BitSetDecl(OutputModelFactory factory, String name, IntervalSet fset) {
        super(factory, name);
        this.fset = fset.toRuntimeBitSet();
        long[] words = this.fset.bits;

        hexWords = new ArrayList<String>();
        for (long w : words) {
            String h = factory.gen.target.getTarget64BitStringFromValue(w);
            hexWords.add(h);
        }
    }
}

@@ -0,0 +1,13 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.Attribute;

import java.util.Collection;

/** */
public class DynamicScopeStruct extends StructDecl {
    public DynamicScopeStruct(OutputModelFactory factory, String name, Collection<Attribute> attrList) {
        super(factory, name, attrList);
    }
}
@@ -0,0 +1,62 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.*;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.atn.RuleTransition;
import org.antlr.v4.tool.*;

import java.util.*;

/** */
public class InvokeRule extends RuleElement implements LabeledOp {
    public String name;
    public List<String> labels = new ArrayList<String>();
    public String argExprs;
    public BitSetDecl follow;
    public String ctxName;

    public InvokeRule(OutputModelFactory factory, GrammarAST ast, GrammarAST labelAST) {
        super(factory, ast);
        if ( ast.atnState!=null ) {
            RuleTransition ruleTrans = (RuleTransition)ast.atnState.transition(0);
            stateNumber = ast.atnState.stateNumber;
        }

        this.name = ast.getText();
        Rule r = factory.g.getRule(name);
        ctxName = factory.gen.target.getRuleFunctionContextStructName(r);

        if ( labelAST!=null ) {
            String label = labelAST.getText();
            labels.add(label);
            RuleContextDecl d = new RuleContextDecl(factory,label,ctxName);
            factory.currentRule.peek().addDecl(d);
            if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
//                TokenListDecl l = new TokenListDecl(factory.getListLabel(label));
//                factory.currentRule.peek().addDecl(l);
            }
        }
        if ( ast.getChildCount()>0 ) {
            argExprs = ast.getChild(0).getText();
        }

        // If action refs rule as rulename not label, we need to define implicit label
        if ( factory.currentAlt.ruleRefsInActions.containsKey(ast.getText()) ) {
            String label = factory.gen.target.getImplicitRuleLabel(ast.getText());
            labels.add(label);
            RuleContextDecl d = new RuleContextDecl(factory,label,ctxName);
            factory.currentRule.peek().addDecl(d);
        }

//        LinearApproximator approx = new LinearApproximator(factory.g, ATN.INVALID_DECISION_NUMBER);
//        RuleTransition call = (RuleTransition)ast.atnState.transition(0);
//        IntervalSet fset = approx.FIRST(call.followState);
//        System.out.println("follow rule ref "+name+"="+fset);
//        follow = factory.createFollowBitSet(ast, fset);
//        factory.defineBitSet(follow);
    }

    public List<String> getLabels() {
        return labels;
    }
}
@@ -0,0 +1,18 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;

import java.util.List;

public abstract class LL1Choice extends Choice {
    /** Token names for each alt 0..n-1 */
    public List<String[]> altLook;
    public ThrowNoViableAlt error;

    public LL1Choice(OutputModelFactory factory, GrammarAST blkAST,
                     List<CodeBlock> alts)
    {
        super(factory, blkAST, alts);
    }
}

@@ -0,0 +1,17 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;

import java.util.List;

/** An optional block is just an alternative block where the last alternative
 *  is epsilon. The analysis takes care of adding to the empty alternative.
 *
 *  (A | B | C)?
 */
public class LL1OptionalBlock extends LL1AltBlock {
    public LL1OptionalBlock(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlock> alts) {
        super(factory, blkAST, alts);
    }
}
@@ -0,0 +1,52 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.tool.GrammarAST;

import java.util.List;

/** */
public class LL1PlusBlock extends LL1Loop {
    /** Token names for each alt 0..n-1 */
    public List<String[]> altLook;

    public Sync iterationSync;
    public String loopLabel;
    public String loopCounterVar;
    public String[] exitLook;
    public SrcOp loopExpr;
    public ThrowNoViableAlt error;

    public LL1PlusBlock(OutputModelFactory factory, GrammarAST plusRoot, List<CodeBlock> alts) {
        super(factory, plusRoot, alts);

        PlusBlockStartState blkStart = (PlusBlockStartState)plusRoot.atnState;
//        BlockStartState blkStart = (BlockStartState)plus.transition(0).target;

        this.decision = blkStart.decision;
        /** Lookahead for each alt 1..n */
//        IntervalSet[] altLookSets = LinearApproximator.getLL1LookaheadSets(dfa);
        IntervalSet[] altLookSets = factory.g.decisionLOOK.get(decision);
        altLook = getAltLookaheadAsStringLists(altLookSets);
        IntervalSet all = new IntervalSet();
        for (IntervalSet s : altLookSets) all.addAll(s);

        this.error = new ThrowNoViableAlt(factory, plusRoot, all);

        loopExpr = addCodeForLoopLookaheadTempVar(all);

        loopLabel = factory.gen.target.getLoopLabel(plusRoot);
        loopCounterVar = factory.gen.target.getLoopCounter(plusRoot);

        IntervalSet exitLookSet = altLookSets[altLookSets.length-1];
        this.exitLook = factory.gen.target.getTokenTypesAsTargetLabels(factory.g,
                                                                       exitLookSet.toArray());

        //IntervalSet iterationExpected = (IntervalSet)loopBackLook.or(exitLookSet);
//        this.sync = new Sync(factory, plusRoot, loopBackLook, decision, "enter");
//        this.iterationSync = new Sync(factory, plusRoot, iterationExpected, decision, "iter");
//        this.earlyExitError = new ThrowEarlyExitException(factory, plusRoot, null);
    }
}
@@ -0,0 +1,29 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.tool.GrammarAST;

import java.util.List;

/** */
public class LL1PlusBlockSingleAlt extends LL1Loop {
    public Sync iterationSync;

    public LL1PlusBlockSingleAlt(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlock> alts) {
        super(factory, blkAST, alts);

        PlusBlockStartState plus = (PlusBlockStartState)blkAST.atnState;
        this.decision = plus.loopBackState.decision;
        IntervalSet[] altLookSets = factory.g.decisionLOOK.get(decision);
        IntervalSet exitLook = altLookSets[altLookSets.length-1];

        IntervalSet loopBackLook = altLookSets[1];
        loopExpr = addCodeForLoopLookaheadTempVar(loopBackLook);

        this.sync = new Sync(factory, blkAST, loopBackLook, decision, "enter");
        IntervalSet iterationExpected = (IntervalSet) loopBackLook.or(exitLook);
        iterationSync = new Sync(factory, blkAST, iterationExpected, decision, "iter");
    }
}

@@ -0,0 +1,24 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;

import java.util.*;

public class LexerFile extends OutputModelObject {
    public String fileName;
    public Lexer lexer;
    public Map<String, Action> namedActions;

    public LexerFile(OutputModelFactory factory, String fileName) {
        super(factory);
        this.fileName = fileName;
        factory.file = this;
        namedActions = new HashMap<String, Action>();
        for (String name : factory.gen.g.namedActions.keySet()) {
            GrammarAST ast = factory.gen.g.namedActions.get(name);
            namedActions.put(name, new Action(factory, ast));
        }
        lexer = new Lexer(factory, this);
    }
}
@@ -0,0 +1,16 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;

import java.util.List;

public class Loop extends Choice {
    public int exitAlt;
    public Loop(OutputModelFactory factory,
                GrammarAST blkOrEbnfRootAST,
                List<CodeBlock> alts)
    {
        super(factory, blkOrEbnfRootAST, alts);
    }
}

@@ -0,0 +1,19 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.GrammarAST;

/** */
public abstract class OutputModelObject {
    public OutputModelFactory factory;
    public GrammarAST ast;

    public OutputModelObject() {;}

    public OutputModelObject(OutputModelFactory factory) { this.factory = factory; }

    public OutputModelObject(OutputModelFactory factory, GrammarAST ast) {
        this.factory = factory;
        this.ast = ast;
    }
}
@@ -0,0 +1,21 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.tool.GrammarAST;

import java.util.List;

public class PlusBlock extends Loop {
    public ThrowNoViableAlt error;
    public PlusBlock(OutputModelFactory factory,
                     GrammarAST ebnfRootAST,
                     List<CodeBlock> alts)
    {
        super(factory, ebnfRootAST, alts);
        PlusLoopbackState loop = ((PlusBlockStartState)ebnfRootAST.atnState).loopBackState;
        this.error = new ThrowNoViableAlt(factory, ebnfRootAST, null);
        decision = loop.decision;
        exitAlt = alts.size()+1;
    }
}

@@ -0,0 +1,20 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.runtime.atn.ATN;

import java.util.*;

public class SerializedATN extends OutputModelObject {
    // TODO: make this into a kind of decl or multiple?
    public List<String> serialized;
    public SerializedATN(OutputModelFactory factory, ATN atn) {
        super(factory);
        List<Integer> data = atn.getSerialized();
        serialized = new ArrayList<String>(data.size());
        for (int c : data) {
            String encoded = factory.gen.target.encodeIntAsCharEscape(c);
            serialized.add(encoded);
        }
    }
}
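The model above only stores the textual escape strings; how they get back to integers is up to the templates and the generated recognizer, which are not part of this diff. A plausible sketch of the round trip, with all names assumed:

    // Template side might join the escapes into one string literal, e.g.:
    //     public static final String _serializedATN = "<serialized; separator={}>";
    // The Java compiler turns those escapes back into chars, so the generated
    // recognizer can recover the int data with a simple widening copy:
    char[] chars = _serializedATN.toCharArray();
    int[] data = new int[chars.length];
    for (int i = 0; i < chars.length; i++) data[i] = chars[i]; // each char holds one serialized int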
@@ -0,0 +1,26 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.tool.*;

import java.util.ArrayList;

/** */
public class StartRuleFunction extends RuleFunction {
    public StartRuleFunction(OutputModelFactory factory, Rule r) {
        super(factory);
        this.name = r.name;
        if ( r.modifiers!=null && r.modifiers.size()>0 ) {
            this.modifiers = new ArrayList<String>();
            for (GrammarAST t : r.modifiers) modifiers.add(t.getText());
        }
        modifiers = Utils.nodesToStrings(r.modifiers);

        ctxType = factory.gen.target.getRuleFunctionContextStructName(r);

        if ( r.args!=null ) {
            args = r.args.attributes.values();
        }
    }
}

@@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;

/** */
public class TokenTypeDecl extends Decl {
    public TokenTypeDecl(OutputModelFactory factory, String name) {
        super(factory, name);
    }
}

@@ -0,0 +1,7 @@
package org.antlr.v4.codegen.model.actions;

import org.antlr.v4.codegen.model.OutputModelObject;

/** */
public class ActionChunk extends OutputModelObject {
}

@@ -0,0 +1,21 @@
package org.antlr.v4.codegen.model.actions;

import java.util.List;

/** */
public class DynScopeAttrRef_index extends DynScopeAttrRef {
    public List<ActionChunk> indexChunks;
    public DynScopeAttrRef_index(String scope, String attr, List<ActionChunk> indexChunks) {
        super(scope, attr);
        this.indexChunks = indexChunks;
    }

//    @Override
//    public List<String> getChildren() {
//        final List<String> sup = super.getChildren();
//        return new ArrayList<String>() {{
//            if ( sup!=null ) addAll(sup);
//            add("indexChunks");
//        }};
//    }
}

@@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;

import java.util.List;

/** */
public class DynScopeAttrRef_negIndex extends DynScopeAttrRef_index {
    public DynScopeAttrRef_negIndex(String scope, String attr, List<ActionChunk> indexChunks) {
        super(scope, attr, indexChunks);
    }
}

@@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;

/** */
public class QRetValueRef extends RetValueRef {
    public String dict;
    public QRetValueRef(String dict, String name) {
        super(name);
        this.dict = dict;
    }
}

@@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;

import java.util.List;

/** */
public class SetDynScopeAttr_negIndex extends SetDynScopeAttr_index {
    public SetDynScopeAttr_negIndex(String scope, String attr, List<ActionChunk> indexChunks, List<ActionChunk> rhsChunks) {
        super(scope, attr, indexChunks, rhsChunks);
    }
}

@@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/** */
public class TokenPropertyRef_channel extends TokenPropertyRef {
    public TokenPropertyRef_channel(String label) {
        super(label);
    }
}

@@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/** */
public class TokenPropertyRef_index extends TokenPropertyRef {
    public TokenPropertyRef_index(String label) {
        super(label);
    }
}

@@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/** */
public class TokenPropertyRef_line extends TokenPropertyRef {
    public TokenPropertyRef_line(String label) {
        super(label);
    }
}

@@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.actions;

/** */
public class TokenPropertyRef_pos extends TokenPropertyRef {
    public TokenPropertyRef_pos(String label) {
        super(label);
    }
}

@@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.actions;

/** */
public class TokenRef extends ActionChunk {
    public String name;

    public TokenRef(String name) {
        this.name = name;
    }
}

@@ -0,0 +1,8 @@
package org.antlr.v4.codegen.model.ast;

import org.antlr.v4.codegen.model.SrcOp;

/** */
public class AddLeaf extends SrcOp {
    public SrcOp opWithResultToAdd;
}
@@ -0,0 +1,84 @@
/*
 [The "BSD license"]
 Copyright (c) 2005-2009 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.antlr.v4.misc;

import org.antlr.v4.tool.Grammar;

import java.util.List;

/** A generic set of ints that has an efficient implementation, BitSet,
 *  which is a compressed bitset and is useful for ints that
 *  are small, for example less than 500 or so, and w/o many ranges. For
 *  ranges with large values like unicode char sets, this is not very efficient.
 *  Consider using IntervalSet. Not all methods in IntervalSet are implemented.
 *
 *  @see org.antlr.misc.BitSet
 *  @see org.antlr.misc.IntervalSet
 */
public interface IntSet {
    /** Add an element to the set */
    void add(int el);

    /** Add all elements from incoming set to this set. Can limit
     *  to set of its own type. Return "this" so we can chain calls.
     */
    IntSet addAll(IntSet set);

    /** Return the intersection of this set with the argument, creating
     *  a new set.
     */
    IntSet and(IntSet a);

    IntSet complement(IntSet elements);

    IntSet or(IntSet a);

    IntSet subtract(IntSet a);

    /** Return the size of this set (not the underlying implementation's
     *  allocated memory size, for example).
     */
    int size();

    boolean isNil();

    boolean equals(Object obj);

    int getSingleElement();

    boolean member(int el);

    /** remove this element from this set */
    void remove(int el);

    List toList();

    String toString();

    String toString(Grammar g);
}
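A minimal usage sketch of this interface through IntervalSet, the implementation the comment points to. Only interface methods declared above are used; the no-argument IntervalSet constructor is assumed from its use elsewhere in this commit.

    IntSet vowels = new IntervalSet();
    vowels.add('a');
    vowels.add('e');

    IntSet ab = new IntervalSet();
    ab.add('a');
    ab.add('b');

    IntSet common = vowels.and(ab);    // intersection: {'a'}
    IntSet union  = vowels.or(ab);     // union: {'a','b','e'}
    boolean hasE  = union.member('e'); // true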
@@ -0,0 +1,153 @@
package org.antlr.v4.misc;

import org.antlr.v4.tool.GrammarAST;

import java.lang.reflect.Method;
import java.util.*;

/** */
public class Utils {
    public static final int INTEGER_POOL_MAX_VALUE = 1000;
    static Integer[] ints = new Integer[INTEGER_POOL_MAX_VALUE+1];

    /** Integer objects are immutable so share all Integers with the
     *  same value up to some max size. Use an array as a perfect hash.
     *  Return shared object for 0..INTEGER_POOL_MAX_VALUE or a new
     *  Integer object with x in it. Java's autoboxing only caches up to 127.
     */
    public static Integer integer(int x) {
        if ( x<0 || x>INTEGER_POOL_MAX_VALUE ) {
            return new Integer(x);
        }
        if ( ints[x]==null ) {
            ints[x] = new Integer(x);
        }
        return ints[x];
    }

    public static String stripFileExtension(String name) {
        if ( name==null ) return null;
        int lastDot = name.lastIndexOf('.');
        if ( lastDot<0 ) return name;
        return name.substring(0, lastDot);
    }

    // Seriously: why isn't this built in to java? ugh!
    public static String join(Iterator iter, String separator) {
        StringBuilder buf = new StringBuilder();
        while ( iter.hasNext() ) {
            buf.append(iter.next());
            if ( iter.hasNext() ) {
                buf.append(separator);
            }
        }
        return buf.toString();
    }

    public static String join(Object[] a, String separator) {
        StringBuilder buf = new StringBuilder();
        for (int i=0; i<a.length; i++) {
            Object o = a[i];
            buf.append(o.toString());
            if ( (i+1)<a.length ) {
                buf.append(separator);
            }
        }
        return buf.toString();
    }

    /** Given a source string, src,
        a string to replace, replacee,
        and a string to replace with, replacer,
        return a new string w/ the replacing done.
        You can use replacer==null to remove replacee from the string.

        This should be faster than Java's String.replaceAll as that one
        uses regex (I only want to play with strings anyway).
     */
    public static String replace(String src, String replacee, String replacer) {
        StringBuffer result = new StringBuffer(src.length() + 50);
        int startIndex = 0;
        int endIndex = src.indexOf(replacee);
        while(endIndex != -1) {
            result.append(src.substring(startIndex,endIndex));
            if ( replacer!=null ) {
                result.append(replacer);
            }
            startIndex = endIndex + replacee.length();
            endIndex = src.indexOf(replacee,startIndex);
        }
        result.append(src.substring(startIndex,src.length()));
        return result.toString();
    }

    public static String sortLinesInString(String s) {
        String lines[] = s.split("\n");
        Arrays.sort(lines);
        List<String> linesL = Arrays.asList(lines);
        StringBuffer buf = new StringBuffer();
        for (String l : linesL) {
            buf.append(l);
            buf.append('\n');
        }
        return buf.toString();
    }

    public static <T extends GrammarAST> List<String> nodesToStrings(List<T> nodes) {
        if ( nodes == null ) return null;
        List<String> a = new ArrayList<String>();
        for (T t : nodes) a.add(t.getText());
        return a;
    }

//    public static <T> List<T> list(T... values) {
//        List<T> x = new ArrayList<T>(values.length);
//        for (T v : values) {
//            if ( v!=null ) x.add(v);
//        }
//        return x;
//    }

    public static List list(Object... values) {
        List x = new ArrayList(values.length);
        for (Object v : values) {
            if ( v!=null ) x.add(v);
        }
        return x;
    }

    public static int[] toIntArray(List<Integer> list) {
        if ( list==null ) return null;
        int[] a = new int[list.size()];
        for (int i=0; i<list.size(); i++) a[i] = list.get(i);
        return a;
    }

    public static char[] toCharArray(List<Integer> data) {
        if ( data==null ) return null;
        char[] cdata = new char[data.size()];
        for (int i=0; i<data.size(); i++) {
            cdata[i] = (char)(int)data.get(i);
        }
        return cdata;
    }

    /** apply methodName to list and return list of results. method has
     *  no args. This pulls data out of a list essentially.
     */
    public static <From,To> List<To> apply(List<From> list, String methodName) {
        if ( list==null ) return null;
        List<To> b = new ArrayList<To>();
        for (From f : list) {
            try {
                Method m = f.getClass().getMethod(methodName, (Class[])null);
                b.add( (To)m.invoke(f, (Object[])null) );
            }
            catch (Exception e) {
                e.printStackTrace(System.err);
            }
        }
        return b;
    }

}
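Worked values for the string helpers, derived from the code above (illustrative only):

    Utils.stripFileExtension("T.g");                 // "T"
    Utils.join(new String[] {"a", "b", "c"}, ", ");  // "a, b, c"
    Utils.replace("a.b.c", ".", "::");               // "a::b::c"
    Utils.replace("a.b.c", ".", null);               // "abc"  (null replacer just removes)
    Utils.sortLinesInString("b\na");                 // "a\nb\n"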
@ -0,0 +1,217 @@
|
|||
/*
|
||||
[The "BSD license"]
|
||||
Copyright (c) 2005-2009 Terence Parr
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
3. The name of the author may not be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
package org.antlr.v4.runtime.atn;
|
||||
|
||||
/** A tree node for tracking the call chains for ATNs that invoke
|
||||
* other ATNs. These trees only have to point upwards to their parents
|
||||
* so we can walk back up the tree (i.e., pop stuff off the stack). We
|
||||
* never walk from stack down down through the children.
|
||||
*
|
||||
* Each alt predicted in a decision has its own context tree,
|
||||
* representing all possible return nodes. The initial stack has
|
||||
* EOF ("$") in it. So, for m alternative productions, the lookahead
|
||||
* DFA will have m ATNContext trees.
|
||||
*
|
||||
* To "push" a new context, just do "new ATNContext(context-parent, state)"
|
||||
* which will add itself to the parent. The root is ATNContext(null, null).
|
||||
*
|
||||
* The complete context for an ATN configuration is the set of invoking states
|
||||
* on the path from this node thru the parent pointers to the root.
|
||||
*/
|
||||
public class ATNContext {
    public ATNContext parent;

    /** The ATN state following the state that invoked another rule's start
     *  state; this is what gets recorded on the rule invocation context stack.
     */
    public ATNState returnState;

    /** Computing the hashCode is very expensive and closureBusy()
     *  uses it to track when it's seen a state|ctx before to avoid
     *  infinite loops.  As we add new contexts, record the hash code
     *  as returnState.stateNumber + parent.cachedHashCode.  This avoids
     *  walking up the tree for every hashCode().  Note that the caching
     *  works because a context is a monotonically growing tree of context
     *  nodes and nothing on the stack is ever modified...ctx just grows
     *  or shrinks.
     */
    protected int cachedHashCode;

    public ATNContext(ATNContext parent, ATNState returnState) {
        this.parent = parent;
        this.returnState = returnState;
        if ( returnState!=null ) {
            this.cachedHashCode = returnState.stateNumber;
        }
        if ( parent!=null ) {
            this.cachedHashCode += parent.cachedHashCode;
        }
    }
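To make the hash-caching rule above concrete, here is a small sketch (not part of the patch; s12 and s21 stand for return states whose stateNumber fields are 12 and 21):

ATNContext root = new ATNContext(null, null);   // [$]        cachedHashCode = 0
ATNContext c12  = new ATNContext(root, s12);    // [12 $]     cachedHashCode = 12 + 0  = 12
ATNContext c21  = new ATNContext(c12, s21);     // [21 12 $]  cachedHashCode = 21 + 12 = 33
// c21.hashCode() returns 33 without ever walking back up the tree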

    /** Dup context so we can turn on approximated or whatever */
    public ATNContext(ATNContext proto) {
        this.parent = proto.parent;
        this.returnState = proto.returnState;
        this.cachedHashCode = proto.cachedHashCode;
        // this.approximated = proto.approximated;
    }

    public static ATNContext EMPTY() { return new ATNContext(null, null); }

    /** Is s anywhere in the context? */
    public boolean contains(ATNState s) {
        ATNContext sp = this;
        while ( sp!=null ) {
            if ( sp.returnState == s ) return true;
            sp = sp.parent;
        }
        return false;
    }

    /** Two contexts are equals() if both have the same call stack;
     *  walk upwards to the root.  Recall that the root sentinel node
     *  has no parent.  Note that you may be comparing contexts in
     *  different alt trees.
     */
    public boolean equals(Object o) {
        ATNContext other = ((ATNContext)o);
        if ( this.cachedHashCode != other.cachedHashCode ) {
            return false; // can't be same if hash is different
        }
        if ( this==other ) return true;

        // System.out.println("comparing "+this+" with "+other);
        ATNContext sp = this;
        while ( sp.parent!=null && other.parent!=null ) {
            if ( sp.returnState != other.returnState ) return false;
            sp = sp.parent;
            other = other.parent;
        }
        if ( !(sp.parent==null && other.parent==null) ) {
            return false; // both pointers must be at their roots after walk
        }
        return true;
    }

    /** [$] suffix any context
     *  [21 $] suffix [21 12 $]
     *  [21 12 $] suffix [21 $]
     *  [21 18 $] suffix [21 18 12 9 $]
     *  [21 18 12 9 $] suffix [21 18 $]
     *  [21 12 $] not suffix [21 9 $]
     *
     *  Example "[21 $] suffix [21 12 $]" means: rule r invoked the current rule
     *  from state 21.  Rule s invoked rule r from state 12, which then invoked
     *  the current rule also via state 21.  While the context prior to state 21
     *  is different, the fact that both contexts emanate from state 21 implies
     *  that they are now going to track perfectly together.  Once they have
     *  converged on state 21, there is no way they can separate.  In other
     *  words, the prior stack state is not consulted when computing where to
     *  go in the closure operation.  beta $ and beta alpha $ are considered the
     *  same stack.  If beta is popped off, then $ and alpha $ remain; there is
     *  now an empty and a nonempty context comparison.  So, if one stack is a
     *  suffix of another, it still degenerates to the simple empty / nonempty
     *  stack comparison case.
     */
    protected boolean suffix(ATNContext other) {
        ATNContext sp = this;
        // if one of the contexts is empty, we never enter the loop and return true
        while ( sp.parent!=null && other.parent!=null ) {
            if ( sp.returnState != other.returnState ) {
                return false;
            }
            sp = sp.parent;
            other = other.parent;
        }
        //System.out.println("suffix");
        return true;
    }
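To connect the javadoc examples to the code, a minimal sketch (illustration only; it assumes package-level access to the protected suffix() method and two return states r12 and r21 with stateNumbers 12 and 21):

ATNContext root = ATNContext.EMPTY();                              // [$]
ATNContext a    = new ATNContext(root, r21);                       // [21 $]
ATNContext b    = new ATNContext(new ATNContext(root, r12), r21);  // [21 12 $]
// a.suffix(b) and b.suffix(a) are both true: the walk only compares the
// common top of the two stacks, and both stacks start with state 21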

    /** Given an ATN state number, how many times does it appear on the stack?
     *  The ATN-to-DFA conversion pushes "return" states as it does
     *  rule invocations.  The ATN state number must be a rule return state
     *  (the state following the invocation state).
     */
    public int occurrences(int state) {
        ATNContext sp = this;
        int n = 0; // track recursive invocations of target from this state
        //System.out.println("this.context is "+sp);
        while ( sp.parent!=null ) {
            if ( sp.returnState.stateNumber == state ) {
                n++;
            }
            sp = sp.parent;
        }
        return n;
    }

    public int hashCode() {
        return cachedHashCode; // works with tests; don't recompute.
//        int h = 0;
//        ATNContext sp = this;
//        while ( sp.parent!=null ) {
//            h += sp.returnState.stateNumber;
//            sp = sp.parent;
//        }
//        return h;
    }

    /** How many rule invocations are in this context?  I.e., how many
     *  elements are in the stack (path to root, not including the root
     *  placeholder)?
     */
    public int depth() {
        int n = 0;
        ATNContext sp = this;
        while ( !sp.isEmpty() ) {
            n++;
            sp = sp.parent;
        }
        return n;
    }

    /** A context is empty if there is no parent, meaning nobody pushed
     *  anything on the call stack.
     */
    public boolean isEmpty() {
        return parent==null;
    }

    public String toString() {
        StringBuffer buf = new StringBuffer();
        ATNContext sp = this;
        buf.append("[");
        while ( sp.parent!=null ) {
            buf.append(sp.returnState.stateNumber);
            buf.append(" ");
            sp = sp.parent;
        }
        buf.append("$]");
        return buf.toString();
    }
}
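Putting the pieces together, a short usage sketch for the class as a whole (illustration only; s12 and s21 again stand for return states numbered 12 and 21):

ATNContext stack = ATNContext.EMPTY();           // [$]
stack = new ATNContext(stack, s12);              // push 12 -> [12 $]
stack = new ATNContext(stack, s21);              // push 21 -> [21 12 $]

stack.toString();       // "[21 12 $]"
stack.depth();          // 2
stack.contains(s12);    // true
stack.occurrences(21);  // 1
stack = stack.parent;   // "pop" back to [12 $]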
@ -0,0 +1,74 @@
package org.antlr.v4.runtime.atn;

import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.runtime.RuleContext;

import java.util.*;

public class LL1Analyzer {
    /** Used during LOOK to detect computation cycles.  E.g., ()* causes
     *  an infinite loop without it: if we reach the same state again,
     *  we would recurse forever.
     */

    public ATN atn;

    public LL1Analyzer(ATN atn) { this.atn = atn; }

    /** From an ATN state, s, find the set of all labels reachable from s at
     *  depth k.  Only for DecisionStates.
     */
    public IntervalSet[] getDecisionLookahead(ATNState s) {
//        System.out.println("LOOK("+s.stateNumber+")");
        if ( s==null ) return null;
        IntervalSet[] look = new IntervalSet[s.getNumberOfTransitions()+1];
        Set<ATNConfig> lookBusy = new HashSet<ATNConfig>();
        for (int alt=1; alt<=s.getNumberOfTransitions(); alt++) {
            look[alt] = new IntervalSet();
            lookBusy.clear();
            _LOOK(s.transition(alt - 1).target, RuleContext.EMPTY, look[alt], lookBusy);
        }
        return look;
    }

    public IntervalSet LOOK(ATNState s, RuleContext ctx) {
        IntervalSet r = new IntervalSet();
        _LOOK(s, ctx, r, new HashSet<ATNConfig>());
        return r;
    }

    protected void _LOOK(ATNState s, RuleContext ctx, IntervalSet look,
                         Set<ATNConfig> lookBusy) {
//        System.out.println("_LOOK("+s.stateNumber+", ctx="+ctx);
        ATNConfig c = new ATNConfig(s, 0, ctx);
        if ( lookBusy.contains(c) ) return;
        lookBusy.add(c);

        if ( s instanceof RuleStopState && ctx != null && ctx.invokingState!=-1 ) {
            ATNState invokingState = atn.states.get(ctx.invokingState);
            RuleTransition rt = (RuleTransition)invokingState.transition(0);
            ATNState retState = rt.followState;
//            System.out.println("popping back to "+retState);
            _LOOK(retState, ctx.parent, look, lookBusy);
            return;
        }

        int n = s.getNumberOfTransitions();
        for (int i=0; i<n; i++) {
            Transition t = s.transition(i);
            if ( t instanceof RuleTransition ) {
                RuleContext newContext =
                    new RuleContext(ctx, s.stateNumber, t.target.stateNumber);
                _LOOK(t.target, newContext, look, lookBusy);
            }
            else if ( t.isEpsilon() ) {
                _LOOK(t.target, ctx, look, lookBusy);
            }
            else {
//                System.out.println("adding "+ t);
                look.addAll(t.label());
            }
        }
    }

}
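A quick sketch of how this analyzer is typically driven (illustration only; it assumes an already-built ATN and one of its decision states):

LL1Analyzer analyzer = new LL1Analyzer(atn);

// one token set per alternative of the decision; index 0 is unused
IntervalSet[] altLook = analyzer.getDecisionLookahead(decisionState);

// or: the single set of tokens reachable from an arbitrary state
IntervalSet follow = analyzer.LOOK(someState, RuleContext.EMPTY);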
@ -0,0 +1,361 @@
package org.antlr.v4.tool;

import org.antlr.v4.misc.Utils;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.*;
import org.stringtemplate.v4.*;

import java.util.*;

/** The DOT (part of graphviz) generation aspect. */
public class DOTGenerator {
    public static final boolean STRIP_NONREDUCED_STATES = false;

    protected String arrowhead="normal";
    protected String rankdir="LR";

    /** Library of output templates; use <attrname> format */
    public static STGroup stlib = new STGroupDir("org/antlr/v4/tool/templates/dot");

    protected Grammar grammar;

    /** This aspect is associated with a grammar */
    public DOTGenerator(Grammar grammar) {
        this.grammar = grammar;
    }

    public String getDOT(DFA dfa,
                         boolean isLexer)
    {
        if ( dfa.s0==null ) return null;

        ST dot = stlib.getInstanceOf("dfa");
        dot.add("name", "DFA"+dfa.decision);
        dot.add("startState", dfa.s0.stateNumber);
//        dot.add("useBox", Tool.internalOption_ShowATNConfigsInDFA);
        dot.add("rankdir", rankdir);

        // define stop states first; seems to be a bug in DOT where the
        // doublecircle shape only works if we define them first
        for (DFAState d : dfa.states.keySet()) {
            if ( !d.isAcceptState ) continue;
            ST st = stlib.getInstanceOf("stopstate");
            st.add("name", "s"+d.stateNumber);
            st.add("label", getStateLabel(d));
            dot.add("states", st);
        }

        for (DFAState d : dfa.states.keySet()) {
            if ( d.isAcceptState ) continue;
            if ( d.stateNumber == Integer.MAX_VALUE ) continue;
            ST st = stlib.getInstanceOf("state");
            st.add("name", "s"+d.stateNumber);
            st.add("label", getStateLabel(d));
            dot.add("states", st);
        }

        for (DFAState d : dfa.states.keySet()) {
            if ( d.edges!=null ) {
                for (int i = 0; i < d.edges.length; i++) {
                    DFAState target = d.edges[i];
                    if ( target==null ) continue;
                    if ( target.stateNumber == Integer.MAX_VALUE ) continue;
                    int ttype = i-1; // edge i is token type i-1; index 0 is EOF (-1) for parsers
                    String label = String.valueOf(ttype);
                    if ( isLexer ) label = "'"+getEdgeLabel(String.valueOf((char) i))+"'";
                    else if ( grammar!=null ) label = grammar.getTokenDisplayName(ttype);
                    ST st = stlib.getInstanceOf("edge");
                    st.add("label", label);
                    st.add("src", "s"+d.stateNumber);
                    st.add("target", "s"+target.stateNumber);
                    st.add("arrowhead", arrowhead);
                    dot.add("edges", st);
                }
            }
        }

        String output = dot.render();
        return Utils.sortLinesInString(output);
    }

    protected String getStateLabel(DFAState s) {
        if ( s==null ) return "null";
        StringBuffer buf = new StringBuffer(250);
        buf.append('s');
        buf.append(s.stateNumber);
        if ( s.isAcceptState ) {
            buf.append("=>"+s.prediction);
        }
//        if ( Tool.internalOption_ShowATNConfigsInDFA ) {
        if ( false ) {
            Set<Integer> alts = ((DFAState)s).getAltSet();
            if ( alts!=null ) {
                buf.append("\\n");
                // separate alts
                List<Integer> altList = new ArrayList<Integer>();
                altList.addAll(alts);
                Collections.sort(altList);
                Set<ATNConfig> configurations = ((DFAState)s).configs;
                for (int altIndex = 0; altIndex < altList.size(); altIndex++) {
                    Integer altI = (Integer) altList.get(altIndex);
                    int alt = altI.intValue();
                    if ( altIndex>0 ) {
                        buf.append("\\n");
                    }
                    buf.append("alt");
                    buf.append(alt);
                    buf.append(':');
                    // get a list of configs for just this alt
                    // it will help us print better later
                    List<ATNConfig> configsInAlt = new ArrayList<ATNConfig>();
                    for (Iterator it = configurations.iterator(); it.hasNext();) {
                        ATNConfig c = (ATNConfig) it.next();
                        if ( c.alt!=alt ) continue;
                        configsInAlt.add(c);
                    }
                    int n = 0;
                    for (int cIndex = 0; cIndex < configsInAlt.size(); cIndex++) {
                        ATNConfig c =
                            (ATNConfig)configsInAlt.get(cIndex);
                        n++;
                        buf.append(c.toString(null, false));
                        if ( (cIndex+1)<configsInAlt.size() ) {
                            buf.append(", ");
                        }
                        if ( n%5==0 && (configsInAlt.size()-cIndex)>3 ) {
                            buf.append("\\n");
                        }
                    }
                }
            }
        }
        String stateLabel = buf.toString();
        return stateLabel;
    }

    public String getDOT(ATNState startState) {
        Set<String> ruleNames = grammar.rules.keySet();
        String[] names = new String[ruleNames.size()+1];
        int i = 1;
        for (String s : ruleNames) names[i++] = s;
        return getDOT(startState, names);
    }

    /** Return a String containing a DOT description that, when displayed,
     *  will show the incoming state machine visually.  All nodes reachable
     *  from startState will be included.
     */
    public String getDOT(ATNState startState, String[] ruleNames) {
        if ( startState==null ) return null;

        // The output DOT graph for visualization
        ST dot = null;
        Set<ATNState> markedStates = new HashSet<ATNState>();
        dot = stlib.getInstanceOf("atn");
        dot.add("startState", Utils.integer(startState.stateNumber));
        dot.add("rankdir", rankdir);

        List<ATNState> work = new LinkedList<ATNState>();

        work.add(startState);
        while ( work.size()>0 ) {
            ATNState s = work.get(0);
            if ( markedStates.contains(s) ) { work.remove(0); continue; }
            markedStates.add(s);

            // don't go past end of rule node to the follow states
            if ( s instanceof RuleStopState ) continue;

            // special case: if decision point, then line up the alt start states
            // unless it's an end of block
            if ( s instanceof BlockStartState ) {
                ST rankST = stlib.getInstanceOf("decision-rank");
                DecisionState alt = (DecisionState)s;
                for (int i=0; i<alt.getNumberOfTransitions(); i++) {
                    ATNState target = alt.transition(i).target;
                    if ( target!=null ) {
                        rankST.add("states", target.stateNumber);
                    }
                }
                dot.add("decisionRanks", rankST);
            }

            // make a DOT edge for each transition
            ST edgeST = null;
            for (int i = 0; i < s.getNumberOfTransitions(); i++) {
                Transition edge = s.transition(i);
                if ( edge instanceof RuleTransition ) {
                    RuleTransition rr = ((RuleTransition)edge);
                    // don't jump to other rules, but display edge to follow node
                    edgeST = stlib.getInstanceOf("edge");
                    edgeST.add("label", "<"+ruleNames[rr.ruleIndex]+">");
                    edgeST.add("src", "s"+s.stateNumber);
                    edgeST.add("target", "s"+rr.followState.stateNumber);
                    edgeST.add("arrowhead", arrowhead);
                    dot.add("edges", edgeST);
                    work.add(rr.followState);
                    continue;
                }
                if ( edge instanceof ActionTransition ) {
                    edgeST = stlib.getInstanceOf("action-edge");
                }
                else if ( edge instanceof PredicateTransition ) {
                    edgeST = stlib.getInstanceOf("edge");
                }
                else if ( edge.isEpsilon() ) {
                    edgeST = stlib.getInstanceOf("epsilon-edge");
                }
                else {
                    edgeST = stlib.getInstanceOf("edge");
                }
                edgeST.add("label", getEdgeLabel(edge.toString(grammar)));
                edgeST.add("src", "s"+s.stateNumber);
                edgeST.add("target", "s"+edge.target.stateNumber);
                edgeST.add("arrowhead", arrowhead);
                dot.add("edges", edgeST);
                work.add(edge.target);
            }
        }

        // define nodes we visited (they will appear first in DOT output)
        // this is an example of ST's lazy eval :)
        // define stop state first; seems to be a bug in DOT where doublecircle
        // shape only works if we define them first. weird.
//        ATNState stopState = startState.atn.ruleToStopState.get(startState.rule);
//        if ( stopState!=null ) {
//            ST st = stlib.getInstanceOf("stopstate");
//            st.add("name", "s"+stopState.stateNumber);
//            st.add("label", getStateLabel(stopState));
//            dot.add("states", st);
//        }
        for (ATNState s : markedStates) {
            if ( !(s instanceof RuleStopState) ) continue;
            ST st = stlib.getInstanceOf("stopstate");
            st.add("name", "s"+s.stateNumber);
            st.add("label", getStateLabel(s));
            dot.add("states", st);
        }
        for (ATNState s : markedStates) {
            if ( s instanceof RuleStopState ) continue;
            ST st = stlib.getInstanceOf("state");
            st.add("name", "s"+s.stateNumber);
            st.add("label", getStateLabel(s));
            dot.add("states", st);
        }

        return dot.render();
    }

    /** Do a depth-first walk of the state machine graph and
     *  fill a DOT description template.  Keep filling the
     *  states and edges attributes.  We know this is an ATN
     *  for a rule so don't traverse edges to other rules and
     *  don't go past rule end state.
     */
//    protected void walkRuleATNCreatingDOT(ST dot,
//                                          ATNState s)
//    {
//        if ( markedStates.contains(s) ) {
//            return; // already visited this node
//        }
//
//        markedStates.add(s.stateNumber); // mark this node as completed.
//
//        // first add this node
//        ST stateST;
//        if ( s instanceof RuleStopState ) {
//            stateST = stlib.getInstanceOf("stopstate");
//        }
//        else {
//            stateST = stlib.getInstanceOf("state");
//        }
//        stateST.add("name", getStateLabel(s));
//        dot.add("states", stateST);
//
//        if ( s instanceof RuleStopState ) {
//            return; // don't go past end of rule node to the follow states
//        }
//
//        // special case: if decision point, then line up the alt start states
//        // unless it's an end of block
//        if ( s instanceof DecisionState ) {
//            GrammarAST n = ((ATNState)s).ast;
//            if ( n!=null && s instanceof BlockEndState ) {
//                ST rankST = stlib.getInstanceOf("decision-rank");
//                ATNState alt = (ATNState)s;
//                while ( alt!=null ) {
//                    rankST.add("states", getStateLabel(alt));
//                    if ( alt.transition(1)!=null ) {
//                        alt = (ATNState)alt.transition(1).target;
//                    }
//                    else {
//                        alt=null;
//                    }
//                }
//                dot.add("decisionRanks", rankST);
//            }
//        }
//
//        // make a DOT edge for each transition
//        ST edgeST = null;
//        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
//            Transition edge = (Transition) s.transition(i);
//            if ( edge instanceof RuleTransition ) {
//                RuleTransition rr = ((RuleTransition)edge);
//                // don't jump to other rules, but display edge to follow node
//                edgeST = stlib.getInstanceOf("edge");
//                if ( rr.rule.g != grammar ) {
//                    edgeST.add("label", "<"+rr.rule.g.name+"."+rr.rule.name+">");
//                }
//                else {
//                    edgeST.add("label", "<"+rr.rule.name+">");
//                }
//                edgeST.add("src", getStateLabel(s));
//                edgeST.add("target", getStateLabel(rr.followState));
//                edgeST.add("arrowhead", arrowhead);
//                dot.add("edges", edgeST);
//                walkRuleATNCreatingDOT(dot, rr.followState);
//                continue;
//            }
//            if ( edge instanceof ActionTransition ) {
//                edgeST = stlib.getInstanceOf("action-edge");
//            }
//            else if ( edge instanceof PredicateTransition ) {
//                edgeST = stlib.getInstanceOf("edge");
//            }
//            else if ( edge.isEpsilon() ) {
//                edgeST = stlib.getInstanceOf("epsilon-edge");
//            }
//            else {
//                edgeST = stlib.getInstanceOf("edge");
//            }
//            edgeST.add("label", getEdgeLabel(edge.toString(grammar)));
//            edgeST.add("src", getStateLabel(s));
//            edgeST.add("target", getStateLabel(edge.target));
//            edgeST.add("arrowhead", arrowhead);
//            dot.add("edges", edgeST);
//            walkRuleATNCreatingDOT(dot, edge.target); // keep walkin'
//        }
//    }

    /** Fix edge strings so they print out in DOT properly;
     *  generate any gated predicates on edge too.
     */
    protected String getEdgeLabel(String label) {
        label = Utils.replace(label,"\\", "\\\\");
        label = Utils.replace(label,"\"", "\\\"");
        label = Utils.replace(label,"\n", "\\\\n");
        label = Utils.replace(label,"\r", "");
        return label;
    }
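For instance (a sketch only, not part of the patch; it assumes package-level access to the protected method and some Grammar g): an edge label that contains a double quote must be escaped before it is pasted into the DOT template.

DOTGenerator gen = new DOTGenerator(g);
String raw      = "match \"if\"";           // label text with embedded quotes
String escaped  = gen.getEdgeLabel(raw);    // -> match \"if\"  (quotes backslash-escaped)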

    protected String getStateLabel(ATNState s) {
        if ( s==null ) return "null";
        String stateLabel = String.valueOf(s.stateNumber);
        if ( s instanceof DecisionState ) {
            stateLabel = stateLabel+"\\nd="+((DecisionState)s).decision;
        }
        return stateLabel;
    }

}
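Typical use of the generator (illustration only; it assumes an already-built Grammar g, the ATN start state of one of its rules, and a decision DFA):

DOTGenerator dotGen = new DOTGenerator(g);

String atnDot = dotGen.getDOT(ruleStartState);   // rule names are pulled from g.rules
String dfaDot = dotGen.getDOT(dfa, false);       // false = parser decision, token-name labels

// write either string to a .dot file and render it, e.g.:
//   dot -Tpng decision.dot -o decision.png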
@ -0,0 +1,22 @@
package org.antlr.v4.tool;

import org.antlr.runtime.Token;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.CommonErrorNode;

/** A node representing an erroneous token range in the token stream. */
public class GrammarASTErrorNode extends GrammarAST {
    CommonErrorNode delegate;

    public GrammarASTErrorNode(TokenStream input, Token start, Token stop,
                               org.antlr.runtime.RecognitionException e)
    {
        delegate = new CommonErrorNode(input, start, stop, e);
    }

    public boolean isNil() { return delegate.isNil(); }

    public int getType() { return delegate.getType(); }

    public String getText() { return delegate.getText(); }
    public String toString() { return delegate.toString(); }
}
@ -0,0 +1,32 @@
package org.antlr.v4.tool;

import org.antlr.runtime.Token;

import java.util.HashMap;
import java.util.Map;

public class GrammarASTWithOptions extends GrammarAST {
    protected Map<String, String> options;

    public GrammarASTWithOptions(GrammarAST node) {
        super(node);
        this.options = ((GrammarASTWithOptions)node).options;
    }

    public GrammarASTWithOptions(Token t) { super(t); }
    public GrammarASTWithOptions(int type) { super(type); }
    public GrammarASTWithOptions(int type, Token t) { super(type, t); }
    public GrammarASTWithOptions(int type, Token t, String text) { super(type, t, text); }

    public void setOption(String key, String value) {
        if ( options==null ) options = new HashMap<String, String>();
        options.put(key, value);
    }

    public String getOption(String key) {
        if ( options==null ) return null;
        return options.get(key);
    }

    public Map<String, String> getOptions() { return options; }
}
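The options behave like a lazily created map; a quick sketch (illustration only; t stands for some Token):

GrammarASTWithOptions node = new GrammarASTWithOptions(t);
node.getOption("k");       // null -- nothing set yet, no map allocated
node.setOption("k", "2");  // allocates the map on first use
node.getOption("k");       // "2"
node.getOptions();         // {k=2}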