got basic tree parser working.

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 9071]
parrt 2011-09-22 17:16:22 -08:00
parent 0c1716d062
commit b63b271815
20 changed files with 286 additions and 144 deletions

View File

@ -28,10 +28,14 @@
*/
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.misc.*;
import org.antlr.v4.runtime.atn.ATNConfig;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.OrderedHashSet;
import java.util.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
/** A generic recognizer that can handle recognizers generated from
* parser and tree grammars. This is all the parsing
@ -43,8 +47,6 @@ import java.util.*;
public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
public static final String NEXT_TOKEN_RULE_NAME = "nextToken";
protected TokenStream _input;
/** The RuleContext object for the currently executing rule. This
* must be non-null during parsing, but is initially null.
* When somebody calls the start rule, this gets set to the
@ -77,13 +79,13 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
/** Did the recognizer encounter a syntax error? Track how many. */
public int syntaxErrors = 0;
public BaseRecognizer(TokenStream input) {
this._input = input;
public BaseRecognizer(IntStream input) {
setInputStream(input);
}
/** reset the parser's state */
public void reset() {
if ( _input !=null ) _input.seek(0);
if ( getInputStream()!=null ) getInputStream().seek(0);
errorRecovery = false;
_ctx = null;
lastErrorIndex = -1;
@ -104,8 +106,8 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
public Object match(int ttype) throws RecognitionException {
// System.out.println("match "+((TokenStream)input).LT(1)+" vs expected "+ttype);
Object matchedSymbol = getCurrentInputSymbol();
if ( _input.LA(1)==ttype ) {
_input.consume();
if ( getInputStream().LA(1)==ttype ) {
getInputStream().consume();
errorRecovery = false;
// failed = false;
if ( buildParseTrees ) _ctx.addChild((Token)matchedSymbol);
@ -123,7 +125,7 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
// like matchSet but w/o consume; error checking routine.
public void sync(IntervalSet expecting) {
if ( expecting.member(_input.LA(1)) ) return;
if ( expecting.member(getInputStream().LA(1)) ) return;
// System.out.println("failed sync to "+expecting);
IntervalSet followSet = computeErrorRecoverySet();
followSet.addAll(expecting);
@ -135,11 +137,11 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
public void matchAny() {
errorRecovery = false;
// failed = false;
_input.consume();
getInputStream().consume();
}
public boolean mismatchIsUnwantedToken(int ttype) {
return _input.LA(2)==ttype;
return getInputStream().LA(2)==ttype;
}
public boolean mismatchIsMissingToken(IntervalSet follow) {
@ -258,7 +260,7 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
* token that the match() routine could not recover from.
*/
public void recover() {
_input.consume();
getInputStream().consume();
/*
if ( lastErrorIndex==input.index() ) {
// uh oh, another error at same token index; must be a case
@ -490,30 +492,30 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
RecognitionException e = null;
// if next token is what we are looking for then "delete" this token
if ( mismatchIsUnwantedToken(ttype) ) {
e = new UnwantedTokenException(this, _input, ttype);
e = new UnwantedTokenException(this, getInputStream(), ttype);
/*
System.err.println("recoverFromMismatchedToken deleting "+
((TokenStream)input).LT(1)+
" since "+((TokenStream)input).LT(2)+" is what we want");
*/
beginResync();
_input.consume(); // simply delete extra token
getInputStream().consume(); // simply delete extra token
endResync();
reportError(e); // report after consuming so AW sees the token in the exception
// we want to return the token we're actually matching
Object matchedSymbol = getCurrentInputSymbol();
_input.consume(); // move past ttype token as if all were ok
getInputStream().consume(); // move past ttype token as if all were ok
return matchedSymbol;
}
// can't recover with single token deletion, try insertion
if ( mismatchIsMissingToken(follow) ) {
Object inserted = getMissingSymbol(e, ttype, follow);
e = new MissingTokenException(this, _input, ttype, inserted);
e = new MissingTokenException(this, getInputStream(), ttype, inserted);
reportError(e); // report after inserting so AW sees the token in the exception
return inserted;
}
// even that didn't work; must throw the exception
e = new MismatchedTokenException(this, _input, ttype);
e = new MismatchedTokenException(this, getInputStream(), ttype);
throw e;
}
@ -531,6 +533,9 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
throw e;
}
public abstract IntStream getInputStream();
public abstract void setInputStream(IntStream input);
/** Match needs to return the current input symbol, which gets put
* into the label for the associated token ref; e.g., x=ID. Token
* and tree parsers need to return different objects. Rather than test
@ -540,6 +545,20 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
*/
protected Object getCurrentInputSymbol() { return null; }
public void enterOuterAlt(ParserRuleContext localctx, int altNum) {
_ctx = localctx;
_ctx.altNum = altNum;
if ( buildParseTrees ) {
if ( _ctx.parent!=null ) _ctx.parent.addChild(_ctx);
}
}
public void exitRule(int ruleIndex) {
_ctx = (ParserRuleContext)_ctx.parent;
}
/** Conjure up a missing token during error recovery.
*
* The recognizer attempts to recover from single missing
@ -568,21 +587,21 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
public void consumeUntil(int tokenType) {
//System.out.println("consumeUntil "+tokenType);
int ttype = _input.LA(1);
int ttype = getInputStream().LA(1);
while (ttype != Token.EOF && ttype != tokenType) {
_input.consume();
ttype = _input.LA(1);
getInputStream().consume();
ttype = getInputStream().LA(1);
}
}
/** Consume tokens until one matches the given token set */
public void consumeUntil(IntervalSet set) {
//System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
int ttype = _input.LA(1);
int ttype = getInputStream().LA(1);
while (ttype != Token.EOF && !set.member(ttype) ) {
//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
_input.consume();
ttype = _input.LA(1);
getInputStream().consume();
ttype = getInputStream().LA(1);
}
}
@ -688,38 +707,11 @@ public abstract class BaseRecognizer extends Recognizer<ParserATNSimulator> {
* invoking rules. Combine this and we have complete ATN
* configuration information.
*/
public void move(int atnState) {
public void setState(int atnState) {
_ctx.s = atnState;
if ( traceATNStates ) _ctx.trace(atnState);
}
public void enterRule(int ruleIndex) { }
/** Always called by generated parsers upon entry to a rule.
* This occurs after the new context has been pushed. Access field
* _ctx get the current context.
*
* This is flexible because users do not have to regenerate parsers
* to get trace facilities.
*/
public void enterRule(ParserRuleContext localctx, int ruleIndex) {
_ctx = localctx;
_ctx.start = _input.LT(1);
_ctx.ruleIndex = ruleIndex;
}
public void enterOuterAlt(ParserRuleContext localctx, int altNum) {
_ctx = localctx;
_ctx.altNum = altNum;
if ( buildParseTrees ) {
if ( _ctx.parent!=null ) _ctx.parent.addChild(_ctx);
}
}
public void exitRule(int ruleIndex) {
_ctx = (ParserRuleContext)_ctx.parent;
}
/* In v3, programmers altered error messages by overriding
displayRecognitionError() and possibly getTokenErrorDisplay().
They overrode emitErrorMessage(String) to change where the output goes.
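
A rough, hand-written sketch of how a generated rule method is expected to drive the hooks above (enterRule, enterOuterAlt, setState, match, exitRule). This is not actual generated output from this snapshot; the context class, rule index constant, token-type constants, and ATN state numbers are all invented for illustration.

public aContext a() throws RecognitionException {
    aContext _localctx = new aContext(_ctx, 10);  // 10 = invoking ATN state (invented)
    enterRule(_localctx, RULE_a);                 // sets _ctx, _ctx.start, _ctx.ruleIndex
    try {
        enterOuterAlt(_localctx, 1);              // records the alt; links _ctx into the parse tree if buildParseTrees
        setState(11); match(INT);                 // each match site records its ATN state first
        setState(12); match(ID);
    }
    finally {
        exitRule(RULE_a);                         // pops _ctx back to the parent context
    }
    return _localctx;
}
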

View File

@ -42,7 +42,7 @@ public class NoViableAltException extends RecognitionException {
public NoViableAltException() {;}
public NoViableAltException(BaseRecognizer recognizer, RuleContext ctx) { // LL(1) error
super(recognizer, recognizer._input, ctx);
super(recognizer, recognizer.getInputStream(), ctx);
}
public NoViableAltException(BaseRecognizer recognizer, IntStream input,
@ -55,7 +55,7 @@ public class NoViableAltException extends RecognitionException {
public String toString() {
if ( recognizer!=null ) {
TokenStream tokens = recognizer._input;
TokenStream tokens = ((Parser)recognizer).getTokenStream();
String bad = tokens.toString(startIndex, index);
return "NoViableAltException(input=\""+bad+"\" last token type is "+getUnexpectedType()+")";
}

View File

@ -29,7 +29,8 @@
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.tree.*;
import org.antlr.v4.runtime.tree.ASTAdaptor;
import org.antlr.v4.runtime.tree.CommonASTAdaptor;
/** A parser for TokenStreams. "parser grammars" result in a subclass
* of this.
@ -38,6 +39,8 @@ public class Parser extends BaseRecognizer {
public ASTAdaptor _adaptor = new CommonASTAdaptor();
protected TokenStream _input;
public Parser(TokenStream input) {
super(input);
}
@ -49,10 +52,34 @@ public class Parser extends BaseRecognizer {
}
}
protected Object getCurrentInputSymbol() {
return _input.LT(1);
/** Always called by generated parsers upon entry to a rule.
* This occurs after the new context has been pushed. Access field
* _ctx get the current context.
*
* This is flexible because users do not have to regenerate parsers
* to get trace facilities.
*/
public void enterRule(ParserRuleContext localctx, int ruleIndex) {
_ctx = localctx;
_ctx.start = _input.LT(1);
_ctx.ruleIndex = ruleIndex;
}
@Override
public Token match(int ttype) throws RecognitionException {
return (Token)super.match(ttype);
}
protected Object getCurrentInputSymbol() {
return ((TokenStream)_input).LT(1);
}
@Override
public TokenStream getInputStream() { return _input; }
@Override
public void setInputStream(IntStream input) { _input = (TokenStream)input; }
protected Object getMissingSymbol(RecognitionException e,
int expectedTokenType)
{
@ -60,9 +87,9 @@ public class Parser extends BaseRecognizer {
if ( expectedTokenType== Token.EOF ) tokenText = "<missing EOF>";
else tokenText = "<missing "+getTokenNames()[expectedTokenType]+">";
CommonToken t = new CommonToken(expectedTokenType, tokenText);
Token current = _input.LT(1);
Token current = ((TokenStream)_input).LT(1);
if ( current.getType() == Token.EOF ) {
current = _input.LT(-1);
current = ((TokenStream)_input).LT(-1);
}
t.line = current.getLine();
t.charPositionInLine = current.getCharPositionInLine();
@ -79,7 +106,7 @@ public class Parser extends BaseRecognizer {
}
public TokenStream getTokenStream() {
return _input;
return ((TokenStream)_input);
}
public String getSourceName() {

View File

@ -29,7 +29,8 @@
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.tree.*;
import org.antlr.v4.runtime.tree.ASTAdaptor;
import org.antlr.v4.runtime.tree.TreeNodeStream;
/** The root of the ANTLR exception hierarchy.
*
@ -182,11 +183,11 @@ public class RecognitionException extends RuntimeException {
/** Return the token type or char of the unexpected input element */
public int getUnexpectedType() {
if ( recognizer==null ) return token.getType();
if ( recognizer._input instanceof TokenStream) {
if ( recognizer.getInputStream() instanceof TokenStream) {
return token.getType();
}
else if ( recognizer._input instanceof TreeNodeStream) {
TreeNodeStream nodes = (TreeNodeStream)recognizer._input;
else if ( recognizer.getInputStream() instanceof TreeNodeStream) {
TreeNodeStream nodes = (TreeNodeStream)recognizer.getInputStream();
ASTAdaptor adaptor = nodes.getTreeAdaptor();
return adaptor.getType(node);
}

View File

@ -0,0 +1,23 @@
package org.antlr.v4.runtime;
import org.stringtemplate.v4.ST;
public class TreeParserRuleContext extends ParserRuleContext {
public Object start, stop;
public Object tree;
public ST st;
/** Set during parsing to identify which rule parser is in. */
public int ruleIndex;
/** Set during parsing to identify which alt of rule parser is in. */
public int altNum;
public TreeParserRuleContext() {
super();
}
public TreeParserRuleContext(RuleContext parent, int stateNumber) {
super(parent, stateNumber);
}
}

View File

@ -58,8 +58,8 @@ public class DebugTreeParser extends TreeParser {
* input stream too that it should send events to this listener.
*/
public void setDebugListener(DebugEventListener dbg) {
if ( input instanceof DebugTreeNodeStream ) {
((DebugTreeNodeStream)input).setDebugListener(dbg);
if ( _input instanceof DebugTreeNodeStream ) {
((DebugTreeNodeStream) _input).setDebugListener(dbg);
}
this.dbg = dbg;
}

View File

@ -29,7 +29,8 @@
package org.antlr.v4.runtime.tree;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.TokenStream;
/**
Cut-n-paste from material I'm not using in the book anymore (edit later
@ -93,8 +94,8 @@ public class TreeFilter extends TreeParser {
if ( t==null ) return;
try {
// share TreeParser object but not parsing-related state
input = new CommonASTNodeStream(originalAdaptor, t);
((CommonASTNodeStream)input).setTokenStream(originalTokenStream);
_input = new CommonASTNodeStream(originalAdaptor, t);
((CommonASTNodeStream) _input).setTokenStream(originalTokenStream);
whichRule.rule();
}
catch (RecognitionException e) { ; }

View File

@ -31,7 +31,8 @@ package org.antlr.v4.runtime.tree;
import org.antlr.v4.runtime.*;
import java.util.regex.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** A parser for a stream of tree nodes. "tree grammars" result in a subclass
* of this. All the error reporting and recovery is shared with Parser via
@ -47,31 +48,42 @@ public class TreeParser extends BaseRecognizer {
static Pattern dotdotPattern = Pattern.compile(dotdot);
static Pattern doubleEtcPattern = Pattern.compile(doubleEtc);
protected TreeNodeStream input;
protected TreeNodeStream _input;
public TreeParser(TreeNodeStream input) {
super((TokenStream)input); // highlight that we go to super to set state object
setTreeNodeStream(input);
super(input);
}
public void reset() {
super.reset(); // reset all recognizer state variables
if ( input!=null ) {
input.seek(0); // rewind the input
if ( _input !=null ) {
_input.seek(0); // rewind the input
}
}
/** Set the input stream */
public void setTreeNodeStream(TreeNodeStream input) {
this.input = input;
}
protected Object getCurrentInputSymbol() { return _input.LT(1); }
public TreeNodeStream getTreeNodeStream() {
return input;
@Override
public TreeNodeStream getInputStream() { return _input; }
@Override
public void setInputStream(IntStream input) { _input = (TreeNodeStream)input; }
/** Always called by generated parsers upon entry to a rule.
* This occurs after the new context has been pushed. Access field
* _ctx get the current context.
*
* This is flexible because users do not have to regenerate parsers
* to get trace facilities.
*/
public void enterRule(TreeParserRuleContext localctx, int ruleIndex) {
_ctx = localctx;
localctx.start = _input.LT(1);
localctx.ruleIndex = ruleIndex;
}
public String getSourceName() {
return input.getSourceName();
return _input.getSourceName();
}
protected Object getCurrentInputSymbol(IntStream input) {
@ -95,19 +107,19 @@ public class TreeParser extends BaseRecognizer {
public void matchAny(IntStream ignore) { // ignore stream, copy of input
errorRecovery = false;
// failed = false;
Object look = input.LT(1);
if ( input.getTreeAdaptor().getChildCount(look)==0 ) {
input.consume(); // not subtree, consume 1 node and return
Object look = _input.LT(1);
if ( _input.getTreeAdaptor().getChildCount(look)==0 ) {
_input.consume(); // not subtree, consume 1 node and return
return;
}
// current node is a subtree, skip to corresponding UP.
// must count nesting level to get right UP
int level=0;
int tokenType = input.getTreeAdaptor().getType(look);
int tokenType = _input.getTreeAdaptor().getType(look);
while ( tokenType!=Token.EOF && !(tokenType==UP && level==0) ) {
input.consume();
look = input.LT(1);
tokenType = input.getTreeAdaptor().getType(look);
_input.consume();
look = _input.LT(1);
tokenType = _input.getTreeAdaptor().getType(look);
if ( tokenType == DOWN ) {
level++;
}
@ -115,7 +127,7 @@ public class TreeParser extends BaseRecognizer {
level--;
}
}
input.consume(); // consume UP
_input.consume(); // consume UP
}
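
The loop in matchAny() above skips a whole element by counting DOWN/UP nesting in the flattened node stream, where a node with children is serialized as: node, DOWN, its children, UP. A self-contained sketch of the same idea over a plain int array (not part of the runtime; all constants and the encoding are made up for illustration):

// Mirrors matchAny()'s level counting to step over one element, leaf or subtree.
class SkipSubtreeSketch {
    static final int EOF = -1, DOWN = -2, UP = -3, A = 1, B = 2, C = 3, D = 4, E = 5;

    // returns the index just past the element that starts at position i
    static int skipElement(int[] types, int i) {
        if (i + 1 >= types.length || types[i + 1] != DOWN) return i + 1;  // leaf: one slot
        int level = 0;
        i++;                                           // step onto the DOWN that opens the subtree
        while (types[i] != EOF) {
            if (types[i] == DOWN) level++;
            else if (types[i] == UP && --level == 0) return i + 1;        // just past the matching UP
            i++;
        }
        return i;                                      // ran off the end: malformed stream
    }

    public static void main(String[] args) {
        int[] stream = {A, DOWN, B, C, DOWN, D, UP, E, UP, EOF};          // ^(A B ^(C D) E)
        System.out.println(skipElement(stream, 0));                       // prints 9, the index of EOF
    }
}
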
/** We have DOWN/UP nodes in the stream that have no line info; override.
@ -165,7 +177,7 @@ public class TreeParser extends BaseRecognizer {
* There is no way to force the first node to be the root.
*/
public boolean inContext(String context) {
return inContext(input.getTreeAdaptor(), getTokenNames(), input.LT(1), context);
return inContext(_input.getTreeAdaptor(), getTokenNames(), _input.LT(1), context);
}
/** The worker for inContext. It's static and full of parameters for

View File

@ -0,0 +1,21 @@
import org.antlr.v4.runtime.ANTLRFileStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.tree.CommonASTNodeStream;
import org.antlr.v4.runtime.tree.Tree;
import org.antlr.v4.runtime.tree.TreeNodeStream;
public class TestU {
public static void main(String[] args) throws Exception {
ULexer t = new ULexer(new ANTLRFileStream(args[0]));
CommonTokenStream tokens = new CommonTokenStream(t);
UParser p = new UParser(tokens);
p.setBuildParseTrees(true);
UParser.aContext ctx = p.a();
System.out.println(((Tree) ctx.tree).toStringTree());
TreeNodeStream nodes = new CommonASTNodeStream(ctx.tree);
UWalker walker = new UWalker(nodes);
walker.a();
}
}

tool/playground/U.g (new file, 8 additions)
View File

@ -0,0 +1,8 @@
grammar U;
options {output=AST;}
a : INT ID -> ID INT ;
INT : '0'..'9'+ ;
ID : 'a'..'z'+ ;
WS : (' '|'\n')+ {skip();} ;

View File

@ -369,19 +369,19 @@ MatchTree(t, elems) ::= <<
>>
MatchDOWN(m) ::= <<
move(<m.stateNumber>); match(Token.DOWN);
setState(<m.stateNumber>); match(Token.DOWN);
>>
MatchUP(m) ::= <<
move(<m.stateNumber>); match(Token.UP);
setState(<m.stateNumber>); match(Token.UP);
>>
InvokeRule(r, argExprsChunks) ::= <<
move(<r.stateNumber>); <if(r.labels)><r.labels:{l | <labelref(l)> = }><endif><r.name>(<argExprsChunks:{e| <e>}>);
setState(<r.stateNumber>); <if(r.labels)><r.labels:{l | <labelref(l)> = }><endif><r.name>(<argExprsChunks:{e| <e>}>);
>>
MatchToken(m) ::= <<
move(<m.stateNumber>); <if(m.labels)><m.labels:{l | <labelref(l)> = }>(Token)<endif>match(<m.name>);
setState(<m.stateNumber>); <if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>match(<m.name>);
>>
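
Hand-expanded for orientation, the templates above render roughly the following statements inside a generated tree-parser rule body for an element such as ^(ID INT). This is illustrative only, not actual output from this snapshot, and the state numbers are invented.

setState(5); match(ID);          // MatchToken: the root node of the subtree
setState(6); match(Token.DOWN);  // MatchDOWN: descend to the children
setState(7); match(INT);         // MatchToken: the single child node
setState(8); match(Token.UP);    // MatchUP: climb back out of the subtree
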
MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
@ -389,7 +389,7 @@ MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
MatchNotSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, true)>"
CommonSetStuff(m, expr, capture, invert) ::= <<
move(<m.stateNumber>);
setState(<m.stateNumber>);
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>_input.LT(1);
<capture>
if ( <if(!invert)>!<endif>(<expr>) ) throw new MismatchedSetException(this, _input);
@ -397,7 +397,7 @@ _input.consume();
>>
Wildcard(w) ::= <<
move(<w.stateNumber>);
setState(<w.stateNumber>);
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>_input.LT(1); _input.consume();
>>
@ -459,6 +459,9 @@ TokenListDecl(t) ::= "List\<Token> <t.name> = new ArrayList\<Token>();"
RuleContextDecl(r) ::= "<r.ctxName> <r.name>;"
RuleContextListDecl(rdecl) ::= "List\<<rdecl.ctxName>> <rdecl.name> = new ArrayList\<<rdecl.ctxName>>();"
NodeDecl(t) ::= "Object <t.name>;"
/** Default RuleContext type name for a Parser rule */
ParserRuleContext() ::= "ParserRuleContext"
@ -475,8 +478,8 @@ ListLabelName(label) ::= "<label>_list"
CaptureNextToken(d) ::= "<d.varName> = _input.LT(1);"
CaptureNextTokenType(d) ::= "<d.varName> = _input.LA(1);"
StructDecl(s,attrs,visitorDispatchMethods) ::= <<
public static class <s.name> extends ParserRuleContext {
StructDecl(s,attrs,visitorDispatchMethods,superClass="ParserRuleContext") ::= <<
public static class <s.name> extends <superClass> {
<attrs:{a | public <a>;}; separator="\n">
<if(s.ctorAttrs)>public <s.name>(RuleContext parent, int state) { super(parent, state); }<endif>
public <s.name>(RuleContext parent, int state<s.ctorAttrs:{a | , <a>}>) {
@ -494,6 +497,10 @@ public static class <s.name> extends ParserRuleContext {
}
>>
TreeParserStructDecl(s,attrs,visitorDispatchMethods) ::= <<
<StructDecl(superClass="TreeParserRuleContext", ...)>
>>
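
For a hypothetical tree-grammar rule named a with no args or labels, TreeParserStructDecl is expected to render roughly the class below; it only swaps the superclass relative to the parser case (attributes and visitor methods omitted, names invented).

public static class aContext extends TreeParserRuleContext {
    public aContext(RuleContext parent, int state) { super(parent, state); }
}
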
AltLabelStructDecl(s,attrs,visitorDispatchMethods) ::= <<
public static class <s.label>Context extends <currentRule.name>Context {
public <s.label>Context(<currentRule.name>Context ctx) { copyFrom(ctx); }
@ -684,7 +691,7 @@ public class <lexer.name> extends Lexer {
<lexer.namedActions.members>
<dumpActions(lexer, {}, actionFuncs, sempredFuncs)>
<dumpActions(lexer, "", actionFuncs, sempredFuncs)>
<atn>
}
>>

View File

@ -31,16 +31,25 @@ package org.antlr.v4;
import org.antlr.runtime.*;
import org.antlr.v4.analysis.AnalysisPipeline;
import org.antlr.v4.automata.*;
import org.antlr.v4.automata.ATNFactory;
import org.antlr.v4.automata.LexerATNFactory;
import org.antlr.v4.automata.ParserATNFactory;
import org.antlr.v4.automata.TreeParserATNFactory;
import org.antlr.v4.codegen.CodeGenPipeline;
import org.antlr.v4.parse.*;
import org.antlr.v4.parse.ANTLRLexer;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.parse.ToolANTLRParser;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.*;
import org.stringtemplate.v4.STGroup;
import java.io.*;
import java.lang.reflect.Field;
import java.util.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
public class Tool {
public String VERSION = "4.0-"+new Date();
@ -76,20 +85,22 @@ public class Tool {
public boolean generate_ATN_dot = false;
public String msgFormat = "antlr";
public boolean saveLexer = false;
public boolean genListener = true;
public boolean launch_ST_inspector = false;
public static Option[] optionDefs = {
new Option("outputDirectory", "-o", OptionArgType.STRING, "specify output directory where all output is generated"),
new Option("libDirectory", "-lib", OptionArgType.STRING, "specify location of .token files"),
new Option("report", "-report", "print out a report about the grammar(s) processed"),
new Option("printGrammar", "-print", "print out the grammar without actions"),
new Option("debug", "-debug", "generate a parser that emits debugging events"),
new Option("profile", "-profile", "generate a parser that computes profiling information"),
new Option("trace", "-trace", "generate a recognizer that traces rule entry/exit"),
new Option("generate_ATN_dot", "-atn", "generate rule augmented transition networks"),
new Option("msgFormat", "-message-format", OptionArgType.STRING, "specify output style for messages"),
new Option("saveLexer", "-Xsavelexer", "save temp lexer file created for combined grammars"),
new Option("launch_ST_inspector", "-XdbgST", "launch StringTemplate visualizer on generated code"),
new Option("outputDirectory", "-o", OptionArgType.STRING, "specify output directory where all output is generated"),
new Option("libDirectory", "-lib", OptionArgType.STRING, "specify location of .token files"),
new Option("report", "-report", "print out a report about the grammar(s) processed"),
new Option("printGrammar", "-print", "print out the grammar without actions"),
new Option("debug", "-debug", "generate a parser that emits debugging events"),
new Option("profile", "-profile", "generate a parser that computes profiling information"),
new Option("trace", "-trace", "generate a recognizer that traces rule entry/exit"),
new Option("generate_ATN_dot", "-atn", "generate rule augmented transition networks"),
new Option("msgFormat", "-message-format", OptionArgType.STRING, "specify output style for messages"),
new Option("genListener", "-walker", "generate parse tree walker and listener"),
new Option("saveLexer", "-Xsavelexer", "save temp lexer file created for combined grammars"),
new Option("launch_ST_inspector", "-XdbgST", "launch StringTemplate visualizer on generated code"),
};
// helper vars for option management

View File

@ -49,8 +49,10 @@ public class CodeGenPipeline {
}
else {
gen.writeRecognizer(gen.generateParser());
gen.writeListener(gen.generateListener());
gen.writeBlankListener(gen.generateBlankListener());
if ( g.tool.genListener) {
gen.writeListener(gen.generateListener());
gen.writeBlankListener(gen.generateBlankListener());
}
gen.writeHeaderFile();
}
gen.writeVocabFile();

View File

@ -34,7 +34,9 @@ import org.antlr.v4.codegen.model.*;
import org.antlr.v4.codegen.model.ast.*;
import org.antlr.v4.codegen.model.decl.*;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.StarLoopEntryState;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.semantics.UseDefAnalyzer;
import org.antlr.v4.tool.*;
@ -93,7 +95,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
LabeledOp matchOp = new MatchToken(this, (TerminalAST) ID);
if ( labelAST!=null ) {
String label = labelAST.getText();
TokenDecl d = new TokenDecl(this, label);
Decl d = getTokenLabelDecl(label);
((MatchToken)matchOp).labels.add(d);
getCurrentRuleFunction().addContextDecl(d);
if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
@ -106,6 +108,10 @@ public class ParserFactory extends DefaultOutputModelFactory {
return list(matchOp, listLabelOp);
}
public Decl getTokenLabelDecl(String label) {
return new TokenDecl(this, label);
}
@Override
public List<SrcOp> set(GrammarAST setAST, GrammarAST labelAST,
GrammarAST astOp, boolean invert)
@ -115,7 +121,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
else matchOp = new MatchSet(this, setAST);
if ( labelAST!=null ) {
String label = labelAST.getText();
TokenDecl d = new TokenDecl(this, label);
Decl d = getTokenLabelDecl(label);
((MatchSet)matchOp).labels.add(d);
getCurrentRuleFunction().addContextDecl(d);
if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
@ -134,7 +140,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
// TODO: dup with tokenRef
if ( labelAST!=null ) {
String label = labelAST.getText();
TokenDecl d = new TokenDecl(this, label);
Decl d = getTokenLabelDecl(label);
wild.labels.add(d);
getCurrentRuleFunction().addContextDecl(d);
if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
@ -159,7 +165,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
if ( labelAST!=null ) { // for x=(...), define x or x_list
String label = labelAST.getText();
TokenDecl d = new TokenDecl(this,label);
Decl d = getTokenLabelDecl(label);
c.label = d;
getCurrentRuleFunction().addContextDecl(d);
if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
@ -377,7 +383,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
if ( ast.getType()==ANTLRParser.SET || ast.getType()==ANTLRParser.WILDCARD ) {
String implLabel =
gen.target.getImplicitSetLabel(String.valueOf(ast.token.getTokenIndex()));
d = new TokenDecl(this, implLabel);
d = getTokenLabelDecl(implLabel);
((TokenDecl)d).isImplicit = true;
}
else if ( r!=null ) {
@ -389,7 +395,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
}
else {
String implLabel = gen.target.getImplicitTokenLabel(ast.getText());
d = new TokenDecl(this, implLabel);
d = getTokenLabelDecl(implLabel);
((TokenDecl)d).isImplicit = true;
}
op.getLabels().add(d);

View File

@ -29,7 +29,11 @@
package org.antlr.v4.codegen;
import org.antlr.v4.codegen.model.*;
import org.antlr.v4.codegen.model.MatchTree;
import org.antlr.v4.codegen.model.SrcOp;
import org.antlr.v4.codegen.model.TreeParserModel;
import org.antlr.v4.codegen.model.decl.Decl;
import org.antlr.v4.codegen.model.decl.NodeDecl;
import org.antlr.v4.tool.GrammarAST;
import java.util.List;
@ -54,4 +58,8 @@ public class TreeParserFactory extends ParserFactory {
return new MatchTree(this, treeBeginAST, omos);
}
@Override
public Decl getTokenLabelDecl(String label) {
return new NodeDecl(this, label);
}
}

View File

@ -30,11 +30,16 @@
package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.codegen.model.decl.*;
import org.antlr.v4.codegen.model.decl.AltLabelStructDecl;
import org.antlr.v4.codegen.model.decl.Decl;
import org.antlr.v4.codegen.model.decl.StructDecl;
import org.antlr.v4.codegen.model.decl.TreeParserStructDecl;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.misc.OrderedHashSet;
import org.antlr.v4.tool.*;
import org.antlr.v4.tool.Attribute;
import org.antlr.v4.tool.GrammarAST;
import org.antlr.v4.tool.Rule;
import java.util.*;
@ -72,7 +77,9 @@ public class RuleFunction extends OutputModelObject {
index = r.index;
ruleCtx = new StructDecl(factory, r);
ruleCtx = r.g.isTreeGrammar() ?
new TreeParserStructDecl(factory, r) :
new StructDecl(factory, r);
List<String> labels = r.getAltLabels();
if ( labels!=null ) {

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.decl;
import org.antlr.v4.codegen.OutputModelFactory;
/** x=ID or implicit _tID label in tree grammar */
public class NodeDecl extends TokenDecl {
public NodeDecl(OutputModelFactory factory, String varName) {
super(factory, varName);
}
}

View File

@ -30,34 +30,30 @@
package org.antlr.v4.codegen.model.decl;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.codegen.model.*;
import org.antlr.v4.codegen.model.ModelElement;
import org.antlr.v4.codegen.model.VisitorDispatchMethod;
import org.antlr.v4.runtime.misc.OrderedHashSet;
import org.antlr.v4.tool.*;
import org.antlr.v4.tool.Attribute;
import org.antlr.v4.tool.Rule;
import java.util.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/** This object models the structure holding all of the parameters,
* return values, local variables, and labels associated with a rule.
*/
public class StructDecl extends Decl {
public String superClass;
@ModelElement public OrderedHashSet<Decl> attrs = new OrderedHashSet<Decl>();
@ModelElement public Collection<Attribute> ctorAttrs;
@ModelElement public List<VisitorDispatchMethod> visitorDispatchMethods;
public StructDecl(OutputModelFactory factory, Rule r) {
super(factory, factory.getGenerator().target.getRuleFunctionContextStructName(r));
addVisitorDispatchMethods(r);
// boolean multiAlts = labels!=null && labels.size()>1;
// visitorDispatchMethods = new ArrayList<VisitorDispatchMethod>();
// VisitorDispatchMethod enter = multiAlts ?
// new SwitchedVisitorDispatchMethod(factory, r, true) :
// new VisitorDispatchMethod(factory, r, true);
// visitorDispatchMethods.add(enter);
// VisitorDispatchMethod exit = multiAlts ?
// new SwitchedVisitorDispatchMethod(factory, r, false) :
// new VisitorDispatchMethod(factory, r, false);
// visitorDispatchMethods.add(exit);
if ( !factory.getGrammar().isTreeGrammar() ) {
addVisitorDispatchMethods(r);
}
}
public void addVisitorDispatchMethods(Rule r) {

View File

@ -31,7 +31,7 @@ package org.antlr.v4.codegen.model.decl;
import org.antlr.v4.codegen.OutputModelFactory;
/** */
/** x=ID or implicit _tID label */
public class TokenDecl extends Decl {
public boolean isImplicit;

View File

@ -0,0 +1,10 @@
package org.antlr.v4.codegen.model.decl;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.tool.Rule;
public class TreeParserStructDecl extends StructDecl {
public TreeParserStructDecl(OutputModelFactory factory, Rule r) {
super(factory, r);
}
}