removed all traces of FORCED_ACTION

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 8971]
parrt 2011-07-31 12:36:07 -08:00
parent a7f2bc31ac
commit ec5d74c83e
17 changed files with 107 additions and 159 deletions

View File

@ -143,7 +143,6 @@ public abstract class ATNSimulator {
case Transition.RANGE : return new RangeTransition(arg1, arg2, target);
case Transition.RULE :
RuleTransition rt = new RuleTransition(arg2, atn.states.get(arg1), target);
rt.argIndex = arg3;
return rt;
case Transition.PREDICATE :
PredicateTransition pt = new PredicateTransition(target, arg1, arg2);
@ -154,7 +153,6 @@ public abstract class ATNSimulator {
ActionTransition a = new ActionTransition(target, arg1, arg2);
a.isCtxDependent = arg3==1;
return a;
case Transition.FORCED_ACTION : return new ActionTransition(target, arg1, arg2);
case Transition.SET : return new SetTransition(sets.get(arg1), target);
case Transition.NOT_SET : return new NotSetTransition(sets.get(arg1), null, target);
case Transition.WILDCARD : return new WildcardTransition(target);
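For context, a minimal, self-contained sketch of the deserialization step this hunk belongs to: each serialized edge is an integer type plus up to three integer arguments, and a switch maps the type to a transition. The constant values match the Transition hunk later in this commit; the class name and string descriptions are illustrative stand-ins, not the real runtime objects. The point of the change is that there is no longer a separate FORCED_ACTION case producing a second kind of action edge.

// Illustrative sketch only: decode one serialized ATN edge, mirroring the switch above.
final class EdgeDecodeSketch {
    // Subset of the edge-type constants (values as defined in the Transition hunk below).
    static final int RANGE = 2, RULE = 3, PREDICATE = 4, ACTION = 6;

    static String decode(int edgeType, int arg1, int arg2, int arg3) {
        switch (edgeType) {
            case RANGE:     return "range " + arg1 + ".." + arg2;
            case RULE:      return "rule ref: start state " + arg1 + ", ruleIndex " + arg2; // no argIndex anymore
            case PREDICATE: return "pred " + arg1 + ":" + arg2 + (arg3 == 1 ? " (ctx dependent)" : "");
            case ACTION:    return "action " + arg1 + ":" + arg2 + (arg3 == 1 ? " (ctx dependent)" : "");
            default:        return "edge type " + edgeType;
        }
    }

    public static void main(String[] args) {
        // A former FORCED_ACTION edge now arrives simply as ACTION.
        System.out.println(decode(ACTION, 1, 0, 0)); // action 1:0
    }
}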

View File

@ -513,7 +513,7 @@ public class ParserATNSimulator extends ATNSimulator {
ActionTransition at = (ActionTransition)t;
if ( debug ) System.out.println("ACTION edge "+at.ruleIndex+":"+at.actionIndex);
if ( debug && !config.traversedAction ) {
System.out.println("NONFORCED; pruning future pred eval derived from s"+
System.out.println("pruning future pred eval derived from s"+
config.state.stateNumber);
}
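The (now unprefixed) debug message reflects behavior this commit leaves in place: once prediction walks through an action edge, predicates reached from that configuration are pruned, since the action has not actually run. A hedged sketch of that gating idea, using a toy Config that carries only the traversedAction flag seen above:

// Illustrative sketch: mark a configuration once an action edge is traversed during prediction,
// and skip predicate evaluation for anything derived from it afterwards.
final class PredPruneSketch {
    static final class Config {
        final int stateNumber;
        boolean traversedAction;
        Config(int stateNumber) { this.stateNumber = stateNumber; }
    }

    static Config followActionEdge(Config config, int targetState, boolean debug) {
        if (debug && !config.traversedAction) {
            System.out.println("pruning future pred eval derived from s" + config.stateNumber);
        }
        Config next = new Config(targetState);
        next.traversedAction = true;
        return next;
    }

    static boolean mayEvaluatePredicate(Config config) {
        return !config.traversedAction; // a predicate past an un-executed action is not trusted
    }

    public static void main(String[] args) {
        Config c = followActionEdge(new Config(513), 514, true);
        System.out.println(mayEvaluatePredicate(c)); // false
    }
}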

View File

@ -33,7 +33,6 @@ package org.antlr.v4.runtime.atn;
public class RuleTransition extends Transition {
/** Ptr to the rule definition object for this rule ref */
public int ruleIndex; // no Rule object at runtime
public int argIndex = -1; // args are forced actions
/** What node to begin computations following ref to rule */
public ATNState followState;

View File

@ -53,10 +53,9 @@ public abstract class Transition {
public static final int PREDICATE = 4; // e.g., {isType(input.LT(1))}?
public static final int ATOM = 5;
public static final int ACTION = 6;
public static final int FORCED_ACTION = 7;
public static final int SET = 8; // ~(A|B) or ~atom, wildcard, which convert to next 2
public static final int NOT_SET = 9;
public static final int WILDCARD = 10;
public static final int SET = 7; // ~(A|B) or ~atom, wildcard, which convert to next 2
public static final int NOT_SET = 8;
public static final int WILDCARD = 9;
public static String[] serializationNames = {
@ -67,7 +66,6 @@ public abstract class Transition {
"PREDICATE",
"ATOM",
"ACTION",
"FORCED_ACTION",
"SET",
"NOT_SET",
"WILDCARD",

View File

@ -139,13 +139,11 @@ public class ATNSerializer {
trg = ((RuleTransition)t).followState.stateNumber;
arg1 = ((RuleTransition)t).target.stateNumber;
arg2 = ((RuleTransition)t).ruleIndex;
arg3 = ((RuleTransition)t).argIndex;
break;
case Transition.PREDICATE :
PredicateTransition pt = (PredicateTransition)t;
arg1 = pt.ruleIndex;
arg2 = pt.predIndex;
// if ( pt.isCtxDependent ) edgeType = Transition.DEPENDENT_PREDICATE;
arg3 = pt.isCtxDependent ? 1 : 0 ;
break;
case Transition.RANGE :
@ -160,7 +158,6 @@ public class ATNSerializer {
arg1 = at.ruleIndex;
arg2 = at.actionIndex;
arg3 = at.isCtxDependent ? 1 : 0 ;
// if ( at.isCtxDependent ) edgeType = Transition.FORCED_DEPENDENT_ACTION;
break;
case Transition.SET :
arg1 = setIndex++;
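The serializer flattens every transition to an edge type plus three generic integer arguments, so removing FORCED_ACTION only means dropping the RuleTransition argIndex slot and the dead comments; ACTION and PREDICATE edges keep the same (ruleIndex, index, ctx-dependent) layout. A self-contained sketch of that convention; the array layout is illustrative, not the actual serialized ATN format:

import java.util.Arrays;

// Illustrative sketch: each edge type packs its payload into the same three argument slots.
final class EdgeArgsSketch {
    static int[] encodeAction(int ruleIndex, int actionIndex, boolean isCtxDependent) {
        return new int[] { 6 /* ACTION */, ruleIndex, actionIndex, isCtxDependent ? 1 : 0 };
    }

    static int[] encodePredicate(int ruleIndex, int predIndex, boolean isCtxDependent) {
        return new int[] { 4 /* PREDICATE */, ruleIndex, predIndex, isCtxDependent ? 1 : 0 };
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(encodeAction(2, 0, false)));   // [6, 2, 0, 0]
        System.out.println(Arrays.toString(encodePredicate(2, 1, true))); // [4, 2, 1, 1]
    }
}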

View File

@ -191,11 +191,6 @@ public class ParserATNFactory implements ATNFactory {
ATNState left = newState(node);
ATNState right = newState(node);
RuleTransition call = new RuleTransition(r.index, start, right);
ActionAST arg = (ActionAST)node.getFirstChildWithType(ANTLRParser.ARG_ACTION);
if ( arg!=null ) {
call.argIndex = g.actions.get(arg);
}
left.addTransition(call);
node.atnState = left;
@ -244,10 +239,6 @@ public class ParserATNFactory implements ATNFactory {
ATNState right = newState(action);
ActionTransition a = new ActionTransition(right);
a.ruleIndex = currentRule.index;
if ( action.getType()==ANTLRParser.FORCED_ACTION ) {
a.actionIndex = g.actions.get(action);
a.isCtxDependent = UseDefAnalyzer.actionIsContextDependent(action);
}
left.transition = a;
action.atnState = left;
return new Handle(left, right);
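Both hunks above simplify the ATN construction for an element: a rule reference no longer registers its argument action (so no argIndex), and an in-alternative action becomes a bare, epsilon-like edge that only remembers which rule it belongs to. A rough sketch of those two handle shapes, using simplified stand-in types rather than the real factory classes:

// Illustrative sketch: the two small state/edge shapes built for a rule ref and for an action.
final class AtnFactorySketch {
    static final class State {
        final int number;
        String edge; // description of the single outgoing transition, if any
        State(int number) { this.number = number; }
    }
    static final class Handle {
        final State left, right;
        Handle(State left, State right) { this.left = left; this.right = right; }
    }

    private int nextStateNumber = 0;
    private State newState() { return new State(nextStateNumber++); }

    /** left --rule(ruleIndex)--> ruleStart ... continues at right; no argIndex recorded. */
    Handle ruleRef(int ruleIndex, State ruleStart) {
        State left = newState(), right = newState();
        left.edge = "rule " + ruleIndex + " (start s" + ruleStart.number + ", follow s" + right.number + ")";
        return new Handle(left, right);
    }

    /** left --action(in rule ruleIndex)--> right; actions are epsilon edges during prediction. */
    Handle action(int ruleIndex) {
        State left = newState(), right = newState();
        left.edge = "action in rule " + ruleIndex + " -> s" + right.number;
        return new Handle(left, right);
    }

    public static void main(String[] args) {
        AtnFactorySketch f = new AtnFactorySketch();
        System.out.println(f.action(3).left.edge); // action in rule 3 -> s1
    }
}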

View File

@ -76,8 +76,6 @@ public class CodeGeneratorExtension {
public List<SrcOp> action(List<SrcOp> ops) { return ops; }
public List<SrcOp> forcedAction(List<SrcOp> ops) { return ops; }
public List<SrcOp> sempred(List<SrcOp> ops) { return ops; }
// AST OPS

View File

@ -179,15 +179,13 @@ public class OutputModelController {
}
rsf.actions.put(g.sempreds.get(p), new Action(delegate, p));
}
else if ( a.getType()== ANTLRParser.ACTION ||
a.getType()==ANTLRParser.FORCED_ACTION )
{
else if ( a.getType()== ANTLRParser.ACTION ) {
RuleActionFunction raf = lexer.sempredFuncs.get(r);
if ( raf==null ) {
raf = new RuleActionFunction(delegate, r, ctxType);
lexer.actionFuncs.put(r, raf);
}
raf.actions.put(g.actions.get(a), new ForcedAction(delegate, a));
raf.actions.put(g.lexerActions.get(a), new ForcedAction(delegate, a));
}
if ( a instanceof PredAST ) {
@ -196,13 +194,11 @@ public class OutputModelController {
lexer.sempredFuncs.put(r, rsf);
rsf.actions.put(g.sempreds.get(p), new Action(delegate, p));
}
else if ( a.getType()==ANTLRParser.ACTION ||
a.getType()==ANTLRParser.FORCED_ACTION )
{
else if ( a.getType()==ANTLRParser.ACTION ) {
// lexer sees {{...}} and {..} as same; neither are done until accept
RuleActionFunction raf = new RuleActionFunction(delegate, r, ctxType);
lexer.actionFuncs.put(r, raf);
raf.actions.put(g.actions.get(a), new ForcedAction(delegate, a));
raf.actions.put(g.lexerActions.get(a), new ForcedAction(delegate, a));
}
}
}
@ -312,12 +308,6 @@ public class OutputModelController {
return ops;
}
public List<SrcOp> forcedAction(GrammarAST ast) {
List<SrcOp> ops = delegate.forcedAction(ast);
for (CodeGeneratorExtension ext : extensions) ops = ext.forcedAction(ops);
return ops;
}
public List<SrcOp> sempred(GrammarAST ast) {
List<SrcOp> ops = delegate.sempred(ast);
for (CodeGeneratorExtension ext : extensions) ops = ext.sempred(ops);
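With the forcedAction hook gone, a {{...}} block in a parser rule flows through the same action pipeline as any other action: the controller asks the delegate for the ops and then lets every registered extension rewrite them in turn. A minimal sketch of that chaining pattern with simplified interfaces:

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch: thread the generated ops for one action through each extension in order.
final class ExtensionChainSketch {
    interface Extension {
        List<String> action(List<String> ops);
    }

    static List<String> action(List<String> opsFromDelegate, List<Extension> extensions) {
        List<String> ops = opsFromDelegate;
        for (Extension ext : extensions) ops = ext.action(ops);
        return ops;
    }

    public static void main(String[] args) {
        List<Extension> extensions = new ArrayList<>();
        extensions.add(ops -> { ops.add("// added by a debugging extension"); return ops; });
        List<String> ops = new ArrayList<>();
        ops.add("exec user action");
        System.out.println(action(ops, extensions)); // [exec user action, // added by a debugging extension]
    }
}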

View File

@ -107,7 +107,6 @@ element returns [List<? extends SrcOp> omos]
| atom[null,null,false] {$omos = $atom.omos;}
| subrule {$omos = $subrule.omos;}
| ACTION {$omos = controller.action($ACTION);}
| FORCED_ACTION {$omos = controller.forcedAction($FORCED_ACTION);}
| SEMPRED {$omos = controller.sempred($SEMPRED);}
| treeSpec
;

View File

@ -19,7 +19,7 @@
// 3) All errors are pushed as far down the parsing chain as possible, which means
// that the lexer tries to defer error reporting to the parser, and the parser
// tries to defer error reporting to a semantic phase consisting of a single
// walk of the AST. The reason for this is that the error messages produced
// from later phases of the parse will generally have better context and so
// be more useful to the end user. Consider the message: "Syntax error at 'options'"
// vs: "You cannot specify two options{} sections in a single grammar file".
@ -53,7 +53,7 @@ lexer grammar ANTLRLexer;
// ==============================================================================
// Note that while this grammar does not care about order of constructs
// that don't really matter, such as options before @header etc, it must first
// be parsed by the original v2 parser, before it replaces it. That parser does
// care about order of structures. Hence we are constrained by the v2 parser
// for at least the first bootstrap release that causes this parser to replace
// the v2 version.
@ -72,7 +72,7 @@ options {
// for users.
//
language = Java;
// The super class that this lexer should expect to inherit from, and
// which contains any and all support routines for the lexer. This is
// commented out in this baseline (definitive or normative grammar)
@ -82,7 +82,7 @@ options {
//superclass = AbstractA3Lexer;
}
tokens { SEMPRED; FORCED_ACTION; }
tokens { SEMPRED; }
// Include the copyright in this source and also the generated source
//
@ -139,7 +139,7 @@ COMMENT
// Record the start line and offsets as if we need to report an
// unterminated comment, then we want to show the start of the comment
// we think is broken, not the end, where people will have to try and work
// it out themselves.
//
int startLine = $line;
int offset = getCharPositionInLine();
@ -148,7 +148,7 @@ COMMENT
// or something silly.
//
'/' // Comment introducer
(
// Single line comment, possibly with embedded src/line directives
// in a similar style to the C pre-processor, allowing generated
@ -158,9 +158,9 @@ COMMENT
'/'
(
(' $ANTLR')=> ' $ANTLR' SRC
| ~(NLCHARS)*
)
| // Multi-line comment, which may be a documentation comment
// if it starts /** (note that we protect against accidentally
// recognizing a comment /**/ as a documentation comment
@ -171,17 +171,17 @@ COMMENT
)
// Should we support embedded multiline comments here?
//
(
// Pick out end of multiline comment and exit the loop
// if we find it.
//
{ !(input.LA(1) == '*' && input.LA(2) == '/') }?
// Anything else other than the non-greedy match of
// the comment close sequence
//
.
)*
(
// Look for the comment terminator, but if it is accidentally
@ -190,14 +190,14 @@ COMMENT
// to the start of the unterminated multi-line comment
//
'*/'
| // Unterminated comment!
//
{
// ErrorManager.msg(Msg.UNTERMINATED_DOC_COMMENT, startLine, offset, $pos, startLine, offset, $pos, (Object)null);
}
)
| // There was nothing that made sense following the opening '/' and so
// we issue an error regarding the malformed comment
//
@ -213,7 +213,7 @@ COMMENT
// just skip and save token space if not.
//
if ($type != DOC_COMMENT) {
$channel=2; // Comments are on channel 2
}
}
@ -227,7 +227,7 @@ DOUBLE_QUOTE_STRING_LITERAL
DOUBLE_ANGLE_STRING_LITERAL
: '<<' (options {greedy=false;} : . )* '>>'
;
// --------------
// Argument specs
//
@ -241,17 +241,17 @@ ARG_ACTION
{
StringBuffer theText = new StringBuffer();
}
: '['
(
('\\')=>'\\'
(
(']')=>']'
{
// We do not include the \ character itself when picking up an escaped ]
//
theText.append(']');
}
| c=.
{
// We DO include the \ character when finding any other escape
//
@ -266,14 +266,14 @@ ARG_ACTION
//
theText.append($as.text);
}
| ('\'')=>ac=ACTION_CHAR_LITERAL
{
// Append the embedded character literal text
//
theText.append($ac.text);
}
| c=~']'
{
// Whatever else we found in the scan
@ -281,7 +281,7 @@ ARG_ACTION
theText.append((char)$c);
}
)*
']'
{
// Set the token text to our gathered string
@ -289,7 +289,7 @@ ARG_ACTION
setText(theText.toString());
}
;
// -------
// Actions
//
@ -297,24 +297,12 @@ ARG_ACTION
// within what we have assumed to be literals in the action code, the
// job of the lexer is merely to gather the code within the action
// (delimited by {}) and pass it to the parser as a single token.
// Note the special case of the {{ }} action, which is a forced
// action, that the generated code will execute regardless of
// backtracking (predicate) level.
// We know that this token will be asked for its text somewhere
// in the upcoming parse, so setting the text here to exclude
// the delimiting {} is no additional overhead.
//
ACTION
: NESTED_ACTION ('?' {$type = SEMPRED;} )?
{
// Note that because of the sempred detection above, we
// will not see {{ action }}? as a forced action, but as a semantic
// predicate.
if ( $text.startsWith("{{") && $text.endsWith("}}") ) {
// Switch types to a forced action
$type = FORCED_ACTION;
}
}
;
// ----------------
@ -335,7 +323,7 @@ NESTED_ACTION
// Record the start line and offsets as if we need to report an
// unterminated block, then we want to show the start of the comment
// we think is broken, not the end, where people will have to try and work
// it out themselves.
//
int startLine = getLine();
int offset = getCharPositionInLine();
@ -346,7 +334,7 @@ NESTED_ACTION
'{'
(
// And now we can match one of a number of embedded
// elements within the action until we find a
// } that balances the opening {. If we do not find
// the balanced } then we will hit EOF and can issue
// an error message about the brace that we believe to
@ -355,41 +343,41 @@ NESTED_ACTION
// opening brace that we feel is in error and this will
// guide the user to the correction as best we can.
//
// An embedded {} block
//
NESTED_ACTION
| // What appears to be a literal
//
ACTION_CHAR_LITERAL
| // We have assumed that the target language has C/Java
// type comments.
//
COMMENT
| // What appears to be a literal
//
ACTION_STRING_LITERAL
| // What appears to be an escape sequence
//
ACTION_ESC
| // Some other single character that is not
// handled above
//
~('\\'|'"'|'\''|'/'|'{'|'}')
)*
(
// Correctly balanced closing brace
//
'}'
| // Looks like we have an imbalanced {} block, report
// with respect to the opening brace.
//
@ -399,8 +387,8 @@ NESTED_ACTION
}
)
;
// Keywords
// --------
// keywords used to specify ANTLR v3 grammars. Keywords may not be used as
@ -483,15 +471,15 @@ TOKEN_REF
RULE_REF
: ('a'..'z') ('A'..'Z' | 'a'..'z' | '0'..'9' | '_')*
;
// ----------------------------
// Literals embedded in actions
//
// Note that we have made the assumption that the language used within
// actions uses the fairly standard " and ' delimiters for literals and
// that within these literals, characters are escaped using the \ character.
// There are some languages which do not conform to this in all cases, such
// as by using /string/ and so on. We will have to deal with such cases
// if they come up in targets.
//
@ -535,7 +523,7 @@ ACTION_ESC
INT : ('0'..'9')+
;
// -----------
// Source spec
//
// A fragment rule for picking up information about an originating
@ -547,8 +535,8 @@ fragment
SRC : 'src' WSCHARS+ file=ACTION_STRING_LITERAL WSCHARS+ line=INT
{
// TODO: Add target specific code to change the source file name and current line number
//
}
;
// --------------
@ -575,17 +563,17 @@ HEX_DIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ;
//
fragment
ESC_SEQ
: '\\'
(
// The standard escaped character set such as tab, newline,
// etc.
//
'b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\'
| // A Java style Unicode escape sequence
//
UNICODE_ESC
| // An illegal escape sequence
//
{
@ -606,43 +594,43 @@ UNICODE_ESC
int hCount = 0;
}
: 'u' // Leadin for unicode escape sequence
// We now require 4 hex digits. Note though
// that we accept any number of characters
// and issue an error if we do not get 4. We cannot
// use an infinite count such as + because this
// might consume too many, so we lay out the lexical
// options and issue an error at the invalid paths.
//
(
(
(
HEX_DIGIT { hCount++; }
(
HEX_DIGIT { hCount++; }
(
HEX_DIGIT { hCount++; }
(
(
// Four valid hex digits, we are good
//
HEX_DIGIT { hCount++; }
| // Three valid digits
)
| // Two valid digits
)
| // One valid digit
)
)
| // No valid hex digits at all
)
// Now check the digit count and issue an error if we need to
//
{
if (hCount != 4) {
// TODO: Issue error message
}
}
@ -655,31 +643,31 @@ UNICODE_ESC
// to the parser and are used to make the grammar easier to read
// for humans.
//
WS
: (
' '
| '\t'
| '\r'
| '\n'
| '\f'
)+
{
$channel=2;
}
;
// A fragment rule for use in recognizing end of line in
// rules like COMMENT.
//
fragment
NLCHARS
: '\n' | '\r'
;
// A fragment rule for recognizing traditional whitespace
// characters within lexer rules.
//
fragment
WSCHARS
: ' ' | '\t' | '\f'
@ -688,13 +676,13 @@ WSCHARS
// A fragment rule for recognizing both traditional whitespace and
// end of line markers, when we don't care to distinguish but don't
// want any action code going on.
//
fragment
WSNLCHARS
: ' ' | '\t' | '\f' | '\n' | '\r'
;
// -----------------
// Illegal Character
//
// This is an illegal character trap which is always the last rule in the
@ -702,7 +690,7 @@ WSNLCHARS
// the last rule in the file will match when no other rule knows what to do
// about the character. It is reported as an error but is not passed on to the
// parser. This means that the parser can deal with the grammar file anyway
// but we will not try to analyse or code generate from a file with lexical
// errors.
//
ERRCHAR
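The net effect of the ACTION rule change earlier in this file: a nested {...} block followed by ? is still re-typed as SEMPRED, but {{...}} is no longer promoted to a FORCED_ACTION token and simply stays an ACTION (the parser and tree-walker grammars later in this commit lose the corresponding alternatives). A hedged sketch of that classification with toy token types:

// Illustrative sketch: how a matched NESTED_ACTION block is typed after this commit.
final class ActionTokenTypeSketch {
    enum TokenType { ACTION, SEMPRED }

    /** text is the {...} block already matched; followedByQuestionMark says whether a '?' came next. */
    static TokenType classify(String text, boolean followedByQuestionMark) {
        if (followedByQuestionMark) return TokenType.SEMPRED;
        // The old special case re-typed text starting with "{{" and ending with "}}" as FORCED_ACTION;
        // that branch is gone, so double-braced blocks are ordinary actions now.
        return TokenType.ACTION;
    }

    public static void main(String[] args) {
        System.out.println(classify("{ x = 1; }", false));    // ACTION
        System.out.println(classify("{{ init(); }}", false)); // ACTION (formerly FORCED_ACTION)
        System.out.println(classify("{ isType(x) }", true));  // SEMPRED
    }
}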

View File

@ -582,7 +582,6 @@ element
)
| ebnf
| ACTION<ActionAST>
| FORCED_ACTION<ActionAST>
| SEMPRED -> SEMPRED<PredAST>
| treeSpec
( ebnfSuffix -> ^( ebnfSuffix ^(BLOCK<BlockAST>[$treeSpec.start,"BLOCK"] ^(ALT<AltAST> treeSpec ) ) )

View File

@ -26,7 +26,7 @@
/** The definitive ANTLR v3 tree grammar to parse ANTLR v4 grammars.
* Parses trees created in ANTLRParser.g.
OBSOLETE; See tree visitor grammar.
*/
tree grammar ASTVerifier;
@ -260,7 +260,6 @@ element
| atom
| subrule
| ACTION
| FORCED_ACTION
| SEMPRED
| GATED_SEMPRED
| treeSpec
@ -315,7 +314,7 @@ atom: range
blockSet
: ^(SET setElement+)
;
setElement
: STRING_LITERAL
| TOKEN_REF

View File

@ -94,7 +94,6 @@ element returns [ATNFactory.Handle p]
| atom {$p = $atom.p;}
| subrule {$p = $subrule.p;}
| ACTION {$p = factory.action((ActionAST)$ACTION);}
| FORCED_ACTION {$p = factory.action((ActionAST)$FORCED_ACTION);}
| SEMPRED {$p = factory.sempred((PredAST)$SEMPRED);}
| treeSpec {$p = $treeSpec.p;}
| ^(ROOT a=astOperand) {$p = $a.p;}

View File

@ -186,7 +186,7 @@ grammarSpec
prequelConstructs
: prequelConstruct*
;
prequelConstruct
: optionsSpec
| delegateGrammars
@ -257,7 +257,7 @@ currentOuterAltNumber=0;
loc=locals?
( opts=optionsSpec
| a=ruleAction {actions.add($a.start);}
)*
{discoverRule((RuleAST)$RULE, $ID, mods, (ActionAST)$ARG_ACTION,
$ret.start!=null?(ActionAST)$ret.start.getChild(0):null,
$thr.start, $opts.start, actions, (GrammarAST)input.LT(1));}
@ -281,7 +281,7 @@ finallyClause
locals
: ^(LOCALS ARG_ACTION)
;
ruleReturns
: ^(RETURNS ARG_ACTION)
;
@ -328,7 +328,7 @@ outerAlternative
}
: alternative
;
alternative
: ^(ALT_REWRITE alternative {inRewrite=true;} rewrite {inRewrite=false;})
| ^(ALT element+)
@ -340,7 +340,6 @@ element
| atom
| subrule
| ACTION {actionInAlt((ActionAST)$ACTION);}
| FORCED_ACTION {actionInAlt((ActionAST)$FORCED_ACTION);}
| SEMPRED {sempredInAlt((PredAST)$SEMPRED);}
| treeSpec
| ^(ROOT astOperand) {rootOp($ROOT, $astOperand.start);}
@ -394,7 +393,7 @@ atom: range
blockSet
: ^(SET setElement+)
;
setElement
: STRING_LITERAL {stringRef((TerminalAST)$STRING_LITERAL, null);}
| TOKEN_REF {tokenRef((TerminalAST)$TOKEN_REF, null);}
@ -440,7 +439,7 @@ elementOption[TerminalAST t]
;
rewrite
: {discoverRewrites($start);} predicatedRewrite* nakedRewrite {finishRewrites($start);}
;
predicatedRewrite
@ -490,7 +489,7 @@ rewriteTreeAtom
rewriteElementOptions
: ^(ELEMENT_OPTIONS rewriteElementOption[(TerminalAST)$start.getParent()]+)
;
rewriteElementOption[TerminalAST t]
: ID {rewriteTerminalOption(t, $ID, null);}
| ^(ASSIGN id=ID v=ID) {rewriteTerminalOption(t, $id, $v);}
@ -500,7 +499,7 @@ rewriteElementOption[TerminalAST t]
rewriteTreeEbnf
: ^(ebnfSuffix ^(REWRITE_BLOCK {rewriteEBNFLevel++;} rewriteTreeAlt {rewriteEBNFLevel--;}))
;
rewriteTree
: ^(TREE_BEGIN rewriteTreeAtom rewriteTreeElement* )
;

View File

@ -73,7 +73,7 @@ rec_rule returns [boolean isLeftRec]
( ^(LOCALS ARG_ACTION) )?
( ^(OPTIONS .*)
| ^(AT ID ACTION)
)*
ruleBlock {$isLeftRec = $ruleBlock.isLeftRec;}
exceptionGroup
)
@ -90,7 +90,7 @@ exceptionHandler
;
finallyClause
: ^(FINALLY ACTION)
;
ruleModifier
@ -117,7 +117,7 @@ ruleBlock returns [boolean isLeftRec]
outerAlternative[GrammarAST rew] returns [boolean isLeftRec]
: (binaryMultipleOp)=> binaryMultipleOp
{binaryAlt($start, $rew, currentOuterAltNumber); $isLeftRec=true;}
| (binary)=> binary
{binaryAlt($start, $rew, currentOuterAltNumber); $isLeftRec=true;}
| (ternary)=> ternary
{ternaryAlt($start, $rew, currentOuterAltNumber); $isLeftRec=true;}
@ -191,27 +191,26 @@ element
| RULE_REF
| ebnf
| tree_
| FORCED_ACTION
| ACTION
| SEMPRED
| EPSILON
;
setElement
: STRING_LITERAL
| TOKEN_REF
;
ebnf: block
| ^( OPTIONAL block )
| ^( CLOSURE block )
| ^( POSITIVE_CLOSURE block )
;
block
: ^(BLOCK ACTION? alternative+)
;
alternative
: ^(ALT_REWRITE alternative rewrite)
| ^(ALT element+)
@ -229,14 +228,14 @@ atom
| TOKEN_REF
| ^(WILDCARD elementOptions)
| WILDCARD
| ^(DOT ID element)
;
ast_suffix
: ROOT
| BANG
;
rewrite
: rewrite_result*
;

View File

@ -134,12 +134,10 @@ public class Grammar implements AttributeResolver {
*/
public Map<String,ActionAST> namedActions = new HashMap<String,ActionAST>();
/** Tracks all forced actions in all alternatives of all rules.
* This is includes rule arguments.
* Or if lexer all actions period. Doesn't track sempreds.
* maps tree node to action index.
/** Tracks all lexer actions in all alternatives of all rules.
* Doesn't track sempreds. maps tree node to action index.
*/
public LinkedHashMap<ActionAST, Integer> actions = new LinkedHashMap<ActionAST, Integer>();
public LinkedHashMap<ActionAST, Integer> lexerActions = new LinkedHashMap<ActionAST, Integer>();
/** All sempreds found in grammar; maps tree node to sempred index;
* sempred index is 0..n-1

View File

@ -29,7 +29,6 @@
package org.antlr.v4.tool;
import org.antlr.v4.parse.ANTLRParser;
import org.stringtemplate.v4.misc.MultiMap;
import java.util.*;
@ -130,12 +129,10 @@ public class Rule implements AttributeResolver {
public void defineActionInAlt(int currentAlt, ActionAST actionAST) {
actions.add(actionAST);
alt[currentAlt].actions.add(actionAST);
if ( g.isLexer() || actionAST.getType()==ANTLRParser.FORCED_ACTION ||
actionAST.getType()==ANTLRParser.ARG_ACTION )
{
actionIndex = g.actions.size();
if ( g.actions.get(actionAST)==null ) {
g.actions.put(actionAST, actionIndex);
if ( g.isLexer() ) {
actionIndex = g.lexerActions.size();
if ( g.lexerActions.get(actionAST)==null ) {
g.lexerActions.put(actionAST, actionIndex);
}
}
}
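Taken together, the last two hunks reduce the bookkeeping to this: only lexer grammars index in-alternative actions, the renamed Grammar.lexerActions map holds them, and an action's index is simply its insertion position. A small sketch of that logic with stand-in types (the real code keys the map by the action's AST node):

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch: assign insertion-ordered indices to lexer actions only.
final class LexerActionIndexSketch {
    // Stand-in for Grammar.lexerActions; LinkedHashMap keeps discovery order, so indices are stable.
    final Map<String, Integer> lexerActions = new LinkedHashMap<>();
    final boolean isLexer;

    LexerActionIndexSketch(boolean isLexer) { this.isLexer = isLexer; }

    /** Mirrors the trimmed defineActionInAlt: parser/tree grammars no longer index their actions here. */
    void defineActionInAlt(String actionText) {
        if (isLexer && !lexerActions.containsKey(actionText)) {
            lexerActions.put(actionText, lexerActions.size());
        }
    }

    public static void main(String[] args) {
        LexerActionIndexSketch g = new LexerActionIndexSketch(true);
        g.defineActionInAlt("{ skip(); }");
        g.defineActionInAlt("{ more(); }");
        g.defineActionInAlt("{ skip(); }"); // already indexed, index stays 0
        System.out.println(g.lexerActions); // {{ skip(); }=0, { more(); }=1}
    }
}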