forked from jasder/antlr

move exceptions over from v3 runtime. add bitset defs..
[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6869]

commit 563de77ec4
parent dfffbe7ef3
@@ -0,0 +1,887 @@
/*
 [The "BSD license"]
 Copyright (c) 2005-2009 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;
import org.antlr.runtime.Token;
import org.antlr.v4.runtime.misc.LABitSet;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** A generic recognizer that can handle recognizers generated from
 *  parser and tree grammars.  This is all the parsing
 *  support code essentially; most of it is error recovery stuff and
 *  backtracking.
 */
public abstract class BaseRecognizer {
    public static final int MEMO_RULE_FAILED = -2;
    public static final int MEMO_RULE_UNKNOWN = -1;
    public static final int INITIAL_FOLLOW_STACK_SIZE = 100;

    // copies from Token object for convenience in actions
    public static final int DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
    public static final int HIDDEN = Token.HIDDEN_CHANNEL;

    public static final String NEXT_TOKEN_RULE_NAME = "nextToken";

    public IntStream input;

    /** State of a lexer, parser, or tree parser are collected into a state
     *  object so the state can be shared.  This sharing is needed to
     *  have one grammar import others and share same error variables
     *  and other state variables.  It's a kind of explicit multiple
     *  inheritance via delegation of methods and shared state.
     */
    protected RecognizerSharedState state;

    public BaseRecognizer() {
        state = new RecognizerSharedState();
    }

    public BaseRecognizer(RecognizerSharedState state) {
        if ( state==null ) {
            state = new RecognizerSharedState();
        }
        this.state = state;
    }

    /** Reset the parser's state; subclasses must rewind the input stream. */
    public void reset() {
        // whack everything related to error recovery
        if ( state==null ) {
            return; // no shared state work to do
        }
        state._fsp = -1;
        state.errorRecovery = false;
        state.lastErrorIndex = -1;
        state.failed = false;
        state.syntaxErrors = 0;
        // whack everything related to backtracking and memoization
        state.backtracking = 0;
        for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) { // wipe cache
            state.ruleMemo[i] = null;
        }
    }

    /** Match current input symbol against ttype.  Attempt
     *  single token insertion or deletion error recovery.  If
     *  that fails, throw MismatchedTokenException.
     *
     *  To turn off single token insertion or deletion error
     *  recovery, override recoverFromMismatchedToken() and have it
     *  throw an exception. See TreeParser.recoverFromMismatchedToken().
     *  This way any error in a rule will cause an exception and
     *  immediate exit from rule.  Rule would recover by resynchronizing
     *  to the set of symbols that can follow rule ref.
     */
    public Object match(int ttype, LABitSet follow)
        throws RecognitionException
    {
        //System.out.println("match "+((TokenStream)input).LT(1));
        Object matchedSymbol = getCurrentInputSymbol(input);
        if ( input.LA(1)==ttype ) {
            input.consume();
            state.errorRecovery = false;
            state.failed = false;
            return matchedSymbol;
        }
        if ( state.backtracking>0 ) {
            state.failed = true;
            return matchedSymbol;
        }
        matchedSymbol = recoverFromMismatchedToken(ttype, follow);
        return matchedSymbol;
    }

    /** Match the wildcard: in a symbol */
    public void matchAny(IntStream input) {
        state.errorRecovery = false;
        state.failed = false;
        input.consume();
    }

    public boolean mismatchIsUnwantedToken(int ttype) {
        return input.LA(2)==ttype;
    }

    public boolean mismatchIsMissingToken(LABitSet follow) {
        if ( follow==null ) {
            // we have no information about the follow; we can only consume
            // a single token and hope for the best
            return false;
        }
        // compute what can follow this grammar element reference
        if ( follow.member(Token.EOR_TOKEN_TYPE) ) {
            LABitSet viableTokensFollowingThisRule = computeContextSensitiveRuleFOLLOW();
            follow = follow.or(viableTokensFollowingThisRule);
            if ( state._fsp>=0 ) { // remove EOR if we're not the start symbol
                follow.remove(Token.EOR_TOKEN_TYPE);
            }
        }
        // if current token is consistent with what could come after set
        // then we know we're missing a token; error recovery is free to
        // "insert" the missing token

        //System.out.println("viable tokens="+follow.toString(getTokenNames()));
        //System.out.println("LT(1)="+((TokenStream)input).LT(1));

        // LABitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
        // in follow set to indicate that the follow of the start symbol is
        // in the set (EOF can follow).
        if ( follow.member(input.LA(1)) || follow.member(Token.EOR_TOKEN_TYPE) ) {
            //System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
            return true;
        }
        return false;
    }

    /** Report a recognition problem.
     *
     *  This method sets errorRecovery to indicate the parser is recovering,
     *  not parsing.  Once in recovery mode, no errors are generated.
     *  To get out of recovery mode, the parser must successfully match
     *  a token (after a resync).  So it will go:
     *
     *  1. error occurs
     *  2. enter recovery mode, report error
     *  3. consume until token found in resynch set
     *  4. try to resume parsing
     *  5. next match() will reset errorRecovery mode
     *
     *  If you override, make sure to update syntaxErrors if you care about that.
     */
    public void reportError(RecognitionException e) {
        // if we've already reported an error and have not matched a token
        // yet successfully, don't report any errors.
        if ( state.errorRecovery ) {
            //System.err.print("[SPURIOUS] ");
            return;
        }
        state.syntaxErrors++; // don't count spurious
        state.errorRecovery = true;

        displayRecognitionError(this.getTokenNames(), e);
    }

    public void displayRecognitionError(String[] tokenNames,
                                        RecognitionException e)
    {
        String hdr = getErrorHeader(e);
        String msg = getErrorMessage(e, tokenNames);
        emitErrorMessage(hdr+" "+msg);
    }

    /** What error message should be generated for the various
     *  exception types?
     *
     *  Not very object-oriented code, but I like having all error message
     *  generation within one method rather than spread among all of the
     *  exception classes. This also makes it much easier for the exception
     *  handling because the exception classes do not have to have pointers back
     *  to this object to access utility routines and so on. Also, changing
     *  the message for an exception type would be difficult because you
     *  would have to subclass the exception, but then somehow get ANTLR
     *  to make those kinds of exception objects instead of the default.
     *  This looks weird, but trust me--it makes the most sense in terms
     *  of flexibility.
     *
     *  For grammar debugging, you will want to override this to add
     *  more information such as the stack frame with
     *  getRuleInvocationStack(e, this.getClass().getName()) and,
     *  for no viable alts, the decision description and state etc...
     *
     *  Override this to change the message generated for one or more
     *  exception types.
     */
    public String getErrorMessage(RecognitionException e, String[] tokenNames) {
        String msg = e.getMessage();
        if ( e instanceof UnwantedTokenException ) {
            UnwantedTokenException ute = (UnwantedTokenException)e;
            String tokenName="<unknown>";
            if ( ute.expecting== Token.EOF ) {
                tokenName = "EOF";
            }
            else {
                tokenName = tokenNames[ute.expecting];
            }
            msg = "extraneous input "+getTokenErrorDisplay(ute.getUnexpectedToken())+
                  " expecting "+tokenName;
        }
        else if ( e instanceof MissingTokenException ) {
            MissingTokenException mte = (MissingTokenException)e;
            String tokenName="<unknown>";
            if ( mte.expecting== Token.EOF ) {
                tokenName = "EOF";
            }
            else {
                tokenName = tokenNames[mte.expecting];
            }
            msg = "missing "+tokenName+" at "+getTokenErrorDisplay(e.token);
        }
        else if ( e instanceof MismatchedTokenException ) {
            MismatchedTokenException mte = (MismatchedTokenException)e;
            String tokenName="<unknown>";
            if ( mte.expecting== Token.EOF ) {
                tokenName = "EOF";
            }
            else {
                tokenName = tokenNames[mte.expecting];
            }
            msg = "mismatched input "+getTokenErrorDisplay(e.token)+
                  " expecting "+tokenName;
        }
        else if ( e instanceof MismatchedTreeNodeException ) {
            MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
            String tokenName="<unknown>";
            if ( mtne.expecting==Token.EOF ) {
                tokenName = "EOF";
            }
            else {
                tokenName = tokenNames[mtne.expecting];
            }
            msg = "mismatched tree node: "+mtne.node+
                  " expecting "+tokenName;
        }
        else if ( e instanceof NoViableAltException ) {
            //NoViableAltException nvae = (NoViableAltException)e;
            // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
            // and "(decision="+nvae.decisionNumber+") and
            // "state "+nvae.stateNumber
            msg = "no viable alternative at input "+getTokenErrorDisplay(e.token);
        }
        else if ( e instanceof EarlyExitException ) {
            //EarlyExitException eee = (EarlyExitException)e;
            // for development, can add "(decision="+eee.decisionNumber+")"
            msg = "required (...)+ loop did not match anything at input "+
                  getTokenErrorDisplay(e.token);
        }
        else if ( e instanceof MismatchedSetException ) {
            MismatchedSetException mse = (MismatchedSetException)e;
            msg = "mismatched input "+getTokenErrorDisplay(e.token)+
                  " expecting set "+mse.expecting;
        }
        else if ( e instanceof MismatchedNotSetException ) {
            MismatchedNotSetException mse = (MismatchedNotSetException)e;
            msg = "mismatched input "+getTokenErrorDisplay(e.token)+
                  " expecting set "+mse.expecting;
        }
        else if ( e instanceof FailedPredicateException ) {
            FailedPredicateException fpe = (FailedPredicateException)e;
            msg = "rule "+fpe.ruleName+" failed predicate: {"+
                  fpe.predicateText+"}?";
        }
        return msg;
    }

    /** Get number of recognition errors (lexer, parser, tree parser).  Each
     *  recognizer tracks its own number.  So parser and lexer each have
     *  separate count.  Does not count the spurious errors found between
     *  an error and next valid token match.
     *
     *  See also reportError()
     */
    public int getNumberOfSyntaxErrors() {
        return state.syntaxErrors;
    }

    /** What is the error header, normally line/character position information? */
    public String getErrorHeader(RecognitionException e) {
        return "line "+e.line+":"+e.charPositionInLine;
    }

    /** How should a token be displayed in an error message? The default
     *  is to display just the text, but during development you might
     *  want to have a lot of information spit out.  Override in that case
     *  to use t.toString() (which, for CommonToken, dumps everything about
     *  the token).  This is better than forcing you to override a method in
     *  your token objects because you don't have to go modify your lexer
     *  so that it creates a new Java type.
     */
    public String getTokenErrorDisplay(Token t) {
        String s = t.getText();
        if ( s==null ) {
            if ( t.getType()==Token.EOF ) {
                s = "<EOF>";
            }
            else {
                s = "<"+t.getType()+">";
            }
        }
        s = s.replaceAll("\n","\\\\n");
        s = s.replaceAll("\r","\\\\r");
        s = s.replaceAll("\t","\\\\t");
        return "'"+s+"'";
    }

    /** Override this method to change where error messages go */
    public void emitErrorMessage(String msg) {
        System.err.println(msg);
    }

    /** Recover from an error found on the input stream.  This is
     *  for NoViableAlt and mismatched symbol exceptions.  If you enable
     *  single token insertion and deletion, this will usually not
     *  handle mismatched symbol exceptions but there could be a mismatched
     *  token that the match() routine could not recover from.
     */
    public void recover(RecognitionException re) {
        if ( state.lastErrorIndex==input.index() ) {
            // uh oh, another error at same token index; must be a case
            // where LT(1) is in the recovery token set so nothing is
            // consumed; consume a single token at least to prevent
            // an infinite loop; this is a failsafe.
            input.consume();
        }
        state.lastErrorIndex = input.index();
        LABitSet followSet = computeErrorRecoverySet();
        beginResync();
        consumeUntil(followSet);
        endResync();
    }

    /** A hook to listen in on the token consumption during error recovery.
     *  The DebugParser subclasses this to fire events to the listener.
     */
    public void beginResync() {
    }

    public void endResync() {
    }

    /*  Compute the error recovery set for the current rule.  During
     *  rule invocation, the parser pushes the set of tokens that can
     *  follow that rule reference on the stack; this amounts to
     *  computing FIRST of what follows the rule reference in the
     *  enclosing rule.  This local follow set only includes tokens
     *  from within the rule; i.e., the FIRST computation done by
     *  ANTLR stops at the end of a rule.
     *
     *  EXAMPLE
     *
     *  When you find a "no viable alt exception", the input is not
     *  consistent with any of the alternatives for rule r.  The best
     *  thing to do is to consume tokens until you see something that
     *  can legally follow a call to r *or* any rule that called r.
     *  You don't want the exact set of viable next tokens because the
     *  input might just be missing a token--you might consume the
     *  rest of the input looking for one of the missing tokens.
     *
     *  Consider grammar:
     *
     *  a : '[' b ']'
     *    | '(' b ')'
     *    ;
     *  b : c '^' INT ;
     *  c : ID
     *    | INT
     *    ;
     *
     *  At each rule invocation, the set of tokens that could follow
     *  that rule is pushed on a stack.  Here are the various "local"
     *  follow sets:
     *
     *  FOLLOW(b1_in_a) = FIRST(']') = ']'
     *  FOLLOW(b2_in_a) = FIRST(')') = ')'
     *  FOLLOW(c_in_b) = FIRST('^') = '^'
     *
     *  Upon erroneous input "[]", the call chain is
     *
     *  a -> b -> c
     *
     *  and, hence, the follow context stack is:
     *
     *  depth  local follow set     after call to rule
     *    0        <EOF>            a (from main())
     *    1        ']'              b
     *    2        '^'              c
     *
     *  Notice that ')' is not included, because b would have to have
     *  been called from a different context in rule a for ')' to be
     *  included.
     *
     *  For error recovery, we cannot consider FOLLOW(c)
     *  (context-sensitive or otherwise).  We need the combined set of
     *  all context-sensitive FOLLOW sets--the set of all tokens that
     *  could follow any reference in the call chain.  We need to
     *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
     *  we resync'd to that token, we'd consume until EOF.  We need to
     *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
     *  In this case, for input "[]", LA(1) is in this set so we would
     *  not consume anything and after printing an error rule c would
     *  return normally.  It would not find the required '^' though.
     *  At this point, it gets a mismatched token error and throws an
     *  exception (since LA(1) is not in the viable following token
     *  set).  The rule exception handler tries to recover, but finds
     *  the same recovery set and doesn't consume anything.  Rule b
     *  exits normally returning to rule a.  Now it finds the ']' (and
     *  with the successful match exits errorRecovery mode).
     *
     *  So, you can see that the parser walks up the call chain looking
     *  for the token that was a member of the recovery set.
     *
     *  Errors are not generated in errorRecovery mode.
     *
     *  ANTLR's error recovery mechanism is based upon original ideas:
     *
     *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
     *
     *  and
     *
     *  "A note on error recovery in recursive descent parsers":
     *  http://portal.acm.org/citation.cfm?id=947902.947905
     *
     *  Later, Josef Grosch had some good ideas:
     *
     *  "Efficient and Comfortable Error Recovery in Recursive Descent
     *  Parsers":
     *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
     *
     *  Like Grosch I implemented local FOLLOW sets that are combined
     *  at run-time upon error to avoid overhead during parsing.
     */
    protected LABitSet computeErrorRecoverySet() {
        return combineFollows(false);
    }

    /** Compute the context-sensitive FOLLOW set for current rule.
     *  This is set of token types that can follow a specific rule
     *  reference given a specific call chain.  You get the set of
     *  viable tokens that can possibly come next (lookahead depth 1)
     *  given the current call chain.  Contrast this with the
     *  definition of plain FOLLOW for rule r:
     *
     *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
     *
     *  where x in T* and alpha, beta in V*; T is set of terminals and
     *  V is the set of terminals and nonterminals.  In other words,
     *  FOLLOW(r) is the set of all tokens that can possibly follow
     *  references to r in *any* sentential form (context).  At
     *  runtime, however, we know precisely which context applies as
     *  we have the call chain.  We may compute the exact (rather
     *  than covering superset) set of following tokens.
     *
     *  For example, consider grammar:
     *
     *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
     *       | "return" expr '.'
     *       ;
     *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
     *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
     *       | '(' expr ')'
     *       ;
     *
     *  The FOLLOW sets are all inclusive whereas context-sensitive
     *  FOLLOW sets are precisely what could follow a rule reference.
     *  For input "i=(3);", here is the derivation:
     *
     *  stat => ID '=' expr ';'
     *       => ID '=' atom ('+' atom)* ';'
     *       => ID '=' '(' expr ')' ('+' atom)* ';'
     *       => ID '=' '(' atom ')' ('+' atom)* ';'
     *       => ID '=' '(' INT ')' ('+' atom)* ';'
     *       => ID '=' '(' INT ')' ';'
     *
     *  At the "3" token, you'd have a call chain of
     *
     *    stat -> expr -> atom -> expr -> atom
     *
     *  What can follow that specific nested ref to atom?  Exactly ')'
     *  as you can see by looking at the derivation of this specific
     *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
     *
     *  You want the exact viable token set when recovering from a
     *  token mismatch.  Upon token mismatch, if LA(1) is member of
     *  the viable next token set, then you know there is most likely
     *  a missing token in the input stream.  "Insert" one by just not
     *  throwing an exception.
     */
    protected LABitSet computeContextSensitiveRuleFOLLOW() {
        return combineFollows(true);
    }

    // what is exact? it seems to only add sets from above on stack
    // if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
    // Why would we ever want them all?  Maybe no viable alt instead of
    // mismatched token?
    protected LABitSet combineFollows(boolean exact) {
        int top = state._fsp;
        LABitSet followSet = new LABitSet();
        for (int i=top; i>=0; i--) {
            LABitSet localFollowSet = (LABitSet)state.following[i];
            /*
            System.out.println("local follow depth "+i+"="+
                               localFollowSet.toString(getTokenNames())+")");
            */
            followSet.orInPlace(localFollowSet);
            if ( exact ) {
                // can we see end of rule?
                if ( localFollowSet.member(Token.EOR_TOKEN_TYPE) ) {
                    // Only leave EOR in set if at top (start rule); this lets
                    // us know if have to include follow(start rule); i.e., EOF
                    if ( i>0 ) {
                        followSet.remove(Token.EOR_TOKEN_TYPE);
                    }
                }
                else { // can't see end of rule, quit
                    break;
                }
            }
        }
        return followSet;
    }

    /** Attempt to recover from a single missing or extra token.
     *
     *  EXTRA TOKEN
     *
     *  LA(1) is not what we are looking for.  If LA(2) has the right token,
     *  however, then assume LA(1) is some extra spurious token.  Delete it
     *  and then consume LA(2) as if we were doing a normal match(), which
     *  advances the input.
     *
     *  MISSING TOKEN
     *
     *  If current token is consistent with what could come after
     *  ttype then it is ok to "insert" the missing token, else throw
     *  exception.  For example, input "i=(3;" is clearly missing the
     *  ')'.  When the parser returns from the nested call to expr, it
     *  will have call chain:
     *
     *    stat -> expr -> atom
     *
     *  and it will be trying to match the ')' at this point in the
     *  derivation:
     *
     *     => ID '=' '(' INT ')' ('+' atom)* ';'
     *                          ^
     *  match() will see that ';' doesn't match ')' and report a
     *  mismatched token error.  To recover, it sees that LA(1)==';'
     *  is in the set of tokens that can follow the ')' token
     *  reference in rule atom.  It can assume that you forgot the ')'.
     */
    protected Object recoverFromMismatchedToken(int ttype, LABitSet follow)
        throws RecognitionException
    {
        RecognitionException e = null;
        // if next token is what we are looking for then "delete" this token
        if ( mismatchIsUnwantedToken(ttype) ) {
            e = new UnwantedTokenException(ttype, input);
            /*
            System.err.println("recoverFromMismatchedToken deleting "+
                               ((TokenStream)input).LT(1)+
                               " since "+((TokenStream)input).LT(2)+" is what we want");
            */
            beginResync();
            input.consume(); // simply delete extra token
            endResync();
            reportError(e);  // report after consuming so AW sees the token in the exception
            // we want to return the token we're actually matching
            Object matchedSymbol = getCurrentInputSymbol(input);
            input.consume(); // move past ttype token as if all were ok
            return matchedSymbol;
        }
        // can't recover with single token deletion, try insertion
        if ( mismatchIsMissingToken(follow) ) {
            Object inserted = getMissingSymbol(e, ttype, follow);
            e = new MissingTokenException(ttype, input, inserted);
            reportError(e);  // report after inserting so AW sees the token in the exception
            return inserted;
        }
        // even that didn't work; must throw the exception
        e = new MismatchedTokenException(ttype, input);
        throw e;
    }

    /** Not currently used */
    public Object recoverFromMismatchedSet(RecognitionException e,
                                           LABitSet follow)
        throws RecognitionException
    {
        if ( mismatchIsMissingToken(follow) ) {
            // System.out.println("missing token");
            reportError(e);
            // we don't know how to conjure up a token for sets yet
            return getMissingSymbol(e, Token.INVALID_TOKEN_TYPE, follow);
        }
        // TODO do single token deletion like above for Token mismatch
        throw e;
    }

    /** Match needs to return the current input symbol, which gets put
     *  into the label for the associated token ref; e.g., x=ID.  Token
     *  and tree parsers need to return different objects.  Rather than test
     *  for input stream type or change the IntStream interface, I use
     *  a simple method to ask the recognizer to tell me what the current
     *  input symbol is.
     *
     *  This is ignored for lexers.
     */
    protected Object getCurrentInputSymbol(IntStream input) { return null; }

    /** Conjure up a missing token during error recovery.
     *
     *  The recognizer attempts to recover from single missing
     *  symbols.  But, actions might refer to that missing symbol.
     *  For example, x=ID {f($x);}.  The action clearly assumes
     *  that there has been an identifier matched previously and that
     *  $x points at that token.  If that token is missing, but
     *  the next token in the stream is what we want we assume that
     *  this token is missing and we keep going.  Because we
     *  have to return some token to replace the missing token,
     *  we have to conjure one up.  This method gives the user control
     *  over the tokens returned for missing tokens.  Mostly,
     *  you will want to create something special for identifier
     *  tokens.  For literals such as '{' and ',', the default
     *  action in the parser or tree parser works.  It simply creates
     *  a CommonToken of the appropriate type.  The text will be the token.
     *  If you change what tokens must be created by the lexer,
     *  override this method to create the appropriate tokens.
     */
    protected Object getMissingSymbol(RecognitionException e,
                                      int expectedTokenType,
                                      LABitSet follow)
    {
        return null;
    }

    public void consumeUntil(int tokenType) {
        //System.out.println("consumeUntil "+tokenType);
        int ttype = input.LA(1);
        while (ttype != Token.EOF && ttype != tokenType) {
            input.consume();
            ttype = input.LA(1);
        }
    }

    /** Consume tokens until one matches the given token set */
    public void consumeUntil(LABitSet set) {
        //System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
        int ttype = input.LA(1);
        while (ttype != Token.EOF && !set.member(ttype) ) {
            //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
            input.consume();
            ttype = input.LA(1);
        }
    }

    /** Push a rule's follow set using our own hardcoded stack */
    protected void pushFollow(LABitSet fset) {
        if ( (state._fsp+1)>=state.following.length ) {
            LABitSet[] f = new LABitSet[state.following.length*2];
            System.arraycopy(state.following, 0, f, 0, state.following.length);
            state.following = f;
        }
        state.following[++state._fsp] = fset;
    }

    /** Return List<String> of the rules in your parser instance
     *  leading up to a call to this method.  You could override if
     *  you want more details such as the file/line info of where
     *  in the parser java code a rule is invoked.
     *
     *  This is very useful for error messages and for context-sensitive
     *  error recovery.
     */
    public List getRuleInvocationStack() {
        String parserClassName = getClass().getName();
        return getRuleInvocationStack(new Throwable(), parserClassName);
    }

    /** A more general version of getRuleInvocationStack where you can
     *  pass in, for example, a RecognitionException to get its rule
     *  stack trace.  This routine is shared with all recognizers, hence,
     *  static.
     *
     *  TODO: move to a utility class or something; weird having lexer call this
     */
    public static List getRuleInvocationStack(Throwable e,
                                              String recognizerClassName)
    {
        List rules = new ArrayList();
        StackTraceElement[] stack = e.getStackTrace();
        int i = 0;
        for (i=stack.length-1; i>=0; i--) {
            StackTraceElement t = stack[i];
            if ( t.getClassName().startsWith("org.antlr.runtime.") ) {
                continue; // skip support code such as this method
            }
            if ( t.getMethodName().equals(NEXT_TOKEN_RULE_NAME) ) {
                continue;
            }
            if ( !t.getClassName().equals(recognizerClassName) ) {
                continue; // must not be part of this parser
            }
            rules.add(t.getMethodName());
        }
        return rules;
    }

    public int getBacktrackingLevel() { return state.backtracking; }

    public void setBacktrackingLevel(int n) { state.backtracking = n; }

    /** Return whether or not a backtracking attempt failed. */
    public boolean failed() { return state.failed; }

    /** Used to print out token names like ID during debugging and
     *  error reporting.  The generated parsers implement a method
     *  that overrides this to point to their String[] tokenNames.
     */
    public String[] getTokenNames() {
        return null;
    }

    /** For debugging and other purposes, might want the grammar name.
     *  Have ANTLR generate an implementation for this method.
     */
    public String getGrammarFileName() {
        return null;
    }

    public abstract String getSourceName();

    /** A convenience method for use most often with template rewrites.
     *  Convert a List<Token> to List<String>
     */
    public List toStrings(List tokens) {
        if ( tokens==null ) return null;
        List strings = new ArrayList(tokens.size());
        for (int i=0; i<tokens.size(); i++) {
            strings.add(((Token)tokens.get(i)).getText());
        }
        return strings;
    }

    /** Given a rule number and a start token index number, return
     *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
     *  the start index.  If this rule has parsed input starting from the
     *  start index before, then return where the rule stopped parsing.
     *  It returns the index of the last token matched by the rule.
     *
     *  For now we use a hashtable and just the slow Object-based one.
     *  Later, we can make a special one for ints and also one that
     *  tosses out data after we commit past input position i.
     */
    public int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
        if ( state.ruleMemo[ruleIndex]==null ) {
            state.ruleMemo[ruleIndex] = new HashMap();
        }
        Integer stopIndexI =
            (Integer)state.ruleMemo[ruleIndex].get(new Integer(ruleStartIndex));
        if ( stopIndexI==null ) {
            return MEMO_RULE_UNKNOWN;
        }
        return stopIndexI.intValue();
    }

    /** Has this rule already parsed input at the current index in the
     *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
     *  If we attempted but failed to parse properly before, return
     *  MEMO_RULE_FAILED.
     *
     *  This method has a side-effect: if we have seen this input for
     *  this rule and successfully parsed before, then seek ahead to
     *  1 past the stop token matched for this rule last time.
     */
    public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
        int stopIndex = getRuleMemoization(ruleIndex, input.index());
        if ( stopIndex==MEMO_RULE_UNKNOWN ) {
            return false;
        }
        if ( stopIndex==MEMO_RULE_FAILED ) {
            //System.out.println("rule "+ruleIndex+" will never succeed");
            state.failed=true;
        }
        else {
            //System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+state.failed);
            input.seek(stopIndex+1); // jump to one past stop token
        }
        return true;
    }

    /** Record whether or not this rule parsed the input at this position
     *  successfully.  Use a standard java hashtable for now.
     */
    public void memoize(IntStream input,
                        int ruleIndex,
                        int ruleStartIndex)
    {
        int stopTokenIndex = state.failed?MEMO_RULE_FAILED:input.index()-1;
        if ( state.ruleMemo==null ) {
            System.err.println("!!!!!!!!! memo array is null for "+ getGrammarFileName());
        }
        if ( ruleIndex >= state.ruleMemo.length ) {
            System.err.println("!!!!!!!!! memo size is "+state.ruleMemo.length+", but rule index is "+ruleIndex);
        }
        if ( state.ruleMemo[ruleIndex]!=null ) {
            state.ruleMemo[ruleIndex].put(
                new Integer(ruleStartIndex), new Integer(stopTokenIndex)
            );
        }
    }

    /** Return how many rule/input-index pairs there are in total.
     *  TODO: this includes synpreds. :(
     */
    public int getRuleMemoizationCacheSize() {
        int n = 0;
        for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) {
            Map ruleMap = state.ruleMemo[i];
            if ( ruleMap!=null ) {
                n += ruleMap.size(); // how many input indexes are recorded?
            }
        }
        return n;
    }

    public void traceIn(String ruleName, int ruleIndex, Object inputSymbol) {
        System.out.print("enter "+ruleName+" "+inputSymbol);
        if ( state.backtracking>0 ) {
            System.out.print(" backtracking="+state.backtracking);
        }
        System.out.println();
    }

    public void traceOut(String ruleName,
                         int ruleIndex,
                         Object inputSymbol)
    {
        System.out.print("exit "+ruleName+" "+inputSymbol);
        if ( state.backtracking>0 ) {
            System.out.print(" backtracking="+state.backtracking);
            if ( state.failed ) System.out.print(" failed");
            else System.out.print(" succeeded");
        }
        System.out.println();
    }

}
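
For orientation, a minimal sketch (not part of this commit) of how generated parser code is expected to drive the machinery above, assuming ANTLR v3-style generated methods: push a rule's local FOLLOW set before invoking it, let match() perform single-token insertion/deletion recovery, and use the memoization hooks while backtracking. The rule/token names, the FOLLOW_* constants, and DECL_RULE_INDEX below are hypothetical placeholders; only the BaseRecognizer members are real.

    // Hypothetical generated method for a rule:  decl : type ID ';' ;
    public final void decl() throws RecognitionException {
        int declStartIndex = input.index();
        if ( state.backtracking>0 && alreadyParsedRule(input, DECL_RULE_INDEX) ) return;
        try {
            pushFollow(FOLLOW_type_in_decl);   // tokens that may follow the nested rule call
            type();
            state._fsp--;                      // pop the local follow set
            if ( state.failed ) return;        // backtracking: fail silently, no recovery
            match(ID, FOLLOW_ID_in_decl);      // single-token insert/delete recovery happens here
            if ( state.failed ) return;
            match(SEMI, FOLLOW_SEMI_in_decl);
        }
        catch (RecognitionException re) {
            reportError(re);                   // enters errorRecovery mode
            recover(re);                       // consume until a token in computeErrorRecoverySet()
        }
        finally {
            if ( state.backtracking>0 ) memoize(input, DECL_RULE_INDEX, declStartIndex);
        }
    }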
@@ -0,0 +1,43 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;

/** The recognizer did not match anything for a (..)+ loop. */
public class EarlyExitException extends RecognitionException {
    public int decisionNumber;

    /** Used for remote debugger deserialization */
    public EarlyExitException() {;}

    public EarlyExitException(int decisionNumber, IntStream input) {
        super(input);
        this.decisionNumber = decisionNumber;
    }
}
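
A hedged sketch (not part of this commit) of where this exception would come from: a generated (...)+ subrule throws it when the loop matches zero times, and BaseRecognizer.getErrorMessage() then renders it as "required (...)+ loop did not match anything at input ...". The token type ID, FOLLOW_ID_in_ids, and DECISION_7 are hypothetical placeholders.

    // Hypothetical generated body for:  ids : ID+ ;
    int cnt = 0;
    while (true) {
        if ( input.LA(1)==ID ) {                              // another ID can start an iteration
            match(ID, FOLLOW_ID_in_ids);
            cnt++;
        }
        else {
            if ( cnt>=1 ) break;                              // matched at least once; loop is done
            throw new EarlyExitException(DECISION_7, input);  // zero matches: the (...)+ failed
        }
    }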
@@ -0,0 +1,56 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;

/** A semantic predicate failed during validation.  Validation of predicates
 *  occurs while normally parsing the alternative, just like matching a token.
 *  Disambiguating predicate evaluation occurs when we hoist a predicate into
 *  a prediction decision.
 */
public class FailedPredicateException extends RecognitionException {
    public String ruleName;
    public String predicateText;

    /** Used for remote debugger deserialization */
    public FailedPredicateException() {;}

    public FailedPredicateException(IntStream input,
                                    String ruleName,
                                    String predicateText)
    {
        super(input);
        this.ruleName = ruleName;
        this.predicateText = predicateText;
    }

    public String toString() {
        return "FailedPredicateException("+ruleName+",{"+predicateText+"}?)";
    }
}
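
An illustrative sketch (not part of this commit) of a validating semantic predicate. The rule name, predicate text, and the generated guard are hypothetical; the constructor arguments and the resulting message format come from FailedPredicateException and BaseRecognizer.getErrorMessage() above.

    // Hypothetical generated check for:  vec : {size >= 1}? INT+ ;
    if ( !(size >= 1) ) {
        if ( state.backtracking>0 ) { state.failed=true; return; }  // while guessing, just fail quietly
        throw new FailedPredicateException(input, "vec", "size >= 1");
        // getErrorMessage() reports this as:  rule vec failed predicate: {size >= 1}?
    }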
@@ -0,0 +1,44 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;
import org.antlr.v4.runtime.misc.LABitSet;

public class MismatchedNotSetException extends MismatchedSetException {
    /** Used for remote debugger deserialization */
    public MismatchedNotSetException() {;}

    public MismatchedNotSetException(LABitSet expecting, IntStream input) {
        super(expecting, input);
    }

    public String toString() {
        return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")";
    }
}
@@ -0,0 +1,47 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;

public class MismatchedRangeException extends RecognitionException {
    public int a,b;

    /** Used for remote debugger deserialization */
    public MismatchedRangeException() {;}

    public MismatchedRangeException(int a, int b, IntStream input) {
        super(input);
        this.a = a;
        this.b = b;
    }

    public String toString() {
        return "MismatchedRangeException("+getUnexpectedType()+" not in ["+a+","+b+"])";
    }
}
@@ -0,0 +1,47 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;
import org.antlr.v4.runtime.misc.LABitSet;

public class MismatchedSetException extends RecognitionException {
    public LABitSet expecting;

    /** Used for remote debugger deserialization */
    public MismatchedSetException() {;}

    public MismatchedSetException(LABitSet expecting, IntStream input) {
        super(input);
        this.expecting = expecting;
    }

    public String toString() {
        return "MismatchedSetException("+getUnexpectedType()+"!="+expecting+")";
    }
}
@@ -0,0 +1,48 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;
import org.antlr.runtime.Token;

/** A mismatched char or Token or tree node */
public class MismatchedTokenException extends RecognitionException {
    public int expecting = Token.INVALID_TOKEN_TYPE;

    /** Used for remote debugger deserialization */
    public MismatchedTokenException() {;}

    public MismatchedTokenException(int expecting, IntStream input) {
        super(input);
        this.expecting = expecting;
    }

    public String toString() {
        return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
    }
}
@@ -0,0 +1,48 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.tree.TreeNodeStream;

/**
 */
public class MismatchedTreeNodeException extends RecognitionException {
    public int expecting;

    public MismatchedTreeNodeException() {
    }

    public MismatchedTreeNodeException(int expecting, TreeNodeStream input) {
        super(input);
        this.expecting = expecting;
    }

    public String toString() {
        return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")";
    }
}
@@ -0,0 +1,58 @@
/*  [The "BSD license"]  Copyright (c) 2005-2009 Terence Parr.  All rights reserved.
    (Full BSD license text as in the first file of this commit.) */
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;

/** We were expecting a token but it's not found.  The current token
 *  is actually what we wanted next.  Used for tree node errors too.
 */
public class MissingTokenException extends MismatchedTokenException {
    public Object inserted;

    /** Used for remote debugger deserialization */
    public MissingTokenException() {;}

    public MissingTokenException(int expecting, IntStream input, Object inserted) {
        super(expecting, input);
        this.inserted = inserted;
    }

    public int getMissingType() {
        return expecting;
    }

    public String toString() {
        if ( inserted!=null && token!=null ) {
            return "MissingTokenException(inserted "+inserted+" at "+token.getText()+")";
        }
        if ( token!=null ) {
            return "MissingTokenException(at "+token.getText()+")";
        }
        return "MissingTokenException";
    }
}
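
A sketch (not part of this commit) of how the conjured "inserted" object might be produced. recoverFromMismatchedToken() above reports a MissingTokenException and returns whatever getMissingSymbol() builds; the default in BaseRecognizer returns null, and a concrete parser could override that hook roughly as below. The override shown is hypothetical; CommonToken(int, String) is the v3 runtime constructor.

    protected Object getMissingSymbol(RecognitionException e,
                                      int expectedTokenType,
                                      LABitSet follow)
    {
        // Build a placeholder token so actions such as {f($x);} still have something to point at.
        String name = (getTokenNames()!=null && expectedTokenType>=0)
                      ? getTokenNames()[expectedTokenType]
                      : String.valueOf(expectedTokenType);
        return new org.antlr.runtime.CommonToken(expectedTokenType, "<missing "+name+">");
    }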
@ -0,0 +1,60 @@
|
|||
/*
|
||||
[The "BSD license"]
|
||||
Copyright (c) 2005-2009 Terence Parr
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
3. The name of the author may not be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
package org.antlr.v4.runtime;

import org.antlr.runtime.CharStream;
import org.antlr.runtime.IntStream;

public class NoViableAltException extends RecognitionException {
    public String grammarDecisionDescription;
    public int decisionNumber;
    public int stateNumber;

    /** Used for remote debugger deserialization */
    public NoViableAltException() {;}

    public NoViableAltException(String grammarDecisionDescription,
                                int decisionNumber,
                                int stateNumber,
                                IntStream input)
    {
        super(input);
        this.grammarDecisionDescription = grammarDecisionDescription;
        this.decisionNumber = decisionNumber;
        this.stateNumber = stateNumber;
    }

    public String toString() {
        if ( input instanceof CharStream ) {
            return "NoViableAltException('"+(char)getUnexpectedType()+"'@["+grammarDecisionDescription+"])";
        }
        else {
            return "NoViableAltException("+getUnexpectedType()+"@["+grammarDecisionDescription+"])";
        }
    }
}
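
A small hedged sketch of reading the decision information this exception carries; the helper below is illustrative only and not defined by this change.

import org.antlr.v4.runtime.NoViableAltException;

public class DecisionTrace {
    /** Hypothetical helper: summarize which decision failed and where the DFA stopped. */
    public static String describe(NoViableAltException e) {
        return "no viable alternative at decision "+e.decisionNumber+
               " (DFA state "+e.stateNumber+"): "+e.grammarDecisionDescription;
    }
}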
@@ -0,0 +1,104 @@
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenStream;
import org.antlr.v4.runtime.misc.LABitSet;

/** A parser for TokenStreams.  "parser grammars" result in a subclass
 *  of this.
 */
public class Parser extends BaseRecognizer {
    public TokenStream input;

    public Parser(TokenStream input) {
        super(); // highlight that we go to super to set state object
        setTokenStream(input);
    }

    public Parser(TokenStream input, RecognizerSharedState state) {
        super(state); // share the state object with another parser
        this.input = input;
    }

    public void reset() {
        super.reset(); // reset all recognizer state variables
        if ( input!=null ) {
            input.seek(0); // rewind the input
        }
    }

    protected Object getCurrentInputSymbol(IntStream input) {
        return ((TokenStream)input).LT(1);
    }

    protected Object getMissingSymbol(IntStream input,
                                      RecognitionException e,
                                      int expectedTokenType,
                                      LABitSet follow)
    {
        String tokenText = null;
        if ( expectedTokenType==Token.EOF ) tokenText = "<missing EOF>";
        else tokenText = "<missing "+getTokenNames()[expectedTokenType]+">";
        CommonToken t = new CommonToken(expectedTokenType, tokenText);
        Token current = ((TokenStream)input).LT(1);
        if ( current.getType() == Token.EOF ) {
            current = ((TokenStream)input).LT(-1);
        }
        t.line = current.getLine();
        t.charPositionInLine = current.getCharPositionInLine();
        t.channel = DEFAULT_TOKEN_CHANNEL;
        return t;
    }

    /** Set the token stream and reset the parser */
    public void setTokenStream(TokenStream input) {
        this.input = null;
        reset();
        this.input = input;
    }

    public TokenStream getTokenStream() {
        return input;
    }

    public String getSourceName() {
        return input.getSourceName();
    }

    public void traceIn(String ruleName, int ruleIndex)  {
        super.traceIn(ruleName, ruleIndex, input.LT(1));
    }

    public void traceOut(String ruleName, int ruleIndex)  {
        super.traceOut(ruleName, ruleIndex, input.LT(1));
    }
}
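
A hedged driver sketch for a generated subclass of this Parser. MyLexer, MyParser, the prog rule, and the FOLLOW_prog constant are hypothetical names for illustration; only the ANTLRStringStream/CommonTokenStream calls and the setTokenStream()/reset() behavior come from real APIs shown here or in the v3 runtime.

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CommonTokenStream;

public class ParseDriver {
    public static void main(String[] args) throws Exception {
        ANTLRStringStream chars = new ANTLRStringStream("a = b ;");
        MyLexer lexer = new MyLexer(chars);                 // hypothetical generated lexer
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        MyParser parser = new MyParser(tokens);             // hypothetical subclass of Parser
        parser.prog(MyParser.FOLLOW_prog);                  // rule methods take an LABitSet follow set (per the template changes later in this diff)

        // Reuse the same parser on new input: setTokenStream() resets state and rewinds.
        parser.setTokenStream(new CommonTokenStream(new MyLexer(new ANTLRStringStream("c = d ;"))));
        parser.prog(MyParser.FOLLOW_prog);
    }
}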
@@ -0,0 +1,187 @@
package org.antlr.v4.runtime;

import org.antlr.runtime.CharStream;
import org.antlr.runtime.IntStream;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.Tree;
import org.antlr.runtime.tree.TreeAdaptor;
import org.antlr.runtime.tree.TreeNodeStream;

/** The root of the ANTLR exception hierarchy.
 *
 *  To avoid English-only error messages and to generally make things
 *  as flexible as possible, these exceptions are not created with strings,
 *  but rather the information necessary to generate an error.  Then
 *  the various reporting methods in Parser and Lexer can be overridden
 *  to generate a localized error message.  For example, MismatchedToken
 *  exceptions are built with the expected token type.
 *  So, don't expect getMessage() to return anything.
 *
 *  Note that as of Java 1.4, you can access the stack trace, which means
 *  that you can compute the complete trace of rules from the start symbol.
 *  This gives you considerable context information with which to generate
 *  useful error messages.
 *
 *  ANTLR generates code that throws exceptions upon recognition error and
 *  also generates code to catch these exceptions in each rule.  If you
 *  want to quit upon first error, you can turn off the automatic error
 *  handling mechanism using the rulecatch action, but you still need to
 *  override methods mismatch and recoverFromMismatchSet.
 *
 *  In general, the recognition exceptions can track where in a grammar a
 *  problem occurred and/or what was the expected input.  While the parser
 *  knows its state (such as current input symbol and line info), that
 *  state can change before the exception is reported, so the current token index
 *  is computed and stored at exception time.  From this info, you can
 *  perhaps print an entire line of input, not just a single token, for example.
 *  Better to just say the recognizer had a problem and then let the parser
 *  figure out a fancy report.
 */
public class RecognitionException extends Throwable {
    /** What input stream did the error occur in? */
    public transient IntStream input;

    /** What is the index of the token/char we were looking at when the
     *  error occurred?
     */
    public int index;

    /** The current Token when an error occurred.  Since not all streams
     *  can retrieve the ith Token, we have to track the Token object.
     *  For parsers.  Even when it's a tree parser, token might be set.
     */
    public Token token;

    /** If this is a tree parser exception, node is set to the node with
     *  the problem.
     */
    public Object node;

    /** The current char when an error occurred.  For lexers. */
    public int c;

    /** Track the line at which the error occurred in case this is
     *  generated from a lexer.  We need to track this since the
     *  unexpected char doesn't carry the line info.
     */
    public int line;

    public int charPositionInLine;

    /** If you are parsing a tree node stream, you will encounter some
     *  imaginary nodes w/o line/col info.  We now search backwards looking
     *  for the most recent token with line/col info, but notify getErrorHeader()
     *  that info is approximate.
     */
    public boolean approximateLineInfo;

    /** Used for remote debugger deserialization */
    public RecognitionException() {
    }

    public RecognitionException(IntStream input) {
        this.input = input;
        this.index = input.index();
        if ( input instanceof TokenStream ) {
            this.token = ((TokenStream)input).LT(1);
            this.line = token.getLine();
            this.charPositionInLine = token.getCharPositionInLine();
        }
        if ( input instanceof TreeNodeStream ) {
            extractInformationFromTreeNodeStream(input);
        }
        else if ( input instanceof CharStream ) {
            this.c = input.LA(1);
            this.line = ((CharStream)input).getLine();
            this.charPositionInLine = ((CharStream)input).getCharPositionInLine();
        }
        else {
            this.c = input.LA(1);
        }
    }

    protected void extractInformationFromTreeNodeStream(IntStream input) {
        TreeNodeStream nodes = (TreeNodeStream)input;
        this.node = nodes.LT(1);
        TreeAdaptor adaptor = nodes.getTreeAdaptor();
        Token payload = adaptor.getToken(node);
        if ( payload!=null ) {
            this.token = payload;
            if ( payload.getLine()<= 0 ) {
                // imaginary node; no line/pos info; scan backwards
                int i = -1;
                Object priorNode = nodes.LT(i);
                while ( priorNode!=null ) {
                    Token priorPayload = adaptor.getToken(priorNode);
                    if ( priorPayload!=null && priorPayload.getLine()>0 ) {
                        // we found the most recent real line / pos info
                        this.line = priorPayload.getLine();
                        this.charPositionInLine = priorPayload.getCharPositionInLine();
                        this.approximateLineInfo = true;
                        break;
                    }
                    --i;
                    priorNode = nodes.LT(i);
                }
            }
            else { // node created from real token
                this.line = payload.getLine();
                this.charPositionInLine = payload.getCharPositionInLine();
            }
        }
        else if ( this.node instanceof Tree ) {
            this.line = ((Tree)this.node).getLine();
            this.charPositionInLine = ((Tree)this.node).getCharPositionInLine();
            if ( this.node instanceof CommonTree ) {
                this.token = ((CommonTree)this.node).token;
            }
        }
        else {
            int type = adaptor.getType(this.node);
            String text = adaptor.getText(this.node);
            this.token = new CommonToken(type, text);
        }
    }

    /** Return the token type or char of the unexpected input element */
    public int getUnexpectedType() {
        if ( input instanceof TokenStream ) {
            return token.getType();
        }
        else if ( input instanceof TreeNodeStream ) {
            TreeNodeStream nodes = (TreeNodeStream)input;
            TreeAdaptor adaptor = nodes.getTreeAdaptor();
            return adaptor.getType(node);
        }
        else {
            return c;
        }
    }
}
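
Since these exceptions carry data rather than a message string, a report can be assembled from the public fields. A minimal sketch, not part of this commit:

import org.antlr.runtime.CharStream;
import org.antlr.v4.runtime.RecognitionException;

public class ErrorFormatter {
    /** Hypothetical helper: build a location header and a description of the offending symbol. */
    public static String report(RecognitionException e, String sourceName) {
        String where = sourceName+" line "+e.line+":"+e.charPositionInLine;
        if ( e.approximateLineInfo ) where = "near "+where;   // imaginary tree node; position is approximate
        String what;
        if ( e.input instanceof CharStream ) {
            what = "'"+(char)e.getUnexpectedType()+"'";       // lexer error: a character
        }
        else {
            what = "token/node type "+e.getUnexpectedType();  // parser or tree-parser error
        }
        return where+": unexpected "+what;
    }
}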
@@ -0,0 +1,148 @@
package org.antlr.v4.runtime;

import org.antlr.runtime.Token;
import org.antlr.v4.runtime.misc.LABitSet;

import java.util.Map;

/** The set of fields needed by an abstract recognizer to recognize input
 *  and recover from errors etc...  As a separate state object, it can be
 *  shared among multiple grammars; e.g., when one grammar imports another.
 *
 *  These fields are publicly visible but the actual state pointer per
 *  parser is protected.
 */
public class RecognizerSharedState {
    /** Track the set of token types that can follow any rule invocation.
     *  Stack grows upwards.  When it hits the max, it grows 2x in size
     *  and keeps going.
     */
    public LABitSet[] following = new LABitSet[BaseRecognizer.INITIAL_FOLLOW_STACK_SIZE];
    public int _fsp = -1;

    /** This is true when we see an error and before having successfully
     *  matched a token.  Prevents generation of more than one error message
     *  per error.
     */
    public boolean errorRecovery = false;

    /** The index into the input stream where the last error occurred.
     *  This is used to prevent infinite loops where an error is found
     *  but no token is consumed during recovery...another error is found,
     *  ad nauseam.  This is a failsafe mechanism to guarantee that at least
     *  one token/tree node is consumed for two errors.
     */
    public int lastErrorIndex = -1;

    /** In lieu of a return value, this indicates that a rule or token
     *  has failed to match.  Reset to false upon valid token match.
     */
    public boolean failed = false;

    /** Did the recognizer encounter a syntax error?  Track how many. */
    public int syntaxErrors = 0;

    /** If 0, no backtracking is going on.  Safe to exec actions etc...
     *  If >0 then it's the level of backtracking.
     */
    public int backtracking = 0;

    /** An array[size num rules] of Map<Integer,Integer> that tracks
     *  the stop token index for each rule.  ruleMemo[ruleIndex] is
     *  the memoization table for ruleIndex.  For key ruleStartIndex, you
     *  get back the stop token for the associated rule or MEMO_RULE_FAILED.
     *
     *  This is only used if rule memoization is on (which it is by default).
     */
    public Map[] ruleMemo;


    // LEXER FIELDS (must be in same state object to avoid casting
    // constantly in generated code and Lexer object) :(

    /** The goal of all lexer rules/methods is to create a token object.
     *  This is an instance variable as multiple rules may collaborate to
     *  create a single token.  nextToken will return this object after
     *  matching lexer rule(s).  If you subclass to allow multiple token
     *  emissions, then set this to the last token to be matched or
     *  something non-null so that the auto token emit mechanism will not
     *  emit another token.
     */
    public Token token;

    /** What character index in the stream did the current token start at?
     *  Needed, for example, to get the text for the current token.  Set at
     *  the start of nextToken.
     */
    public int tokenStartCharIndex = -1;

    /** The line on which the first character of the token resides */
    public int tokenStartLine;

    /** The character position of the first character within the line */
    public int tokenStartCharPositionInLine;

    /** The channel number for the current token */
    public int channel;

    /** The token type for the current token */
    public int type;

    /** You can set the text for the current token to override what is in
     *  the input char buffer.  Use setText(), or you can set this instance var.
     */
    public String text;

    public RecognizerSharedState() {;}

    public RecognizerSharedState(RecognizerSharedState state) {
        if ( this.following.length < state.following.length ) {
            this.following = new LABitSet[state.following.length];
        }
        System.arraycopy(state.following, 0, this.following, 0, state.following.length);
        this._fsp = state._fsp;
        this.errorRecovery = state.errorRecovery;
        this.lastErrorIndex = state.lastErrorIndex;
        this.failed = state.failed;
        this.syntaxErrors = state.syntaxErrors;
        this.backtracking = state.backtracking;
        if ( state.ruleMemo!=null ) {
            this.ruleMemo = new Map[state.ruleMemo.length];
            System.arraycopy(state.ruleMemo, 0, this.ruleMemo, 0, state.ruleMemo.length);
        }
        this.token = state.token;
        this.tokenStartCharIndex = state.tokenStartCharIndex;
        this.tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
        this.channel = state.channel;
        this.type = state.type;
        this.text = state.text;
    }
}
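
A hedged sketch of the sharing described in the class comment: two recognizers constructed over one state object. OuterParser and InnerParser are hypothetical generated parsers; only the RecognizerSharedState type and the (TokenStream, state) constructor shape come from this diff.

import org.antlr.runtime.TokenStream;
import org.antlr.v4.runtime.RecognizerSharedState;

public class ComposedParsers {
    public static void wire(TokenStream tokens) {
        RecognizerSharedState shared = new RecognizerSharedState();
        OuterParser outer = new OuterParser(tokens, shared);   // hypothetical generated parser
        InnerParser inner = new InnerParser(tokens, shared);   // hypothetical imported-grammar parser
        // Both recognizers now see the same errorRecovery, failed, backtracking,
        // and FOLLOW-stack values, which is what grammar import relies on.
    }
}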
@@ -0,0 +1,56 @@
package org.antlr.v4.runtime;

import org.antlr.runtime.IntStream;
import org.antlr.runtime.Token;

/** An extra token while parsing a TokenStream */
public class UnwantedTokenException extends MismatchedTokenException {
    /** Used for remote debugger deserialization */
    public UnwantedTokenException() {;}

    public UnwantedTokenException(int expecting, IntStream input) {
        super(expecting, input);
    }

    public Token getUnexpectedToken() {
        return token;
    }

    public String toString() {
        String exp = ", expected "+expecting;
        if ( expecting==Token.INVALID_TOKEN_TYPE ) {
            exp = "";
        }
        if ( token==null ) {
            return "UnwantedTokenException(found="+null+exp+")";
        }
        return "UnwantedTokenException(found="+token.getText()+exp+")";
    }
}
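
A short hedged sketch of reporting the extra token recorded here; tokenNames is assumed to be the generated recognizer's token-name table, and the helper itself is not part of this change.

import org.antlr.runtime.Token;
import org.antlr.v4.runtime.UnwantedTokenException;

public class ExtraTokenReporter {
    /** Hypothetical helper mirroring toString(), but with a symbolic expected-token name. */
    public static String describe(UnwantedTokenException e, String[] tokenNames) {
        Token extra = e.getUnexpectedToken();
        String found = extra!=null ? extra.getText() : "<none>";
        String expected = e.expecting!=Token.INVALID_TOKEN_TYPE ? ", expected "+tokenNames[e.expecting] : "";
        return "extraneous input "+found+expected;
    }
}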
@@ -0,0 +1,200 @@
package org.antlr.v4.runtime.misc;

import org.antlr.runtime.Token;

/** */
public class LABitSet implements Cloneable { // Cloneable is required so clone() (used by or()) doesn't throw
    public final static int BITS = 64;    // number of bits / long
    public final static int LOG_BITS = 6; // 2^6 == 64

    /* We will often need to do a mod operator (i mod nbits).  It
     * turns out that, for powers of two, this mod operation is the
     * same as (i & (nbits-1)).  Since mod is slow, we use a
     * precomputed mod mask to do the mod instead.
     */
    public final static int MOD_MASK = BITS - 1;

    /** The actual data bits */
    public long bits[];

    public boolean EOF; // is EOF in set (-1)?

    /** Construct a bitset of size one word (64 bits) */
    public LABitSet() {
        this(BITS);
    }

    /** Construct a bitset given the size
     *  @param nbits The size of the bitset in bits
     */
    public LABitSet(int nbits) {
        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
    }

    /** Construction from a static array of longs */
    public LABitSet(long[] bits_) {
        bits = bits_;
    }

    /** Construction from a static array of longs */
    public LABitSet(long[] bits_, boolean EOF) {
        bits = bits_;
        this.EOF = EOF;
    }

    /** Or this element into this set (grow as necessary to accommodate) */
    public void add(int el) {
        //System.out.println("add("+el+")");
        if ( el==Token.EOF ) { EOF = true; return; }
        int n = wordNumber(el);
        //System.out.println("word number is "+n);
        //System.out.println("bits.length "+bits.length);
        if (n >= bits.length) {
            growToInclude(el);
        }
        bits[n] |= bitMask(el);
    }

    public boolean member(int el) {
        if ( el == Token.EOF && EOF ) return true;
        int n = wordNumber(el);
        if (n >= bits.length) return false;
        return (bits[n] & bitMask(el)) != 0;
    }

    /** return this | a in a new set */
    public LABitSet or(LABitSet a) {
        if ( a==null ) {
            return this;
        }
        LABitSet s = (LABitSet)this.clone();
        s.orInPlace(a);
        return s;
    }

    public void orInPlace(LABitSet a) {
        if ( a==null ) {
            return;
        }
        // If this is smaller than a, grow this first
        if (a.bits.length > bits.length) {
            setSize(a.bits.length);
        }
        int min = Math.min(bits.length, a.bits.length);
        for (int i = min - 1; i >= 0; i--) {
            bits[i] |= a.bits[i];
        }
        EOF = EOF | a.EOF;
    }

    // remove this element from this set
    public void remove(int el) {
        if ( el==Token.EOF ) { EOF = false; return; }
        int n = wordNumber(el);
        if (n >= bits.length) {
            throw new IllegalArgumentException(el+" is outside set range of "+bits.length+" words");
        }
        bits[n] &= ~bitMask(el);
    }

    public Object clone() {
        LABitSet s;
        try {
            s = (LABitSet)super.clone();
            s.bits = new long[bits.length];
            System.arraycopy(bits, 0, s.bits, 0, bits.length);
            s.EOF = EOF;
        }
        catch (CloneNotSupportedException e) {
            throw new InternalError();
        }
        return s;
    }

    /**
     * Sets the size of a set.
     * @param nwords how many words the new set should be
     */
    void setSize(int nwords) {
        long newbits[] = new long[nwords];
        int n = Math.min(nwords, bits.length);
        System.arraycopy(bits, 0, newbits, 0, n);
        bits = newbits;
    }

    /** Transform a bit set into a string by formatting each element as an integer.
     *  @return A comma-separated list of values
     */
    public String toString() {
        StringBuffer buf = new StringBuffer();
        String separator = ",";
        boolean havePrintedAnElement = false;
        buf.append('{');

        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
            if (member(i)) {
                if (i > 0 && havePrintedAnElement ) {
                    buf.append(separator);
                }
                buf.append(i);
                havePrintedAnElement = true;
            }
        }
        buf.append('}');
        return buf.toString();
    }

//    /** Create a string representation where instead of integer elements, the
//     *  ith element of vocabulary is displayed instead.  Vocabulary is a Vector
//     *  of Strings.
//     *  separator The string to put in between elements
//     *  @return A comma-separated list of character constants.
//     */
//    public String toString(String separator, List vocabulary) {
//        String str = "";
//        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
//            if (member(i)) {
//                if (str.length() > 0) {
//                    str += separator;
//                }
//                if (i >= vocabulary.size()) {
//                    str += "'" + (char)i + "'";
//                }
//                else if (vocabulary.get(i) == null) {
//                    str += "'" + (char)i + "'";
//                }
//                else {
//                    str += (String)vocabulary.get(i);
//                }
//            }
//        }
//        return str;
//    }

    /**
     * Grows the set to a larger number of bits.
     * @param bit element that must fit in set
     */
    public void growToInclude(int bit) {
        int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
        long newbits[] = new long[newSize];
        System.arraycopy(bits, 0, newbits, 0, bits.length);
        bits = newbits;
    }

    static long bitMask(int bitNumber) {
        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
        return 1L << bitPosition;
    }

    static int numWordsToHold(int el) {
        return (el >> LOG_BITS) + 1;
    }

    static int wordNumber(int bit) {
        return bit >> LOG_BITS; // bit / BITS
    }
}
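
A minimal sketch exercising the set operations above; the token-type constants are invented for the example, but every call is to methods shown in this file.

import org.antlr.runtime.Token;
import org.antlr.v4.runtime.misc.LABitSet;

public class FollowSetDemo {
    public static void main(String[] args) {
        int SEMI = 4, RPAREN = 5;             // hypothetical token types

        LABitSet followStat = new LABitSet(); // one 64-bit word to start
        followStat.add(SEMI);
        followStat.add(Token.EOF);            // EOF is kept as a flag, not a bit

        // Generated code builds constant sets straight from long[] words:
        LABitSet followExpr = new LABitSet(new long[]{1L << RPAREN});

        followExpr.orInPlace(followStat);     // union in place; the EOF flag is merged too
        System.out.println(followExpr.member(SEMI));      // true
        System.out.println(followExpr.member(RPAREN));    // true
        System.out.println(followExpr.member(Token.EOF)); // true, via the EOF flag
    }
}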
@@ -11,22 +11,47 @@ javaTypeInitMap ::= [
]

// args must be <object-model-object>, <fields-resulting-in-STs>
ParserFile(f, parser, dfaDecls, bitSetDecls) ::= <<
// $ANTLR ANTLRVersion> <f.fileName> generatedTimestamp>
ParserFile(file, parser, dfaDecls, bitSetDecls) ::= <<
// $ANTLR ANTLRVersion> <file.fileName> generatedTimestamp>
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.RecognizerSharedState;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.misc.*;
import org.antlr.runtime.*;

<parser>
>>

Parser(p, funcs) ::= <<
public class <p.name> {
    public QStack _ctx;
public class <p.name> extends Parser {
    <!
    public enum TokenType {
        EOF(-1),
        <p.tokens.keys:{k | <k>(<p.tokens.(k)>)}; separator=", ">
        ;
        public int type;
        TokenType(int type) { this.type = type; }
    }
    !>
    <p.tokens.keys:{k | public static final int <k>=<p.tokens.(k)>;}; separator="\n">
    public QStack\<ParserRuleReturnScope> _ctx = new QStack\<ParserRuleReturnScope>();
    <p:ctor()>
    <funcs; separator="\n">
    <dfaDecls; separator="\n">
    <bitSetDecls; separator="\n">
}
>>

ctor(p) ::= <<
public <p.name>(TokenStream input) {
    this(input, new RecognizerSharedState());
}
public <p.name>(TokenStream input, RecognizerSharedState state) {
    super(input, state);
}
>>

/*
// S.g:5:1: b returns [String q, float x] : A ;
public final S.b_return b() throws RecognitionException {
@ -37,7 +62,7 @@ public class <p.name> {
|
|||
RuleFunction(f,code,decls,context) ::= <<
|
||||
<context>
|
||||
|
||||
<if(f.modifiers)><f.modifiers:{f | <f> }><else>public final <endif><if(f.retType)><f.retType><else>void<endif> <f.name>(<f.args:{a|<a>, }>BitSet _follow) {
|
||||
<if(f.modifiers)><f.modifiers:{f | <f> }><else>public final <endif><if(f.retType)><f.retType><else>void<endif> <f.name>(<f.args:{a|<a>, }>LABitSet _follow) throws RecognitionException {
|
||||
<! <if(f.scope)><f.scope.name>.push(new <f.scope.name>);<endif> !>
|
||||
<if(f.retType)>
|
||||
_ctx.push(new <f.context.name>(<f.args:{a|<a.name>}; separator=",">));
|
||||
|
@ -47,7 +72,7 @@ RuleFunction(f,code,decls,context) ::= <<
|
|||
<code>
|
||||
}
|
||||
finally {
|
||||
<if(f.retType)>return _ctx.pop();<endif>
|
||||
<if(f.retType)>return (<f.retType>)_ctx.pop();<endif>
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
@ -144,7 +169,7 @@ InvokeRule(r) ::= <<
|
|||
>>
|
||||
|
||||
MatchToken(m) ::= <<
|
||||
<if(m.label)><m.label> = <endif>match(<m.name>, <m.follow.name>);
|
||||
<if(m.label)><m.label> = (Token)<endif>match(<m.name>, <m.follow.name>);
|
||||
>>
|
||||
|
||||
Action(a) ::= "<a.ast.text>"
|
||||
|
@ -156,13 +181,14 @@ AddToList(a) ::= "<a.listName>.add(<a.opWithResultToAdd.label>);"
|
|||
|
||||
//Decl(d) ::= "<d.type> <d.varName> = <d.type:initValue()>;"
|
||||
|
||||
TokenDecl(t) ::= "Token <t.varName>;"
|
||||
TokenListDecl(t) ::= "List\<Token> <t.varName> = new ArrayList\<Token>();"
|
||||
TokenDecl(t) ::= "Token <t.name>;"
|
||||
TokenListDecl(t) ::= "List\<Token> <t.name> = new ArrayList\<Token>();"
|
||||
RuleContextDecl(r) ::= "<r.ctxName> <r.name>;"
|
||||
|
||||
CaptureNextToken(d) ::= "<d.varName> = input.LA(1);"
|
||||
|
||||
StructDecl(s,attrs) ::= <<
|
||||
public static class <s.name> extends ParserContextScope {
|
||||
public static class <s.name> extends ParserRuleReturnScope {
|
||||
<attrs:{a | <a>;}; separator="\n">
|
||||
<if(s.ctorAttrs)>
|
||||
public <s.name>(<s.ctorAttrs; separator=", ">) {
|
||||
|
@ -186,7 +212,7 @@ DFADecl(dfa) ::= <<
|
|||
>>
|
||||
|
||||
BitSetDecl(b) ::= <<
// define <b.name>
public static final LABitSet <b.name>=new LABitSet(new long[]{<b.fset.bits:{<it>L};separator=",">}<if(b.fset.EOF)>, true<endif>);
>>
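
For reference, a hedged example of the kind of constant this template expands to; the name and bit values are invented, only the shape follows the template above.

// Hypothetical expansion for a follow set containing token types 4 and 5 plus EOF:
public static final LABitSet FOLLOW_expr_in_stat42 =
    new LABitSet(new long[]{0x0000000000000030L}, true);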
|
||||
|
||||
/** Using a type to init value map, try to init a type; if not in table
|
||||
|
|
|
@ -2,7 +2,6 @@ package org.antlr.v4.codegen;
|
|||
|
||||
import org.antlr.v4.automata.DFA;
|
||||
import org.antlr.v4.codegen.src.*;
|
||||
import org.antlr.v4.misc.IntSet;
|
||||
import org.antlr.v4.misc.IntervalSet;
|
||||
import org.antlr.v4.parse.ANTLRParser;
|
||||
import org.antlr.v4.tool.BlockAST;
|
||||
|
@ -99,7 +98,7 @@ public abstract class OutputModelFactory {
|
|||
public String getRuleFunctionContextStructName(String ruleName) { return ruleName+"_ctx"; }
|
||||
public String getDynamicScopeStructName(String ruleName) { return ruleName+"_scope"; }
|
||||
|
||||
public BitSetDecl createFollowBitSet(GrammarAST ast, IntSet set) {
|
||||
public BitSetDecl createFollowBitSet(GrammarAST ast, IntervalSet set) {
|
||||
String inRuleName = ast.nfaState.rule.name;
|
||||
String elementName = ast.getText(); // assume rule ref
|
||||
if ( ast.getType() == ANTLRParser.TOKEN_REF ) {
|
||||
|
@ -110,7 +109,7 @@ public abstract class OutputModelFactory {
|
|||
return b;
|
||||
}
|
||||
|
||||
public BitSetDecl createTestBitSet(GrammarAST ast, IntSet set) {
|
||||
public BitSetDecl createTestBitSet(GrammarAST ast, IntervalSet set) {
|
||||
String inRuleName = ast.nfaState.rule.name;
|
||||
String name = "LOOK_in_"+inRuleName+"_"+ast.token.getTokenIndex();
|
||||
BitSetDecl b = new BitSetDecl(this, name, set);
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
package org.antlr.v4.codegen;
|
||||
|
||||
import org.antlr.v4.codegen.src.Decl;
|
||||
|
||||
/** */
|
||||
public class RuleContextDecl extends Decl {
|
||||
public String ctxName;
|
||||
public RuleContextDecl(OutputModelFactory factory, String name, String ctxName) {
|
||||
super(factory, name);
|
||||
this.ctxName = ctxName;
|
||||
}
|
||||
}
|
|
@ -1,13 +1,13 @@
|
|||
package org.antlr.v4.codegen.src;
|
||||
|
||||
import org.antlr.v4.codegen.OutputModelFactory;
|
||||
import org.antlr.v4.misc.IntSet;
|
||||
import org.antlr.v4.misc.IntervalSet;
|
||||
|
||||
/** */
|
||||
public class BitSetDecl extends Decl {
|
||||
public IntSet fset;
|
||||
public BitSetDecl(OutputModelFactory factory, String name, IntSet fset) {
|
||||
public Object fset; // runtime bitset
|
||||
public BitSetDecl(OutputModelFactory factory, String name, IntervalSet fset) {
|
||||
super(factory, name);
|
||||
this.fset = fset;
|
||||
this.fset = fset.toRuntimeBitSet();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ import org.antlr.v4.codegen.OutputModelFactory;
|
|||
/** */
|
||||
public class Decl extends SrcOp {
|
||||
public String name;
|
||||
public String decl;
|
||||
public String decl; // whole thing if copied from action
|
||||
|
||||
public Decl(OutputModelFactory factory, String name, String decl) {
|
||||
this(factory, name);
|
||||
|
|
|
@ -2,6 +2,7 @@ package org.antlr.v4.codegen.src;
|
|||
|
||||
import org.antlr.v4.analysis.LinearApproximator;
|
||||
import org.antlr.v4.codegen.OutputModelFactory;
|
||||
import org.antlr.v4.codegen.RuleContextDecl;
|
||||
import org.antlr.v4.misc.IntervalSet;
|
||||
import org.antlr.v4.parse.ANTLRParser;
|
||||
import org.antlr.v4.tool.GrammarAST;
|
||||
|
@ -19,10 +20,15 @@ public class InvokeRule extends SrcOp implements LabeledOp {
|
|||
this.factory = factory;
|
||||
this.ast = ast;
|
||||
this.name = ast.getText();
|
||||
Rule r = factory.g.getRule(name);
|
||||
if ( r.retvals!=null || r.args!=null ) {
|
||||
ctxName = factory.getRuleFunctionContextStructName(r.name);
|
||||
}
|
||||
|
||||
if ( labelAST!=null ) {
|
||||
label = labelAST.getText();
|
||||
// TokenDecl d = new TokenDecl(label);
|
||||
// factory.currentRule.peek().addDecl(d);
|
||||
RuleContextDecl d = new RuleContextDecl(factory,label,ctxName);
|
||||
factory.currentRule.peek().addDecl(d);
|
||||
if ( labelAST.parent.getType() == ANTLRParser.PLUS_ASSIGN ) {
|
||||
// TokenListDecl l = new TokenListDecl(factory.getListLabel(label));
|
||||
// factory.currentRule.peek().addDecl(l);
|
||||
|
@ -32,11 +38,6 @@ public class InvokeRule extends SrcOp implements LabeledOp {
|
|||
argExprs = ast.getChild(0).getText();
|
||||
}
|
||||
|
||||
Rule r = factory.g.getRule(name);
|
||||
if ( r.retvals!=null || r.args!=null ) {
|
||||
ctxName = factory.getRuleFunctionContextStructName(r.name);
|
||||
}
|
||||
|
||||
// compute follow
|
||||
LinearApproximator approx = new LinearApproximator(factory.g, -1);
|
||||
IntervalSet fset = approx.LOOK(ast.nfaState.transition(0).target);
|
||||
|
|
|
@ -28,7 +28,7 @@ public class MatchToken extends SrcOp implements LabeledOp {
|
|||
|
||||
LinearApproximator approx = new LinearApproximator(factory.g, -1);
|
||||
IntervalSet fset = approx.LOOK(ast.nfaState.transition(0).target);
|
||||
System.out.println("follow="+follow);
|
||||
System.out.println("follow="+fset);
|
||||
follow = factory.createFollowBitSet(ast, fset);
|
||||
factory.defineBitSet(follow);
|
||||
}
|
||||
|
|
|
@ -4,11 +4,14 @@ import org.antlr.v4.codegen.OutputModelFactory;
|
|||
import org.antlr.v4.tool.Rule;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/** */
|
||||
public class Parser extends OutputModelObject {
|
||||
public String name;
|
||||
public Map<String,Integer> tokens;
|
||||
public List<RuleFunction> funcs = new ArrayList<RuleFunction>();
|
||||
ParserFile file;
|
||||
|
||||
|
@ -16,6 +19,11 @@ public class Parser extends OutputModelObject {
|
|||
this.factory = factory;
|
||||
this.file = file; // who contains us?
|
||||
name = factory.g.getRecognizerName();
|
||||
tokens = new LinkedHashMap<String,Integer>();
|
||||
for (String t : factory.g.tokenNameToTypeMap.keySet()) {
|
||||
Integer ttype = factory.g.tokenNameToTypeMap.get(t);
|
||||
if ( ttype>0 ) tokens.put(t, ttype);
|
||||
}
|
||||
for (Rule r : factory.g.rules.values()) funcs.add( new RuleFunction(factory, r) );
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
package org.antlr.v4.misc;
|
||||
|
||||
import org.antlr.v4.automata.Label;
|
||||
import org.antlr.v4.runtime.misc.LABitSet;
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -684,9 +685,9 @@ public class IntervalSet implements IntSet {
|
|||
return values;
|
||||
}
|
||||
|
||||
public org.antlr.runtime.BitSet toRuntimeBitSet() {
|
||||
org.antlr.runtime.BitSet s =
|
||||
new org.antlr.runtime.BitSet(getMaxElement()+1);
|
||||
public LABitSet toRuntimeBitSet() {
|
||||
LABitSet s =
|
||||
new LABitSet(getMaxElement()+1);
|
||||
int n = intervals.size();
|
||||
for (int i = 0; i < n; i++) {
|
||||
Interval I = (Interval) intervals.get(i);
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// $ANTLR ${project.version} ${buildNumber} ANTLRLexer.g 2010-04-19 15:55:53
|
||||
// $ANTLR ${project.version} ${buildNumber} ANTLRLexer.g 2010-05-16 13:12:30
|
||||
|
||||
/*
|
||||
[The "BSD licence"]
|
||||
|
@ -264,7 +264,7 @@ public class ANTLRLexer extends Lexer {
|
|||
if ( (( input.LA(2) != '/')) ) {
|
||||
alt3=1;
|
||||
}
|
||||
else if ( ((( true )||(( true )&&( !(input.LA(1) == '*' && input.LA(2) == '/') )))) ) {
|
||||
else if ( (((( true )&&( !(input.LA(1) == '*' && input.LA(2) == '/') ))||( true ))) ) {
|
||||
alt3=2;
|
||||
}
|
||||
else {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// $ANTLR ${project.version} ${buildNumber} ANTLRParser.g 2010-04-19 15:55:56
|
||||
// $ANTLR ${project.version} ${buildNumber} ANTLRParser.g 2010-05-16 13:12:33
|
||||
|
||||
/*
|
||||
[The "BSD licence"]
|
||||
|
@ -339,7 +339,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: id, grammarType, mode, rules, DOC_COMMENT, prequelConstruct
|
||||
// elements: prequelConstruct, mode, rules, DOC_COMMENT, id, grammarType
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -849,7 +849,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: option, OPTIONS
|
||||
// elements: OPTIONS, option
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1465,7 +1465,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: tokenSpec, TOKENS
|
||||
// elements: TOKENS, tokenSpec
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1620,7 +1620,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: id, STRING_LITERAL, ASSIGN
|
||||
// elements: STRING_LITERAL, id, ASSIGN
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1760,7 +1760,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: id, ACTION, SCOPE
|
||||
// elements: ACTION, SCOPE, id
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1914,7 +1914,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: AT, id, actionScopeName, ACTION
|
||||
// elements: actionScopeName, ACTION, id, AT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -2606,7 +2606,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ruleBlock, rulePrequels, ruleModifiers, ruleReturns, exceptionGroup, ARG_ACTION, DOC_COMMENT, id
|
||||
// elements: rulePrequels, ARG_ACTION, DOC_COMMENT, ruleReturns, ruleBlock, ruleModifiers, id, exceptionGroup
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -2835,7 +2835,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ACTION, ARG_ACTION, CATCH
|
||||
// elements: CATCH, ACTION, ARG_ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -2918,7 +2918,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: FINALLY, ACTION
|
||||
// elements: ACTION, FINALLY
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -3352,7 +3352,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: qid, THROWS
|
||||
// elements: THROWS, qid
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -3561,7 +3561,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: id, SCOPE
|
||||
// elements: SCOPE, id
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -4178,7 +4178,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: elements, rewrite
|
||||
// elements: rewrite, elements
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -4664,7 +4664,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ebnfSuffix, atom
|
||||
// elements: atom, ebnfSuffix
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -4889,7 +4889,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: treeSpec, ebnfSuffix
|
||||
// elements: ebnfSuffix, treeSpec
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -5372,7 +5372,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: blockSuffixe, block
|
||||
// elements: block, blockSuffixe
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6177,7 +6177,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: NOT, terminal
|
||||
// elements: terminal, NOT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6220,7 +6220,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: NOT, block
|
||||
// elements: block, NOT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6396,7 +6396,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ra, altList, optionsSpec
|
||||
// elements: altList, ra, optionsSpec
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6571,7 +6571,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: RULE_REF, ARG_ACTION, op
|
||||
// elements: ARG_ACTION, RULE_REF, op
|
||||
// token labels: op
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6618,7 +6618,7 @@ public class ANTLRParser extends Parser {
|
|||
{
|
||||
|
||||
// AST REWRITE
|
||||
// elements: RULE_REF, ARG_ACTION
|
||||
// elements: ARG_ACTION, RULE_REF
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6838,7 +6838,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ARG_ACTION, elementOptions, TOKEN_REF
|
||||
// elements: TOKEN_REF, elementOptions, ARG_ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6909,7 +6909,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: elementOptions, STRING_LITERAL
|
||||
// elements: STRING_LITERAL, elementOptions
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -7362,7 +7362,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: predicatedRewrite, nakedRewrite
|
||||
// elements: nakedRewrite, predicatedRewrite
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -7450,7 +7450,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rewriteAlt, rewriteAlt, SEMPRED, SEMPRED
|
||||
// elements: rewriteAlt, SEMPRED, rewriteAlt, SEMPRED
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -7992,7 +7992,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rewriteTree, ebnfSuffix
|
||||
// elements: ebnfSuffix, rewriteTree
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8231,7 +8231,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: TOKEN_REF, ARG_ACTION, elementOptions
|
||||
// elements: elementOptions, ARG_ACTION, TOKEN_REF
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8472,7 +8472,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rewriteTreeAlt, ebnfSuffix
|
||||
// elements: ebnfSuffix, rewriteTreeAlt
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8609,7 +8609,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rewriteTreeElement, TREE_BEGIN, rewriteTreeAtom
|
||||
// elements: TREE_BEGIN, rewriteTreeAtom, rewriteTreeElement
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8913,7 +8913,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: id, rewriteTemplateArgs
|
||||
// elements: rewriteTemplateArgs, id
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -9025,7 +9025,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ACTION, rewriteTemplateArgs
|
||||
// elements: rewriteTemplateArgs, ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -9271,7 +9271,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: id, ACTION
|
||||
// elements: ACTION, id
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// $ANTLR ${project.version} ${buildNumber} ASTVerifier.g 2010-04-19 15:55:56
|
||||
// $ANTLR ${project.version} ${buildNumber} ASTVerifier.g 2010-05-16 13:12:34
|
||||
|
||||
/*
|
||||
[The "BSD license"]
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
lexer grammar ActionSplitter;
|
||||
|
||||
options { filter=true; superClass='org.antlr.v4.runtime.Lexer'; }
|
||||
options { filter=true; }
|
||||
|
||||
@header {
|
||||
package org.antlr.v4.parse;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// $ANTLR ${project.version} ${buildNumber} ActionSplitter.g 2010-04-19 15:55:56
|
||||
// $ANTLR ${project.version} ${buildNumber} ActionSplitter.g 2010-05-16 13:12:33
|
||||
|
||||
package org.antlr.v4.parse;
|
||||
|
||||
|
@ -7,7 +7,7 @@ import org.antlr.v4.runtime.CommonToken;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
||||
public class ActionSplitter extends Lexer {
|
||||
public static final int INDIRECT_TEMPLATE_INSTANCE=24;
|
||||
public static final int LINE_COMMENT=5;
|
||||
public static final int ESC=6;
|
||||
|
@ -336,21 +336,21 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:51:4: '$' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart133 = getCharIndex();
|
||||
int xStartLine133 = getLine();
|
||||
int xStartCharPos133 = getCharPositionInLine();
|
||||
int xStart128 = getCharIndex();
|
||||
int xStartLine128 = getLine();
|
||||
int xStartCharPos128 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart133, getCharIndex()-1);
|
||||
x.setLine(xStartLine133);
|
||||
x.setCharPositionInLine(xStartCharPos133);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart128, getCharIndex()-1);
|
||||
x.setLine(xStartLine128);
|
||||
x.setCharPositionInLine(xStartCharPos128);
|
||||
match('.'); if (state.failed) return ;
|
||||
int yStart139 = getCharIndex();
|
||||
int yStartLine139 = getLine();
|
||||
int yStartCharPos139 = getCharPositionInLine();
|
||||
int yStart134 = getCharIndex();
|
||||
int yStartLine134 = getLine();
|
||||
int yStartCharPos134 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart139, getCharIndex()-1);
|
||||
y.setLine(yStartLine139);
|
||||
y.setCharPositionInLine(yStartCharPos139);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart134, getCharIndex()-1);
|
||||
y.setLine(yStartLine134);
|
||||
y.setCharPositionInLine(yStartCharPos134);
|
||||
// ActionSplitter.g:51:22: ( WS )?
|
||||
int alt5=2;
|
||||
int LA5_0 = input.LA(1);
|
||||
|
@ -370,13 +370,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart148 = getCharIndex();
|
||||
int exprStartLine148 = getLine();
|
||||
int exprStartCharPos148 = getCharPositionInLine();
|
||||
int exprStart143 = getCharIndex();
|
||||
int exprStartLine143 = getLine();
|
||||
int exprStartCharPos143 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart148, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine148);
|
||||
expr.setCharPositionInLine(exprStartCharPos148);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart143, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine143);
|
||||
expr.setCharPositionInLine(exprStartCharPos143);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setQualifiedAttr(getText(), x, y, expr);
|
||||
|
@ -404,21 +404,21 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:56:4: '$' x= ID '.' y= ID {...}?
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart169 = getCharIndex();
|
||||
int xStartLine169 = getLine();
|
||||
int xStartCharPos169 = getCharPositionInLine();
|
||||
int xStart164 = getCharIndex();
|
||||
int xStartLine164 = getLine();
|
||||
int xStartCharPos164 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart169, getCharIndex()-1);
|
||||
x.setLine(xStartLine169);
|
||||
x.setCharPositionInLine(xStartCharPos169);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart164, getCharIndex()-1);
|
||||
x.setLine(xStartLine164);
|
||||
x.setCharPositionInLine(xStartCharPos164);
|
||||
match('.'); if (state.failed) return ;
|
||||
int yStart175 = getCharIndex();
|
||||
int yStartLine175 = getLine();
|
||||
int yStartCharPos175 = getCharPositionInLine();
|
||||
int yStart170 = getCharIndex();
|
||||
int yStartLine170 = getLine();
|
||||
int yStartCharPos170 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart175, getCharIndex()-1);
|
||||
y.setLine(yStartLine175);
|
||||
y.setCharPositionInLine(yStartCharPos175);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart170, getCharIndex()-1);
|
||||
y.setLine(yStartLine170);
|
||||
y.setCharPositionInLine(yStartCharPos170);
|
||||
if ( !((input.LA(1)!='(')) ) {
|
||||
if (state.backtracking>0) {state.failed=true; return ;}
|
||||
throw new FailedPredicateException(input, "QUALIFIED_ATTR", "input.LA(1)!='('");
|
||||
|
@ -450,22 +450,22 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:60:4: '$' x= ID '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart194 = getCharIndex();
|
||||
int xStartLine194 = getLine();
|
||||
int xStartCharPos194 = getCharPositionInLine();
|
||||
int xStart189 = getCharIndex();
|
||||
int xStartLine189 = getLine();
|
||||
int xStartCharPos189 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart194, getCharIndex()-1);
|
||||
x.setLine(xStartLine194);
|
||||
x.setCharPositionInLine(xStartCharPos194);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart189, getCharIndex()-1);
|
||||
x.setLine(xStartLine189);
|
||||
x.setCharPositionInLine(xStartCharPos189);
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart200 = getCharIndex();
|
||||
int yStartLine200 = getLine();
|
||||
int yStartCharPos200 = getCharPositionInLine();
|
||||
int yStart195 = getCharIndex();
|
||||
int yStartLine195 = getLine();
|
||||
int yStartCharPos195 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart200, getCharIndex()-1);
|
||||
y.setLine(yStartLine200);
|
||||
y.setCharPositionInLine(yStartCharPos200);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart195, getCharIndex()-1);
|
||||
y.setLine(yStartLine195);
|
||||
y.setCharPositionInLine(yStartCharPos195);
|
||||
// ActionSplitter.g:60:23: ( WS )?
|
||||
int alt6=2;
|
||||
int LA6_0 = input.LA(1);
|
||||
|
@ -485,13 +485,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart209 = getCharIndex();
|
||||
int exprStartLine209 = getLine();
|
||||
int exprStartCharPos209 = getCharPositionInLine();
|
||||
int exprStart204 = getCharIndex();
|
||||
int exprStartLine204 = getLine();
|
||||
int exprStartCharPos204 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart209, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine209);
|
||||
expr.setCharPositionInLine(exprStartCharPos209);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart204, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine204);
|
||||
expr.setCharPositionInLine(exprStartCharPos204);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setDynamicScopeAttr(getText(), x, y, expr);
|
||||
|
@ -519,22 +519,22 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:65:4: '$' x= ID '::' y= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart230 = getCharIndex();
|
||||
int xStartLine230 = getLine();
|
||||
int xStartCharPos230 = getCharPositionInLine();
|
||||
int xStart225 = getCharIndex();
|
||||
int xStartLine225 = getLine();
|
||||
int xStartCharPos225 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart230, getCharIndex()-1);
|
||||
x.setLine(xStartLine230);
|
||||
x.setCharPositionInLine(xStartCharPos230);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart225, getCharIndex()-1);
|
||||
x.setLine(xStartLine225);
|
||||
x.setCharPositionInLine(xStartCharPos225);
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart236 = getCharIndex();
|
||||
int yStartLine236 = getLine();
|
||||
int yStartCharPos236 = getCharPositionInLine();
|
||||
int yStart231 = getCharIndex();
|
||||
int yStartLine231 = getLine();
|
||||
int yStartCharPos231 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart236, getCharIndex()-1);
|
||||
y.setLine(yStartLine236);
|
||||
y.setCharPositionInLine(yStartCharPos236);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart231, getCharIndex()-1);
|
||||
y.setLine(yStartLine231);
|
||||
y.setCharPositionInLine(yStartCharPos231);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.dynamicScopeAttr(getText(), x, y);
|
||||
}
|
||||
|
@ -563,32 +563,32 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:77:4: '$' x= ID '[' '-' index= SCOPE_INDEX_EXPR ']' '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart255 = getCharIndex();
|
||||
int xStartLine255 = getLine();
|
||||
int xStartCharPos255 = getCharPositionInLine();
|
||||
int xStart250 = getCharIndex();
|
||||
int xStartLine250 = getLine();
|
||||
int xStartCharPos250 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart255, getCharIndex()-1);
|
||||
x.setLine(xStartLine255);
|
||||
x.setCharPositionInLine(xStartCharPos255);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart250, getCharIndex()-1);
|
||||
x.setLine(xStartLine250);
|
||||
x.setCharPositionInLine(xStartCharPos250);
|
||||
match('['); if (state.failed) return ;
|
||||
match('-'); if (state.failed) return ;
|
||||
int indexStart263 = getCharIndex();
|
||||
int indexStartLine263 = getLine();
|
||||
int indexStartCharPos263 = getCharPositionInLine();
|
||||
int indexStart258 = getCharIndex();
|
||||
int indexStartLine258 = getLine();
|
||||
int indexStartCharPos258 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart263, getCharIndex()-1);
|
||||
index.setLine(indexStartLine263);
|
||||
index.setCharPositionInLine(indexStartCharPos263);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart258, getCharIndex()-1);
|
||||
index.setLine(indexStartLine258);
|
||||
index.setCharPositionInLine(indexStartCharPos258);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart271 = getCharIndex();
|
||||
int yStartLine271 = getLine();
|
||||
int yStartCharPos271 = getCharPositionInLine();
|
||||
int yStart266 = getCharIndex();
|
||||
int yStartLine266 = getLine();
|
||||
int yStartCharPos266 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart271, getCharIndex()-1);
|
||||
y.setLine(yStartLine271);
|
||||
y.setCharPositionInLine(yStartCharPos271);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart266, getCharIndex()-1);
|
||||
y.setLine(yStartLine266);
|
||||
y.setCharPositionInLine(yStartCharPos266);
|
||||
// ActionSplitter.g:78:3: ( WS )?
|
||||
int alt7=2;
|
||||
int LA7_0 = input.LA(1);
|
||||
|
@ -608,13 +608,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart282 = getCharIndex();
|
||||
int exprStartLine282 = getLine();
|
||||
int exprStartCharPos282 = getCharPositionInLine();
|
||||
int exprStart277 = getCharIndex();
|
||||
int exprStartLine277 = getLine();
|
||||
int exprStartCharPos277 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart282, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine282);
|
||||
expr.setCharPositionInLine(exprStartCharPos282);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart277, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine277);
|
||||
expr.setCharPositionInLine(exprStartCharPos277);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setDynamicNegativeIndexedScopeAttr(getText(), x, y, index, expr);
|
||||
|
@ -643,32 +643,32 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:83:4: '$' x= ID '[' '-' index= SCOPE_INDEX_EXPR ']' '::' y= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart303 = getCharIndex();
|
||||
int xStartLine303 = getLine();
|
||||
int xStartCharPos303 = getCharPositionInLine();
|
||||
int xStart298 = getCharIndex();
|
||||
int xStartLine298 = getLine();
|
||||
int xStartCharPos298 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart303, getCharIndex()-1);
|
||||
x.setLine(xStartLine303);
|
||||
x.setCharPositionInLine(xStartCharPos303);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart298, getCharIndex()-1);
|
||||
x.setLine(xStartLine298);
|
||||
x.setCharPositionInLine(xStartCharPos298);
|
||||
match('['); if (state.failed) return ;
|
||||
match('-'); if (state.failed) return ;
|
||||
int indexStart311 = getCharIndex();
|
||||
int indexStartLine311 = getLine();
|
||||
int indexStartCharPos311 = getCharPositionInLine();
|
||||
int indexStart306 = getCharIndex();
|
||||
int indexStartLine306 = getLine();
|
||||
int indexStartCharPos306 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart311, getCharIndex()-1);
|
||||
index.setLine(indexStartLine311);
|
||||
index.setCharPositionInLine(indexStartCharPos311);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart306, getCharIndex()-1);
|
||||
index.setLine(indexStartLine306);
|
||||
index.setCharPositionInLine(indexStartCharPos306);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart319 = getCharIndex();
|
||||
int yStartLine319 = getLine();
|
||||
int yStartCharPos319 = getCharPositionInLine();
|
||||
int yStart314 = getCharIndex();
|
||||
int yStartLine314 = getLine();
|
||||
int yStartCharPos314 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart319, getCharIndex()-1);
|
||||
y.setLine(yStartLine319);
|
||||
y.setCharPositionInLine(yStartCharPos319);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart314, getCharIndex()-1);
|
||||
y.setLine(yStartLine314);
|
||||
y.setCharPositionInLine(yStartCharPos314);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.dynamicNegativeIndexedScopeAttr(getText(), x, y, index);
|
||||
}
|
||||
|
@ -697,31 +697,31 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:88:4: '$' x= ID '[' index= SCOPE_INDEX_EXPR ']' '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart338 = getCharIndex();
|
||||
int xStartLine338 = getLine();
|
||||
int xStartCharPos338 = getCharPositionInLine();
|
||||
int xStart333 = getCharIndex();
|
||||
int xStartLine333 = getLine();
|
||||
int xStartCharPos333 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart338, getCharIndex()-1);
|
||||
x.setLine(xStartLine338);
|
||||
x.setCharPositionInLine(xStartCharPos338);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart333, getCharIndex()-1);
|
||||
x.setLine(xStartLine333);
|
||||
x.setCharPositionInLine(xStartCharPos333);
|
||||
match('['); if (state.failed) return ;
|
||||
int indexStart344 = getCharIndex();
|
||||
int indexStartLine344 = getLine();
|
||||
int indexStartCharPos344 = getCharPositionInLine();
|
||||
int indexStart339 = getCharIndex();
|
||||
int indexStartLine339 = getLine();
|
||||
int indexStartCharPos339 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart344, getCharIndex()-1);
|
||||
index.setLine(indexStartLine344);
|
||||
index.setCharPositionInLine(indexStartCharPos344);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart339, getCharIndex()-1);
|
||||
index.setLine(indexStartLine339);
|
||||
index.setCharPositionInLine(indexStartCharPos339);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart352 = getCharIndex();
|
||||
int yStartLine352 = getLine();
|
||||
int yStartCharPos352 = getCharPositionInLine();
|
||||
int yStart347 = getCharIndex();
|
||||
int yStartLine347 = getLine();
|
||||
int yStartCharPos347 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart352, getCharIndex()-1);
|
||||
y.setLine(yStartLine352);
|
||||
y.setCharPositionInLine(yStartCharPos352);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart347, getCharIndex()-1);
|
||||
y.setLine(yStartLine347);
|
||||
y.setCharPositionInLine(yStartCharPos347);
|
||||
// ActionSplitter.g:89:3: ( WS )?
|
||||
int alt8=2;
|
||||
int LA8_0 = input.LA(1);
|
||||
|
@ -741,13 +741,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart363 = getCharIndex();
|
||||
int exprStartLine363 = getLine();
|
||||
int exprStartCharPos363 = getCharPositionInLine();
|
||||
int exprStart358 = getCharIndex();
|
||||
int exprStartLine358 = getLine();
|
||||
int exprStartCharPos358 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart363, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine363);
|
||||
expr.setCharPositionInLine(exprStartCharPos363);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart358, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine358);
|
||||
expr.setCharPositionInLine(exprStartCharPos358);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setDynamicAbsoluteIndexedScopeAttr(getText(), x, y, index, expr);
|
||||
|
@ -776,31 +776,31 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:94:4: '$' x= ID '[' index= SCOPE_INDEX_EXPR ']' '::' y= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart384 = getCharIndex();
|
||||
int xStartLine384 = getLine();
|
||||
int xStartCharPos384 = getCharPositionInLine();
|
||||
int xStart379 = getCharIndex();
|
||||
int xStartLine379 = getLine();
|
||||
int xStartCharPos379 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart384, getCharIndex()-1);
|
||||
x.setLine(xStartLine384);
|
||||
x.setCharPositionInLine(xStartCharPos384);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart379, getCharIndex()-1);
|
||||
x.setLine(xStartLine379);
|
||||
x.setCharPositionInLine(xStartCharPos379);
|
||||
match('['); if (state.failed) return ;
|
||||
int indexStart390 = getCharIndex();
|
||||
int indexStartLine390 = getLine();
|
||||
int indexStartCharPos390 = getCharPositionInLine();
|
||||
int indexStart385 = getCharIndex();
|
||||
int indexStartLine385 = getLine();
|
||||
int indexStartCharPos385 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart390, getCharIndex()-1);
|
||||
index.setLine(indexStartLine390);
|
||||
index.setCharPositionInLine(indexStartCharPos390);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart385, getCharIndex()-1);
|
||||
index.setLine(indexStartLine385);
|
||||
index.setCharPositionInLine(indexStartCharPos385);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart398 = getCharIndex();
|
||||
int yStartLine398 = getLine();
|
||||
int yStartCharPos398 = getCharPositionInLine();
|
||||
int yStart393 = getCharIndex();
|
||||
int yStartLine393 = getLine();
|
||||
int yStartCharPos393 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart398, getCharIndex()-1);
|
||||
y.setLine(yStartLine398);
|
||||
y.setCharPositionInLine(yStartCharPos398);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart393, getCharIndex()-1);
|
||||
y.setLine(yStartLine393);
|
||||
y.setCharPositionInLine(yStartCharPos393);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.dynamicAbsoluteIndexedScopeAttr(getText(), x, y, index);
|
||||
}
|
||||
|
@ -827,13 +827,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:99:4: '$' x= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart417 = getCharIndex();
|
||||
int xStartLine417 = getLine();
|
||||
int xStartCharPos417 = getCharPositionInLine();
|
||||
int xStart412 = getCharIndex();
|
||||
int xStartLine412 = getLine();
|
||||
int xStartCharPos412 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart417, getCharIndex()-1);
|
||||
x.setLine(xStartLine417);
|
||||
x.setCharPositionInLine(xStartCharPos417);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart412, getCharIndex()-1);
|
||||
x.setLine(xStartLine412);
|
||||
x.setCharPositionInLine(xStartCharPos412);
|
||||
// ActionSplitter.g:99:13: ( WS )?
|
||||
int alt9=2;
|
||||
int LA9_0 = input.LA(1);
|
||||
|
@ -853,13 +853,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart426 = getCharIndex();
|
||||
int exprStartLine426 = getLine();
|
||||
int exprStartCharPos426 = getCharPositionInLine();
|
||||
int exprStart421 = getCharIndex();
|
||||
int exprStartLine421 = getLine();
|
||||
int exprStartCharPos421 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart426, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine426);
|
||||
expr.setCharPositionInLine(exprStartCharPos426);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart421, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine421);
|
||||
expr.setCharPositionInLine(exprStartCharPos421);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setAttr(getText(), x, expr);
|
||||
|
@ -886,13 +886,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:103:4: '$' x= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart445 = getCharIndex();
|
||||
int xStartLine445 = getLine();
|
||||
int xStartCharPos445 = getCharPositionInLine();
|
||||
int xStart440 = getCharIndex();
|
||||
int xStartLine440 = getLine();
|
||||
int xStartCharPos440 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart445, getCharIndex()-1);
|
||||
x.setLine(xStartLine445);
|
||||
x.setCharPositionInLine(xStartCharPos445);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart440, getCharIndex()-1);
|
||||
x.setLine(xStartLine440);
|
||||
x.setCharPositionInLine(xStartCharPos440);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.attr(getText(), x);
|
||||
}
|
||||
|
@ -1163,13 +1163,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:118:4: '%' a= ACTION '.' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('%'); if (state.failed) return ;
|
||||
int aStart552 = getCharIndex();
|
||||
int aStartLine552 = getLine();
|
||||
int aStartCharPos552 = getCharPositionInLine();
|
||||
int aStart547 = getCharIndex();
|
||||
int aStartLine547 = getLine();
|
||||
int aStartCharPos547 = getCharPositionInLine();
|
||||
mACTION(); if (state.failed) return ;
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart552, getCharIndex()-1);
|
||||
a.setLine(aStartLine552);
|
||||
a.setCharPositionInLine(aStartCharPos552);
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart547, getCharIndex()-1);
|
||||
a.setLine(aStartLine547);
|
||||
a.setCharPositionInLine(aStartCharPos547);
|
||||
match('.'); if (state.failed) return ;
|
||||
mID(); if (state.failed) return ;
|
||||
// ActionSplitter.g:118:24: ( WS )?
|
||||
|
@ -1191,13 +1191,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart565 = getCharIndex();
|
||||
int exprStartLine565 = getLine();
|
||||
int exprStartCharPos565 = getCharPositionInLine();
|
||||
int exprStart560 = getCharIndex();
|
||||
int exprStartLine560 = getLine();
|
||||
int exprStartCharPos560 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart565, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine565);
|
||||
expr.setCharPositionInLine(exprStartCharPos565);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart560, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine560);
|
||||
expr.setCharPositionInLine(exprStartCharPos560);
|
||||
match(';'); if (state.failed) return ;
|
||||
|
||||
}
|
||||
|
@ -1223,21 +1223,21 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:126:4: '%' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('%'); if (state.failed) return ;
|
||||
int xStart585 = getCharIndex();
|
||||
int xStartLine585 = getLine();
|
||||
int xStartCharPos585 = getCharPositionInLine();
|
||||
int xStart580 = getCharIndex();
|
||||
int xStartLine580 = getLine();
|
||||
int xStartCharPos580 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart585, getCharIndex()-1);
|
||||
x.setLine(xStartLine585);
|
||||
x.setCharPositionInLine(xStartCharPos585);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart580, getCharIndex()-1);
|
||||
x.setLine(xStartLine580);
|
||||
x.setCharPositionInLine(xStartCharPos580);
|
||||
match('.'); if (state.failed) return ;
|
||||
int yStart591 = getCharIndex();
|
||||
int yStartLine591 = getLine();
|
||||
int yStartCharPos591 = getCharPositionInLine();
|
||||
int yStart586 = getCharIndex();
|
||||
int yStartLine586 = getLine();
|
||||
int yStartCharPos586 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart591, getCharIndex()-1);
|
||||
y.setLine(yStartLine591);
|
||||
y.setCharPositionInLine(yStartCharPos591);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart586, getCharIndex()-1);
|
||||
y.setLine(yStartLine586);
|
||||
y.setCharPositionInLine(yStartCharPos586);
|
||||
// ActionSplitter.g:126:22: ( WS )?
|
||||
int alt21=2;
|
||||
int LA21_0 = input.LA(1);
|
||||
|
@ -1257,13 +1257,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart600 = getCharIndex();
|
||||
int exprStartLine600 = getLine();
|
||||
int exprStartCharPos600 = getCharPositionInLine();
|
||||
int exprStart595 = getCharIndex();
|
||||
int exprStartLine595 = getLine();
|
||||
int exprStartCharPos595 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart600, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine600);
|
||||
expr.setCharPositionInLine(exprStartCharPos600);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart595, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine595);
|
||||
expr.setCharPositionInLine(exprStartCharPos595);
|
||||
match(';'); if (state.failed) return ;
|
||||
|
||||
}
|
||||
|
@ -1287,13 +1287,13 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
// ActionSplitter.g:131:4: '%' a= ACTION
|
||||
{
|
||||
match('%'); if (state.failed) return ;
|
||||
int aStart619 = getCharIndex();
|
||||
int aStartLine619 = getLine();
|
||||
int aStartCharPos619 = getCharPositionInLine();
|
||||
int aStart614 = getCharIndex();
|
||||
int aStartLine614 = getLine();
|
||||
int aStartCharPos614 = getCharPositionInLine();
|
||||
mACTION(); if (state.failed) return ;
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart619, getCharIndex()-1);
|
||||
a.setLine(aStartLine619);
|
||||
a.setCharPositionInLine(aStartCharPos619);
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart614, getCharIndex()-1);
|
||||
a.setLine(aStartLine614);
|
||||
a.setCharPositionInLine(aStartCharPos614);
|
||||
|
||||
}
|
||||
|
||||
|
@ -2329,11 +2329,11 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
state.failed=false;
|
||||
return success;
|
||||
}
|
||||
public final boolean synpred8_ActionSplitter() {
|
||||
public final boolean synpred2_ActionSplitter() {
|
||||
state.backtracking++;
|
||||
int start = input.mark();
|
||||
try {
|
||||
synpred8_ActionSplitter_fragment(); // can never throw exception
|
||||
synpred2_ActionSplitter_fragment(); // can never throw exception
|
||||
} catch (RecognitionException re) {
|
||||
System.err.println("impossible: "+re);
|
||||
}
|
||||
|
@ -2343,11 +2343,11 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
state.failed=false;
|
||||
return success;
|
||||
}
|
||||
public final boolean synpred2_ActionSplitter() {
|
||||
public final boolean synpred8_ActionSplitter() {
|
||||
state.backtracking++;
|
||||
int start = input.mark();
|
||||
try {
|
||||
synpred2_ActionSplitter_fragment(); // can never throw exception
|
||||
synpred8_ActionSplitter_fragment(); // can never throw exception
|
||||
} catch (RecognitionException re) {
|
||||
System.err.println("impossible: "+re);
|
||||
}
|
||||
|
@ -2399,11 +2399,11 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
state.failed=false;
|
||||
return success;
|
||||
}
|
||||
public final boolean synpred3_ActionSplitter() {
|
||||
public final boolean synpred15_ActionSplitter() {
|
||||
state.backtracking++;
|
||||
int start = input.mark();
|
||||
try {
|
||||
synpred3_ActionSplitter_fragment(); // can never throw exception
|
||||
synpred15_ActionSplitter_fragment(); // can never throw exception
|
||||
} catch (RecognitionException re) {
|
||||
System.err.println("impossible: "+re);
|
||||
}
|
||||
|
@ -2413,11 +2413,11 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
state.failed=false;
|
||||
return success;
|
||||
}
|
||||
public final boolean synpred15_ActionSplitter() {
|
||||
public final boolean synpred3_ActionSplitter() {
|
||||
state.backtracking++;
|
||||
int start = input.mark();
|
||||
try {
|
||||
synpred15_ActionSplitter_fragment(); // can never throw exception
|
||||
synpred3_ActionSplitter_fragment(); // can never throw exception
|
||||
} catch (RecognitionException re) {
|
||||
System.err.println("impossible: "+re);
|
||||
}
|
||||
|
@ -2491,21 +2491,25 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
}
|
||||
}
|
||||
static final String DFA29_eotS =
|
||||
"\32\uffff";
|
||||
"\31\uffff";
|
||||
static final String DFA29_eofS =
|
||||
"\32\uffff";
|
||||
"\31\uffff";
|
||||
static final String DFA29_minS =
|
||||
"\2\0\12\uffff\1\0\3\uffff\1\0\6\uffff\1\0\2\uffff";
|
||||
"\1\0\1\uffff\1\0\2\uffff\1\0\12\uffff\1\0\1\uffff\1\0\6\uffff";
|
||||
static final String DFA29_maxS =
|
||||
"\1\uffff\1\0\12\uffff\1\0\3\uffff\1\0\6\uffff\1\0\2\uffff";
|
||||
"\1\uffff\1\uffff\1\0\2\uffff\1\0\12\uffff\1\0\1\uffff\1\0\6\uffff";
|
||||
static final String DFA29_acceptS =
|
||||
"\2\uffff\1\4\1\5\1\6\1\7\1\10\1\11\1\12\1\13\1\14\1\15\1\uffff\1"+
|
||||
"\1\1\2\1\24\1\uffff\1\16\1\17\1\20\1\21\1\22\1\23\1\uffff\1\3\1"+
|
||||
"\24";
|
||||
"\1\uffff\1\24\1\uffff\1\1\1\2\1\uffff\1\4\1\5\1\6\1\7\1\10\1\11"+
|
||||
"\1\12\1\13\1\14\1\15\1\uffff\1\3\1\uffff\1\16\1\17\1\20\1\21\1\22"+
|
||||
"\1\23";
|
||||
static final String DFA29_specialS =
|
||||
"\1\0\1\1\12\uffff\1\2\3\uffff\1\3\6\uffff\1\4\2\uffff}>";
|
||||
"\1\0\1\uffff\1\1\2\uffff\1\2\12\uffff\1\3\1\uffff\1\4\6\uffff}>";
|
||||
static final String[] DFA29_transitionS = {
|
||||
"\44\31\1\1\1\20\11\31\1\14\54\31\1\27\uffa3\31",
|
||||
"\44\1\1\5\1\22\11\1\1\2\54\1\1\20\uffa3\1",
|
||||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
|
@ -2519,17 +2523,12 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
""
|
||||
};
|
||||
|
||||
|
@ -2573,64 +2572,64 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
int LA29_0 = input.LA(1);
|
||||
|
||||
s = -1;
|
||||
if ( (LA29_0=='$') ) {s = 1;}
|
||||
if ( ((LA29_0>='\u0000' && LA29_0<='#')||(LA29_0>='&' && LA29_0<='.')||(LA29_0>='0' && LA29_0<='[')||(LA29_0>=']' && LA29_0<='\uFFFF')) ) {s = 1;}
|
||||
|
||||
else if ( (LA29_0=='/') ) {s = 12;}
|
||||
else if ( (LA29_0=='/') ) {s = 2;}
|
||||
|
||||
else if ( (LA29_0=='%') ) {s = 16;}
|
||||
else if ( (LA29_0=='$') ) {s = 5;}
|
||||
|
||||
else if ( (LA29_0=='\\') ) {s = 23;}
|
||||
else if ( (LA29_0=='\\') ) {s = 16;}
|
||||
|
||||
else if ( ((LA29_0>='\u0000' && LA29_0<='#')||(LA29_0>='&' && LA29_0<='.')||(LA29_0>='0' && LA29_0<='[')||(LA29_0>=']' && LA29_0<='\uFFFF')) ) {s = 25;}
|
||||
else if ( (LA29_0=='%') ) {s = 18;}
|
||||
|
||||
if ( s>=0 ) return s;
|
||||
break;
|
||||
case 1 :
|
||||
int LA29_1 = input.LA(1);
|
||||
int LA29_2 = input.LA(1);
|
||||
|
||||
|
||||
int index29_1 = input.index();
|
||||
int index29_2 = input.index();
|
||||
input.rewind();
|
||||
s = -1;
|
||||
if ( (synpred4_ActionSplitter()) ) {s = 2;}
|
||||
if ( (synpred1_ActionSplitter()) ) {s = 3;}
|
||||
|
||||
else if ( (synpred5_ActionSplitter()) ) {s = 3;}
|
||||
else if ( (synpred2_ActionSplitter()) ) {s = 4;}
|
||||
|
||||
else if ( (synpred6_ActionSplitter()) ) {s = 4;}
|
||||
|
||||
else if ( (synpred7_ActionSplitter()) ) {s = 5;}
|
||||
|
||||
else if ( (synpred8_ActionSplitter()) ) {s = 6;}
|
||||
|
||||
else if ( (synpred9_ActionSplitter()) ) {s = 7;}
|
||||
|
||||
else if ( (synpred10_ActionSplitter()) ) {s = 8;}
|
||||
|
||||
else if ( (synpred11_ActionSplitter()) ) {s = 9;}
|
||||
|
||||
else if ( (synpred12_ActionSplitter()) ) {s = 10;}
|
||||
|
||||
else if ( (synpred13_ActionSplitter()) ) {s = 11;}
|
||||
else if ( (true) ) {s = 1;}
|
||||
|
||||
|
||||
input.seek(index29_1);
|
||||
input.seek(index29_2);
|
||||
if ( s>=0 ) return s;
|
||||
break;
|
||||
case 2 :
|
||||
int LA29_12 = input.LA(1);
|
||||
int LA29_5 = input.LA(1);
|
||||
|
||||
|
||||
int index29_12 = input.index();
|
||||
int index29_5 = input.index();
|
||||
input.rewind();
|
||||
s = -1;
|
||||
if ( (synpred1_ActionSplitter()) ) {s = 13;}
|
||||
if ( (synpred4_ActionSplitter()) ) {s = 6;}
|
||||
|
||||
else if ( (synpred2_ActionSplitter()) ) {s = 14;}
|
||||
else if ( (synpred5_ActionSplitter()) ) {s = 7;}
|
||||
|
||||
else if ( (true) ) {s = 15;}
|
||||
else if ( (synpred6_ActionSplitter()) ) {s = 8;}
|
||||
|
||||
else if ( (synpred7_ActionSplitter()) ) {s = 9;}
|
||||
|
||||
else if ( (synpred8_ActionSplitter()) ) {s = 10;}
|
||||
|
||||
else if ( (synpred9_ActionSplitter()) ) {s = 11;}
|
||||
|
||||
else if ( (synpred10_ActionSplitter()) ) {s = 12;}
|
||||
|
||||
else if ( (synpred11_ActionSplitter()) ) {s = 13;}
|
||||
|
||||
else if ( (synpred12_ActionSplitter()) ) {s = 14;}
|
||||
|
||||
else if ( (synpred13_ActionSplitter()) ) {s = 15;}
|
||||
|
||||
|
||||
input.seek(index29_12);
|
||||
input.seek(index29_5);
|
||||
if ( s>=0 ) return s;
|
||||
break;
|
||||
case 3 :
|
||||
|
@ -2640,35 +2639,35 @@ public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
|
|||
int index29_16 = input.index();
|
||||
input.rewind();
|
||||
s = -1;
|
||||
if ( (synpred14_ActionSplitter()) ) {s = 17;}
|
||||
if ( (synpred3_ActionSplitter()) ) {s = 17;}
|
||||
|
||||
else if ( (synpred15_ActionSplitter()) ) {s = 18;}
|
||||
|
||||
else if ( (synpred16_ActionSplitter()) ) {s = 19;}
|
||||
|
||||
else if ( (synpred17_ActionSplitter()) ) {s = 20;}
|
||||
|
||||
else if ( (synpred18_ActionSplitter()) ) {s = 21;}
|
||||
|
||||
else if ( (synpred19_ActionSplitter()) ) {s = 22;}
|
||||
else if ( (true) ) {s = 1;}
|
||||
|
||||
|
||||
input.seek(index29_16);
|
||||
if ( s>=0 ) return s;
|
||||
break;
|
||||
case 4 :
|
||||
int LA29_23 = input.LA(1);
|
||||
int LA29_18 = input.LA(1);
|
||||
|
||||
|
||||
int index29_23 = input.index();
|
||||
int index29_18 = input.index();
|
||||
input.rewind();
|
||||
s = -1;
|
||||
if ( (synpred3_ActionSplitter()) ) {s = 24;}
|
||||
if ( (synpred14_ActionSplitter()) ) {s = 19;}
|
||||
|
||||
else if ( (true) ) {s = 15;}
|
||||
else if ( (synpred15_ActionSplitter()) ) {s = 20;}
|
||||
|
||||
else if ( (synpred16_ActionSplitter()) ) {s = 21;}
|
||||
|
||||
else if ( (synpred17_ActionSplitter()) ) {s = 22;}
|
||||
|
||||
else if ( (synpred18_ActionSplitter()) ) {s = 23;}
|
||||
|
||||
else if ( (synpred19_ActionSplitter()) ) {s = 24;}
|
||||
|
||||
|
||||
input.seek(index29_23);
|
||||
input.seek(index29_18);
|
||||
if ( s>=0 ) return s;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// $ANTLR ${project.version} ${buildNumber} NFABuilder.g 2010-04-19 15:55:56
|
||||
// $ANTLR ${project.version} ${buildNumber} NFABuilder.g 2010-05-16 13:12:33
|
||||
|
||||
/*
|
||||
[The "BSD license"]
|
||||
|
|
|
@ -158,7 +158,7 @@ public class Grammar implements AttributeResolver {
|
|||
// typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
|
||||
// typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
|
||||
typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
|
||||
typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
|
||||
typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "EOR");
|
||||
typeToTokenList.set(Label.NUM_FAUX_LABELS+Token.DOWN-1, "DOWN");
|
||||
typeToTokenList.set(Label.NUM_FAUX_LABELS+Token.UP-1, "UP");
|
||||
tokenNameToTypeMap.put("<INVALID>", Label.INVALID);
|
||||
|
@ -168,7 +168,7 @@ public class Grammar implements AttributeResolver {
|
|||
// tokenNameToTypeMap.put("<SET>", Label.SET);
|
||||
tokenNameToTypeMap.put("<EOT>", Label.EOT);
|
||||
tokenNameToTypeMap.put("EOF", Label.EOF);
|
||||
tokenNameToTypeMap.put("<EOR>", Label.EOR_TOKEN_TYPE);
|
||||
tokenNameToTypeMap.put("EOR", Label.EOR_TOKEN_TYPE);
|
||||
tokenNameToTypeMap.put("DOWN", Token.DOWN);
|
||||
tokenNameToTypeMap.put("UP", Token.UP);
|
||||
}
|
||||