got attribute checks done
[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6693]
commit e38535a9bc
parent 68719b8df7
@@ -0,0 +1,45 @@
/*
 [The "BSD license"]
 Copyright (c) 2005-2009 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.runtime;

/** When walking ahead with cyclic DFA or for syntactic predicates,
 *  we need to record the state of the input stream (char index,
 *  line, etc...) so that we can rewind the state after scanning ahead.
 *
 *  This is the complete state of a stream.
 */
public class CharStreamState {
    /** Index into the char stream of next lookahead char */
    public int p;

    /** What line number is the scanner at before processing buffer[p]? */
    public int line;

    /** What char position 0..n-1 in line is scanner before processing buffer[p]? */
    public int charPositionInLine;
}
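A minimal usage sketch of the save/rewind pattern the Javadoc above describes (not part of this commit; the helper names saveState/restoreState are hypothetical, and only CharStream calls already used elsewhere in this commit are assumed):

    // Snapshot the stream position before speculative lookahead...
    CharStreamState saveState(CharStream input) {
        CharStreamState st = new CharStreamState();
        st.p = input.index();                  // index of next lookahead char
        st.line = input.getLine();             // line before processing buffer[p]
        st.charPositionInLine = input.getCharPositionInLine();
        return st;
    }

    // ...and rewind to it if the speculative scan fails.
    void restoreState(CharStream input, CharStreamState st) {
        input.seek(st.p); // how line/charPositionInLine get restored depends on the CharStream implementation
    }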

@@ -0,0 +1,342 @@
/*
 [The "BSD license"]
 Copyright (c) 2005-2009 Terence Parr
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.runtime;

import org.antlr.runtime.*;

/** A lexer is a recognizer that draws input symbols from a character stream.
 *  Lexer grammars result in a subclass of this object. A Lexer object
 *  uses simplified match() and error recovery mechanisms in the interest
 *  of speed.
 */
public abstract class Lexer extends BaseRecognizer implements TokenSource {
    /** Where is the lexer drawing characters from? */
    protected CharStream input;

    public Lexer() {
    }

    public Lexer(CharStream input) {
        this.input = input;
    }

    public Lexer(CharStream input, RecognizerSharedState state) {
        super(state);
        this.input = input;
    }

    public void reset() {
        super.reset(); // reset all recognizer state variables
        // wack Lexer state variables
        if ( input!=null ) {
            input.seek(0); // rewind the input
        }
        if ( state==null ) {
            return; // no shared state work to do
        }
        state.token = null;
        state.type = Token.INVALID_TOKEN_TYPE;
        state.channel = Token.DEFAULT_CHANNEL;
        state.tokenStartCharIndex = -1;
        state.tokenStartCharPositionInLine = -1;
        state.tokenStartLine = -1;
        state.text = null;
    }

    /** Return a token from this source; i.e., match a token on the char
     *  stream.
     */
    public Token nextToken() {
        while (true) {
            state.token = null;
            state.channel = Token.DEFAULT_CHANNEL;
            state.tokenStartCharIndex = input.index();
            state.tokenStartCharPositionInLine = input.getCharPositionInLine();
            state.tokenStartLine = input.getLine();
            state.text = null;
            if ( input.LA(1)==CharStream.EOF ) {
                Token eof = new CommonToken((CharStream)input,Token.EOF,
                                            Token.DEFAULT_CHANNEL,
                                            input.index(),input.index());
                eof.setLine(getLine());
                eof.setCharPositionInLine(getCharPositionInLine());
                return eof;
            }
            try {
                mTokens();
                if ( state.token==null ) {
                    emit();
                }
                else if ( state.token==Token.SKIP_TOKEN ) {
                    continue;
                }
                return state.token;
            }
            catch (NoViableAltException nva) {
                reportError(nva);
                recover(nva); // throw out current char and try again
            }
            catch (RecognitionException re) {
                reportError(re);
                // match() routine has already called recover()
            }
        }
    }

    /** Instruct the lexer to skip creating a token for current lexer rule
     *  and look for another token. nextToken() knows to keep looking when
     *  a lexer rule finishes with token set to SKIP_TOKEN. Recall that
     *  if token==null at end of any token rule, it creates one for you
     *  and emits it.
     */
    public void skip() {
        state.token = Token.SKIP_TOKEN;
    }

    /** This is the lexer entry point that sets instance var 'token' */
    public abstract void mTokens() throws RecognitionException;

    /** Set the char stream and reset the lexer */
    public void setCharStream(CharStream input) {
        this.input = null;
        reset();
        this.input = input;
    }

    public CharStream getCharStream() {
        return this.input;
    }

    public String getSourceName() {
        return input.getSourceName();
    }

    /** Currently does not support multiple emits per nextToken invocation
     *  for efficiency reasons. Subclass and override this method and
     *  nextToken (to push tokens into a list and pull from that list rather
     *  than a single variable as this implementation does).
     */
    public void emit(Token token) {
        state.token = token;
    }

    /** The standard method called to automatically emit a token at the
     *  outermost lexical rule. The token object should point into the
     *  char buffer start..stop. If there is a text override in 'text',
     *  use that to set the token's text. Override this method to emit
     *  custom Token objects.
     *
     *  If you are building trees, then you should also override
     *  Parser or TreeParser.getMissingSymbol().
     */
    public Token emit() {
        Token t = new CommonToken(input, state.type, state.channel, state.tokenStartCharIndex, getCharIndex()-1);
        t.setLine(state.tokenStartLine);
        t.setText(state.text);
        t.setCharPositionInLine(state.tokenStartCharPositionInLine);
        emit(t);
        return t;
    }

    public void match(String s) throws MismatchedTokenException {
        int i = 0;
        while ( i<s.length() ) {
            if ( input.LA(1)!=s.charAt(i) ) {
                if ( state.backtracking>0 ) {
                    state.failed = true;
                    return;
                }
                MismatchedTokenException mte =
                    new MismatchedTokenException(s.charAt(i), input);
                recover(mte);
                throw mte;
            }
            i++;
            input.consume();
            state.failed = false;
        }
    }

    public void matchAny() {
        input.consume();
    }

    public void match(int c) throws MismatchedTokenException {
        if ( input.LA(1)!=c ) {
            if ( state.backtracking>0 ) {
                state.failed = true;
                return;
            }
            MismatchedTokenException mte =
                new MismatchedTokenException(c, input);
            recover(mte); // don't really recover; just consume in lexer
            throw mte;
        }
        input.consume();
        state.failed = false;
    }

    public void matchRange(int a, int b)
        throws MismatchedRangeException
    {
        if ( input.LA(1)<a || input.LA(1)>b ) {
            if ( state.backtracking>0 ) {
                state.failed = true;
                return;
            }
            MismatchedRangeException mre =
                new MismatchedRangeException(a,b,input);
            recover(mre);
            throw mre;
        }
        input.consume();
        state.failed = false;
    }

    public int getLine() {
        return input.getLine();
    }

    public int getCharPositionInLine() {
        return input.getCharPositionInLine();
    }

    /** What is the index of the current character of lookahead? */
    public int getCharIndex() {
        return input.index();
    }

    /** Return the text matched so far for the current token or any
     *  text override.
     */
    public String getText() {
        if ( state.text!=null ) {
            return state.text;
        }
        return input.substring(state.tokenStartCharIndex,getCharIndex()-1);
    }

    /** Set the complete text of this token; it wipes any previous
     *  changes to the text.
     */
    public void setText(String text) {
        state.text = text;
    }

    public void reportError(RecognitionException e) {
        /** TODO: not thought about recovery in lexer yet.
         *
        // if we've already reported an error and have not matched a token
        // yet successfully, don't report any errors.
        if ( errorRecovery ) {
            //System.err.print("[SPURIOUS] ");
            return;
        }
        errorRecovery = true;
        */

        displayRecognitionError(this.getTokenNames(), e);
    }

    public String getErrorMessage(RecognitionException e, String[] tokenNames) {
        String msg = null;
        if ( e instanceof MismatchedTokenException ) {
            MismatchedTokenException mte = (MismatchedTokenException)e;
            msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting "+getCharErrorDisplay(mte.expecting);
        }
        else if ( e instanceof NoViableAltException ) {
            NoViableAltException nvae = (NoViableAltException)e;
            // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
            // and "(decision="+nvae.decisionNumber+") and
            // "state "+nvae.stateNumber
            msg = "no viable alternative at character "+getCharErrorDisplay(e.c);
        }
        else if ( e instanceof EarlyExitException ) {
            EarlyExitException eee = (EarlyExitException)e;
            // for development, can add "(decision="+eee.decisionNumber+")"
            msg = "required (...)+ loop did not match anything at character "+getCharErrorDisplay(e.c);
        }
        else if ( e instanceof MismatchedNotSetException ) {
            MismatchedNotSetException mse = (MismatchedNotSetException)e;
            msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
        }
        else if ( e instanceof MismatchedSetException ) {
            MismatchedSetException mse = (MismatchedSetException)e;
            msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
        }
        else if ( e instanceof MismatchedRangeException ) {
            MismatchedRangeException mre = (MismatchedRangeException)e;
            msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+
                  getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b);
        }
        else {
            msg = super.getErrorMessage(e, tokenNames);
        }
        return msg;
    }

    public String getCharErrorDisplay(int c) {
        String s = String.valueOf((char)c);
        switch ( c ) {
            case Token.EOF :
                s = "<EOF>";
                break;
            case '\n' :
                s = "\\n";
                break;
            case '\t' :
                s = "\\t";
                break;
            case '\r' :
                s = "\\r";
                break;
        }
        return "'"+s+"'";
    }

    /** A lexer can normally match any char in its vocabulary after matching
     *  a token, so do the easy thing and just kill a character and hope
     *  it all works out. You can instead use the rule invocation stack
     *  to do sophisticated error recovery if you are in a fragment rule.
     */
    public void recover(RecognitionException re) {
        //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
        //re.printStackTrace();
        input.consume();
    }

    public void traceIn(String ruleName, int ruleIndex) {
        String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
        super.traceIn(ruleName, ruleIndex, inputSymbol);
    }

    public void traceOut(String ruleName, int ruleIndex) {
        String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
        super.traceOut(ruleName, ruleIndex, inputSymbol);
    }
}
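A minimal sketch of driving the class above from application code. MyLexer stands in for any generated subclass that implements mTokens(), and ANTLRStringStream is the plain in-memory CharStream from the org.antlr.runtime package imported here; both names are assumptions for illustration, not part of this commit:

    CharStream chars = new ANTLRStringStream("input to tokenize");
    Lexer lexer = new MyLexer(chars);        // hypothetical generated subclass
    for (Token t = lexer.nextToken();        // nextToken() loops until a rule emits or EOF is hit
         t.getType() != Token.EOF;
         t = lexer.nextToken()) {
        System.out.println(t);               // rules that call skip() never surface here
    }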

@@ -119,7 +119,7 @@ ATTRIBUTE_REF_NOT_IN_RULE(arg,arg2) ::=
UNKNOWN_ATTRIBUTE_IN_SCOPE(arg,arg2) ::=
    "attribute <arg> isn't a valid property in <arg2>"
UNKNOWN_RULE_ATTRIBUTE(arg,arg2) ::=
    "unknown attribute for rule <arg>: <arg2>"
    "unknown attribute rule <arg> in <arg2>"
UNKNOWN_SIMPLE_ATTRIBUTE(arg,arg2) ::=
    "unknown attribute reference <arg> in <arg2>"
ISOLATED_RULE_SCOPE(arg,arg2) ::=
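For context, these entries are StringTemplate message templates; the <arg> and <arg2> holes are filled in when the tool reports the corresponding attribute error. A rough illustration of how one of them becomes a message, assuming the StringTemplate v4 API (org.stringtemplate.v4.ST) and made-up argument values:

    ST msg = new ST("unknown attribute for rule <arg>: <arg2>");
    msg.add("arg", "r");              // hypothetical value for the first template argument
    msg.add("arg2", "foo");           // hypothetical value for the second
    String rendered = msg.render();   // "unknown attribute for rule r: foo"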

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 ANTLRLexer.g 2010-02-12 16:46:34
// $ANTLR ${project.version} ${buildNumber} ANTLRLexer.g 2010-02-15 12:12:15

/*
 [The "BSD licence"]

@@ -31,6 +31,11 @@ package org.antlr.v4.parse;


import org.antlr.runtime.*;
import java.util.Stack;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
/** Read in an ANTLR grammar and build an AST. Try not to do
 *  any actions, just build the tree.
 *

@ -640,8 +645,12 @@ public class ANTLRLexer extends Lexer {
|
|||
// ANTLRLexer.g:263:14: ( '\"' )=>as= ACTION_STRING_LITERAL
|
||||
{
|
||||
int asStart1319 = getCharIndex();
|
||||
int asStartLine1319 = getLine();
|
||||
int asStartCharPos1319 = getCharPositionInLine();
|
||||
mACTION_STRING_LITERAL(); if (state.failed) return ;
|
||||
as = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, asStart1319, getCharIndex()-1);
|
||||
as.setLine(asStartLine1319);
|
||||
as.setCharPositionInLine(asStartCharPos1319);
|
||||
if ( state.backtracking==0 ) {
|
||||
|
||||
// Append the embedded string literal test
|
||||
|
@ -656,8 +665,12 @@ public class ANTLRLexer extends Lexer {
|
|||
// ANTLRLexer.g:270:14: ( '\\'' )=>ac= ACTION_CHAR_LITERAL
|
||||
{
|
||||
int acStart1370 = getCharIndex();
|
||||
int acStartLine1370 = getLine();
|
||||
int acStartCharPos1370 = getCharPositionInLine();
|
||||
mACTION_CHAR_LITERAL(); if (state.failed) return ;
|
||||
ac = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, acStart1370, getCharIndex()-1);
|
||||
ac.setLine(acStartLine1370);
|
||||
ac.setCharPositionInLine(acStartCharPos1370);
|
||||
if ( state.backtracking==0 ) {
|
||||
|
||||
// Append the embedded chracter literal text
|
||||
|
@ -2181,8 +2194,12 @@ public class ANTLRLexer extends Lexer {
|
|||
} while (true);
|
||||
|
||||
int fileStart3568 = getCharIndex();
|
||||
int fileStartLine3568 = getLine();
|
||||
int fileStartCharPos3568 = getCharPositionInLine();
|
||||
mACTION_STRING_LITERAL(); if (state.failed) return ;
|
||||
file = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, fileStart3568, getCharIndex()-1);
|
||||
file.setLine(fileStartLine3568);
|
||||
file.setCharPositionInLine(fileStartCharPos3568);
|
||||
// ANTLRLexer.g:545:49: ( WSCHARS )+
|
||||
int cnt22=0;
|
||||
loop22:
|
||||
|
@ -2215,8 +2232,12 @@ public class ANTLRLexer extends Lexer {
|
|||
} while (true);
|
||||
|
||||
int lineStart3575 = getCharIndex();
|
||||
int lineStartLine3575 = getLine();
|
||||
int lineStartCharPos3575 = getCharPositionInLine();
|
||||
mINT(); if (state.failed) return ;
|
||||
line = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, lineStart3575, getCharIndex()-1);
|
||||
line.setLine(lineStartLine3575);
|
||||
line.setCharPositionInLine(lineStartCharPos3575);
|
||||
if ( state.backtracking==0 ) {
|
||||
|
||||
// TODO: Add target specific code to change the source file name and current line number
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 ANTLRParser.g 2010-02-12 16:46:36
|
||||
// $ANTLR ${project.version} ${buildNumber} ANTLRParser.g 2010-02-15 12:12:17
|
||||
|
||||
/*
|
||||
[The "BSD licence"]
|
||||
|
@ -29,12 +29,17 @@
|
|||
*/
|
||||
package org.antlr.v4.parse;
|
||||
|
||||
import org.antlr.runtime.*;
|
||||
import org.antlr.runtime.tree.*;
|
||||
import org.antlr.v4.tool.*;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.antlr.runtime.*;
|
||||
import java.util.Stack;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
|
||||
import org.antlr.runtime.tree.*;
|
||||
|
||||
/** The definitive ANTLR v3 grammar to parse ANTLR v4 grammars.
|
||||
* The grammar builds ASTs that are sniffed by subsequent stages.
|
||||
|
@ -284,7 +289,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rules, DOC_COMMENT, id, grammarType, prequelConstruct
|
||||
// elements: id, grammarType, prequelConstruct, DOC_COMMENT, rules
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1404,7 +1409,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: TOKENS, tokenSpec
|
||||
// elements: tokenSpec, TOKENS
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1559,7 +1564,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: STRING_LITERAL, id, ASSIGN
|
||||
// elements: id, ASSIGN, STRING_LITERAL
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -1853,7 +1858,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: AT, actionScopeName, id, ACTION
|
||||
// elements: actionScopeName, id, AT, ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -2353,7 +2358,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: exceptionGroup, DOC_COMMENT, id, ARG_ACTION, rulePrequel, altListAsBlock, ruleReturns, ruleModifiers
|
||||
// elements: id, ruleReturns, DOC_COMMENT, rulePrequel, ARG_ACTION, ruleModifiers, altListAsBlock, exceptionGroup
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -2579,7 +2584,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ARG_ACTION, CATCH, ACTION
|
||||
// elements: CATCH, ARG_ACTION, ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -2976,7 +2981,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: qid, THROWS
|
||||
// elements: THROWS, qid
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -3106,7 +3111,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ACTION, SCOPE
|
||||
// elements: SCOPE, ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -3185,7 +3190,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: SCOPE, id
|
||||
// elements: id, SCOPE
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -3284,7 +3289,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: AT, ACTION, id
|
||||
// elements: ACTION, id, AT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -4279,7 +4284,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ebnfSuffix, atom
|
||||
// elements: atom, ebnfSuffix
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -5450,7 +5455,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ruleref, DOT, id
|
||||
// elements: ruleref, id, DOT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -5508,7 +5513,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: terminal, id, DOT
|
||||
// elements: id, terminal, DOT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -5707,7 +5712,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: NOT, notTerminal
|
||||
// elements: notTerminal, NOT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -5750,7 +5755,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: block, NOT
|
||||
// elements: NOT, block
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -5984,7 +5989,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: altList, ra, optionsSpec
|
||||
// elements: optionsSpec, altList, ra
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6159,7 +6164,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: RULE_REF, op, ARG_ACTION
|
||||
// elements: ARG_ACTION, RULE_REF, op
|
||||
// token labels: op
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6206,7 +6211,7 @@ public class ANTLRParser extends Parser {
|
|||
{
|
||||
|
||||
// AST REWRITE
|
||||
// elements: RULE_REF, ARG_ACTION
|
||||
// elements: ARG_ACTION, RULE_REF
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6512,7 +6517,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ARG_ACTION, elementOptions, TOKEN_REF
|
||||
// elements: TOKEN_REF, elementOptions, ARG_ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6704,7 +6709,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ROOT, terminal
|
||||
// elements: terminal, ROOT
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -6741,7 +6746,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: terminal, BANG
|
||||
// elements: BANG, terminal
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -7193,7 +7198,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: nakedRewrite, predicatedRewrite
|
||||
// elements: predicatedRewrite, nakedRewrite
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8062,7 +8067,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: elementOptions, TOKEN_REF, ARG_ACTION
|
||||
// elements: TOKEN_REF, elementOptions, ARG_ACTION
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8146,7 +8151,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: elementOptions, STRING_LITERAL
|
||||
// elements: STRING_LITERAL, elementOptions
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8303,7 +8308,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: ebnfSuffix, rewriteTreeAlt
|
||||
// elements: rewriteTreeAlt, ebnfSuffix
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8440,7 +8445,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: TREE_BEGIN, rewriteTreeElement, rewriteTreeAtom
|
||||
// elements: rewriteTreeAtom, TREE_BEGIN, rewriteTreeElement
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8595,7 +8600,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rewriteTemplateArgs, str, TEMPLATE
|
||||
// elements: str, rewriteTemplateArgs, TEMPLATE
|
||||
// token labels: str
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
@ -8856,7 +8861,7 @@ public class ANTLRParser extends Parser {
|
|||
|
||||
|
||||
// AST REWRITE
|
||||
// elements: rewriteTemplateArgs, ACTION
|
||||
// elements: ACTION, rewriteTemplateArgs
|
||||
// token labels:
|
||||
// rule labels: retval
|
||||
// token list labels:
|
||||
|
|
|

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 ASTVerifier.g 2010-02-12 16:46:37
// $ANTLR ${project.version} ${buildNumber} ASTVerifier.g 2010-02-15 12:12:18

/*
 [The "BSD license"]

@@ -26,15 +26,14 @@
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.parse;
import org.antlr.v4.tool.*;
import org.antlr.v4.runtime.tree.CommonTree; // use updated v4 one not v3


import org.antlr.runtime.*;
import org.antlr.runtime.tree.TreeNodeStream;
import org.antlr.runtime.tree.TreeParser;
import org.antlr.runtime.tree.TreeRuleReturnScope;
import org.antlr.v4.runtime.tree.CommonTree;
import org.antlr.v4.tool.GrammarAST;

import org.antlr.runtime.tree.*;import java.util.Stack;
import java.util.List;
import java.util.ArrayList;

/** The definitive ANTLR v3 tree grammar to parse ANTLR v4 grammars.
 *  Parses trees created in ANTLRParser.g.

@@ -1,6 +1,6 @@
lexer grammar ActionSplitter;

options { filter=true; }
options { filter=true; superClass='org.antlr.v4.runtime.Lexer'; }

@header {
package org.antlr.v4.parse;

@@ -1,13 +1,17 @@
// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 ActionSplitter.g 2010-02-12 16:46:37
// $ANTLR ${project.version} ${buildNumber} ActionSplitter.g 2010-02-15 12:12:18

package org.antlr.v4.parse;
import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.tool.*;


import org.antlr.runtime.*;
import org.antlr.v4.runtime.CommonToken;

import java.util.ArrayList;
import java.util.Stack;
import java.util.List;
public class ActionSplitter extends Lexer {
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
public class ActionSplitter extends org.antlr.v4.runtime.Lexer {
    public static final int INDIRECT_TEMPLATE_INSTANCE=23;
    public static final int LINE_COMMENT=5;
    public static final int DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR=15;

@ -275,13 +279,21 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:47:4: '$' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart112 = getCharIndex();
|
||||
int xStart117 = getCharIndex();
|
||||
int xStartLine117 = getLine();
|
||||
int xStartCharPos117 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart112, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart117, getCharIndex()-1);
|
||||
x.setLine(xStartLine117);
|
||||
x.setCharPositionInLine(xStartCharPos117);
|
||||
match('.'); if (state.failed) return ;
|
||||
int yStart118 = getCharIndex();
|
||||
int yStart123 = getCharIndex();
|
||||
int yStartLine123 = getLine();
|
||||
int yStartCharPos123 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart118, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart123, getCharIndex()-1);
|
||||
y.setLine(yStartLine123);
|
||||
y.setCharPositionInLine(yStartCharPos123);
|
||||
// ActionSplitter.g:47:22: ( WS )?
|
||||
int alt4=2;
|
||||
int LA4_0 = input.LA(1);
|
||||
|
@ -301,9 +313,13 @@ public class ActionSplitter extends Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart127 = getCharIndex();
|
||||
int exprStart132 = getCharIndex();
|
||||
int exprStartLine132 = getLine();
|
||||
int exprStartCharPos132 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart127, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart132, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine132);
|
||||
expr.setCharPositionInLine(exprStartCharPos132);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setQualifiedAttr(getText(), x, y, expr);
|
||||
|
@ -331,13 +347,21 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:52:4: '$' x= ID '.' y= ID {...}?
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart148 = getCharIndex();
|
||||
int xStart153 = getCharIndex();
|
||||
int xStartLine153 = getLine();
|
||||
int xStartCharPos153 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart148, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart153, getCharIndex()-1);
|
||||
x.setLine(xStartLine153);
|
||||
x.setCharPositionInLine(xStartCharPos153);
|
||||
match('.'); if (state.failed) return ;
|
||||
int yStart154 = getCharIndex();
|
||||
int yStart159 = getCharIndex();
|
||||
int yStartLine159 = getLine();
|
||||
int yStartCharPos159 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart154, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart159, getCharIndex()-1);
|
||||
y.setLine(yStartLine159);
|
||||
y.setCharPositionInLine(yStartCharPos159);
|
||||
if ( !((input.LA(1)!='(')) ) {
|
||||
if (state.backtracking>0) {state.failed=true; return ;}
|
||||
throw new FailedPredicateException(input, "QUALIFIED_ATTR", "input.LA(1)!='('");
|
||||
|
@ -369,14 +393,22 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:56:4: '$' x= ID '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart173 = getCharIndex();
|
||||
int xStart178 = getCharIndex();
|
||||
int xStartLine178 = getLine();
|
||||
int xStartCharPos178 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart173, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart178, getCharIndex()-1);
|
||||
x.setLine(xStartLine178);
|
||||
x.setCharPositionInLine(xStartCharPos178);
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart179 = getCharIndex();
|
||||
int yStart184 = getCharIndex();
|
||||
int yStartLine184 = getLine();
|
||||
int yStartCharPos184 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart179, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart184, getCharIndex()-1);
|
||||
y.setLine(yStartLine184);
|
||||
y.setCharPositionInLine(yStartCharPos184);
|
||||
// ActionSplitter.g:56:23: ( WS )?
|
||||
int alt5=2;
|
||||
int LA5_0 = input.LA(1);
|
||||
|
@ -396,9 +428,13 @@ public class ActionSplitter extends Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart188 = getCharIndex();
|
||||
int exprStart193 = getCharIndex();
|
||||
int exprStartLine193 = getLine();
|
||||
int exprStartCharPos193 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart188, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart193, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine193);
|
||||
expr.setCharPositionInLine(exprStartCharPos193);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setDynamicScopeAttr(getText(), x, y, expr);
|
||||
|
@ -426,14 +462,22 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:61:4: '$' x= ID '::' y= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart209 = getCharIndex();
|
||||
int xStart214 = getCharIndex();
|
||||
int xStartLine214 = getLine();
|
||||
int xStartCharPos214 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart209, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart214, getCharIndex()-1);
|
||||
x.setLine(xStartLine214);
|
||||
x.setCharPositionInLine(xStartCharPos214);
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart215 = getCharIndex();
|
||||
int yStart220 = getCharIndex();
|
||||
int yStartLine220 = getLine();
|
||||
int yStartCharPos220 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart215, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart220, getCharIndex()-1);
|
||||
y.setLine(yStartLine220);
|
||||
y.setCharPositionInLine(yStartCharPos220);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.dynamicScopeAttr(getText(), x, y);
|
||||
}
|
||||
|
@ -462,20 +506,32 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:73:4: '$' x= ID '[' '-' index= SCOPE_INDEX_EXPR ']' '::' y= ID ( WS )? ( '=' expr= ATTR_VALUE_EXPR ';' )?
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart234 = getCharIndex();
|
||||
int xStart239 = getCharIndex();
|
||||
int xStartLine239 = getLine();
|
||||
int xStartCharPos239 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart234, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart239, getCharIndex()-1);
|
||||
x.setLine(xStartLine239);
|
||||
x.setCharPositionInLine(xStartCharPos239);
|
||||
match('['); if (state.failed) return ;
|
||||
match('-'); if (state.failed) return ;
|
||||
int indexStart242 = getCharIndex();
|
||||
int indexStart247 = getCharIndex();
|
||||
int indexStartLine247 = getLine();
|
||||
int indexStartCharPos247 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart242, getCharIndex()-1);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart247, getCharIndex()-1);
|
||||
index.setLine(indexStartLine247);
|
||||
index.setCharPositionInLine(indexStartCharPos247);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart250 = getCharIndex();
|
||||
int yStart255 = getCharIndex();
|
||||
int yStartLine255 = getLine();
|
||||
int yStartCharPos255 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart250, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart255, getCharIndex()-1);
|
||||
y.setLine(yStartLine255);
|
||||
y.setCharPositionInLine(yStartCharPos255);
|
||||
// ActionSplitter.g:74:3: ( WS )?
|
||||
int alt6=2;
|
||||
int LA6_0 = input.LA(1);
|
||||
|
@ -506,9 +562,13 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:74:8: '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart262 = getCharIndex();
|
||||
int exprStart267 = getCharIndex();
|
||||
int exprStartLine267 = getLine();
|
||||
int exprStartCharPos267 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart262, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart267, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine267);
|
||||
expr.setCharPositionInLine(exprStartCharPos267);
|
||||
match(';'); if (state.failed) return ;
|
||||
|
||||
}
|
||||
|
@ -543,20 +603,32 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:79:4: '$' x= ID '[' '-' index= SCOPE_INDEX_EXPR ']' '::' y= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart285 = getCharIndex();
|
||||
int xStart290 = getCharIndex();
|
||||
int xStartLine290 = getLine();
|
||||
int xStartCharPos290 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart285, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart290, getCharIndex()-1);
|
||||
x.setLine(xStartLine290);
|
||||
x.setCharPositionInLine(xStartCharPos290);
|
||||
match('['); if (state.failed) return ;
|
||||
match('-'); if (state.failed) return ;
|
||||
int indexStart293 = getCharIndex();
|
||||
int indexStart298 = getCharIndex();
|
||||
int indexStartLine298 = getLine();
|
||||
int indexStartCharPos298 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart293, getCharIndex()-1);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart298, getCharIndex()-1);
|
||||
index.setLine(indexStartLine298);
|
||||
index.setCharPositionInLine(indexStartCharPos298);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart301 = getCharIndex();
|
||||
int yStart306 = getCharIndex();
|
||||
int yStartLine306 = getLine();
|
||||
int yStartCharPos306 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart301, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart306, getCharIndex()-1);
|
||||
y.setLine(yStartLine306);
|
||||
y.setCharPositionInLine(yStartCharPos306);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.dynamicNegativeIndexedScopeAttr(getText(), x, y, index);
|
||||
}
|
||||
|
@ -585,19 +657,31 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:84:4: '$' x= ID '[' index= SCOPE_INDEX_EXPR ']' '::' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart320 = getCharIndex();
|
||||
int xStart325 = getCharIndex();
|
||||
int xStartLine325 = getLine();
|
||||
int xStartCharPos325 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart320, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart325, getCharIndex()-1);
|
||||
x.setLine(xStartLine325);
|
||||
x.setCharPositionInLine(xStartCharPos325);
|
||||
match('['); if (state.failed) return ;
|
||||
int indexStart326 = getCharIndex();
|
||||
int indexStart331 = getCharIndex();
|
||||
int indexStartLine331 = getLine();
|
||||
int indexStartCharPos331 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart326, getCharIndex()-1);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart331, getCharIndex()-1);
|
||||
index.setLine(indexStartLine331);
|
||||
index.setCharPositionInLine(indexStartCharPos331);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart334 = getCharIndex();
|
||||
int yStart339 = getCharIndex();
|
||||
int yStartLine339 = getLine();
|
||||
int yStartCharPos339 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart334, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart339, getCharIndex()-1);
|
||||
y.setLine(yStartLine339);
|
||||
y.setCharPositionInLine(yStartCharPos339);
|
||||
// ActionSplitter.g:85:3: ( WS )?
|
||||
int alt8=2;
|
||||
int LA8_0 = input.LA(1);
|
||||
|
@ -617,9 +701,13 @@ public class ActionSplitter extends Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart345 = getCharIndex();
|
||||
int exprStart350 = getCharIndex();
|
||||
int exprStartLine350 = getLine();
|
||||
int exprStartCharPos350 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart345, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart350, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine350);
|
||||
expr.setCharPositionInLine(exprStartCharPos350);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setDynamicAbsoluteIndexedScopeAttr(getText(), x, y, index, expr);
|
||||
|
@ -648,19 +736,31 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:90:4: '$' x= ID '[' index= SCOPE_INDEX_EXPR ']' '::' y= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart366 = getCharIndex();
|
||||
int xStart371 = getCharIndex();
|
||||
int xStartLine371 = getLine();
|
||||
int xStartCharPos371 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart366, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart371, getCharIndex()-1);
|
||||
x.setLine(xStartLine371);
|
||||
x.setCharPositionInLine(xStartCharPos371);
|
||||
match('['); if (state.failed) return ;
|
||||
int indexStart372 = getCharIndex();
|
||||
int indexStart377 = getCharIndex();
|
||||
int indexStartLine377 = getLine();
|
||||
int indexStartCharPos377 = getCharPositionInLine();
|
||||
mSCOPE_INDEX_EXPR(); if (state.failed) return ;
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart372, getCharIndex()-1);
|
||||
index = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, indexStart377, getCharIndex()-1);
|
||||
index.setLine(indexStartLine377);
|
||||
index.setCharPositionInLine(indexStartCharPos377);
|
||||
match(']'); if (state.failed) return ;
|
||||
match("::"); if (state.failed) return ;
|
||||
|
||||
int yStart380 = getCharIndex();
|
||||
int yStart385 = getCharIndex();
|
||||
int yStartLine385 = getLine();
|
||||
int yStartCharPos385 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart380, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart385, getCharIndex()-1);
|
||||
y.setLine(yStartLine385);
|
||||
y.setCharPositionInLine(yStartCharPos385);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.dynamicAbsoluteIndexedScopeAttr(getText(), x, y, index);
|
||||
}
|
||||
|
@ -687,9 +787,13 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:95:4: '$' x= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart399 = getCharIndex();
|
||||
int xStart404 = getCharIndex();
|
||||
int xStartLine404 = getLine();
|
||||
int xStartCharPos404 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart399, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart404, getCharIndex()-1);
|
||||
x.setLine(xStartLine404);
|
||||
x.setCharPositionInLine(xStartCharPos404);
|
||||
// ActionSplitter.g:95:13: ( WS )?
|
||||
int alt9=2;
|
||||
int LA9_0 = input.LA(1);
|
||||
|
@ -709,9 +813,13 @@ public class ActionSplitter extends Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart408 = getCharIndex();
|
||||
int exprStart413 = getCharIndex();
|
||||
int exprStartLine413 = getLine();
|
||||
int exprStartCharPos413 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart408, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart413, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine413);
|
||||
expr.setCharPositionInLine(exprStartCharPos413);
|
||||
match(';'); if (state.failed) return ;
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.setAttr(getText(), x, expr);
|
||||
|
@ -738,9 +846,13 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:99:4: '$' x= ID
|
||||
{
|
||||
match('$'); if (state.failed) return ;
|
||||
int xStart427 = getCharIndex();
|
||||
int xStart432 = getCharIndex();
|
||||
int xStartLine432 = getLine();
|
||||
int xStartCharPos432 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart427, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart432, getCharIndex()-1);
|
||||
x.setLine(xStartLine432);
|
||||
x.setCharPositionInLine(xStartCharPos432);
|
||||
if ( state.backtracking==1 ) {
|
||||
delegate.attr(getText(), x);
|
||||
}
|
||||
|
@ -1011,9 +1123,13 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:114:4: '%' a= ACTION '.' ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('%'); if (state.failed) return ;
|
||||
int aStart534 = getCharIndex();
|
||||
int aStart539 = getCharIndex();
|
||||
int aStartLine539 = getLine();
|
||||
int aStartCharPos539 = getCharPositionInLine();
|
||||
mACTION(); if (state.failed) return ;
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart534, getCharIndex()-1);
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart539, getCharIndex()-1);
|
||||
a.setLine(aStartLine539);
|
||||
a.setCharPositionInLine(aStartCharPos539);
|
||||
match('.'); if (state.failed) return ;
|
||||
mID(); if (state.failed) return ;
|
||||
// ActionSplitter.g:114:24: ( WS )?
|
||||
|
@ -1035,9 +1151,13 @@ public class ActionSplitter extends Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart547 = getCharIndex();
|
||||
int exprStart552 = getCharIndex();
|
||||
int exprStartLine552 = getLine();
|
||||
int exprStartCharPos552 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart547, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart552, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine552);
|
||||
expr.setCharPositionInLine(exprStartCharPos552);
|
||||
match(';'); if (state.failed) return ;
|
||||
|
||||
}
|
||||
|
@ -1063,13 +1183,21 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:122:4: '%' x= ID '.' y= ID ( WS )? '=' expr= ATTR_VALUE_EXPR ';'
|
||||
{
|
||||
match('%'); if (state.failed) return ;
|
||||
int xStart567 = getCharIndex();
|
||||
int xStart572 = getCharIndex();
|
||||
int xStartLine572 = getLine();
|
||||
int xStartCharPos572 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart567, getCharIndex()-1);
|
||||
x = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, xStart572, getCharIndex()-1);
|
||||
x.setLine(xStartLine572);
|
||||
x.setCharPositionInLine(xStartCharPos572);
|
||||
match('.'); if (state.failed) return ;
|
||||
int yStart573 = getCharIndex();
|
||||
int yStart578 = getCharIndex();
|
||||
int yStartLine578 = getLine();
|
||||
int yStartCharPos578 = getCharPositionInLine();
|
||||
mID(); if (state.failed) return ;
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart573, getCharIndex()-1);
|
||||
y = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, yStart578, getCharIndex()-1);
|
||||
y.setLine(yStartLine578);
|
||||
y.setCharPositionInLine(yStartCharPos578);
|
||||
// ActionSplitter.g:122:22: ( WS )?
|
||||
int alt21=2;
|
||||
int LA21_0 = input.LA(1);
|
||||
|
@ -1089,9 +1217,13 @@ public class ActionSplitter extends Lexer {
|
|||
}
|
||||
|
||||
match('='); if (state.failed) return ;
|
||||
int exprStart582 = getCharIndex();
|
||||
int exprStart587 = getCharIndex();
|
||||
int exprStartLine587 = getLine();
|
||||
int exprStartCharPos587 = getCharPositionInLine();
|
||||
mATTR_VALUE_EXPR(); if (state.failed) return ;
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart582, getCharIndex()-1);
|
||||
expr = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, exprStart587, getCharIndex()-1);
|
||||
expr.setLine(exprStartLine587);
|
||||
expr.setCharPositionInLine(exprStartCharPos587);
|
||||
match(';'); if (state.failed) return ;
|
||||
|
||||
}
|
||||
|
@ -1115,9 +1247,13 @@ public class ActionSplitter extends Lexer {
|
|||
// ActionSplitter.g:127:4: '%' a= ACTION
|
||||
{
|
||||
match('%'); if (state.failed) return ;
|
||||
int aStart601 = getCharIndex();
|
||||
int aStart606 = getCharIndex();
|
||||
int aStartLine606 = getLine();
|
||||
int aStartCharPos606 = getCharPositionInLine();
|
||||
mACTION(); if (state.failed) return ;
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart601, getCharIndex()-1);
|
||||
a = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, aStart606, getCharIndex()-1);
|
||||
a.setLine(aStartLine606);
|
||||
a.setCharPositionInLine(aStartCharPos606);
|
||||
|
||||
}
|
||||
|
||||
|
@ -2130,11 +2266,11 @@ public class ActionSplitter extends Lexer {
|
|||
state.failed=false;
|
||||
return success;
|
||||
}
|
||||
public final boolean synpred8_ActionSplitter() {
|
||||
public final boolean synpred2_ActionSplitter() {
|
||||
state.backtracking++;
|
||||
int start = input.mark();
|
||||
try {
|
||||
synpred8_ActionSplitter_fragment(); // can never throw exception
|
||||
synpred2_ActionSplitter_fragment(); // can never throw exception
|
||||
} catch (RecognitionException re) {
|
||||
System.err.println("impossible: "+re);
|
||||
}
|
||||
|
@ -2144,11 +2280,11 @@ public class ActionSplitter extends Lexer {
|
|||
state.failed=false;
|
||||
return success;
|
||||
}
|
||||
public final boolean synpred2_ActionSplitter() {
|
||||
public final boolean synpred8_ActionSplitter() {
|
||||
state.backtracking++;
|
||||
int start = input.mark();
|
||||
try {
|
||||
synpred2_ActionSplitter_fragment(); // can never throw exception
|
||||
synpred8_ActionSplitter_fragment(); // can never throw exception
|
||||
} catch (RecognitionException re) {
|
||||
System.err.println("impossible: "+re);
|
||||
}
|
||||
|
@ -2296,23 +2432,19 @@ public class ActionSplitter extends Lexer {
|
|||
static final String DFA29_eofS =
|
||||
"\27\uffff";
|
||||
static final String DFA29_minS =
|
||||
"\1\44\1\0\2\uffff\1\0\7\uffff\1\0\12\uffff";
|
||||
"\1\44\1\0\6\uffff\1\0\12\uffff\1\0\3\uffff";
|
||||
static final String DFA29_maxS =
|
||||
"\1\134\1\0\2\uffff\1\0\7\uffff\1\0\12\uffff";
|
||||
"\1\134\1\0\6\uffff\1\0\12\uffff\1\0\3\uffff";
|
||||
static final String DFA29_acceptS =
|
||||
"\2\uffff\1\1\1\2\1\uffff\1\15\1\16\1\17\1\20\1\21\1\22\1\23\1\uffff"+
|
||||
"\1\3\1\4\1\5\1\6\1\7\1\10\1\11\1\12\1\13\1\14";
|
||||
"\2\uffff\1\15\1\16\1\17\1\20\1\21\1\22\1\uffff\1\3\1\4\1\5\1\6\1"+
|
||||
"\7\1\10\1\11\1\12\1\13\1\14\1\uffff\1\1\1\2\1\23";
|
||||
static final String DFA29_specialS =
|
||||
"\1\uffff\1\0\2\uffff\1\1\7\uffff\1\2\12\uffff}>";
|
||||
"\1\uffff\1\0\6\uffff\1\1\12\uffff\1\2\3\uffff}>";
|
||||
static final String[] DFA29_transitionS = {
|
||||
"\1\14\1\4\11\uffff\1\1\54\uffff\1\13",
|
||||
"\1\10\1\1\11\uffff\1\23\54\uffff\1\26",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
|
@ -2327,6 +2459,10 @@ public class ActionSplitter extends Lexer {
|
|||
"",
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"\1\uffff",
|
||||
"",
|
||||
"",
|
||||
""
|
||||
};
|
||||
|
||||
|
@@ -2373,66 +2509,66 @@ public class ActionSplitter extends Lexer {
            int index29_1 = input.index();
            input.rewind();
            s = -1;
            if ( (synpred1_ActionSplitter()) ) {s = 2;}
            if ( (synpred13_ActionSplitter()) ) {s = 2;}

            else if ( (synpred2_ActionSplitter()) ) {s = 3;}
            else if ( (synpred14_ActionSplitter()) ) {s = 3;}

            else if ( (synpred15_ActionSplitter()) ) {s = 4;}

            else if ( (synpred16_ActionSplitter()) ) {s = 5;}

            else if ( (synpred17_ActionSplitter()) ) {s = 6;}

            else if ( (synpred18_ActionSplitter()) ) {s = 7;}


            input.seek(index29_1);
            if ( s>=0 ) return s;
            break;
        case 1 :
            int LA29_4 = input.LA(1);
            int LA29_8 = input.LA(1);


            int index29_4 = input.index();
            int index29_8 = input.index();
            input.rewind();
            s = -1;
            if ( (synpred13_ActionSplitter()) ) {s = 5;}
            if ( (synpred3_ActionSplitter()) ) {s = 9;}

            else if ( (synpred14_ActionSplitter()) ) {s = 6;}
            else if ( (synpred4_ActionSplitter()) ) {s = 10;}

            else if ( (synpred15_ActionSplitter()) ) {s = 7;}
            else if ( (synpred5_ActionSplitter()) ) {s = 11;}

            else if ( (synpred16_ActionSplitter()) ) {s = 8;}
            else if ( (synpred6_ActionSplitter()) ) {s = 12;}

            else if ( (synpred17_ActionSplitter()) ) {s = 9;}
            else if ( (synpred7_ActionSplitter()) ) {s = 13;}

            else if ( (synpred18_ActionSplitter()) ) {s = 10;}
            else if ( (synpred8_ActionSplitter()) ) {s = 14;}

            else if ( (synpred9_ActionSplitter()) ) {s = 15;}

            else if ( (synpred10_ActionSplitter()) ) {s = 16;}

            else if ( (synpred11_ActionSplitter()) ) {s = 17;}

            else if ( (synpred12_ActionSplitter()) ) {s = 18;}


            input.seek(index29_4);
            input.seek(index29_8);
            if ( s>=0 ) return s;
            break;
        case 2 :
            int LA29_12 = input.LA(1);
            int LA29_19 = input.LA(1);


            int index29_12 = input.index();
            int index29_19 = input.index();
            input.rewind();
            s = -1;
            if ( (synpred3_ActionSplitter()) ) {s = 13;}
            if ( (synpred1_ActionSplitter()) ) {s = 20;}

            else if ( (synpred4_ActionSplitter()) ) {s = 14;}

            else if ( (synpred5_ActionSplitter()) ) {s = 15;}

            else if ( (synpred6_ActionSplitter()) ) {s = 16;}

            else if ( (synpred7_ActionSplitter()) ) {s = 17;}

            else if ( (synpred8_ActionSplitter()) ) {s = 18;}

            else if ( (synpred9_ActionSplitter()) ) {s = 19;}

            else if ( (synpred10_ActionSplitter()) ) {s = 20;}

            else if ( (synpred11_ActionSplitter()) ) {s = 21;}

            else if ( (synpred12_ActionSplitter()) ) {s = 22;}
            else if ( (synpred2_ActionSplitter()) ) {s = 21;}


            input.seek(index29_12);
            input.seek(index29_19);
            if ( s>=0 ) return s;
            break;
    }

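In the special-state switch above, the DFA marks the input, evaluates syntactic predicates in order, restores the stream with seek(), and maps the first predicate that succeeds to a target DFA state (or -1 for no viable alternative). A simplified, hypothetical model of that resolution step; the BooleanSupplier list and the state numbers here are illustrative, not the generated ActionSplitter's.

import java.util.List;
import java.util.function.BooleanSupplier;

class SpecialStateDemo {
    static int resolve(List<BooleanSupplier> synpreds, int[] targetStates, Runnable restoreInput) {
        int s = -1;
        for (int i = 0; i < synpreds.size(); i++) {
            if (synpreds.get(i).getAsBoolean()) { s = targetStates[i]; break; }
        }
        restoreInput.run();           // plays the role of input.seek(index29_x)
        return s;                     // -1 means no viable alternative
    }

    public static void main(String[] args) {
        List<BooleanSupplier> synpreds =
            List.<BooleanSupplier>of(() -> false, () -> true, () -> false);  // e.g. three predicates
        int s = resolve(synpreds, new int[]{2, 3, 4}, () -> { /* rewind to the marked index */ });
        System.out.println("predicted state " + s);   // 3
    }
}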
@@ -14,46 +14,48 @@ public class AttributeChecks implements ActionSplitterListener {
    public Rule r;              // null if action outside of rule
    public Alternative alt;     // null if action outside of alt; could be in rule
    public ActionAST node;
    public String action;
    public Token actionToken;   // token within action
    //public String action;

    public AttributeChecks(Grammar g, Rule r, Alternative alt, ActionAST node, String action) {
    public AttributeChecks(Grammar g, Rule r, Alternative alt, ActionAST node, Token actionToken) {
        this.g = g;
        this.r = r;
        this.alt = alt;
        this.node = node;
        this.action = action;
        this.actionToken = actionToken;
    }

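The constructor now takes the action's Token instead of its raw text: a Token carries the line and column where the action appeared in the grammar file, which is exactly what error messages need. A minimal sketch, assuming the ANTLR 3 runtime (org.antlr.runtime) is on the classpath; GrammarActionReporter is a hypothetical class for illustration.

import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

class GrammarActionReporter {
    static String describe(Token actionToken) {
        // With only a String we would know the text but not where it came from.
        return "action \"" + actionToken.getText() + "\" at "
                + actionToken.getLine() + ":" + actionToken.getCharPositionInLine();
    }

    public static void main(String[] args) {
        CommonToken action = new CommonToken(Token.INVALID_TOKEN_TYPE, "$x = 3;");
        action.setLine(6);
        action.setCharPositionInLine(4);
        System.out.println(describe(action));   // action "$x = 3;" at 6:4
    }
}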
    public static void checkAllAttributeExpressions(Grammar g) {
        for (ActionAST act : g.actions.values()) {
            AttributeChecks checker = new AttributeChecks(g, null, null, act, act.getText());
        for (ActionAST act : g.namedActions.values()) {
            AttributeChecks checker = new AttributeChecks(g, null, null, act, act.token);
            checker.examineAction();
        }

        for (Rule r : g.rules.values()) {
            for (ActionAST a : r.namedActions.values()) {
                AttributeChecks checker = new AttributeChecks(g, r, null, a, a.getText());
                AttributeChecks checker = new AttributeChecks(g, r, null, a, a.token);
                checker.examineAction();
            }
            for (int i=1; i<=r.numberOfAlts; i++) {
                Alternative alt = r.alt[i];
                for (ActionAST a : alt.actions) {
                    AttributeChecks checker =
                        new AttributeChecks(g, r, alt, a, a.getText());
                        new AttributeChecks(g, r, alt, a, a.token);
                    checker.examineAction();
                }
            }
            for (ActionAST a : r.exceptionActions) {
                AttributeChecks checker = new AttributeChecks(g, r, null, a, a.getText());
                AttributeChecks checker = new AttributeChecks(g, r, null, a, a.token);
                checker.examineAction();
            }
        }
    }

    public void examineAction() {
        ANTLRStringStream in = new ANTLRStringStream(action);
        in.setLine(node.getLine());
        in.setCharPositionInLine(node.getCharPositionInLine());
        //System.out.println("examine "+actionToken);
        ANTLRStringStream in = new ANTLRStringStream(actionToken.getText());
        in.setLine(actionToken.getLine());
        in.setCharPositionInLine(actionToken.getCharPositionInLine());
        ActionSplitter splitter = new ActionSplitter(in, this);
        List<Token> chunks = splitter.getActionChunks(); // forces eval, fills extractor
        //System.out.println(chunks);
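examineAction() seeds the character stream with the action token's line and column so that every token the ActionSplitter later produces reports positions in the original grammar file rather than positions relative to the start of the action text. A small sketch of just that stream setup, assuming the ANTLR 3 runtime; the splitter itself is the project's own ActionSplitter and is omitted here.

import org.antlr.runtime.ANTLRStringStream;

class ActionStreamSetup {
    static ANTLRStringStream streamFor(String actionText, int line, int charPositionInLine) {
        ANTLRStringStream in = new ANTLRStringStream(actionText);
        in.setLine(line);                              // start counting at the action's own line
        in.setCharPositionInLine(charPositionInLine);  // ... and column
        return in;
    }

    public static void main(String[] args) {
        ANTLRStringStream in = streamFor("$x = $y;", 6, 4);
        System.out.println(in.getLine() + ":" + in.getCharPositionInLine());  // 6:4
    }
}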
@@ -62,45 +64,47 @@ public class AttributeChecks implements ActionSplitterListener {
    // LISTENER METHODS

    public void setQualifiedAttr(String expr, Token x, Token y, Token rhs) {
        if ( !node.resolver.resolves(x.getText(), y.getText(), node) ) {
            ErrorManager.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE, // TODO; not right error
                g.fileName, x, x.getText(), expr);
        }
        new AttributeChecks(g, r, alt, node, rhs.getText()).examineAction();
        qualifiedAttr(expr, x, y);
        new AttributeChecks(g, r, alt, node, rhs).examineAction();
    }

    public void qualifiedAttr(String expr, Token x, Token y) {
        if ( !node.resolver.resolves(x.getText(), node) ) {
            ErrorManager.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE,
                g.fileName, x, x.getText(), expr);
            return;
        }
        if ( !node.resolver.resolves(x.getText(), y.getText(), node) ) {
        if ( !node.resolver.resolves(x.getText(), node) &&
             (r==null || !r.name.equals(x.getText())) )
        {
            ErrorManager.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE,
                g.fileName, x, x.getText(), expr);
            return;
        }
            if ( node.resolver.resolveRefToRule(x.getText(), node)!=null ) {
                Rule rref = g.getRule(x.getText());
                if ( rref!=null && rref.args!=null && rref.args.get(y.getText())!=null ) {
                    ErrorManager.grammarError(ErrorType.INVALID_RULE_PARAMETER_REF,
                        g.fileName, y, y.getText(), expr);
                    return;
                }
                ErrorManager.grammarError(ErrorType.UNKNOWN_RULE_ATTRIBUTE,
                    g.fileName, y, y.getText(), expr);
                return;
            }
            ErrorManager.grammarError(ErrorType.UNKNOWN_ATTRIBUTE_IN_SCOPE,
                g.fileName, y, y.getText(), expr);
            return;
        }
    }

        // ???if y is not prop of x, we don't care; we'll ignore and leave as simple attr

        if ( !node.resolver.resolves(x.getText(), y.getText(), node) ) {
            if ( node.resolver.resolveRefToRule(x.getText(), node)!=null ) {
                ErrorManager.grammarError(ErrorType.INVALID_RULE_PARAMETER_REF,
                    g.fileName, y, y.getText(), expr);
            }
            else {
                ErrorManager.grammarError(ErrorType.UNKNOWN_ATTRIBUTE_IN_SCOPE,
                    g.fileName, y, y.getText(), expr);
            }
        }
    }

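qualifiedAttr() separates several failure modes for a $x.y reference: x itself is unknown, x is a rule reference whose parameter y is being read from the caller (which is not allowed), or y is simply not an attribute visible in x's scope. A deliberately simplified, hypothetical decision function in that spirit; Resolver and the returned strings are stand-ins for the tool's resolver and ErrorType constants, not its real API.

class QualifiedAttrCheckSketch {
    interface Resolver {
        boolean resolvesQualified(String x, String y);
        boolean resolvesSimple(String x);
        boolean isRuleRef(String x);
        boolean ruleHasParameter(String x, String y);
    }

    static String check(Resolver res, String currentRuleName, String x, String y) {
        if (res.resolvesQualified(x, y)) return "ok";
        if (!res.resolvesSimple(x) && !x.equals(currentRuleName)) {
            return "UNKNOWN_SIMPLE_ATTRIBUTE: " + x;      // $x itself is unknown
        }
        if (res.isRuleRef(x)) {
            return res.ruleHasParameter(x, y)
                ? "INVALID_RULE_PARAMETER_REF: " + y      // callers cannot see a callee's arguments
                : "UNKNOWN_RULE_ATTRIBUTE: " + y;
        }
        return "UNKNOWN_ATTRIBUTE_IN_SCOPE: " + y;
    }

    public static void main(String[] args) {
        Resolver r = new Resolver() {
            public boolean resolvesQualified(String x, String y) { return false; }
            public boolean resolvesSimple(String x) { return true; }
            public boolean isRuleRef(String x) { return true; }
            public boolean ruleHasParameter(String x, String y) { return y.equals("d"); }
        };
        System.out.println(check(r, "a", "b", "d"));   // INVALID_RULE_PARAMETER_REF: d
    }
}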
    public void setAttr(String expr, Token x, Token rhs) {
        if ( !node.resolver.resolves(x.getText(), node) ) {
    public void setAttr(String expr, Token x, Token rhs) {
        System.out.println("setAttr x="+x+" = "+rhs+"; expr="+expr);
        if ( !node.resolver.resolves(x.getText(), node) ) {
            ErrorManager.grammarError(ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE,
                g.fileName, x, x.getText(), expr);
        }
        new AttributeChecks(g, r, alt, node, rhs.getText()).examineAction();
        new AttributeChecks(g, r, alt, node, rhs).examineAction();
    }

    public void attr(String expr, Token x) { // arg, retval, predefined, token ref, rule ref, current rule
        // TODO: check for isolated rule ref "+x+" in "+expr);
        if ( node.resolver.resolveRefToRule(x.getText(), node)!=null ) {
        if ( node.resolver.resolveRefToRule(x.getText(), node)!=null ) { // or in rule and is rule ref
            ErrorManager.grammarError(ErrorType.ISOLATED_RULE_SCOPE,
                g.fileName, x, x.getText(), expr);
            return;

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 BasicSemanticTriggers.g 2010-02-12 17:18:11
// $ANTLR ${project.version} ${buildNumber} BasicSemanticTriggers.g 2010-02-15 12:12:19

/*
 [The "BSD license"]

@@ -26,14 +26,15 @@
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.semantics;

import org.antlr.runtime.*;
import org.antlr.runtime.tree.TreeNodeStream;
import org.antlr.runtime.tree.TreeRuleReturnScope;
import org.antlr.v4.tool.*;

import java.util.ArrayList;

import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;import java.util.Stack;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
/** Triggers for the basic semantics of the input. Side-effects:
 *  Set token, block, rule options in the tree. Load field option
 *  with grammar options. Only legal options are set.

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 CollectSymbols.g 2010-02-12 17:18:11
// $ANTLR ${project.version} ${buildNumber} CollectSymbols.g 2010-02-15 12:12:19

/*
 [The "BSD license"]

@@ -26,15 +26,19 @@
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.semantics;
import org.antlr.v4.tool.*;
import org.antlr.v4.parse.*;
import java.util.Set;
import java.util.HashSet;
import org.stringtemplate.v4.misc.MultiMap;


import org.antlr.runtime.*;
import org.antlr.runtime.tree.TreeNodeStream;
import org.antlr.runtime.tree.TreeRuleReturnScope;
import org.antlr.v4.parse.ScopeParser;
import org.antlr.v4.tool.*;

import java.util.ArrayList;
import org.antlr.runtime.tree.*;import java.util.Stack;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
/** Collects rules, terminals, strings, actions, scopes etc... from AST
 *  Side-effects: None
 */

@@ -57,18 +57,6 @@ public class Alternative implements AttributeResolver {
        Rule r = resolveRefToRule(x, node);
        if ( r!=null ) return r.resolvesAsRetvalOrProperty(y);
        return getParent().resolves(x, y, node);
//
//        List<LabelElementPair> labels = labelDefs.get(x); // label?
//        if ( labels!=null ) {
//            // it's a label ref, compute scope from label type and grammar type
//            LabelElementPair anyLabelDef = labels.get(0);
//            if ( rule.getPredefinedScope(anyLabelDef.type).get(y)!=null) return true;
//            if ( anyLabelDef.type==LabelType.RULE_LABEL ) {
//                Rule ref = rule.g.getRule(anyLabelDef.element.getText());
//                return ref.resolvesAsRetvalOrProperty(y);
//            }
//        }
//        return false;
    }

    public Rule resolveRefToRule(String x, ActionAST node) {

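Alternative.resolves(x, y, node) now either treats x as a rule reference or hands the question to its parent, so resolution climbs from the alternative to the enclosing rule and on to the grammar. A hypothetical chain-of-responsibility sketch of that shape; Scope and BaseScope are illustrative names, not the tool's AttributeResolver API.

class ResolverChainSketch {
    interface Scope {
        boolean resolves(String x, String y);
        Scope getParent();
    }

    static abstract class BaseScope implements Scope {
        private final Scope parent;
        BaseScope(Scope parent) { this.parent = parent; }
        public Scope getParent() { return parent; }
        public boolean resolves(String x, String y) {
            if (resolvesLocally(x, y)) return true;                 // try this scope first
            return parent != null && parent.resolves(x, y);         // then delegate upward
        }
        abstract boolean resolvesLocally(String x, String y);
    }

    public static void main(String[] args) {
        Scope grammar = null;   // top of the chain in this sketch
        Scope rule = new BaseScope(grammar) {
            boolean resolvesLocally(String x, String y) { return x.equals("b") && y.equals("e"); }
        };
        Scope alt = new BaseScope(rule) {
            boolean resolvesLocally(String x, String y) { return false; }  // nothing defined in the alt itself
        };
        System.out.println(alt.resolves("b", "e"));   // true, resolved by the enclosing rule
    }
}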
@@ -53,14 +53,14 @@ public class Grammar implements AttributeResolver {
     *  I track the AST node for the action in case I need the line number
     *  for errors.
     */
    public Map<String,ActionAST> actions = new HashMap<String,ActionAST>();
    public Map<String,ActionAST> namedActions = new HashMap<String,ActionAST>();

    /** A list of options specified at the grammar level such as language=Java. */
    public Map<String, String> options;

    public Map<String, AttributeScope> scopes = new LinkedHashMap<String, AttributeScope>();
    public Map<String, AttributeScope> scopes = new LinkedHashMap<String, AttributeScope>();

    public Grammar(Tool tool, GrammarRootAST ast) {
    public Grammar(Tool tool, GrammarRootAST ast) {
        if ( ast==null ) throw new IllegalArgumentException("can't pass null tree");
        this.tool = tool;
        this.ast = ast;

@@ -112,11 +112,11 @@ public class Grammar implements AttributeResolver {
    public void defineAction(GrammarAST atAST) {
        if ( atAST.getChildCount()==2 ) {
            String name = atAST.getChild(0).getText();
            actions.put(name, (ActionAST)atAST.getChild(1));
            namedActions.put(name, (ActionAST)atAST.getChild(1));
        }
        else {
            String name = atAST.getChild(1).getText();
            actions.put(name, (ActionAST)atAST.getChild(2));
            namedActions.put(name, (ActionAST)atAST.getChild(2));
        }
    }

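defineAction() distinguishes the two shapes of a named action: "@name {...}" gives the AT node two children (name, action), while "@scope::name {...}" gives it three (scope, name, action). A miniature, hypothetical version of that dispatch using plain lists in place of GrammarAST children.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class NamedActionDemo {
    static final Map<String, String> namedActions = new HashMap<>();

    static void defineAction(List<String> atChildren) {
        if (atChildren.size() == 2) {
            namedActions.put(atChildren.get(0), atChildren.get(1));   // e.g. @header {...}
        } else {
            namedActions.put(atChildren.get(1), atChildren.get(2));   // e.g. @parser::members {...}
        }
    }

    public static void main(String[] args) {
        defineAction(List.of("header", "{import java.util.*;}"));
        defineAction(List.of("parser", "members", "{int i;}"));
        System.out.println(namedActions);   // two entries: header and members
    }
}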
@@ -224,7 +224,8 @@ public class Grammar implements AttributeResolver {
     */
    public boolean resolves(String x, String y, ActionAST node) { return false; }

    public Rule resolveRefToRule(String x, ActionAST node) { return getRule(x); }
    /** Can't be a rule ref in grammar named action */
    public Rule resolveRefToRule(String x, ActionAST node) { return null; }

    /** Given a grammar type, what should be the default action scope?
     *  If I say @members in a COMBINED grammar, for example, the

@@ -11,11 +11,13 @@ public class TestAttributeChecks extends BaseTest {
        "@members {<members>}\n" +
        "a[int x] returns [int y]\n" +
        "@init {<init>}\n" +
        " : {<action>}\n" +
        " : lab=b[34] {\n" +
        " <inline>\n" +
        " }\n" +
        " ;\n" +
        " finally {<finally>}\n" +
        "b[int d] returns [int e]\n" +
        " : {<action2>}\n" +
        " : {<inline2>}\n" +
        " ;\n" +
        "c : ;";

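The attributeTemplate above leaves holes such as <members>, <init>, <inline> and <finally> wherever an action may legally appear, and each test drops one action expression into exactly one hole before rendering the grammar. A small sketch of that hole filling, assuming StringTemplate 4 (org.stringtemplate.v4) on the classpath; the trimmed-down template here is illustrative, not the test's real one.

import org.stringtemplate.v4.ST;

class TemplateHoleDemo {
    public static void main(String[] args) {
        String template =
            "parser grammar A;\n" +
            "a[int x] returns [int y]\n" +
            " : lab=b[34] {<inline>}\n" +
            " ;\n";
        ST st = new ST(template);        // '<' and '>' are ST's default delimiters
        st.add("inline", "$lab.e");      // place one action in the <inline> hole
        System.out.println(st.render());
    }
}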
@@ -38,32 +40,75 @@ public class TestAttributeChecks extends BaseTest {
        "c : ;";

    String[] membersChecks = {
        "$a.y", "error(29): A.g:2:12: unknown attribute reference a in $a.y",
        "$a", "error(29): A.g:2:11: unknown attribute reference a in $a",
        "$a.y", "error(29): A.g:2:11: unknown attribute reference a in $a.y",
    };

    String[] initChecks = {
        "$a.y", "error(29): A.g:4:9: unknown attribute reference a in $a.y",
        "$a", "error(33): A.g:4:8: missing attribute access on rule reference a in $a",
        "$a.q", "error(31): A.g:4:10: unknown attribute rule q in $a.q",
    };

    String[] inlineChecks = {
        "$q", "error(29): A.g:6:4: unknown attribute reference q in $q",
        "$q.y", "error(29): A.g:6:4: unknown attribute reference q in $q.y",
        "$q = 3", "error(29): A.g:6:4: unknown attribute reference q in $q",
        "$q = 3;", "error(29): A.g:6:4: unknown attribute reference q in $q = 3;",
        "$q.y = 3;", "error(29): A.g:6:4: unknown attribute reference q in $q.y = 3;",
        "$q = $blort;", "error(29): A.g:6:4: unknown attribute reference q in $q = $blort;\n" +
                        "error(29): A.g:6:9: unknown attribute reference blort in $blort",
        "$a", "error(33): A.g:6:4: missing attribute access on rule reference a in $a",
        "$a.ick", "error(31): A.g:6:6: unknown attribute rule ick in $a.ick",
        "$a.ick = 3;", "error(31): A.g:6:6: unknown attribute rule ick in $a.ick = 3;",
        "$b", "error(33): A.g:6:4: missing attribute access on rule reference b in $b",
        "$b.d", "error(30): A.g:6:6: cannot access rule d's parameter: $b.d", // can't see rule ref's arg
        "$c.text", "error(29): A.g:6:4: unknown attribute reference c in $c.text", // valid rule, but no ref
        "$lab", "error(33): A.g:6:4: missing attribute access on rule reference lab in $lab",
        "$lab.d", "error(31): A.g:6:8: unknown attribute rule d in $lab.d",
    };

    String[] finallyChecks = {
        "$q", "error(29): A.g:8:14: unknown attribute reference q in $q",
        "$q.y", "error(29): A.g:8:14: unknown attribute reference q in $q.y",
        "$q = 3", "error(29): A.g:8:14: unknown attribute reference q in $q",
        "$q = 3;", "error(29): A.g:8:14: unknown attribute reference q in $q = 3;",
        "$q.y = 3;", "error(29): A.g:8:14: unknown attribute reference q in $q.y = 3;",
        "$q = $blort;", "error(29): A.g:8:14: unknown attribute reference q in $q = $blort;\n" +
                        "error(29): A.g:8:19: unknown attribute reference blort in $blort",
        "$a", "error(33): A.g:8:14: missing attribute access on rule reference a in $a",
        "$a.ick", "error(31): A.g:8:16: unknown attribute rule ick in $a.ick",
        "$a.ick = 3;", "error(31): A.g:8:16: unknown attribute rule ick in $a.ick = 3;",
        "$b", "error(29): A.g:8:14: unknown attribute reference b in $b",
        "$b.d", "error(29): A.g:8:14: unknown attribute reference b in $b.d",
        "$c.text", "error(29): A.g:8:14: unknown attribute reference c in $c.text",
        "$lab", "error(33): A.g:8:14: missing attribute access on rule reference lab in $lab",
        "$lab.d", "error(31): A.g:8:18: unknown attribute rule d in $lab.d",
    };

    @Test public void testMembersActions() throws RecognitionException {
        for (int i = 0; i < membersChecks.length; i+=2) {
            String m = membersChecks[i];
            String expected = membersChecks[i+1];
            ST st = new ST(attributeTemplate);
            st.add("members", m);
        testActions("members", membersChecks, attributeTemplate);
    }

    @Test public void testInitActions() throws RecognitionException {
        testActions("init", initChecks, attributeTemplate);
    }

    @Test public void testInlineActions() throws RecognitionException {
        testActions("inline", inlineChecks, attributeTemplate);
    }

    @Test public void testFinallyActions() throws RecognitionException {
        testActions("finally", finallyChecks, attributeTemplate);
    }

    public void testActions(String location, String[] pairs, String template) {
        for (int i = 0; i < pairs.length; i+=2) {
            String action = pairs[i];
            String expected = pairs[i+1];
            ST st = new ST(template);
            st.add(location, action);
            String grammar = st.render();
            testErrors(new String[] {grammar, expected});
        }
    }

    @Test public void testInitActions() throws RecognitionException {
        for (int i = 0; i < initChecks.length; i+=2) {
            String init = initChecks[i];
            String expected = initChecks[i+1];
            ST st = new ST(attributeTemplate);
            st.add("init", init);
            String grammar = st.render();
            testErrors(new String[] {grammar, expected});
        }
    }
}
