commit 4b71c478c1 (parent 024f03b8d7)

    huge update; preds in lexers

    [git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6911]
@@ -38,6 +38,8 @@ import java.util.*;
  *  parser and tree grammars. This is all the parsing
  *  support code essentially; most of it is error recovery stuff and
  *  backtracking.
  *
  *  TODO: rename since lexer not under. or reorg parser/treeparser; treeparser under parser?
  */
 public abstract class BaseRecognizer {
     public static final int EOF=-1;

@@ -53,15 +55,15 @@ public abstract class BaseRecognizer {
      *  and other state variables. It's a kind of explicit multiple
      *  inheritance via delegation of methods and shared state.
      */
-    public RecognizerSharedState state;
+    public ParserSharedState state;

     public BaseRecognizer(IntStream input) {
-        this(input, new RecognizerSharedState());
+        this(input, new ParserSharedState());
     }

-    public BaseRecognizer(IntStream input, RecognizerSharedState state) {
+    public BaseRecognizer(IntStream input, ParserSharedState state) {
         if ( state==null ) {
-            state = new RecognizerSharedState();
+            state = new ParserSharedState();
         }
         this.state = state;
         state.input = input;
@@ -28,9 +28,9 @@
 package org.antlr.v4.runtime;

+import org.antlr.runtime.CharStream;
 import org.antlr.runtime.IntStream;
 import org.antlr.runtime.Token;
 import org.antlr.runtime.TokenSource;
+import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.misc.QStack;
 import org.antlr.v4.runtime.pda.Bytecode;
 import org.antlr.v4.runtime.pda.PDA;

@@ -53,12 +53,13 @@ public abstract class Lexer implements TokenSource {
     public LexerSharedState state;

     public static PDA[] modeToPDA;
+    public static DFA[] modeToDFA;

-    public Lexer(IntStream input) {
+    public Lexer(CharStream input) {
         this(input, new LexerSharedState());
     }

-    public Lexer(IntStream input, LexerSharedState state) {
+    public Lexer(CharStream input, LexerSharedState state) {
         if ( state==null ) {
             state = new LexerSharedState();
         }
@@ -87,6 +88,38 @@ public abstract class Lexer implements TokenSource {
      *  stream.
      */
+    public Token nextToken() {
+outer:
+        while (true) {
+            state.token = null;
+            state.channel = Token.DEFAULT_CHANNEL;
+            state.tokenStartCharIndex = state.input.index();
+            state.tokenStartCharPositionInLine = state.input.getCharPositionInLine();
+            state.tokenStartLine = state.input.getLine();
+            state.text = null;
+            do {
+                state.type = Token.INVALID_TOKEN_TYPE;
+                if ( state.input.LA(1)==CharStream.EOF ) {
+                    Token eof = new org.antlr.runtime.CommonToken(state.input,Token.EOF,
+                                                                  Token.DEFAULT_CHANNEL,
+                                                                  state.input.index(),state.input.index());
+                    eof.setLine(getLine());
+                    eof.setCharPositionInLine(getCharPositionInLine());
+                    return eof;
+                }
+                System.err.println("predict mode "+state.mode+" at index "+state.input.index());
+                int ttype = modeToDFA[state.mode].predict(state.input);
+                System.err.println("returns "+ttype);
+                if ( state.type == Token.INVALID_TOKEN_TYPE ) state.type = ttype;
+                if ( state.type==SKIP ) {
+                    continue outer;
+                }
+            } while ( state.type==MORE );
+            if ( state.token==null ) emit();
+            return state.token;
+        }
+    }
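A minimal driver sketch (not part of this commit; the generated lexer name XLexer and the input string are assumed): the new nextToken() above is the whole tokenization loop, consuming SKIP tokens and MORE fragments internally, so a caller just pulls tokens until EOF.

    // ANTLRStringStream is the ANTLR v3 runtime CharStream this runtime builds on
    Lexer lexer = new XLexer(new ANTLRStringStream("ab ab")); // XLexer is hypothetical
    Token t = lexer.nextToken();
    while ( t.getType()!=Token.EOF ) {
        System.out.println(t);
        t = lexer.nextToken();
    }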

     public Token nextToken_PDA() {
 outer:
         while (true) {
             state.token = null;
@@ -142,15 +175,19 @@ public abstract class Lexer implements TokenSource {
         state.type = MORE;
     }

-    public void mode(int m) { state.mode = m; }
+    public void mode(int m) {
+        state.mode = m;
+    }

     public void pushMode(int m) {
         if ( state.modeStack==null ) state.modeStack = new QStack<Integer>();
         state.modeStack.push(state.mode);
-        state.mode = m;
+        mode(m);
     }

     public int popMode() {
         if ( state.modeStack==null ) throw new EmptyStackException();
-        state.mode = state.modeStack.pop();
+        mode( state.modeStack.pop() );
         return state.mode;
     }
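A usage sketch (not part of this commit; the COMMENT mode constant is assumed): pushMode/popMode give the lexer a mode stack for island grammars, so a rule action can switch modes on an opening delimiter and restore the enclosing mode on the matching close.

    // in a generated lexer action (COMMENT is a hypothetical mode constant):
    pushMode(COMMENT);   // on "/*": save the current mode, switch to COMMENT
    popMode();           // on "*/": restore the enclosing mode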

@@ -175,6 +212,7 @@ public abstract class Lexer implements TokenSource {
      *  than a single variable as this implementation does).
      */
     public void emit(Token token) {
+        //System.err.println("emit "+token);
         state.token = token;
     }

@@ -251,7 +289,7 @@ public abstract class Lexer implements TokenSource {
         return null;
     }

-    public String getErrorMessage(RecognitionException e, String[] tokenNames) {
+    public String getErrorMessage(RecognitionException e) {
         String msg = null;
         if ( e instanceof MismatchedTokenException ) {
             MismatchedTokenException mte = (MismatchedTokenException)e;
@@ -1,11 +1,11 @@
 package org.antlr.v4.runtime;

-import org.antlr.runtime.IntStream;
+import org.antlr.runtime.CharStream;
 import org.antlr.runtime.Token;
 import org.antlr.v4.runtime.misc.QStack;

 public class LexerSharedState {
-    public IntStream input;
+    public CharStream input;

     /** The goal of all lexer rules/methods is to create a token object.
      *  This is an instance variable as multiple rules may collaborate to
@@ -40,7 +40,7 @@ public class Parser extends BaseRecognizer {
         super(input);
     }

-    public Parser(TokenStream input, RecognizerSharedState state) {
+    public Parser(TokenStream input, ParserSharedState state) {
         super(input, state); // share the state object with another parser
     }

@@ -1,29 +1,29 @@
 /*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
+ [BSD]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.

- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.

- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 package org.antlr.v4.runtime;

@@ -37,7 +37,7 @@ import java.util.Map;
  *  and recover from errors etc... As a separate state object, it can be
  *  shared among multiple grammars; e.g., when one grammar imports another.
  */
-public class RecognizerSharedState {
+public class ParserSharedState {
     public IntStream input;

     /** First on stack is fake a call to start rule from S' : S EOF ;

@@ -83,7 +83,7 @@ public class RecognizerSharedState {

     List<ANTLRParserListener> listeners;

-    public RecognizerSharedState() {
+    public ParserSharedState() {
         ctx = new QStack<RuleContext>();
     }
@@ -0,0 +1,228 @@
+/*
+ [BSD]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.v4.runtime.dfa;
+
+import org.antlr.runtime.IntStream;
+import org.antlr.runtime.Token;
+import org.antlr.v4.runtime.NoViableAltException;
+import org.antlr.v4.runtime.RecognitionException;
+
+/** A DFA implemented as a set of transition tables.
+ *
+ *  Any state that has a semantic predicate edge is special; those states
+ *  are generated with if-then-else structures in a specialStateTransition()
+ *  which is generated by cyclicDFA template.
+ *
+ *  There are at most 32767 states (16-bit signed short).
+ *  Could get away with byte sometimes but would have to generate different
+ *  types and the simulation code too. For a point of reference, the Java
+ *  lexer's Tokens rule DFA has 326 states roughly.
+ */
+public class DFA {
+    public short[] eof;
+    public char[] max;
+    public short[] accept;
+    /** { target1, npairs1, range-pairs1,
+     *    target2, npairs2, range-pairs2, ... }
+     */
+    public int[][] set_edges;
+    public int[][] pred_edges; // 'a'&&{p1}?
+    public short[][] transition;
+    public short[] action_index;
+
+    public int decisionNumber;
+
+    /** Which recognizer encloses this DFA? Needed to check backtracking */
+    //public BaseRecognizer recognizer;
+
+    public static final boolean debug = false;
+
+    /** From the input stream, predict what alternative will succeed
+     *  using this DFA (representing the covering regular approximation
+     *  to the underlying CFL). Return an alternative number 1..n. Throw
+     *  an exception upon error.
+     */
+    public int predict(IntStream input)
+        throws RecognitionException
+    {
+        if ( debug ) {
+            System.err.println("Enter DFA.predict for decision "+decisionNumber);
+        }
+        //int mark = input.mark(); // remember where decision started in input
+        int prevAcceptMarker = -1;
+        int prevAcceptState = -1;
+        int s = 0; // we always start at s0
+        try {
+            while ( true ) {
+                if ( debug ) System.err.println("DFA "+decisionNumber+" state "+s+" LA(1)="+(char)input.LA(1)+"("+input.LA(1)+
+                                                "), index="+input.index());
+                if ( accept[s] >= 1 ) {
+                    // TODO: have to keep going and then backtrack if we fail!!!!
+                    if ( debug ) System.err.println("accept; predict "+accept[s]+" from state "+s);
+                    prevAcceptMarker = input.mark();
+                    prevAcceptState = s;
+                    // keep going
+                }
+                // look for a normal char transition
+                char c = (char)input.LA(1); // -1 == \uFFFF, all types fit in 64k space
+                if ( c<=max[s] ) {
+                    int snext = transition[s][c]; // move to next state
+                    if ( snext < 0 ) {
+                        // was in range but not valid transition
+                        // TODO: check if eof[s]>=0, indicating that EOF goes to another
+                        // state.
+                        // TODO: refactor this common fail code
+                        if ( prevAcceptMarker<0 ) noViableAlt(s,input);
+                        input.rewind(prevAcceptMarker);
+                        s = prevAcceptState;
+                        if ( action_index[s]>=0 ) action(action_index[s]);
+                        System.err.println("accept state "+s+" with ttype "+accept[s]+" at index "+input.index());
+                        return accept[s];
+                    }
+                    s = snext;
+                    input.consume();
+                    continue;
+                }
+                if ( set_edges[s]!=null ) {
+                    // TODO: unicode
+                }
+                if ( pred_edges[s]!=null ) {
+                    // TODO: gated or disambiguating sem
+                }
+                if ( c==(char)Token.EOF && eof[s]>=0 ) { // EOF Transition to accept state?
+                    if ( debug ) System.err.println("accept via EOF; predict "+accept[eof[s]]+" from "+eof[s]);
+                    // TODO: have to keep going and then backtrack if we fail??
+                    return accept[eof[s]];
+                }
+                // not in range and not EOF/EOT, must be invalid symbol
+                if ( debug ) {
+                    System.err.println("max["+s+"]="+max[s]);
+                    System.err.println("eof["+s+"]="+eof[s]);
+                    if ( transition[s]!=null ) {
+                        System.err.print("transitions=");
+                        for (int p=0; p<transition[s].length; p++) {
+                            System.err.print(transition[s][p]+" ");
+                        }
+                        System.err.println();
+                    }
+                }
+                if ( prevAcceptMarker<0 ) noViableAlt(s,input);
+                input.rewind(prevAcceptMarker);
+                s = prevAcceptState;
+                if ( action_index[s]>=0 ) action(action_index[s]);
+                System.err.println("accept state "+s+" with ttype "+accept[s]+" at index "+input.index());
+                return accept[s];
+            }
+        }
+        finally {
+            // input.rewind(mark);
+        }
+    }
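A worked illustration of the mark/rewind logic above (assumed input, not from the commit): if the DFA accepts both "in" and "int" and the input is "inx", predict() hits an accept state after "in", records input.mark() in prevAcceptMarker, and keeps consuming in search of a longer match; when 'x' fails to match, prevAcceptMarker>=0, so it rewinds to just after "in", runs any accept-state action, and returns that state's token type instead of throwing NoViableAltException. The same mark/rewind pattern on the v3 stream API:

    IntStream in = new ANTLRStringStream("inx");
    in.consume(); in.consume();   // matched "in": an accept state was seen
    int marker = in.mark();       // remember the viable stopping point
    in.consume();                 // speculate on 'x' hoping for "int"... no match
    in.rewind(marker);            // fall back to just after "in"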
+
+    // subclass needs to override these if there are sempreds or actions in lexer rules
+
+    public boolean sempred(int predIndex) {
+        return true;
+    }
+
+    public void action(int actionIndex) {
+    }
+
+    public void noViableAlt(int s, IntStream input) throws NoViableAltException {
+        NoViableAltException nvae = new NoViableAltException();
+//            new NoViableAltException(getDescription(),
+//                                     decisionNumber,
+//                                     s,
+//                                     input);
+        error(nvae);
+        throw nvae;
+    }
+
+    /** A hook for debugging interface */
+    public void error(NoViableAltException nvae) { ; }
+
+    public int specialStateTransition(int s, IntStream input)
+        throws NoViableAltException
+    {
+        return -1;
+    }
+
+    public String getDescription() {
+        return "n/a";
+    }
+
+    /** Given a String that has a run-length-encoding of some unsigned shorts
+     *  like "\1\2\3\9", convert to short[] {2,9,9,9}. We do this to avoid
+     *  static short[] which generates so much init code that the class won't
+     *  compile. :(
+     */
+    public static short[] unpackEncodedString(String encodedString) {
+        // walk first to find how big it is.
+        int size = 0;
+        for (int i=0; i<encodedString.length(); i+=2) {
+            size += encodedString.charAt(i);
+        }
+        short[] data = new short[size];
+        int di = 0;
+        for (int i=0; i<encodedString.length(); i+=2) {
+            char n = encodedString.charAt(i);
+            char v = encodedString.charAt(i+1);
+            // add v n times to data
+            for (int j=1; j<=n; j++) {
+                data[di++] = (short)v;
+            }
+        }
+        return data;
+    }
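A worked decode (not in the commit): the string is read as (count, value) char pairs, so "\1\2\3\9" means pairs (1,2) and (3,9), i.e. one 2 followed by three 9s. In Java source the pairs are written as character escapes:

    // octal escape \11 == 9; yields short[] {2, 9, 9, 9}
    short[] data = DFA.unpackEncodedString("\1\2\3\11");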
+
+    /** Hideous duplication of code, but I need different typed arrays out :( */
+    public static char[] unpackEncodedStringToUnsignedChars(String encodedString) {
+        // walk first to find how big it is.
+        int size = 0;
+        for (int i=0; i<encodedString.length(); i+=2) {
+            size += encodedString.charAt(i);
+        }
+        char[] data = new char[size];
+        int di = 0;
+        for (int i=0; i<encodedString.length(); i+=2) {
+            char n = encodedString.charAt(i);
+            char v = encodedString.charAt(i+1);
+            // add v n times to data
+            for (int j=1; j<=n; j++) {
+                data[di++] = v;
+            }
+        }
+        return data;
+    }
+
+/*
+    public int specialTransition(int state, int symbol) {
+        return 0;
+    }
+*/
+}
@@ -18,9 +18,10 @@ ParserFile(file, parser, dfaDecls, bitSetDecls, namedActions) ::= <<
 import org.antlr.v4.runtime.NoViableAltException;
 import org.antlr.v4.runtime.Parser;
 import org.antlr.v4.runtime.EarlyExitException;
-import org.antlr.v4.runtime.RecognizerSharedState;
+import org.antlr.v4.runtime.ParserSharedState;
 import org.antlr.v4.runtime.RecognitionException;
 import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.*;
 import org.antlr.v4.runtime.misc.*;
 import org.antlr.runtime.*;

@@ -51,9 +52,9 @@ public class <parser.name> extends Parser {

 ctor(p) ::= <<
 public <p.name>(TokenStream input) {
-    this(input, new RecognizerSharedState());
+    this(input, new ParserSharedState());
 }
-public <p.name>(TokenStream input, RecognizerSharedState state) {
+public <p.name>(TokenStream input, ParserSharedState state) {
     super(input, state);
 }
 >>

@@ -292,9 +293,9 @@ LexerFile(fileName, lexer) ::= <<
 // $ANTLR ANTLRVersion> <fileName> generatedTimestamp>
 import org.antlr.v4.runtime.Lexer;
 import org.antlr.v4.runtime.LexerSharedState;
 import org.antlr.v4.runtime.RecognitionException;
 import org.antlr.v4.runtime.*;
 import org.antlr.v4.runtime.pda.*;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.misc.*;
 import org.antlr.runtime.*;

@@ -311,7 +312,8 @@ public class <lexerName> extends Lexer {
 }
 public <lexerName>(CharStream input, LexerSharedState state) {
     super(input,state);
-    modeToPDA = new PDA[] { <modes:{m | new <m>_PDA()}; separator=", "> };
+//    modeToPDA = new PDA[] { <modes:{m | new <m>_PDA()}; separator=", "> };
+    modeToDFA = new DFA[] { <modes:{m | new <m>_DFA()}; separator=", "> };
 }
 public String getGrammarFileName() { return "<fileName>"; }

@@ -325,8 +327,11 @@ public class <lexerName> extends Lexer {
 >>

 DFA(name, model) ::= <<
-public static final char[] <name>_min = {
-    <model.min; separator=", ">
+public static final short[] <name>_accept = {
+    <model.accept; separator=", ">
+};
+public static final short[] <name>_eof = {
+    <model.eof; separator=", ">
+};
 public static final char[] <name>_max = {
     <model.max; separator=", ">

@@ -334,6 +339,50 @@ public static final char[] <name>_max = {
 public static final short[][] <name>_transition = {
     <model.transition:{t | {<t; separator=", ">\}}; separator=",\n", null="null">
 };
+public static final int[][] <name>_set_edges = {
+    <model.set_edges:{edges | {<edges; separator=", ">\}}; separator=",\n", null="null">
+};
+public static final int[][] <name>_pred_edges = {
+    <model.pred_edges:{edges | {<edges; separator=", ">\}}; separator=",\n", null="null">
+};
+public static final short[] <name>_action_index = {
+    <model.action_index; separator=", ">
+};
+
+public final class <name>_DFA extends DFA {
+    <if(model.actions)> <! TODO: FACTOR OUT !>
+    public void action(int action) {
+        switch ( action ) {
+            <model.actions:{a |
+            case <i0> :
+                <a>
+                break;
+            }>
+        }
+    }
+    <endif>
+    <if(model.sempreds)>
+    public boolean sempred(int sempred) {
+        switch ( sempred ) {
+            <model.sempreds:{p |
+            case <i0> :
+                return <p>;
+                break;
+            }>
+        }
+        return false;
+    }
+    <endif>
+    public <name>_DFA() {
+        this.eof = <name>_eof;
+        this.max = <name>_max;
+        this.accept = <name>_accept;
+        this.transition = <name>_transition;
+        this.set_edges = <name>_set_edges;
+        this.pred_edges = <name>_pred_edges;
+        this.action_index = <name>_action_index;
+    }
+}
 >>

 PDA(name, model, actions, sempreds) ::= <<

@@ -343,9 +392,6 @@ public static final byte[] <name>_code = {
 public static final int[] <name>_tokenTypeToAddr = {
     <model.altToAddr; separator=", ">
 };
-public static final int[][] <name>charToAddr = {
-    <model.charToAddr:{addrs | /* <i0> */ {<addrs; separator=", ">\}}; null="null", separator=",\n">
-};

 public final class <name>_PDA extends PDA {
 <if(actions)>

@@ -368,13 +414,13 @@ public final class <name>_PDA extends PDA {
 <endif>
 public <name>_PDA() {
     super(<name>_code, <name>_tokenTypeToAddr, <model.nLabels>);
-    this.charToAddr = <name>charToAddr;
 }
 }<\n>
 >>

-actionMethod(name, actions, ruleIndex) ::= <<
+actionMethod(name, actions) ::= <<
 public void <name>_actions(int action) {
+    System.out.println("exec action "+action);
     switch ( action ) {
         <actions:{a |
         case <i0> :
@@ -543,6 +543,13 @@ public class Tool {
     }

     public void generateDFAs(Grammar g) {
+        if ( g.isLexer() ) {
+            LexerGrammar lg = (LexerGrammar)g;
+            for (String modeName : lg.modes.keySet()) {
+                generateDFA(g, lg.modeToDFA.get(modeName));
+            }
+            return;
+        }
         for (DFA dfa : g.decisionDFAs.values()) {
             generateDFA(g, dfa);
         }
@@ -9,9 +9,7 @@ import org.antlr.v4.misc.Interval;
 import org.antlr.v4.misc.IntervalSet;
 import org.antlr.v4.misc.OrderedHashSet;

-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
+import java.util.*;

 /** First consolidate accept states, which leads to smaller DFA. Also,
  *  consolidate all edges from p to q into a single edge with set.

@@ -60,7 +58,7 @@ public class DFAMinimizer {

         // Nobody can merge with a state resolved with predicates to be safe
         if ( dfa.converter!=null ) {
-            for (DFAState d : dfa.converter.resolvedWithSemanticPredicates) {
+            for (DFAState d : dfa.converter.resolver.resolvedWithSemanticPredicates) {
                 for (int i=1; i<n; i++) {
                     distinct[d.stateNumber][i] = true;
                     distinct[i][d.stateNumber] = true;

@@ -96,10 +94,14 @@ public class DFAMinimizer {
                 DFAState p = dfa.states.get(i);
                 DFAState q = dfa.states.get(j);
                 for (IntSet label : labels) {
+                    // leave all states with gated pred transitions on this label as distinct
+                    SemanticContext p_preds = p.getGatedPredicatesInNFAConfigurations();
+                    SemanticContext q_preds = q.getGatedPredicatesInNFAConfigurations();
+                    boolean preds_present = p_preds!=null || q_preds!=null;
                     DFAState pt = p.target(label);
                     DFAState qt = q.target(label);
-//                    System.out.println(p.stateNumber+"-"+label.toString(dfa.g)+"->"+pt);
-//                    System.out.println(q.stateNumber+"-"+label.toString(dfa.g)+"->"+qt);
+                    System.out.println(p.stateNumber+"-"+label.toString(dfa.g)+"->"+pt);
+                    System.out.println(q.stateNumber+"-"+label.toString(dfa.g)+"->"+qt);
                     // if DISTINCT(p,q) is empty and
                     // DISTINCT(δ(p, a), δ(q, a)) is not empty
                     // then DISTINCT(p,q) = a.
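A worked case of the rule above (not from the commit): if (p,q) is not yet marked distinct but on some label 'a' p moves to s3, q moves to s7, and (s3,s7) is already marked distinct, then (p,q) becomes distinct via 'a'. Iterating until no new pairs get marked is the classic table-filling minimization this loop implements.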

@@ -113,8 +115,10 @@ public class DFAMinimizer {
                     // so leave as equiv (nondistinct). If one goes to
                     // error (pt or qt is null) and other doesn't, must
                     // be in distinct sets so p,q are distinct.
-                    if ( pt==null && qt==null ) continue;
+                    boolean bothTargetsAreErrors = pt == null && qt == null;
+                    if ( bothTargetsAreErrors && !preds_present ) continue;
                     if ( pt==null || qt==null ||
+                         preds_present ||
                          distinct[pt.stateNumber][qt.stateNumber] )
                     {
                         distinct[i][j] = true;

@@ -163,41 +167,72 @@ public class DFAMinimizer {

         // minimize the DFA (combine equiv sets)
         // merge all edges from a set to first state in set
-        DFAState[] states = new DFAState[n];
+        // newstates[oldstate] = new state number for oldstate
+        DFAState[] oldToNewStateMap = new DFAState[n];
+        OrderedHashSet<DFAState> uniqNewStates = new OrderedHashSet<DFAState>();

         // first map all states in set to same DFA state (old min)
         for (IntervalSet s : uniq) {
-            int min = s.getMinElement();
-            states[min] = dfa.states.get(min);
+            int newStateNum = s.getMinElement();
+            uniqNewStates.add(dfa.states.get(newStateNum));
+            oldToNewStateMap[newStateNum] = dfa.states.get(newStateNum);
             List<Interval> intervals = s.getIntervals();
             for (Interval I : intervals) {
                 for (int i=I.a; i<=I.b; i++) {
-                    states[i] = states[min];
+                    oldToNewStateMap[i] = oldToNewStateMap[newStateNum];
                 }
             }
         }
-        for (DFAState s : states) System.out.println(s);
+        for (DFAState s : oldToNewStateMap) System.out.println(s);
         // now do edges
-        for (IntervalSet s : uniq) {
-            List<Interval> intervals = s.getIntervals();
-            System.out.println("do set "+s);
-            for (Interval I : intervals) {
-                for (int i=I.a; i<=I.b; i++) {
-                    DFAState p = dfa.states.get(i);
-                    for (Edge e : p.edges) {
-                        System.out.println(p.stateNumber+" upon "+e.toString(dfa.g)+
-                                           " used to point at "+e.target.stateNumber+
-                                           " now points at "+states[e.target.stateNumber].stateNumber);
-                        e.target = states[e.target.stateNumber];
-                    }
-                }
-            }
-        }
+//        for (IntervalSet equivStates : uniq) {
+//            List<Interval> intervals_in_state_set = equivStates.getIntervals();
+//            System.out.println("do set "+equivStates);
+//            // for each state in equiv state set, make all edges point at new state
+//            for (Interval I : intervals_in_state_set) {
+//                for (int i=I.a; i<=I.b; i++) {
+//                    DFAState p = dfa.states.get(i);
+//                    for (Edge e : p.edges) {
+//                        System.out.println(p.stateNumber+" upon "+e.toString(dfa.g)+
+//                                           " used to point at "+e.target.stateNumber+
+//                                           " now points at "+ newstates[e.target.stateNumber].stateNumber);
+//                        e.target = newstates[e.target.stateNumber];
+//                    }
+//                }
+//            }
+//        }

+        // simpler version of above
+        for (DFAState d : uniqNewStates) {
+            for (Edge e : d.edges) {
+//                System.out.println(d.stateNumber+" upon "+e.toString(dfa.g)+
+//                                   " used to point at "+e.target.stateNumber+
+//                                   " now points at "+ oldToNewStateMap[e.target.stateNumber].stateNumber);
+                e.target = oldToNewStateMap[e.target.stateNumber];
+            }
+        }
+
+        // merge all edges from p to q
+        for (DFAState d : uniqNewStates) {
+            Map<DFAState, IntervalSet> targetToEdges = new HashMap<DFAState, IntervalSet>();
+            for (Edge e : d.edges) {
+                IntervalSet s = targetToEdges.get(e.target);
+                if ( s==null ) { s = new IntervalSet(e.label); targetToEdges.put(e.target, s); }
+                else s.addAll(e.label);
+            }
+            System.out.println("state "+d.stateNumber+" has "+d.edges.size()+" edges but "+targetToEdges.size()+" targets");
+            d.edges.clear();
+            for (DFAState target : targetToEdges.keySet()) {
+                d.addEdge(new Edge(target, targetToEdges.get(target)));
+            }
+        }

         // now kill unused states
-        for (IntervalSet s : uniq) {
-            List<Interval> intervals = s.getIntervals();
-            for (Interval I : intervals) {
+        for (IntervalSet equivStates : uniq) {
+            List<Interval> intervals_in_state_set = equivStates.getIntervals();
+            for (Interval I : intervals_in_state_set) {
                 for (int i=I.a; i<=I.b; i++) {
-                    if ( states[i].stateNumber != i ) { // if not one of our merged states
+                    if ( oldToNewStateMap[i].stateNumber != i ) { // if not one of our merged states
                         System.out.println("kill "+i);
                         DFAState d = dfa.states.get(i);
                         dfa.stateSet.remove(d);
@@ -6,6 +6,8 @@ import org.antlr.v4.misc.OrderedHashSet;
 import org.antlr.v4.misc.Utils;
 import org.antlr.v4.tool.Grammar;
 import org.antlr.v4.tool.LexerGrammar;
+import org.antlr.v4.tool.Rule;
+import org.antlr.v4.tool.RuleAST;

 import java.util.*;

@@ -18,18 +20,24 @@ public class LexerNFAToDFAConverter {

     /** A list of DFA states we still need to process during NFA conversion */
     List<LexerState> work = new LinkedList<LexerState>();
-    List<LexerState> accepts = new LinkedList<LexerState>();
+    /** The set of rule stop NFA states we encountered during conversion.
+     *  Walk this list to find ambig stop states (split if we have preds).
+     */
+    Set<LexerState> accepts = new HashSet<LexerState>();

+    //int[] altToRuleIndex;

     /** Used to prevent the closure operation from looping to itself and
-     *  hence looping forever. Sensitive to the NFA state, the alt, and
-     *  the stack context.
-     */
-    Set<NFAConfig> closureBusy;
+     *  hence looping forever. Sensitive to the NFA state, the alt, and
+     *  the stack context.
+     */
+    Set<NFAConfig> closureBusy;

-    public static boolean debug = false;
+    public static boolean debug = false;

     public LexerNFAToDFAConverter(LexerGrammar g) {
         this.g = g;
+        //altToRuleIndex = new int[g.getNumRules()+1]; // alts <= num rules
     }

     public DFA createDFA() { return createDFA(LexerGrammar.DEFAULT_MODE_NAME); }

@@ -50,47 +58,95 @@ public class LexerNFAToDFAConverter {
             work.remove(0); // we're done with this DFA state
         }

-        // walk accept states, informing DFA
-        for (LexerState d : accepts) {
-            Set<Integer> nfaAcceptStates = new HashSet<Integer>();
-            for (NFAConfig c : d.nfaConfigs) {
-                NFAState s = c.state;
-                if ( s instanceof RuleStopState && !s.rule.isFragment() ) {
-                    dfa.defineAcceptState(c.alt, d);
-                    nfaAcceptStates.add(Utils.integer(s.stateNumber));
-                }
-            }
-            List<Integer> sorted = new ArrayList<Integer>();
-            sorted.addAll(nfaAcceptStates);
-            Collections.sort(sorted);
-            for (int i : sorted) {
-                NFAState s = g.nfa.states.get(i);
-                d.matchesRules.add(s.rule);
-            }
-        }
+        defineLexerAcceptStates();

         closureBusy = null; // wack all that memory used during closure

         return dfa;
     }

+    // walk accept states, informing DFA.
+    // get list of NFA states per each DFA accept so we can get list of
+    // rules matched (sorted by NFA state num, which gives priority to
+    // rules appearing first in grammar).
+    // Also, track any extreme right edge actions in
+    // DFA accept state (pick action of first of any ambig rules).
+    void defineLexerAcceptStates() {
+        int aaa = 0;
+        System.out.println("accepts ="+accepts);
+        for (LexerState d : accepts) {
+            if ( d.edges.size()==0 ) aaa++;
+            // First get NFA accept states and associated DFA alts for this DFA state
+            SortedSet<Integer> nfaAcceptStates = new TreeSet<Integer>();
+            SortedSet<Integer> sortedAlts = new TreeSet<Integer>();
+            OrderedHashSet<Rule> predictedRules = new OrderedHashSet<Rule>();
+            for (NFAConfig c : d.nfaConfigs) {
+                NFAState s = c.state;
+                if ( s instanceof RuleStopState && !s.rule.isFragment() ) {
+                    nfaAcceptStates.add(Utils.integer(s.stateNumber));
+                    sortedAlts.add(c.alt);
+                    predictedRules.add(s.rule);
+                }
+            }
+
+            // Look for and count preds
+            Map<Integer, SemanticContext> predsPerAlt = d.getPredicatesForAlts();
+            int npreds = 0;
+            for (SemanticContext ctx : predsPerAlt.values()) if ( ctx!=null ) npreds++;
+
+            // If unambiguous, make it a DFA accept state, else resolve with preds if possible
+            if ( predictedRules.size()==1 || npreds==0 ) { // unambig or no preds
+                d.predictsRule = predictedRules.get(0);
+                d.action = ((RuleAST)d.predictsRule.ast).getLexerAction();
+                Integer minAlt = sortedAlts.first();
+                dfa.defineAcceptState(minAlt, d);
+            }
+            if ( predictedRules.size()>1 && npreds>0 ) {
+                System.out.println(d.stateNumber+" ambig upon "+ predictedRules+" but we have preds");
+                // has preds; add new accept states
+                d.isAcceptState = false; // this state isn't a stop state anymore
+                d.resolvedWithPredicates = true;
+                for (Rule r : predictedRules) {
+                    SemanticContext preds = predsPerAlt.get(r.index);
+                    LexerState predDFATarget = dfa.newLexerState();
+                    predDFATarget.predictsRule = r;
+                    for (NFAConfig c : d.getNFAConfigsForAlt(r.index)) {
+                        predDFATarget.addNFAConfig(c);
+                    }
+                    // new DFA state is a target of the predicate from d
+                    //predDFATarget.addNFAConfig(c);
+                    dfa.addAcceptState(r.index, predDFATarget);
+                    // add a transition to pred target from d
+                    if ( preds!=null ) {
+                        d.addEdge(new PredicateEdge(preds, predDFATarget));
+                    }
+                    else {
+                        d.addEdge(new PredicateEdge(new SemanticContext.TruePredicate(), predDFATarget));
+                    }
+                }
+            }
+        }
+        System.out.println("#accepts ="+accepts.size()+" and "+aaa+" with no edges");
+    }
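An illustrative case (not from the commit): sorting by NFA state number is what gives earlier rules priority on ties. With IF : 'if' ; declared before ID : 'a'..'z'+ ;, the accept state reached on input "if" holds rule stop states for both rules, and IF's lower-numbered stop state wins, so the state predicts IF rather than ID.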

     /** */
     public LexerState computeStartState() {
         LexerState d = dfa.newLexerState();
         // add config for each alt start, then add closure for those states
-        for (int ruleIndex=1; ruleIndex<=dfa.nAlts; ruleIndex++) {
-            Transition t = dfa.decisionNFAStartState.transition(ruleIndex-1);
+        for (int alt=1; alt<=dfa.nAlts; alt++) {
+            Transition t = dfa.decisionNFAStartState.transition(alt-1);
             NFAState altStart = t.target;
+            //altToRuleIndex[alt] = altStart.rule.index;
             d.addNFAConfig(
-                new NFAConfig(altStart, ruleIndex,
+                new NFAConfig(altStart, alt,
                               NFAContext.EMPTY(),
                               SemanticContext.EMPTY_SEMANTIC_CONTEXT));
         }

-        closure(d);
+        closure(d, true);
         return d;
     }

     /** From this node, add a d--a-->t transition for all
      *  labels 'a' where t is a DFA node created
      *  from the set of NFA states reachable from any NFA

@@ -105,7 +161,7 @@ public class LexerNFAToDFAConverter {
                 System.out.println("DFA state after reach -" +
                                    label.toString(g)+"->"+t);
             }
-            closure(t);  // add any NFA states reachable via epsilon
+            closure(t, true);  // add any NFA states reachable via epsilon
             addTransition(d, label, t); // make d-label->t transition
         }
     }

@@ -146,19 +202,19 @@ public class LexerNFAToDFAConverter {
                     //System.out.println("found edge with "+label.toString(g)+" from NFA state "+s);
                     // add NFA target to (potentially) new DFA state
                     labelTarget.addNFAConfig(
-                        new NFAConfig(c, t.target, SemanticContext.EMPTY_SEMANTIC_CONTEXT));
+                        new NFAConfig(c, t.target, c.semanticContext));
                 }
             }
         }

         return labelTarget;
     }

     /** For all NFA states in d, compute the epsilon closure; that is, find
      *  all NFA states reachable from the NFA states in d purely via epsilon
      *  transitions.
      */
-    public void closure(LexerState d) {
+    public void closure(LexerState d, boolean collectPredicates) {
         if ( debug ) {
             System.out.println("closure("+d+")");
         }

@@ -166,7 +222,7 @@ public class LexerNFAToDFAConverter {
         List<NFAConfig> configs = new ArrayList<NFAConfig>();
         configs.addAll(d.nfaConfigs.elements()); // dup initial list; avoid walk/update issue
         for (NFAConfig c : configs) {
-            closure(d, c.state, c.alt, c.context); // update d.nfaStates
+            closure(d, c.state, c.alt, c.context, c.semanticContext, collectPredicates); // update d.nfaStates
         }

         closureBusy.clear();

@@ -178,9 +234,10 @@ public class LexerNFAToDFAConverter {
     }

     // TODO: make pass NFAConfig like other DFA
-    public void closure(LexerState d, NFAState s, int ruleIndex, NFAContext context) {
+    public void closure(LexerState d, NFAState s, int ruleIndex, NFAContext context,
+                        SemanticContext semanticContext, boolean collectPredicates) {
         NFAConfig proposedNFAConfig =
-            new NFAConfig(s, ruleIndex, context, SemanticContext.EMPTY_SEMANTIC_CONTEXT);
+            new NFAConfig(s, ruleIndex, context, semanticContext);

         if ( closureBusy.contains(proposedNFAConfig) ) return;
         closureBusy.add(proposedNFAConfig);

@@ -191,7 +248,7 @@ public class LexerNFAToDFAConverter {
         if ( s instanceof RuleStopState ) {
             // TODO: chase FOLLOW links if recursive
             if ( !context.isEmpty() ) {
-                closure(d, context.returnState, ruleIndex, context.parent);
+                closure(d, context.returnState, ruleIndex, context.parent, semanticContext, collectPredicates);
                 // do nothing if context not empty and already added to nfaStates
             }
             else {

@@ -209,16 +266,56 @@ public class LexerNFAToDFAConverter {
                 if ( !context.contains(((RuleTransition)t).followState) ) {
                     NFAContext newContext =
                         new NFAContext(context, ((RuleTransition)t).followState);
-                    closure(d, t.target, ruleIndex, newContext);
+                    closure(d, t.target, ruleIndex, newContext, semanticContext, collectPredicates);
                 }
             }
+            else if ( t instanceof ActionTransition ) {
+                collectPredicates = false; // can't see past actions
+                closure(d, t.target, ruleIndex, context, semanticContext, collectPredicates);
+            }
+            else if ( t instanceof PredicateTransition ) {
+                SemanticContext labelContext = ((PredicateTransition)t).semanticContext;
+                SemanticContext newSemanticContext = semanticContext;
+                if ( collectPredicates ) {
+                    // AND the previous semantic context with new pred
+                    //System.out.println("&"+labelContext+" enclosingRule="+c.state.rule);
+                    newSemanticContext =
+                        SemanticContext.and(semanticContext, labelContext);
+                }
+                closure(d, t.target, ruleIndex, context, newSemanticContext, collectPredicates);
+            }
             else if ( t.isEpsilon() ) {
-                closure(d, t.target, ruleIndex, context);
+                closure(d, t.target, ruleIndex, context, semanticContext, collectPredicates);
             }
         }
     }
 }

+//    public void resolveAmbiguities(DFAState d) {
+//        Resolver resolver = new Resolver(null);
+//        PredicateResolver semResolver = new PredicateResolver();
+//        Set<Integer> ambiguousAlts = resolver.getAmbiguousAlts(d);
+//        if ( PredictionDFAFactory.debug && ambiguousAlts!=null ) {
+//            System.out.println("ambig alts="+ambiguousAlts);
+//        }
+//
+//        // if no problems return
+//        if ( ambiguousAlts==null ) return;
+//
+//        // ATTEMPT TO RESOLVE WITH SEMANTIC PREDICATES
+//        boolean resolved =
+//            semResolver.tryToResolveWithSemanticPredicates(d, ambiguousAlts);
+//        if ( resolved ) {
+//            if ( PredictionDFAFactory.debug ) {
+//                System.out.println("resolved DFA state "+d.stateNumber+" with pred");
+//            }
+//            d.resolvedWithPredicates = true;
+//            return;
+//        }
+//
+//        // RESOLVE SYNTACTIC CONFLICT BY REMOVING ALL BUT ONE ALT
+//    }

 //    void ruleStopStateClosure(LexerState d, NFAState s) {
 //        //System.out.println("FOLLOW of "+s+" context="+context);
 //        // follow all static FOLLOW links
@@ -118,6 +118,10 @@ public class NFAConfig {
         if ( context!=null && !context.isEmpty() ) {
             buf.append("|");
             buf.append(context);
         }
+        if ( semanticContext!=null && semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
+            buf.append("|");
+            buf.append(semanticContext);
+        }
         if ( resolved ) {
             buf.append("|resolved");
@@ -12,10 +12,7 @@ import java.util.*;

 /** */
 public class PredicateResolver {
-    PredictionDFAFactory converter;
-    public PredicateResolver(PredictionDFAFactory converter) {
-        this.converter = converter;
-    }
+    public Map<DFAState, List<Integer>> statesWithIncompletelyCoveredAlts = new HashMap<DFAState, List<Integer>>();

     /** See if a set of nondeterministic alternatives can be disambiguated
      *  with the semantic predicate contexts of the alternatives.

@@ -44,8 +41,8 @@ public class PredicateResolver {
      *
      *  This is done down in getPredicatesPerNonDeterministicAlt().
      */
-    protected boolean tryToResolveWithSemanticPredicates(DFAState d,
-                                                         Set<Integer> ambiguousAlts)
+    public boolean tryToResolveWithSemanticPredicates(DFAState d,
+                                                      Set<Integer> ambiguousAlts)
     {
         Map<Integer, SemanticContext> altToPredMap =
             getPredicatesPerAmbiguousAlt(d, ambiguousAlts);

@@ -56,7 +53,7 @@ public class PredicateResolver {

         if ( ambiguousAlts.size()-altToPredMap.size()>1 ) {
             // too few predicates to resolve; just return.
-            // We caught/tracked incompletly covered preds in getPredicatesPerNonDeterministicAlt
+            // We caught/tracked incompletely covered preds in getPredicatesPerNonDeterministicAlt
             return false;
         }

@@ -207,7 +204,7 @@ public class PredicateResolver {
                 if ( contextsForThisAlt.size()>0 ) { // && at least one pred
                     incompletelyCoveredAlts.add(alt); // this alt incompleted covered
                 }
-                continue; // don't include at least 1 config has no ctx
+                continue; // don't include; at least 1 config has no ctx
             }
             SemanticContext combinedContext = null;
             for (Iterator itrSet = contextsForThisAlt.iterator(); itrSet.hasNext();) {

@@ -220,14 +217,14 @@ public class PredicateResolver {

         if ( incompletelyCoveredAlts.size()>0 ) {
             // track these troublesome states later for reporting.
-            converter.statesWithIncompletelyCoveredAlts.put(d, incompletelyCoveredAlts);
+            statesWithIncompletelyCoveredAlts.put(d, incompletelyCoveredAlts);
         }

         return altToPredicateContextMap;
     }

-    public Map<Integer, Set<Token>> getInsufficientlyPredicatedLocations(DFAState d,
-                                                                         List<Integer> incompletelyCoveredAlts)
+    public static Map<Integer, Set<Token>> getInsufficientlyPredicatedLocations(DFAState d,
+                                                                                List<Integer> incompletelyCoveredAlts)
     {
         Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate = new HashMap<Integer, Set<Token>>();
         for (NFAConfig c : d.nfaConfigs) {

@@ -279,7 +276,7 @@ public class PredicateResolver {
     /** OR together all predicates from the alts. Note that the predicate
      *  for an alt could itself be a combination of predicates.
      */
-    public SemanticContext getUnionOfPredicates(Map altToPredMap) {
+    public static SemanticContext getUnionOfPredicates(Map altToPredMap) {
         Iterator iter;
         SemanticContext unionOfPredicatesFromAllAlts = null;
         iter = altToPredMap.values().iterator();
@@ -1,6 +1,8 @@
 package org.antlr.v4.analysis;

+import org.antlr.runtime.Token;
+import org.antlr.v4.automata.*;
 import org.antlr.v4.misc.IntSet;
 import org.antlr.v4.misc.IntervalSet;
 import org.antlr.v4.misc.OrderedHashSet;
 import org.antlr.v4.tool.Grammar;

@@ -32,27 +34,6 @@ public class PredictionDFAFactory {
      */
     public Set<Integer> unreachableAlts;

-    /** Track all DFA states with ambiguous configurations.
-     *  By reaching the same DFA state, a path through the NFA for some input
-     *  is able to reach the same NFA state by starting at more than one
-     *  alternative's left edge. If the context is the same or conflicts,
-     *  then we have ambiguity. If the context is different, it's simply
-     *  nondeterministic and we should keep looking for edges that will
-     *  render it deterministic. If we run out of things to add to the DFA,
-     *  we'll get a dangling state; it's non-LL(*). Later we may find that predicates
-     *  resolve the issue, but track ambiguous states anyway.
-     */
-    public Set<DFAState> ambiguousStates = new HashSet<DFAState>();
-
-    /** The set of states w/o emanating edges (and w/o resolving sem preds). */
-    public Set<DFAState> danglingStates = new HashSet<DFAState>();
-
-    /** Was a syntactic ambiguity resolved with predicates? Any DFA
-     *  state that predicts more than one alternative, must be resolved
-     *  with predicates or it should be reported to the user.
-     */
-    public Set<DFAState> resolvedWithSemanticPredicates = new HashSet<DFAState>();

     /** Tracks alts insufficiently covered.
      *  For example, p1||true gets reduced to true and so leaves
      *  whole alt uncovered. This maps alt num to the set of (Token)

@@ -77,7 +58,7 @@ public class PredictionDFAFactory {
      */
     OrderedHashSet<NFAConfig> closureBusy;

-    Resolver resolver;
+    public Resolver resolver;

     public static boolean debug = false;

@@ -86,7 +67,7 @@ public class PredictionDFAFactory {
         this.nfaStartState = nfaStartState;
         dfa = new DFA(g, nfaStartState);
         dfa.converter = this;
-        resolver = new Resolver(this);
+        resolver = new Resolver();
     }

     public DFA createDFA() {

@@ -515,5 +496,59 @@ public class PredictionDFAFactory {
         return unreachable;
     }

-    public void issueAmbiguityWarnings() { resolver.issueAmbiguityWarnings(); }
+    public void issueAmbiguityWarnings() {
+        MachineProbe probe = new MachineProbe(dfa);
+
+        for (DFAState d : resolver.ambiguousStates) {
+            Set<Integer> alts = resolver.getAmbiguousAlts(d);
+            List<Integer> sorted = new ArrayList<Integer>(alts);
+            Collections.sort(sorted);
+            //System.err.println("ambig alts="+sorted);
+            List<DFAState> dfaStates = probe.getAnyDFAPathToTarget(d);
+            //System.out.print("path =");
+            for (DFAState d2 : dfaStates) {
+                System.out.print(" "+d2.stateNumber);
+            }
+            //System.out.println("");
+
+            List<IntSet> labels = probe.getEdgeLabels(d);
+
+            String input = probe.getInputSequenceDisplay(g, labels);
+            //System.out.println("input="+ input);
+
+            LinkedHashMap<Integer,List<Token>> altPaths = new LinkedHashMap<Integer,List<Token>>();
+            for (int alt : sorted) {
+                List<Set<NFAState>> nfaStates = new ArrayList<Set<NFAState>>();
+                for (DFAState d2 : dfaStates) {
+                    nfaStates.add( d2.getUniqueNFAStates(alt) );
+                }
+                //System.out.println("NFAConfigs per state: "+nfaStates);
+                List<Token> path =
+                    probe.getGrammarLocationsForInputSequence(nfaStates, labels);
+                altPaths.put(alt, path);
+                //System.out.println("path = "+path);
+            }
+
+            List<Integer> incompletelyCoveredAlts = statesWithIncompletelyCoveredAlts.get(d);
+            if ( incompletelyCoveredAlts!=null && incompletelyCoveredAlts.size()>0 ) {
+                Map<Integer, Set<Token>> insufficientAltToLocations =
+                    PredicateResolver.getInsufficientlyPredicatedLocations(d, incompletelyCoveredAlts);
+                g.tool.errMgr.insufficientPredicates(g.fileName, d, input,
+                                                     insufficientAltToLocations,
+                                                     hasPredicateBlockedByAction);
+            }
+
+            if ( !d.resolvedWithPredicates &&
+                 (incompletelyCoveredAlts==null || incompletelyCoveredAlts.size()==0) )
+            {
+                g.tool.errMgr.ambiguity(g.fileName, d, sorted, input, altPaths,
+                                        hasPredicateBlockedByAction);
+            }
+        }
+        if ( unreachableAlts!=null && unreachableAlts.size()>0 ) {
+            g.tool.errMgr.unreachableAlts(g.fileName, dfa,
+                                          unreachableAlts);
+        }
+    }

 }
@ -1,24 +1,43 @@
|
|||
package org.antlr.v4.analysis;
|
||||
|
||||
import org.antlr.runtime.Token;
|
||||
import org.antlr.v4.automata.DFAState;
|
||||
import org.antlr.v4.automata.NFA;
|
||||
import org.antlr.v4.automata.NFAState;
|
||||
import org.antlr.v4.misc.IntSet;
|
||||
import org.antlr.v4.misc.Utils;
|
||||
import org.stringtemplate.v4.misc.MultiMap;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
/** Code "module" that knows how to resolve LL(*) nondeterminisms. */
|
||||
public class Resolver {
|
||||
PredictionDFAFactory converter;
|
||||
|
||||
PredicateResolver semResolver;
|
||||
|
||||
public Resolver(PredictionDFAFactory converter) {
|
||||
this.converter = converter;
|
||||
semResolver = new PredicateResolver(converter);
|
||||
/** Track all DFA states with ambiguous configurations.
|
||||
* By reaching the same DFA state, a path through the NFA for some input
|
||||
* is able to reach the same NFA state by starting at more than one
|
||||
* alternative's left edge. If the context is the same or conflicts,
|
||||
* then we have ambiguity. If the context is different, it's simply
|
||||
* nondeterministic and we should keep looking for edges that will
|
||||
* render it deterministic. If we run out of things to add to the DFA,
|
||||
* we'll get a dangling state; it's non-LL(*). Later we may find that predicates
|
||||
* resolve the issue, but track ambiguous states anyway.
|
||||
*/
|
||||
public Set<DFAState> ambiguousStates = new HashSet<DFAState>();
|
||||
|
||||
/** The set of states w/o emanating edges (and w/o resolving sem preds). */
|
||||
public Set<DFAState> danglingStates = new HashSet<DFAState>();
|
||||
|
||||
/** Was a syntactic ambiguity resolved with predicates? Any DFA
|
||||
* state that predicts more than one alternative, must be resolved
|
||||
* with predicates or it should be reported to the user.
|
||||
*/
|
||||
public Set<DFAState> resolvedWithSemanticPredicates = new HashSet<DFAState>();
|
||||
|
||||
public Resolver() {
|
||||
//this.converter = converter;
|
||||
semResolver = new PredicateResolver();
|
||||
}
|
||||
|
||||
/** Walk each NFA configuration in this DFA state looking for a conflict
|
||||
|
@ -42,7 +61,7 @@ public class Resolver {
|
|||
* TODO: suffix degenerates to one empty one nonempty; avoid some tests?
|
||||
* TODO: or perhaps check if i, j are already in and don't do compare?
|
||||
*/
|
||||
public Set<Integer> getAmbiguousAlts(DFAState d) {
|
||||
public static Set<Integer> getAmbiguousAlts(DFAState d) {
|
||||
//System.out.println("getNondetAlts for DFA state "+stateNumber);
Set<Integer> ambiguousAlts = new HashSet<Integer>();

@@ -137,7 +156,7 @@ public class Resolver {
    // if no problems return
    if ( ambiguousAlts==null ) return;

    converter.ambiguousStates.add(d);
    ambiguousStates.add(d);

    // ATTEMPT TO RESOLVE WITH SEMANTIC PREDICATES
    boolean resolved =

@@ -147,7 +166,7 @@ public class Resolver {
        System.out.println("resolved DFA state "+d.stateNumber+" with pred");
    }
    d.resolvedWithPredicates = true;
    converter.resolvedWithSemanticPredicates.add(d);
    resolvedWithSemanticPredicates.add(d);
    return;
}

@@ -159,7 +178,7 @@ public class Resolver {
    if ( d.resolvedWithPredicates || d.getNumberOfEdges()>0 ) return;

    System.err.println("dangling DFA state "+d+" after reach / closures");
    converter.danglingStates.add(d);
    danglingStates.add(d);
    // turn off all configurations except for those associated with
    // min alt number; somebody has to win else some input will not
    // predict any alt.

@@ -168,7 +187,7 @@ public class Resolver {
    d.isAcceptState = true;
    d.predictsAlt = minAlt;
    // might be adding new accept state for alt, but that's ok
    converter.dfa.addAcceptState(minAlt, d);
    d.dfa.addAcceptState(minAlt, d);
}

/** Turn off all configurations associated with the

@@ -181,7 +200,7 @@ public class Resolver {
 *
 * Return the min alt found.
 */
int resolveByPickingMinAlt(DFAState d, Set<Integer> alts) {
static int resolveByPickingMinAlt(DFAState d, Set<Integer> alts) {
    int min = 0;
    if ( alts !=null ) {
        min = getMinAlt(alts);

@@ -198,7 +217,7 @@ public class Resolver {
/** turn off all states associated with alts other than the good one
 * (as long as they are one of the ones in alts)
 */
void turnOffOtherAlts(DFAState d, int min, Set<Integer> alts) {
static void turnOffOtherAlts(DFAState d, int min, Set<Integer> alts) {
    int numConfigs = d.nfaConfigs.size();
    for (int i = 0; i < numConfigs; i++) {
        NFAConfig configuration = d.nfaConfigs.get(i);

@@ -241,60 +260,6 @@ public class Resolver {
    return alt;
}

public void issueAmbiguityWarnings() {
    MachineProbe probe = new MachineProbe(converter.dfa);

    for (DFAState d : converter.ambiguousStates) {
        Set<Integer> alts = getAmbiguousAlts(d);
        List<Integer> sorted = new ArrayList<Integer>(alts);
        Collections.sort(sorted);
        //System.err.println("ambig alts="+sorted);
        List<DFAState> dfaStates = probe.getAnyDFAPathToTarget(d);
        //System.out.print("path =");
        for (DFAState d2 : dfaStates) {
            System.out.print(" "+d2.stateNumber);
        }
        //System.out.println("");

        List<IntSet> labels = probe.getEdgeLabels(d);

        String input = probe.getInputSequenceDisplay(converter.g, labels);
        //System.out.println("input="+ input);

        LinkedHashMap<Integer,List<Token>> altPaths = new LinkedHashMap<Integer,List<Token>>();
        for (int alt : sorted) {
            List<Set<NFAState>> nfaStates = new ArrayList<Set<NFAState>>();
            for (DFAState d2 : dfaStates) {
                nfaStates.add( d2.getUniqueNFAStates(alt) );
            }
            //System.out.println("NFAConfigs per state: "+nfaStates);
            List<Token> path =
                probe.getGrammarLocationsForInputSequence(nfaStates, labels);
            altPaths.put(alt, path);
            //System.out.println("path = "+path);
        }

        List<Integer> incompletelyCoveredAlts = converter.statesWithIncompletelyCoveredAlts.get(d);
        if ( incompletelyCoveredAlts!=null && incompletelyCoveredAlts.size()>0 ) {
            Map<Integer, Set<Token>> insufficientAltToLocations =
                semResolver.getInsufficientlyPredicatedLocations(d, incompletelyCoveredAlts);
            converter.g.tool.errMgr.insufficientPredicates(converter.g.fileName, d, input,
                                                           insufficientAltToLocations,
                                                           converter.hasPredicateBlockedByAction);
        }

        if ( !d.resolvedWithPredicates &&
             (incompletelyCoveredAlts==null || incompletelyCoveredAlts.size()==0) )
        {
            converter.g.tool.errMgr.ambiguity(converter.g.fileName, d, sorted, input, altPaths,
                                              converter.hasPredicateBlockedByAction);
        }
    }
    if ( converter.unreachableAlts!=null && converter.unreachableAlts.size()>0 ) {
        converter.g.tool.errMgr.unreachableAlts(converter.g.fileName, converter.dfa,
                                                converter.unreachableAlts);
    }
}

/*
void issueRecursionWarnings() {

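// A minimal, self-contained sketch of the min-alt policy implemented by
// resolveByPickingMinAlt/turnOffOtherAlts above: among ambiguous alts,
// the earliest-declared one wins and the competing configurations are
// switched off so every input predicts exactly one alt. The Config
// class is a hypothetical stand-in for NFAConfig, not the tool's API.
import java.util.*;

class MinAltResolutionSketch {
    static class Config {
        int alt;
        boolean resolved; // true means turned off for prediction
        Config(int alt) { this.alt = alt; }
    }

    static int resolveByPickingMinAlt(List<Config> configs, Set<Integer> ambiguousAlts) {
        int min = Collections.min(ambiguousAlts);
        for (Config c : configs) {
            // disable every configuration belonging to a losing ambiguous alt
            if ( c.alt != min && ambiguousAlts.contains(c.alt) ) c.resolved = true;
        }
        return min;
    }

    public static void main(String[] args) {
        List<Config> configs =
            Arrays.asList(new Config(1), new Config(2), new Config(3));
        Set<Integer> ambig = new HashSet<Integer>(Arrays.asList(1, 2));
        System.out.println("predicts alt "+resolveByPickingMinAlt(configs, ambig)); // alt 1
    }
}
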
@@ -5,7 +5,6 @@ import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.misc.OrderedHashSet;
import org.antlr.v4.tool.Grammar;

import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

@@ -43,7 +42,7 @@ public class DFA {

public int nAlts = 0;

/** We only want one accept state per predicted alt; track here */
/** accept state(s) per predicted alt; track here */
public List<DFAState>[] altToAcceptStates;

/** Did DFA minimization do anything? */

@@ -61,13 +60,13 @@ public class DFA {
    this.decisionNFAStartState = startState;
    nAlts = startState.getNumberOfTransitions();
    decision = startState.decision;
    altToAcceptStates = (ArrayList<DFAState>[])Array.newInstance(ArrayList.class,nAlts+1);
    altToAcceptStates = new ArrayList[nAlts+1]; //(ArrayList<DFAState>[])Array.newInstance(ArrayList.class,nAlts+1);
}

public DFA(Grammar g, int nAlts) {
    this.g = g;
    this.nAlts = nAlts;
    altToAcceptStates = (ArrayList<DFAState>[])Array.newInstance(ArrayList.class,nAlts+1);
    altToAcceptStates = new ArrayList[nAlts+1]; //(ArrayList<DFAState>[])Array.newInstance(ArrayList.class,nAlts+1);
}

/** Add a new DFA state to this DFA (doesn't check if already present). */

@@ -101,19 +100,19 @@ public class DFA {
    return n;
}

// could imply converter.unreachableAlts.size()>0 too
public boolean isAmbiguous() {
    boolean resolvedWithPredicates = true;
    // flip resolvedWithPredicates if we find an ambig state not resolved with pred
    for (DFAState d : converter.ambiguousStates) {
        if ( !d.resolvedWithPredicates ) resolvedWithPredicates = false;
    }
    return converter.ambiguousStates.size()>0 && !resolvedWithPredicates;
}
// // could imply converter.unreachableAlts.size()>0 too
// public boolean isAmbiguous() {
//     boolean resolvedWithPredicates = true;
//     // flip resolvedWithPredicates if we find an ambig state not resolved with pred
//     for (DFAState d : converter.ambiguousStates) {
//         if ( !d.resolvedWithPredicates ) resolvedWithPredicates = false;
//     }
//     return converter.ambiguousStates.size()>0 && !resolvedWithPredicates;
// }

public boolean valid() {
    return
        converter.danglingStates.size()==0;
        converter.resolver.danglingStates.size()==0;
//      converter.abortedDueToMultipleRecursiveAltsAt ==null &&
//      converter.recursionOverflowState ==null;
}

@@ -1,7 +1,7 @@
package org.antlr.v4.automata;

import org.antlr.v4.analysis.SemanticContext;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.Rule;

import java.util.ArrayList;
import java.util.HashSet;

@@ -37,8 +37,16 @@ public class DFASerializer {
for (int i=0; i<n; i++) {
    buf.append(getStateString(s));
    Edge t = s.edge(i);
    work.add( t.target );
    buf.append("-"+t.toString(g)+"->"+ getStateString(t.target)+'\n');
    work.add( t.target );
    String label = t.toString(g);
    SemanticContext preds = t.semanticContext; //t.target.getGatedPredicatesInNFAConfigurations();
    if ( preds!=null ) {
        String predsStr = "";
        predsStr = "&&"+preds.toString();
        label += predsStr;
    }

    buf.append("-"+label+"->"+ getStateString(t.target)+'\n');
}
}
String output = buf.toString();

@@ -52,11 +60,7 @@ public class DFASerializer {
if ( s.isAcceptState ) {
    if ( s instanceof LexerState ) {
        stateStr = ":s"+n+"=>";
        StringBuilder buf = new StringBuilder();
        for (Rule r : ((LexerState)s).matchesRules) {
            buf.append(" "+r.name);
        }
        stateStr += buf.toString();
        stateStr += ((LexerState)s).predictsRule.name;
    }
    else {
        stateStr = ":s"+n+"=>"+s.getUniquelyPredictedAlt();

@@ -2,13 +2,11 @@ package org.antlr.v4.automata;

import org.antlr.v4.analysis.NFAConfig;
import org.antlr.v4.analysis.Resolver;
import org.antlr.v4.analysis.SemanticContext;
import org.antlr.v4.misc.IntSet;
import org.antlr.v4.misc.OrderedHashSet;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;

/** A DFA state represents a set of possible NFA configurations.
 * As Aho, Sethi, Ullman p. 117 says "The DFA uses its state

@@ -127,6 +125,95 @@ public class DFAState {
    return alts;
}

public Map<Integer, SemanticContext> getPredicatesForAlts() {
    // map alt to combined SemanticContext
    Map<Integer, SemanticContext> altToPredicateContextMap =
        new HashMap<Integer, SemanticContext>();
    Set<Integer> alts = getAltSet();
    for (Integer alt : alts) {
        SemanticContext ctx = getPredicatesForAlt(alt);
        altToPredicateContextMap.put(alt, ctx);
    }
    return altToPredicateContextMap;
}

public SemanticContext getPredicatesForAlt(int alt) {
    SemanticContext preds = null;
    for (NFAConfig c : nfaConfigs) {
        if ( c.alt == alt &&
             c.semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT )
        {
            if ( preds == null ) preds = c.semanticContext;
            else preds = SemanticContext.or(preds, c.semanticContext);
        }
    }
    return preds;
}

public List<NFAConfig> getNFAConfigsForAlt(int alt) {
    List<NFAConfig> configs = new ArrayList<NFAConfig>();
    for (NFAConfig c : nfaConfigs) {
        if ( c.alt == alt ) configs.add(c);
    }
    return configs;
}

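// Hedged sketch of the per-alt predicate fold that getPredicatesForAlt
// performs above: every configuration's context for one alt is OR'd
// together, and null means the alt needs no predicate. Plain strings
// stand in for SemanticContext; the Cfg class is hypothetical.
import java.util.*;

class PredicateFoldSketch {
    static class Cfg {
        int alt; String pred;
        Cfg(int a, String p) { alt = a; pred = p; }
    }

    static String predicatesForAlt(List<Cfg> configs, int alt) {
        String preds = null;
        for (Cfg c : configs) {
            if ( c.alt != alt || c.pred == null ) continue;
            preds = (preds == null) ? c.pred : "("+preds+"||"+c.pred+")";
        }
        return preds;
    }

    public static void main(String[] args) {
        List<Cfg> configs =
            Arrays.asList(new Cfg(1,"p1"), new Cfg(1,"p2"), new Cfg(2,null));
        System.out.println(predicatesForAlt(configs, 1)); // (p1||p2)
        System.out.println(predicatesForAlt(configs, 2)); // null: alt 2 unpredicated
    }
}
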
/** For gated productions, we need an OR'd list of all predicates for the
 * target of an edge so we can gate the edge based upon the predicates
 * associated with taking that path (if any).
 *
 * For syntactic predicates, we only want to generate predicate
 * evaluations as we transition to an accept state; it's a waste to
 * do it earlier. So, only add gated preds derived from manually-
 * specified syntactic predicates if this is an accept state.
 *
 * Also, since configurations w/o gated predicates are like true
 * gated predicates, finding a configuration whose alt has no gated
 * predicate implies we should evaluate the predicate to true. This
 * means the whole edge has to be ungated. Consider:
 *
 *   X : ('a' | {p}?=> 'a')
 *     | 'a' 'b'
 *     ;
 *
 * Here, 'a' gets you from s0 to s1 but you can't test p because
 * plain 'a' is ok. It's also ok for starting alt 2. Hence, you can't
 * test p. Even on the edge going to accept state for alt 1 of X, you
 * can't test p. You can get to the same place with and w/o the context.
 * Therefore, it is never ok to test p in this situation.
 */
public SemanticContext getGatedPredicatesInNFAConfigurations() {
    SemanticContext unionOfPredicatesFromAllAlts = null;
    for (NFAConfig c : nfaConfigs) {
        SemanticContext gatedPredExpr =
            c.semanticContext.getGatedPredicateContext();
        if ( gatedPredExpr==null ) {
            // if we ever find a configuration w/o a gated predicate
            // (even if it's a nongated predicate), we cannot gate
            // the incident edges.
            return null;
        }
        else if ( isAcceptState || !c.semanticContext.isSyntacticPredicate() ) {
            // at this point we have a gated predicate and, due to elseif,
            // we know it's an accept state or not a syn pred. In this case,
            // it's safe to add the gated predicate to the union. We
            // only want to add syn preds if it's an accept state. Other
            // gated preds can be used with edges leading to accept states.
            if ( unionOfPredicatesFromAllAlts==null ) {
                unionOfPredicatesFromAllAlts = gatedPredExpr;
            }
            else {
                unionOfPredicatesFromAllAlts =
                    SemanticContext.or(unionOfPredicatesFromAllAlts,gatedPredExpr);
            }
        }
    }
    if ( unionOfPredicatesFromAllAlts instanceof SemanticContext.TruePredicate ) {
        return null;
    }
    return unionOfPredicatesFromAllAlts;
}

public int getNumberOfEdges() { return edges.size(); }

public void addEdge(Edge e) { edges.add(e); }

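// The X-grammar example above reduces to one rule: if any configuration
// reaching an edge carries no gated predicate, the whole edge must stay
// ungated. A hedged sketch of that union with strings standing in for
// SemanticContext (nothing here is the tool's real API):
import java.util.*;

class GatedUnionSketch {
    static String gatedPredicateUnion(List<String> gatedPreds) {
        String union = null;
        for (String p : gatedPreds) {
            if ( p == null ) return null; // one ungated path ungates the edge
            union = (union == null) ? p : "("+union+"||"+p+")";
        }
        return union;
    }

    public static void main(String[] args) {
        // alt 1 of X: one config gated by p, one plain 'a' config with no gate
        System.out.println(gatedPredicateUnion(Arrays.asList("p", null))); // null
        System.out.println(gatedPredicateUnion(Arrays.asList("p", "q")));  // (p||q)
    }
}
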
@@ -1,11 +1,13 @@
package org.antlr.v4.automata;

import org.antlr.v4.analysis.SemanticContext;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.tool.Grammar;

/** A DFA edge (NFA edges are called transitions) */
public class Edge {
    public IntervalSet label;
    public SemanticContext semanticContext; // predicated edge?
    public DFAState target;

    public Edge(DFAState target) {

@@ -14,6 +16,7 @@ public class Edge {

    public Edge(DFAState target, IntervalSet label) {
        this(target);
        semanticContext = target.getGatedPredicatesInNFAConfigurations();
        this.label = label;
    }

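// With semanticContext stored on the edge, a matcher can gate the
// transition itself. A hedged sketch under assumed, simplified types;
// the real generator emits the predicate test inline rather than via
// an evaluator interface like this one.
import java.util.Set;

class EdgeGateSketch {
    interface PredEval { boolean eval(String semanticContext); }

    static class Edge {
        Set<Integer> label;     // chars this edge matches
        String semanticContext; // null means ungated
        int target;
    }

    static boolean canTake(Edge e, int c, PredEval eval) {
        if ( !e.label.contains(c) ) return false;      // char must match
        if ( e.semanticContext == null ) return true;  // ungated edge
        return eval.eval(e.semanticContext);           // gated: predicate must hold
    }
}
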
@@ -1,20 +1,18 @@
package org.antlr.v4.automata;

import org.antlr.v4.tool.ActionAST;
import org.antlr.v4.tool.Rule;

import java.util.ArrayList;
import java.util.List;

/** Lexer DFA states track just NFAStates, not configs with stack/alt etc.
 * like the DFA used for prediction.
 */
public class LexerState extends DFAState {
    /** For ambiguous lexer rules, the accept state matches a set of rules,
     * not just one. Means we can't use predictsAlt (an int). The
     * order of rules is order given in grammar. So, gives precedence to
     * keywords vs IDs if keywords are first.
     */
     * not just one. So, gives precedence to keywords vs IDs if keywords are first.
    public List<Rule> matchesRules = new ArrayList<Rule>();
     */

    public Rule predictsRule;

    /** Single action sitting at extreme right edge of lexer rule */
    public ActionAST action;

    public LexerState(DFA dfa) {
        super(dfa);

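// predictsRule replaces the old matchesRules set: when several lexer
// rules match the same input, the one declared first in the grammar
// wins. A hedged sketch of that tie-break over hypothetical rule
// records (name plus declaration index):
import java.util.*;

class LexerRulePrecedenceSketch {
    static class Rule {
        String name; int index;
        Rule(String n, int i) { name = n; index = i; }
    }

    static Rule predictsRule(List<Rule> matching) {
        Rule first = matching.get(0);
        for (Rule r : matching) if ( r.index < first.index ) first = r;
        return first;
    }

    public static void main(String[] args) {
        List<Rule> matching = Arrays.asList(new Rule("ID",5), new Rule("WHILE",1));
        System.out.println(predictsRule(matching).name); // WHILE beats ID
    }
}
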
@@ -171,7 +171,11 @@ public class ParserNFAFactory implements NFAFactory {
}

public Handle gated_sempred(GrammarAST pred) {
    return null;
    BasicState left = newState(pred);
    NFAState right = newState(pred);
    left.transition = new PredicateTransition(pred, right);
    pred.nfaState = left;
    return new Handle(left, right);
}

/** Build what amounts to an epsilon transition with an action.

@@ -1,38 +1,76 @@
package org.antlr.v4.codegen;

import org.antlr.v4.automata.DFA;
import org.antlr.v4.automata.DFAState;
import org.antlr.v4.automata.Edge;
import org.antlr.v4.automata.Label;
import org.antlr.v4.analysis.SemanticContext;
import org.antlr.v4.automata.*;
import org.antlr.v4.misc.Interval;
import org.antlr.v4.misc.IntervalSet;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.tool.ErrorManager;
import org.stringtemplate.v4.misc.Misc;

import java.util.Vector;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** From a DFA, create transition table etc... */
public class CompiledDFA {
    /** How big can char get before DFA states overflow and we need a set? */
    public static int MAX_EDGE_VALUE_FOR_TABLE = 255;

    public DFA dfa;
    public Vector<Vector<Integer>> transition;
    public Vector<Integer> min;
    public Vector<Integer> max;
    public int[][] transition;
    public int[][] set_edges;
    public int[][] pred_edges; // 'a'&&{p1}?
    public int[] accept;
    public int[] eof;
    public int[] max;
    public int[] action_index;
    public List<String> actions;
    public List<String> sempreds;

    public CompiledDFA(DFA dfa) {
        this.dfa = dfa;

        int n = dfa.states.size();
        min = new Vector<Integer>(n); min.setSize(n);
        max = new Vector<Integer>(n); max.setSize(n);
        transition = new Vector<Vector<Integer>>(n); transition.setSize(n);
        accept = new int[n];
        eof = new int[n];
        Arrays.fill(eof, -1);
        //min = new int[n];
        max = new int[n];
        transition = new int[n][];
        set_edges = new int[n][];
        pred_edges = new int[n][];
        action_index = new int[n];
        Arrays.fill(action_index, -1);
        actions = new ArrayList<String>();
        sempreds = new ArrayList<String>();

        for (DFAState d : dfa.states) {
            if ( d == null ) continue;
            if ( d.isAcceptState ) createAcceptTable(d);
            createMinMaxTables(d);
            createTransitionTableEntryForState(d);
            createEOFTable(d);
            if ( d.edges.size() > 0 ) {
                createTransitionTableEntryForState(d);
                createSetTable(d);
                createPredTable(d);
            }
            if ( dfa.g.isLexer() ) createActionTable((LexerState)d);
        }
    }

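// The tables built by this constructor are meant to drive a simple
// interpreter loop: index transition[s] by the input char while it is
// within the dense table, consult eof[s] at end of input, and stop
// when a state has an accept entry. A hedged sketch under those
// assumptions; it ignores set_edges/pred_edges (covered further down)
// and the max-munch behavior a real lexer needs.
class CompiledDFAMatchSketch {
    int[][] transition; // transition[s][c] -> next state, -1 if none
    int[] accept;       // accept[s] -> predicted alt/token type, 0 if none
    int[] eof;          // eof[s] -> target on EOF, -1 if none

    int predict(int[] input) {
        int s = 0, i = 0;
        while (true) {
            if ( accept[s] != 0 ) return accept[s];
            int c = (i < input.length) ? input[i++] : -1;
            int next = -1;
            if ( c == -1 ) next = eof[s];
            else if ( transition[s] != null && c < transition[s].length ) {
                next = transition[s][c];
            }
            if ( next == -1 ) return 0; // no viable alt
            s = next;
        }
    }
}
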
protected void createMinMaxTables(DFAState d) {
void createAcceptTable(DFAState d) {
    int predicts = d.predictsAlt;
    if ( d.dfa.g.isLexer() ) {
        // for lexer, we don't use predicted alt; we use token type
        LexerState ld = (LexerState)d;
        String predictedLexerRuleName = ld.predictsRule.name;
        predicts = d.dfa.g.getTokenType(predictedLexerRuleName);
    }
    accept[d.stateNumber] = predicts;
}

void createMinMaxTables(DFAState d) {
    int smin = Label.MAX_CHAR_VALUE + 1;
    int smax = Label.MIN_ATOM_VALUE - 1;
    int n = d.edges.size();

@@ -55,38 +93,42 @@ public class CompiledDFA {
        smax = Label.MIN_CHAR_VALUE;
    }

    min.set(d.stateNumber, Utils.integer((char)smin));
    max.set(d.stateNumber, Utils.integer((char)smax));
    //min[d.stateNumber] = smin;
    max[d.stateNumber] = smax;

    if ( smax<0 || smin>Label.MAX_CHAR_VALUE || smin<0 ) {
        ErrorManager.internalError("messed up: min="+min+", max="+max);
    if ( smax<0 || smin<0 ) {
        ErrorManager.internalError("messed up: max="+Arrays.toString(max));
    }
}

void createTransitionTableEntryForState(DFAState s) {
void createTransitionTableEntryForState(DFAState d) {
    /*
    System.out.println("createTransitionTableEntryForState s"+s.stateNumber+
                       " dec "+s.dfa.decisionNumber+" cyclic="+s.dfa.isCyclic());
    */
    if ( s.edges.size() == 0 ) return;
    int smax = ((Integer)max.get(s.stateNumber)).intValue();
    int smin = ((Integer)min.get(s.stateNumber)).intValue();
    int max = Math.min(this.max[d.stateNumber], MAX_EDGE_VALUE_FOR_TABLE);

    Vector<Integer> stateTransitions = new Vector<Integer>(smax-smin+1);
    stateTransitions.setSize(smax-smin+1);
    transition.set(s.stateNumber, stateTransitions);
    for (Edge e : s.edges) {
        int[] atoms = e.label.toArray();
        for (int a = 0; a < atoms.length; a++) {
            // set the transition if the label is valid (don't do EOF)
            if ( atoms[a] >= Label.MIN_CHAR_VALUE ) {
                int labelIndex = atoms[a]-smin; // offset from 0
                stateTransitions.set(labelIndex,
                                     Utils.integer(e.target.stateNumber));
            }
    int[] stateTransitions = new int[max+1]; // make table only up to max
    Arrays.fill(stateTransitions, -1);
    transition[d.stateNumber] = stateTransitions;

    int[] predTransitions = new int[max+1];
    Arrays.fill(predTransitions, -1);

    for (Edge e : d.edges) {
        for (Interval I : e.label.getIntervals()) {
            SemanticContext preds = e.target.getGatedPredicatesInNFAConfigurations();
            if ( I.a > MAX_EDGE_VALUE_FOR_TABLE || preds!=null ) break;
            // make sure range is MIN_CHAR_VALUE..MAX_EDGE_VALUE_FOR_TABLE and no preds
            int a = Math.max(I.a, Label.MIN_CHAR_VALUE);
            int b = Math.min(I.b, MAX_EDGE_VALUE_FOR_TABLE);
            //System.out.println("interval "+I+"->"+a+":"+b);
            for (int i=a; i<=b; i++) stateTransitions[i] = e.target.stateNumber;
        }
    }
    // track unique state transition tables so we can reuse

    // TODO: track unique state transition tables so we can reuse
    // Integer edgeClass = (Integer)edgeTransitionClassMap.get(stateTransitions);
    // if ( edgeClass!=null ) {
    //     //System.out.println("we've seen this array before; size="+stateTransitions.size());

@@ -99,4 +141,71 @@ public class CompiledDFA {
    //     edgeTransitionClass++;
    // }
}

/** Set up the EOF table; we cannot use -1 min/max values so
 * we need another way to test that in the DFA transition function.
 */
void createEOFTable(DFAState d) {
    for (Edge e : d.edges) {
        int[] atoms = e.label.toArray();
        for (int a : atoms) {
            if ( a==Label.EOF ) eof[d.stateNumber] = e.target.stateNumber;
        }
    }
}

void createSetTable(DFAState d) {
    // only pay attention if at least one edge's max char is > MAX
    if ( max[d.stateNumber] > MAX_EDGE_VALUE_FOR_TABLE ) {
        List<Integer> edges = new ArrayList<Integer>();
        // { target1, npairs1, range-pairs1,
        //   target2, npairs2, range-pairs2, ... }
        for (Edge e : d.edges) {
            // don't gen target if all of the edge's chars are <= max
            if ( e.label.getMaxElement() <= MAX_EDGE_VALUE_FOR_TABLE ) continue;
            edges.add(e.target.stateNumber);
            int nIndex = edges.size();
            edges.add(0); // leave hole for n
            List<Interval> intervals = e.label.getIntervals();
            int n = 0;
            for (Interval I : intervals) {
                // make sure range is beyond max or truncate left side to be above max
                if ( I.b <= MAX_EDGE_VALUE_FOR_TABLE ) continue;
                int a = Math.max(I.a, MAX_EDGE_VALUE_FOR_TABLE+1);
                edges.add(a);
                edges.add(I.b);
                n++;
            }
            edges.set(nIndex, n);
        }
        if ( edges.size()>0 ) set_edges[d.stateNumber] = Utils.toIntArray(edges);
    }
}

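// The flat { target, npairs, a,b pairs... } records above are decoded
// by walking the array left to right. A hedged sketch of a decoder for
// that layout (the method name and placement are illustrative, not the
// generated template's):
class SetEdgeDecodeSketch {
    static int setTransition(int[] edges, int c) {
        int p = 0;
        while ( p < edges.length ) {
            int target = edges[p++];
            int npairs = edges[p++];
            for (int j = 0; j < npairs; j++, p += 2) {
                if ( c >= edges[p] && c <= edges[p+1] ) return target;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        // one edge to state 7 covering 256..300 and 0x1000..0x10FF
        int[] edges = { 7, 2, 256,300, 0x1000,0x10FF };
        System.out.println(setTransition(edges, 0x1005)); // 7
        System.out.println(setTransition(edges, 'a'));    // -1: handled by dense table
    }
}
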
void createPredTable(DFAState d) {
    List<Integer> edges = new ArrayList<Integer>();
    // { target1, sempred_index1, target2, sempred_index2, ... }
    for (Edge e : d.edges) {
        if ( e.semanticContext!=null ) {
            System.out.println("gated preds for "+e.target.stateNumber+": "+e.semanticContext);
            // TODO: translate sempreds and gen proper && expressions for target
            String p = e.semanticContext.toString();
            edges.add(e.target.stateNumber);
            int prevIndex = sempreds.indexOf(p);
            int i = prevIndex;
            if ( prevIndex<0 ) {
                i = sempreds.size();
                sempreds.add(p);
            }
            edges.add(i);
        }
    }
    if ( edges.size()>0 ) pred_edges[d.stateNumber] = Utils.toIntArray(edges);
}

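// pred_edges pairs each predicated target with an index into the
// sempreds list so the runtime can test the right predicate before
// committing to the target. A hedged sketch of consuming those pairs
// (the evaluator hook is hypothetical):
class PredEdgeDecodeSketch {
    interface SempredEvaluator { boolean evalSempred(int index); }

    /** record layout: { target1, sempredIndex1, target2, sempredIndex2, ... } */
    static int predTransition(int[] predEdges, SempredEvaluator eval) {
        for (int p = 0; p < predEdges.length; p += 2) {
            if ( eval.evalSempred(predEdges[p+1]) ) return predEdges[p];
        }
        return -1; // no gating predicate held
    }
}
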
void createActionTable(LexerState d) {
    if ( d.isAcceptState && d.action!=null ) {
        action_index[d.stateNumber] = actions.size();
        actions.add(Misc.strip(d.action.getText(),1)); // TODO: translate action
    }
}
}

@@ -27,7 +27,7 @@ public class LexerFactory {
fileST.add("lexer", lexerST);
for (String modeName : lg.modes.keySet()) { // for each mode
    injectDFAs(lg, lexerST, modeName);
    injectPDAs(lg, lexerST, modeName);
    //injectPDAs(lg, lexerST, modeName);
}

LinkedHashMap<String,Integer> tokens = new LinkedHashMap<String,Integer>();

@@ -46,7 +46,12 @@ public class LexerFactory {
DFA dfa = lg.modeToDFA.get(modeName);
ST dfaST = gen.templates.getInstanceOf("DFA");
dfaST.add("name", modeName);
dfaST.add("model", new CompiledDFA(dfa));
CompiledDFA obj = new CompiledDFA(dfa);
dfaST.add("model", obj);
// ST actionST = gen.templates.getInstanceOf("actionMethod");
// actionST.add("name", modeName);
// actionST.add("actions", obj.actions);
// lexerST.add("actions", actionST);
lexerST.add("dfas", dfaST);
}

@@ -145,51 +145,6 @@ public class IntervalSet implements IntSet {
    intervals.add(addition);
}

/*
protected void add(Interval addition) {
    //System.out.println("add "+addition+" to "+intervals.toString());
    if ( addition.b<addition.a ) {
        return;
    }
    // find position in list
    //for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
    int n = intervals.size();
    for (int i=0; i<n; i++) {
        Interval r = (Interval)intervals.get(i);
        if ( addition.equals(r) ) {
            return;
        }
        if ( addition.adjacent(r) || !addition.disjoint(r) ) {
            // next to each other, make a single larger interval
            Interval bigger = addition.union(r);
            intervals.set(i, bigger);
            // make sure we didn't just create an interval that
            // should be merged with next interval in list
            if ( (i+1)<n ) {
                i++;
                Interval next = (Interval)intervals.get(i);
                if ( bigger.adjacent(next)||!bigger.disjoint(next) ) {
                    // if we bump up against or overlap next, merge
                    intervals.remove(i); // remove next one
                    i--;
                    intervals.set(i, bigger.union(next)); // set to 3 merged ones
                }
            }
            return;
        }
        if ( addition.startsBeforeDisjoint(r) ) {
            // insert before r
            intervals.add(i, addition);
            return;
        }
        // if disjoint and after r, a future iteration will handle it
    }
    // ok, must be after last interval (and disjoint from last interval)
    // just add it
    intervals.add(addition);
}
*/

/** Combine all sets in the array and return the OR'd value */
public static IntervalSet or(IntervalSet[] sets) {
    IntervalSet r = new IntervalSet();

@@ -281,125 +236,6 @@ public class IntervalSet implements IntSet {
    return this.and(((IntervalSet)other).complement(COMPLETE_SET));
}

/** return a new set containing all elements in this but not in other.
 * Intervals may have to be broken up when ranges in this overlap
 * with ranges in other. other is assumed to be a subset of this;
 * anything that is in other but not in this will be ignored.
 *
 * Keep around, but 10-20-2005, I decided to make complement work w/o
 * subtract and so then subtract can simply be a&~b
 *
public IntSet subtract(IntSet other) {
    if ( other==null || !(other instanceof IntervalSet) ) {
        return null; // nothing in common with null set
    }

    IntervalSet diff = new IntervalSet();

    // iterate down both interval lists
    ListIterator thisIter = this.intervals.listIterator();
    ListIterator otherIter = ((IntervalSet)other).intervals.listIterator();
    Interval mine=null;
    Interval theirs=null;
    if ( thisIter.hasNext() ) {
        mine = (Interval)thisIter.next();
    }
    if ( otherIter.hasNext() ) {
        theirs = (Interval)otherIter.next();
    }
    while ( mine!=null ) {
        //System.out.println("mine="+mine+", theirs="+theirs);
        // CASE 1: nothing in theirs removes a chunk from mine
        if ( theirs==null || mine.disjoint(theirs) ) {
            // SUBCASE 1a: finished traversing theirs; keep adding mine now
            if ( theirs==null ) {
                // add everything in mine to difference since theirs done
                diff.add(mine);
                mine = null;
                if ( thisIter.hasNext() ) {
                    mine = (Interval)thisIter.next();
                }
            }
            else {
                // SUBCASE 1b: mine is completely to the left of theirs
                // so we can add to difference; move mine, but not theirs
                if ( mine.startsBeforeDisjoint(theirs) ) {
                    diff.add(mine);
                    mine = null;
                    if ( thisIter.hasNext() ) {
                        mine = (Interval)thisIter.next();
                    }
                }
                // SUBCASE 1c: theirs is completely to the left of mine
                else {
                    // keep looking in theirs
                    theirs = null;
                    if ( otherIter.hasNext() ) {
                        theirs = (Interval)otherIter.next();
                    }
                }
            }
        }
        else {
            // CASE 2: theirs breaks mine into two chunks
            if ( mine.properlyContains(theirs) ) {
                // must add two intervals: stuff to left and stuff to right
                diff.add(mine.a, theirs.a-1);
                // don't actually add stuff to right yet as next 'theirs'
                // might overlap with it
                // The stuff to the right might overlap with next "theirs".
                // so it is considered next
                Interval right = new Interval(theirs.b+1, mine.b);
                mine = right;
                // move theirs forward
                theirs = null;
                if ( otherIter.hasNext() ) {
                    theirs = (Interval)otherIter.next();
                }
            }

            // CASE 3: theirs covers mine; nothing to add to diff
            else if ( theirs.properlyContains(mine) ) {
                // nothing to add, theirs forces removal totally of mine
                // just move mine looking for an overlapping interval
                mine = null;
                if ( thisIter.hasNext() ) {
                    mine = (Interval)thisIter.next();
                }
            }

            // CASE 4: non proper overlap
            else {
                // overlap, but not properly contained
                diff.add(mine.differenceNotProperlyContained(theirs));
                // update iterators
                boolean moveTheirs = true;
                if ( mine.startsBeforeNonDisjoint(theirs) ||
                     theirs.b > mine.b )
                {
                    // uh oh, right of theirs extends past right of mine
                    // therefore could overlap with next of mine so don't
                    // move theirs iterator yet
                    moveTheirs = false;
                }
                // always move mine
                mine = null;
                if ( thisIter.hasNext() ) {
                    mine = (Interval)thisIter.next();
                }
                if ( moveTheirs ) {
                    theirs = null;
                    if ( otherIter.hasNext() ) {
                        theirs = (Interval)otherIter.next();
                    }
                }
            }
        }
    }
    return diff;
}
*/

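// The shortcut the comment above describes, subtract as a AND (NOT b),
// needs only operations the class already has. A hedged usage sketch,
// assuming the usual of(a,b) factory for a character range:
//
//     IntervalSet a = IntervalSet.of('a', 'z');
//     IntervalSet b = IntervalSet.of('m', 'p');
//     IntSet diff = a.and(b.complement(COMPLETE_SET)); // 'a'..'l' plus 'q'..'z'
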
public IntSet or(IntSet a) {
    IntervalSet o = new IntervalSet();
    o.addAll(this);

@@ -711,11 +547,4 @@ public class IntervalSet implements IntSet {
public void remove(int el) {
    throw new NoSuchMethodError("IntervalSet.remove() unimplemented");
}

/*
protected void finalize() throws Throwable {
    super.finalize();
    System.out.println("size "+intervals.size()+" "+size());
}
*/
}

@@ -119,6 +119,13 @@ public class Utils {
    return x;
}

public static int[] toIntArray(List<Integer> list) {
    if ( list==null ) return null;
    int[] a = new int[list.size()];
    for (int i=0; i<list.size(); i++) a[i] = list.get(i);
    return a;
}

/** apply methodName to list and return list of results. method has
 * no args. This pulls data out of a list essentially.
 */

File diff suppressed because it is too large

@@ -577,7 +577,7 @@ element
    | ebnf
    | ACTION<ActionAST>
    | SEMPRED
        ( IMPLIES -> GATED_SEMPRED[$IMPLIES]
        ( IMPLIES -> GATED_SEMPRED[$SEMPRED]
        | -> SEMPRED
        )
    | treeSpec

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 ANTLRParser.g 2010-05-24 16:31:57
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 ANTLRParser.g 2010-06-14 12:35:32

/*
[The "BSD licence"]

@@ -339,7 +339,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: prequelConstruct, rules, mode, grammarType, DOC_COMMENT, id
// elements: id, prequelConstruct, grammarType, mode, rules, DOC_COMMENT
// token labels:
// rule labels: retval
// token list labels:

@@ -1184,7 +1184,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: delegateGrammar, IMPORT
// elements: IMPORT, delegateGrammar
// token labels:
// rule labels: retval
// token list labels:

@@ -1465,7 +1465,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: tokenSpec, TOKENS
// elements: TOKENS, tokenSpec
// token labels:
// rule labels: retval
// token list labels:

@@ -1556,12 +1556,12 @@ public class ANTLRParser extends Parser {
if ( (LA12_0==RULE_REF) ) {
int LA12_1 = input.LA(2);

if ( (LA12_1==TEMPLATE||(LA12_1>=RBRACE && LA12_1<=RULE_REF)) ) {
alt12=2;
}
else if ( (LA12_1==SEMI||LA12_1==ASSIGN) ) {
if ( (LA12_1==SEMI||LA12_1==ASSIGN) ) {
alt12=1;
}
else if ( (LA12_1==TEMPLATE||(LA12_1>=RBRACE && LA12_1<=RULE_REF)) ) {
alt12=2;
}
else {
if (state.backtracking>0) {state.failed=true; return retval;}
NoViableAltException nvae =

@@ -1760,7 +1760,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: SCOPE, id, ACTION
// elements: id, ACTION, SCOPE
// token labels:
// rule labels: retval
// token list labels:

@@ -1914,7 +1914,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: actionScopeName, ACTION, id, AT
// elements: ACTION, actionScopeName, AT, id
// token labels:
// rule labels: retval
// token list labels:

@@ -2214,7 +2214,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: MODE, rule, id
// elements: MODE, id, rule
// token labels:
// rule labels: retval
// token list labels:

@@ -2606,7 +2606,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: exceptionGroup, rulePrequels, ruleModifiers, ruleBlock, DOC_COMMENT, ruleReturns, id, ARG_ACTION
// elements: ruleModifiers, ruleReturns, id, ruleBlock, ARG_ACTION, exceptionGroup, DOC_COMMENT, rulePrequels
// token labels:
// rule labels: retval
// token list labels:

@@ -2835,7 +2835,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: ARG_ACTION, ACTION, CATCH
// elements: ACTION, ARG_ACTION, CATCH
// token labels:
// rule labels: retval
// token list labels:

@@ -3352,7 +3352,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: qid, THROWS
// elements: THROWS, qid
// token labels:
// rule labels: retval
// token list labels:

@@ -3561,7 +3561,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: id, SCOPE
// elements: SCOPE, id
// token labels:
// rule labels: retval
// token list labels:

@@ -3660,7 +3660,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: id, AT, ACTION
// elements: ACTION, id, AT
// token labels:
// rule labels: retval
// token list labels:

@@ -4178,7 +4178,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: elements, rewrite
// elements: rewrite, elements
// token labels:
// rule labels: retval
// token list labels:

@@ -4468,7 +4468,7 @@ public class ANTLRParser extends Parser {
};

// $ANTLR start "element"
// ANTLRParser.g:563:1: element : ( labeledElement ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$labeledElement.start,\"BLOCK\"] ^( ALT labeledElement ) ) ) | -> labeledElement ) | atom ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$atom.start,\"BLOCK\"] ^( ALT atom ) ) ) | -> atom ) | ebnf | ACTION | SEMPRED ( IMPLIES -> GATED_SEMPRED[$IMPLIES] | -> SEMPRED ) | treeSpec ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$treeSpec.start,\"BLOCK\"] ^( ALT treeSpec ) ) ) | -> treeSpec ) );
// ANTLRParser.g:563:1: element : ( labeledElement ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$labeledElement.start,\"BLOCK\"] ^( ALT labeledElement ) ) ) | -> labeledElement ) | atom ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$atom.start,\"BLOCK\"] ^( ALT atom ) ) ) | -> atom ) | ebnf | ACTION | SEMPRED ( IMPLIES -> GATED_SEMPRED[$SEMPRED] | -> SEMPRED ) | treeSpec ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$treeSpec.start,\"BLOCK\"] ^( ALT treeSpec ) ) ) | -> treeSpec ) );
public final ANTLRParser.element_return element() throws RecognitionException {
ANTLRParser.element_return retval = new ANTLRParser.element_return();
retval.start = input.LT(1);

@@ -4507,7 +4507,7 @@ public class ANTLRParser extends Parser {
int m = input.mark();

try {
// ANTLRParser.g:569:2: ( labeledElement ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$labeledElement.start,\"BLOCK\"] ^( ALT labeledElement ) ) ) | -> labeledElement ) | atom ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$atom.start,\"BLOCK\"] ^( ALT atom ) ) ) | -> atom ) | ebnf | ACTION | SEMPRED ( IMPLIES -> GATED_SEMPRED[$IMPLIES] | -> SEMPRED ) | treeSpec ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$treeSpec.start,\"BLOCK\"] ^( ALT treeSpec ) ) ) | -> treeSpec ) )
// ANTLRParser.g:569:2: ( labeledElement ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$labeledElement.start,\"BLOCK\"] ^( ALT labeledElement ) ) ) | -> labeledElement ) | atom ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$atom.start,\"BLOCK\"] ^( ALT atom ) ) ) | -> atom ) | ebnf | ACTION | SEMPRED ( IMPLIES -> GATED_SEMPRED[$SEMPRED] | -> SEMPRED ) | treeSpec ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$treeSpec.start,\"BLOCK\"] ^( ALT treeSpec ) ) ) | -> treeSpec ) )
int alt37=6;
alt37 = dfa37.predict(input);
switch (alt37) {

@@ -4550,7 +4550,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: ebnfSuffix, labeledElement
// elements: labeledElement, ebnfSuffix
// token labels:
// rule labels: retval
// token list labels:

@@ -4767,12 +4767,12 @@ public class ANTLRParser extends Parser {
}
break;
case 5 :
// ANTLRParser.g:579:6: SEMPRED ( IMPLIES -> GATED_SEMPRED[$IMPLIES] | -> SEMPRED )
// ANTLRParser.g:579:6: SEMPRED ( IMPLIES -> GATED_SEMPRED[$SEMPRED] | -> SEMPRED )
{
SEMPRED119=(Token)match(input,SEMPRED,FOLLOW_SEMPRED_in_element2845); if (state.failed) return retval;
if ( state.backtracking==0 ) stream_SEMPRED.add(SEMPRED119);

// ANTLRParser.g:580:3: ( IMPLIES -> GATED_SEMPRED[$IMPLIES] | -> SEMPRED )
// ANTLRParser.g:580:3: ( IMPLIES -> GATED_SEMPRED[$SEMPRED] | -> SEMPRED )
int alt35=2;
int LA35_0 = input.LA(1);


@@ -4810,9 +4810,9 @@ public class ANTLRParser extends Parser {
RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);

root_0 = (GrammarAST)adaptor.nil();
// 580:14: -> GATED_SEMPRED[$IMPLIES]
// 580:14: -> GATED_SEMPRED[$SEMPRED]
{
adaptor.addChild(root_0, (GrammarAST)adaptor.create(GATED_SEMPRED, IMPLIES120));
adaptor.addChild(root_0, (GrammarAST)adaptor.create(GATED_SEMPRED, SEMPRED119));

}


@@ -5372,7 +5372,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: blockSuffixe, block
// elements: block, blockSuffixe
// token labels:
// rule labels: retval
// token list labels:

@@ -6177,7 +6177,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: terminal, NOT
// elements: NOT, terminal
// token labels:
// rule labels: retval
// token list labels:

@@ -6396,7 +6396,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: altList, ra, optionsSpec
// elements: altList, optionsSpec, ra
// token labels:
// rule labels: retval
// token list labels:

@@ -6618,7 +6618,7 @@ public class ANTLRParser extends Parser {
{

// AST REWRITE
// elements: ARG_ACTION, RULE_REF
// elements: RULE_REF, ARG_ACTION
// token labels:
// rule labels: retval
// token list labels:

@@ -6838,7 +6838,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: TOKEN_REF, ARG_ACTION, elementOptions
// elements: ARG_ACTION, elementOptions, TOKEN_REF
// token labels:
// rule labels: retval
// token list labels:

@@ -7450,7 +7450,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: SEMPRED, rewriteAlt, rewriteAlt, SEMPRED
// elements: SEMPRED, SEMPRED, rewriteAlt, rewriteAlt
// token labels:
// rule labels: retval
// token list labels:

@@ -8231,7 +8231,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: TOKEN_REF, elementOptions, ARG_ACTION
// elements: TOKEN_REF, ARG_ACTION, elementOptions
// token labels:
// rule labels: retval
// token list labels:

@@ -8315,7 +8315,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: elementOptions, STRING_LITERAL
// elements: STRING_LITERAL, elementOptions
// token labels:
// rule labels: retval
// token list labels:

@@ -8472,7 +8472,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: rewriteTreeAlt, ebnfSuffix
// elements: ebnfSuffix, rewriteTreeAlt
// token labels:
// rule labels: retval
// token list labels:

@@ -8609,7 +8609,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: TREE_BEGIN, rewriteTreeAtom, rewriteTreeElement
// elements: rewriteTreeAtom, rewriteTreeElement, TREE_BEGIN
// token labels:
// rule labels: retval
// token list labels:

@@ -8764,7 +8764,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: TEMPLATE, str, rewriteTemplateArgs
// elements: rewriteTemplateArgs, str, TEMPLATE
// token labels: str
// rule labels: retval
// token list labels:

@@ -8913,7 +8913,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: id, rewriteTemplateArgs
// elements: rewriteTemplateArgs, id
// token labels:
// rule labels: retval
// token list labels:

@@ -9025,7 +9025,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: ACTION, rewriteTemplateArgs
// elements: rewriteTemplateArgs, ACTION
// token labels:
// rule labels: retval
// token list labels:

@@ -9271,7 +9271,7 @@ public class ANTLRParser extends Parser {


// AST REWRITE
// elements: id, ACTION
// elements: ACTION, id
// token labels:
// rule labels: retval
// token list labels:

@@ -9960,19 +9960,19 @@ public class ANTLRParser extends Parser {
this.transition = DFA37_transition;
}
public String getDescription() {
return "563:1: element : ( labeledElement ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$labeledElement.start,\"BLOCK\"] ^( ALT labeledElement ) ) ) | -> labeledElement ) | atom ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$atom.start,\"BLOCK\"] ^( ALT atom ) ) ) | -> atom ) | ebnf | ACTION | SEMPRED ( IMPLIES -> GATED_SEMPRED[$IMPLIES] | -> SEMPRED ) | treeSpec ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$treeSpec.start,\"BLOCK\"] ^( ALT treeSpec ) ) ) | -> treeSpec ) );";
return "563:1: element : ( labeledElement ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$labeledElement.start,\"BLOCK\"] ^( ALT labeledElement ) ) ) | -> labeledElement ) | atom ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$atom.start,\"BLOCK\"] ^( ALT atom ) ) ) | -> atom ) | ebnf | ACTION | SEMPRED ( IMPLIES -> GATED_SEMPRED[$SEMPRED] | -> SEMPRED ) | treeSpec ( ebnfSuffix -> ^( ebnfSuffix ^( BLOCK[$treeSpec.start,\"BLOCK\"] ^( ALT treeSpec ) ) ) | -> treeSpec ) );";
}
}
static final String DFA48_eotS =
"\16\uffff";
static final String DFA48_eofS =
"\1\uffff\1\10\1\11\1\uffff\1\11\2\uffff\1\10\2\uffff\1\11\3\uffff";
"\1\uffff\1\10\1\12\1\uffff\1\12\2\uffff\1\10\1\uffff\1\12\4\uffff";
static final String DFA48_minS =
"\1\43\2\4\1\uffff\1\4\2\uffff\1\4\2\uffff\1\4\1\uffff\2\0";
"\1\43\2\4\1\uffff\1\4\2\uffff\1\4\1\uffff\1\4\2\uffff\2\0";
static final String DFA48_maxS =
"\3\104\1\uffff\1\104\2\uffff\1\104\2\uffff\1\104\1\uffff\2\0";
"\3\104\1\uffff\1\104\2\uffff\1\104\1\uffff\1\104\2\uffff\2\0";
static final String DFA48_acceptS =
"\3\uffff\1\1\1\uffff\1\5\1\6\1\uffff\1\4\1\3\1\uffff\1\2\2\uffff";
"\3\uffff\1\1\1\uffff\1\5\1\6\1\uffff\1\4\1\uffff\1\3\1\2\2\uffff";
static final String DFA48_specialS =
"\14\uffff\1\0\1\1}>";
static final String[] DFA48_transitionS = {

@@ -9980,23 +9980,23 @@ public class ANTLRParser extends Parser {
"\1\10\11\uffff\1\10\1\uffff\1\10\22\uffff\1\10\4\uffff\3\10"+
"\4\uffff\4\10\1\uffff\2\10\1\uffff\1\7\2\uffff\2\10\1\uffff"+
"\1\10\1\uffff\2\10\3\uffff\1\10",
"\1\11\11\uffff\1\11\1\uffff\1\11\22\uffff\1\11\4\uffff\3\11"+
"\1\uffff\1\11\2\uffff\4\11\1\uffff\2\11\1\uffff\1\12\2\uffff"+
"\2\11\1\uffff\1\11\1\uffff\2\11\3\uffff\1\11",
"\1\12\11\uffff\1\12\1\uffff\1\12\22\uffff\1\12\4\uffff\3\12"+
"\1\uffff\1\12\2\uffff\4\12\1\uffff\2\12\1\uffff\1\11\2\uffff"+
"\2\12\1\uffff\1\12\1\uffff\2\12\3\uffff\1\12",
"",
"\1\11\13\uffff\1\11\22\uffff\1\11\4\uffff\3\11\1\uffff\1\11"+
"\2\uffff\4\11\1\uffff\2\11\1\uffff\1\11\1\13\1\uffff\2\11\1"+
"\uffff\1\11\1\uffff\2\11\3\uffff\1\11",
"\1\12\13\uffff\1\12\22\uffff\1\12\4\uffff\3\12\1\uffff\1\12"+
"\2\uffff\4\12\1\uffff\2\12\1\uffff\1\12\1\13\1\uffff\2\12\1"+
"\uffff\1\12\1\uffff\2\12\3\uffff\1\12",
"",
"",
"\1\10\13\uffff\1\10\22\uffff\1\10\4\uffff\3\10\1\uffff\1\10"+
"\2\uffff\1\10\1\uffff\2\10\1\uffff\1\10\2\uffff\1\10\2\uffff"+
"\2\10\1\uffff\1\10\1\uffff\1\10\1\14\3\uffff\1\10",
"",
"\1\12\13\uffff\1\12\22\uffff\1\12\4\uffff\3\12\1\uffff\1\12"+
"\2\uffff\1\12\1\uffff\2\12\1\uffff\1\12\2\uffff\1\12\2\uffff"+
"\2\12\1\uffff\1\12\1\uffff\1\12\1\15\3\uffff\1\12",
"",
"\1\11\13\uffff\1\11\22\uffff\1\11\4\uffff\3\11\1\uffff\1\11"+
"\2\uffff\1\11\1\uffff\2\11\1\uffff\1\11\2\uffff\1\11\2\uffff"+
"\2\11\1\uffff\1\11\1\uffff\1\11\1\15\3\uffff\1\11",
"",
"\1\uffff",
"\1\uffff"

@@ -10070,7 +10070,7 @@ public class ANTLRParser extends Parser {
input.LT(2).getCharPositionInLine()+1==input.LT(3).getCharPositionInLine()
)) ) {s = 3;}

else if ( (true) ) {s = 9;}
else if ( (true) ) {s = 10;}


input.seek(index48_13);

@@ -10184,64 +10184,64 @@ public class ANTLRParser extends Parser {
static final String DFA67_eotS =
"\124\uffff";
static final String DFA67_eofS =
"\1\uffff\3\13\1\uffff\1\13\3\uffff\1\13\3\uffff\3\13\10\uffff\1"+
"\13\4\uffff\1\13\66\uffff";
"\1\uffff\3\12\1\uffff\1\12\3\uffff\1\12\3\uffff\3\12\10\uffff\1"+
"\12\3\uffff\1\12\67\uffff";
static final String DFA67_minS =
"\1\20\1\16\2\20\1\43\1\20\2\uffff\1\43\1\20\2\uffff\1\43\3\20\6"+
"\47\2\43\1\16\4\43\1\20\24\47\6\43\24\47\2\43\6\47";
"\47\2\43\1\16\3\43\1\20\1\43\24\47\6\43\24\47\2\43\6\47";
static final String DFA67_maxS =
"\4\104\1\100\1\104\2\uffff\1\100\1\104\2\uffff\1\100\3\104\6\67"+
"\2\100\3\104\2\100\1\104\11\67\1\55\3\67\1\55\6\67\1\100\1\104\2"+
"\100\1\104\1\100\6\67\1\55\11\67\1\55\3\67\2\100\6\67";
"\2\100\2\104\2\100\2\104\11\67\1\55\11\67\1\55\1\100\1\104\2\100"+
"\1\104\1\100\6\67\1\55\11\67\1\55\3\67\2\100\6\67";
static final String DFA67_acceptS =
"\6\uffff\1\3\1\4\2\uffff\1\2\1\1\110\uffff";
"\6\uffff\1\3\1\4\2\uffff\1\1\1\2\110\uffff";
static final String DFA67_specialS =
"\124\uffff}>";
static final String[] DFA67_transitionS = {
"\1\5\30\uffff\1\7\14\uffff\1\4\4\uffff\1\6\3\uffff\1\1\1\2\3"+
"\uffff\1\3",
"\1\11\1\uffff\1\13\27\uffff\3\13\1\uffff\1\10\2\uffff\1\12"+
"\1\uffff\2\12\1\uffff\1\13\1\uffff\1\13\3\uffff\2\13\3\uffff"+
"\2\13\3\uffff\1\13",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\13\27\uffff\3\13\1\uffff\1\14\2\uffff\1\12\1\uffff\2\12"+
"\1\uffff\1\13\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff"+
"\1\13",
"\1\11\1\uffff\1\12\27\uffff\3\12\1\uffff\1\10\2\uffff\1\13"+
"\1\uffff\2\13\1\uffff\1\12\1\uffff\1\12\3\uffff\2\12\3\uffff"+
"\2\12\3\uffff\1\12",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"\1\12\27\uffff\3\12\1\uffff\1\14\2\uffff\1\13\1\uffff\2\13"+
"\1\uffff\1\12\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff"+
"\1\12",
"\1\17\33\uffff\1\16\1\15",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"",
"",
"\1\22\33\uffff\1\21\1\20",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"",
"",
"\1\25\33\uffff\1\24\1\23",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"\1\27\5\uffff\1\30\1\31\10\uffff\1\26",
"\1\27\5\uffff\1\30\1\31\10\uffff\1\26",
"\1\27\5\uffff\1\30\1\31\10\uffff\1\26",
"\1\34\5\uffff\1\35\1\32\10\uffff\1\33",
"\1\34\5\uffff\1\35\1\32\10\uffff\1\33",
"\1\34\5\uffff\1\35\1\32\10\uffff\1\33",
"\1\33\5\uffff\1\34\1\35\10\uffff\1\32",
"\1\33\5\uffff\1\34\1\35\10\uffff\1\32",
"\1\33\5\uffff\1\34\1\35\10\uffff\1\32",
"\1\40\33\uffff\1\37\1\36",
"\1\43\33\uffff\1\42\1\41",
"\1\11\1\uffff\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12"+
"\1\uffff\1\13\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff"+
"\1\13",
"\1\11\1\uffff\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13"+
"\1\uffff\1\12\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff"+
"\1\12",
"\1\46\33\uffff\1\45\1\44\3\uffff\1\47",
"\1\52\33\uffff\1\51\1\50\3\uffff\1\53",
"\1\56\33\uffff\1\55\1\54",
"\1\61\33\uffff\1\60\1\57",
"\1\13\27\uffff\3\13\4\uffff\1\12\1\uffff\2\12\1\uffff\1\13"+
"\1\uffff\1\13\3\uffff\2\13\3\uffff\2\13\3\uffff\1\13",
"\1\52\33\uffff\1\51\1\50",
"\1\55\33\uffff\1\54\1\53",
"\1\12\27\uffff\3\12\4\uffff\1\13\1\uffff\2\13\1\uffff\1\12"+
"\1\uffff\1\12\3\uffff\2\12\3\uffff\2\12\3\uffff\1\12",
"\1\60\33\uffff\1\57\1\56\3\uffff\1\61",
"\1\27\5\uffff\1\30\11\uffff\1\26",
"\1\27\5\uffff\1\30\11\uffff\1\26",
"\1\27\5\uffff\1\30\11\uffff\1\26",

@@ -10252,16 +10252,16 @@ public class ANTLRParser extends Parser {
"\1\27\5\uffff\1\30\11\uffff\1\64",
"\1\27\5\uffff\1\30\11\uffff\1\64",
"\1\27\5\uffff\1\30",
"\1\34\5\uffff\1\35\11\uffff\1\65",
"\1\34\5\uffff\1\35\11\uffff\1\65",
"\1\34\5\uffff\1\35\11\uffff\1\65",
"\1\34\5\uffff\1\35",
"\1\34\5\uffff\1\35\11\uffff\1\33",
"\1\34\5\uffff\1\35\11\uffff\1\33",
"\1\34\5\uffff\1\35\11\uffff\1\33",
"\1\34\5\uffff\1\35\1\66\10\uffff\1\67",
"\1\34\5\uffff\1\35\1\66\10\uffff\1\67",
"\1\34\5\uffff\1\35\1\66\10\uffff\1\67",
"\1\33\5\uffff\1\34\11\uffff\1\32",
"\1\33\5\uffff\1\34\11\uffff\1\32",
"\1\33\5\uffff\1\34\11\uffff\1\32",
"\1\33\5\uffff\1\34\1\66\10\uffff\1\65",
"\1\33\5\uffff\1\34\1\66\10\uffff\1\65",
"\1\33\5\uffff\1\34\1\66\10\uffff\1\65",
"\1\33\5\uffff\1\34\11\uffff\1\67",
"\1\33\5\uffff\1\34\11\uffff\1\67",
"\1\33\5\uffff\1\34\11\uffff\1\67",
"\1\33\5\uffff\1\34",
"\1\72\33\uffff\1\71\1\70",
"\1\75\33\uffff\1\74\1\73\3\uffff\1\76",
"\1\101\33\uffff\1\100\1\77",

@@ -10278,24 +10278,24 @@ public class ANTLRParser extends Parser {
"\1\27\5\uffff\1\30\11\uffff\1\64",
"\1\27\5\uffff\1\30\11\uffff\1\64",
"\1\27\5\uffff\1\30\11\uffff\1\64",
"\1\34\5\uffff\1\35\11\uffff\1\65",
"\1\34\5\uffff\1\35\11\uffff\1\65",
"\1\34\5\uffff\1\35\11\uffff\1\65",
"\1\34\5\uffff\1\35\11\uffff\1\115",
"\1\34\5\uffff\1\35\11\uffff\1\115",
"\1\34\5\uffff\1\35\11\uffff\1\115",
"\1\34\5\uffff\1\35",
"\1\34\5\uffff\1\35\11\uffff\1\67",
"\1\34\5\uffff\1\35\11\uffff\1\67",
"\1\34\5\uffff\1\35\11\uffff\1\67",
"\1\33\5\uffff\1\34\11\uffff\1\65",
"\1\33\5\uffff\1\34\11\uffff\1\65",
"\1\33\5\uffff\1\34\11\uffff\1\65",
"\1\33\5\uffff\1\34\11\uffff\1\115",
"\1\33\5\uffff\1\34\11\uffff\1\115",
"\1\33\5\uffff\1\34\11\uffff\1\115",
"\1\33\5\uffff\1\34",
"\1\33\5\uffff\1\34\11\uffff\1\67",
"\1\33\5\uffff\1\34\11\uffff\1\67",
"\1\33\5\uffff\1\34\11\uffff\1\67",
"\1\120\33\uffff\1\117\1\116",
"\1\123\33\uffff\1\122\1\121",
"\1\27\5\uffff\1\30\11\uffff\1\114",
"\1\27\5\uffff\1\30\11\uffff\1\114",
"\1\27\5\uffff\1\30\11\uffff\1\114",
"\1\34\5\uffff\1\35\11\uffff\1\115",
"\1\34\5\uffff\1\35\11\uffff\1\115",
"\1\34\5\uffff\1\35\11\uffff\1\115"
"\1\33\5\uffff\1\34\11\uffff\1\115",
"\1\33\5\uffff\1\34\11\uffff\1\115",
"\1\33\5\uffff\1\34\11\uffff\1\115"
};

static final short[] DFA67_eot = DFA.unpackEncodedString(DFA67_eotS);

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 ASTVerifier.g 2010-05-24 16:31:58
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 ASTVerifier.g 2010-06-14 12:35:33

/*
[The "BSD license"]

@@ -2543,12 +2543,12 @@ public class ASTVerifier extends TreeParser {
if ( (LA34_1==DOWN) ) {
int LA34_2 = input.LA(3);

if ( (LA34_2==BLOCK) ) {
alt34=2;
}
else if ( (LA34_2==TOKEN_REF||LA34_2==STRING_LITERAL) ) {
if ( (LA34_2==TOKEN_REF||LA34_2==STRING_LITERAL) ) {
alt34=1;
}
else if ( (LA34_2==BLOCK) ) {
alt34=2;
}
else {
NoViableAltException nvae =
new NoViableAltException("", 34, 2, input);

@@ -4251,8 +4251,8 @@ public class ASTVerifier extends TreeParser {
"\1\141\2\2\2\uffff\1\2\1\141\2\uffff\2\104\1\127\10\uffff\1\104"+
"\2\uffff";
static final String DFA33_acceptS =
"\3\uffff\1\5\1\10\2\uffff\1\15\1\16\3\uffff\1\13\1\14\1\3\1\1\1"+
"\6\1\2\1\7\1\4\1\uffff\1\11\1\12";
"\3\uffff\1\5\1\10\2\uffff\1\15\1\16\3\uffff\1\13\1\14\1\1\1\3\1"+
"\6\1\2\1\4\1\7\1\uffff\1\11\1\12";
static final String DFA33_specialS =
"\27\uffff}>";
static final String[] DFA33_transitionS = {

@@ -4269,8 +4269,8 @@ public class ASTVerifier extends TreeParser {
"\3\15\14\uffff\1\15\2\uffff\1\15",
"",
"",
"\1\17\4\uffff\1\16\1\uffff\1\20\1\10\3\uffff\1\20",
"\1\21\4\uffff\1\23\1\uffff\1\22\1\10\3\uffff\1\22",
"\1\16\4\uffff\1\17\1\uffff\1\20\1\10\3\uffff\1\20",
"\1\21\4\uffff\1\22\1\uffff\1\23\1\10\3\uffff\1\23",
"\1\24",
"",
"",

@@ -1,4 +1,4 @@
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 ActionSplitter.g 2010-05-24 16:31:57
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 ActionSplitter.g 2010-06-14 12:35:32

package org.antlr.v4.parse;

@ -1,4 +1,4 @@
|
|||
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 NFABuilder.g 2010-05-24 16:31:58
|
||||
// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 NFABuilder.g 2010-06-14 12:35:33
|
||||
|
||||
/*
|
||||
[The "BSD license"]
|
||||
|
@ -1153,12 +1153,12 @@ public class NFABuilder extends TreeParser {
|
|||
if ( (LA11_1==DOWN) ) {
|
||||
int LA11_2 = input.LA(3);
|
||||
|
||||
if ( (LA11_2==BLOCK) ) {
|
||||
alt11=2;
|
||||
}
|
||||
else if ( (LA11_2==BANG||LA11_2==ROOT||LA11_2==TOKEN_REF||LA11_2==STRING_LITERAL) ) {
|
||||
if ( (LA11_2==BANG||LA11_2==ROOT||LA11_2==TOKEN_REF||LA11_2==STRING_LITERAL) ) {
|
||||
alt11=1;
|
||||
}
|
||||
else if ( (LA11_2==BLOCK) ) {
|
||||
alt11=2;
|
||||
}
|
||||
else {
|
||||
NoViableAltException nvae =
|
||||
new NoViableAltException("", 11, 2, input);
|
||||
|
@ -1620,7 +1620,7 @@ public class NFABuilder extends TreeParser {
|
|||
static final String DFA7_maxS =
|
||||
"\1\63\2\2\2\127\2\141\4\uffff";
|
||||
static final String DFA7_acceptS =
|
||||
"\7\uffff\1\2\1\1\1\4\1\3";
|
||||
"\7\uffff\1\1\1\2\1\3\1\4";
|
||||
static final String DFA7_specialS =
|
||||
"\13\uffff}>";
|
||||
static final String[] DFA7_transitionS = {
|
||||
|
@@ -1629,10 +1629,10 @@ public class NFABuilder extends TreeParser {
         "\1\4",
         "\1\5",
         "\1\6",
-        "\1\10\4\uffff\1\10\1\uffff\2\10\4\uffff\1\10\1\uffff\2\10\3"+
-        "\uffff\1\10\10\uffff\1\7\23\uffff\1\10",
-        "\1\12\4\uffff\1\12\1\uffff\2\12\4\uffff\1\12\1\uffff\2\12\3"+
-        "\uffff\1\12\10\uffff\1\11\23\uffff\1\12",
+        "\1\7\4\uffff\1\7\1\uffff\2\7\4\uffff\1\7\1\uffff\2\7\3\uffff"+
+        "\1\7\10\uffff\1\10\23\uffff\1\7",
+        "\1\11\4\uffff\1\11\1\uffff\2\11\4\uffff\1\11\1\uffff\2\11\3"+
+        "\uffff\1\11\10\uffff\1\12\23\uffff\1\11",
         "",
         "",
         "",
@@ -1683,8 +1683,8 @@ public class NFABuilder extends TreeParser {
         "\1\141\2\2\2\uffff\1\2\1\141\2\uffff\2\104\1\127\6\uffff\1\104\1"+
         "\uffff\2\2\1\uffff\2\104";
     static final String DFA10_acceptS =
-        "\3\uffff\1\5\1\6\2\uffff\1\13\1\14\3\uffff\1\11\1\12\1\1\1\3\1\4"+
-        "\1\2\1\uffff\1\7\2\uffff\1\10\2\uffff";
+        "\3\uffff\1\5\1\6\2\uffff\1\13\1\14\3\uffff\1\11\1\12\1\1\1\3\1\2"+
+        "\1\4\1\uffff\1\7\2\uffff\1\10\2\uffff";
     static final String DFA10_specialS =
         "\31\uffff}>";
     static final String[] DFA10_transitionS = {
@@ -1703,7 +1703,7 @@ public class NFABuilder extends TreeParser {
         "",
         "\1\7\4\uffff\1\7\2\uffff\1\16\4\uffff\1\17\1\uffff\1\7\1\10"+
         "\3\uffff\1\7",
-        "\1\7\4\uffff\1\7\2\uffff\1\21\4\uffff\1\20\1\uffff\1\7\1\10"+
+        "\1\7\4\uffff\1\7\2\uffff\1\20\4\uffff\1\21\1\uffff\1\7\1\10"+
         "\3\uffff\1\7",
         "\1\22",
         "",
@@ -1,4 +1,4 @@
-// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 BasicSemanticTriggers.g 2010-05-24 16:31:59
+// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 BasicSemanticTriggers.g 2010-06-14 12:35:34
 
 /*
 [The "BSD license"]
@@ -1,4 +1,4 @@
-// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 CollectSymbols.g 2010-05-24 16:31:59
+// $ANTLR 3.2.1-SNAPSHOT May 24, 2010 15:02:05 CollectSymbols.g 2010-06-14 12:35:34
 
 /*
 [The "BSD license"]
@@ -1804,18 +1804,18 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         }
     }
     static final String DFA5_eotS =
-        "\40\uffff";
+        "\41\uffff";
     static final String DFA5_eofS =
-        "\40\uffff";
+        "\41\uffff";
     static final String DFA5_minS =
-        "\1\4\3\2\1\uffff\3\4\1\2\1\3\1\2\1\uffff\2\3\1\uffff\4\2\4\4\1\2"+
+        "\1\4\3\2\1\uffff\3\4\1\2\1\3\1\2\2\uffff\2\3\1\uffff\4\2\4\4\1\2"+
         "\2\3\1\2\3\3\1\uffff\1\3";
     static final String DFA5_maxS =
-        "\4\146\1\uffff\6\146\1\uffff\2\146\1\uffff\17\146\1\uffff\1\146";
+        "\4\146\1\uffff\6\146\2\uffff\2\146\1\uffff\17\146\1\uffff\1\146";
     static final String DFA5_acceptS =
-        "\4\uffff\1\1\6\uffff\1\2\2\uffff\1\2\17\uffff\1\2\1\uffff";
+        "\4\uffff\1\1\6\uffff\2\2\2\uffff\1\2\17\uffff\1\2\1\uffff";
     static final String DFA5_specialS =
-        "\40\uffff}>";
+        "\41\uffff}>";
     static final String[] DFA5_transitionS = {
         "\70\4\1\2\16\4\1\1\1\4\1\3\31\4",
         "\1\5\1\uffff\143\4",
@@ -1825,30 +1825,31 @@ public class CollectSymbols extends org.antlr.v4.runtime.tree.TreeFilter {
         "\143\10",
         "\123\4\1\11\17\4",
         "\143\12",
-        "\2\13\143\10",
-        "\15\4\1\14\126\4",
-        "\1\13\1\15\143\12",
+        "\1\13\1\14\143\10",
+        "\15\4\1\15\126\4",
+        "\1\14\1\16\143\12",
         "",
-        "\1\16\143\4",
-        "\1\16\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
-        "",
-        "\1\23\1\16\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
-        "\1\24\1\16\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
-        "\1\25\1\16\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
-        "\1\26\1\16\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
-        "\143\27",
-        "\123\31\1\30\17\31",
+        "\1\17\143\4",
+        "\1\17\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
+        "",
+        "\1\24\1\17\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
+        "\1\25\1\17\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
+        "\1\26\1\17\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
+        "\1\27\1\17\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
+        "\143\30",
+        "\123\32\1\31\17\32",
+        "\143\33",
         "\143\32",
         "\143\31",
-        "\1\4\1\33\143\27",
-        "\1\35\14\31\1\34\126\31",
-        "\1\35\143\31",
-        "\1\4\1\36\143\32",
-        "\1\36\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
-        "\1\37\143\31",
-        "\1\36\70\22\1\20\16\22\1\17\1\22\1\21\31\22",
+        "\1\4\1\34\143\30",
+        "\1\36\14\32\1\35\126\32",
+        "\1\36\143\32",
+        "\1\4\1\37\143\33",
+        "\1\37\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
+        "\1\40\143\32",
+        "\1\37\70\23\1\21\16\23\1\20\1\23\1\22\31\23",
         "",
-        "\1\36\70\22\1\20\16\22\1\17\1\22\1\21\31\22"
+        "\1\37\70\23\1\21\16\23\1\20\1\23\1\22\31\23"
     };
 
     static final short[] DFA5_eot = DFA.unpackEncodedString(DFA5_eotS);
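The churn in this table is mechanical: DFA5 grew from 32 states (`\40\uffff`) to 33 (`\41\uffff`), so every encoded reference to a state at or above the insertion point shifts up by one, which is why `\36` becomes `\37` and so on throughout the transition strings. A sketch of that renumbering, under the simplifying assumption that cell values encode target states directly:

    // Hypothetical renumbering pass over one run-length encoded
    // transition string after inserting a DFA state; '\uffff' cells
    // (no transition) stay put. Illustration only, not tool code.
    public class ShiftStates {
        static String shift(String encoded, int insertedState) {
            StringBuilder out = new StringBuilder();
            for (int i = 0; i < encoded.length(); i += 2) {
                char count = encoded.charAt(i);
                char value = encoded.charAt(i + 1);
                if (value != '\uffff' && value >= insertedState) value++;
                out.append(count).append(value);
            }
            return out.toString();
        }

        public static void main(String[] args) {
            // "\1\36" (one cell -> state 30) becomes "\1\37" after inserting state 12
            System.out.println((int) shift("\1\36", 12).charAt(1)); // 31
        }
    }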
@@ -2,6 +2,7 @@ package org.antlr.v4.tool;
 
 import org.antlr.v4.Tool;
 import org.antlr.v4.analysis.NFAConfig;
+import org.antlr.v4.analysis.SemanticContext;
 import org.antlr.v4.automata.*;
 import org.antlr.v4.misc.Utils;
 import org.stringtemplate.v4.ST;
@@ -170,7 +171,7 @@ public class DOTGenerator {
             DFAState d = work.get(0);
             if ( markedStates.contains(d) ) { work.remove(0); continue; }
             markedStates.add(d);
-
+
             // make a DOT edge for each transition
             for (int i = 0; i < d.getNumberOfEdges(); i++) {
                 Edge edge = d.edge(i);
@@ -178,15 +179,16 @@ public class DOTGenerator {
             System.out.println("dfa "+s.dfa.decisionNumber+
                 " edge from s"+s.stateNumber+" ["+i+"] of "+s.getNumberOfTransitions());
             */
+            String label = getEdgeLabel(edge.toString(grammar));
             ST st = stlib.getInstanceOf("edge");
-            // SemanticContext preds = s.getGatedPredicatesInNFAConfigurations();
-            // if ( preds!=null ) {
-            //     String predsStr = "";
-            //     predsStr = "&&{"+preds.toString()+"}?";
-            //     label += predsStr;
-            // }
+            SemanticContext preds = edge.semanticContext; //edge.target.getGatedPredicatesInNFAConfigurations();
+            if ( preds!=null ) {
+                String predsStr = "";
+                predsStr = "&&"+preds.toString();
+                label += predsStr;
+            }
 
-            st.add("label", getEdgeLabel(edge.toString(grammar)));
+            st.add("label", label);
             st.add("src", "s"+d.stateNumber);
             st.add("target", "s"+edge.target.stateNumber);
             st.add("arrowhead", arrowhead);
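With this change an edge's gated semantic predicate is appended to its DOT label, so a predicated lexer decision renders as, e.g., `'b'&&{p1}?`. A standalone sketch of the rendering step, using an inline StringTemplate in place of the generator's real `edge` template:

    import org.stringtemplate.v4.ST;

    // Sketch: how a predicate-carrying edge might render; the template
    // text here is a stand-in for the tool's actual "edge" template.
    public class EdgeDemo {
        public static void main(String[] args) {
            ST st = new ST("<src> -> <target> [label=\"<label>\", arrowhead=<arrowhead>];");
            String label = "'b'" + "&&{p1}?";   // base transition label plus gated predicate
            st.add("label", label);
            st.add("src", "s1");
            st.add("target", "s3");
            st.add("arrowhead", "normal");
            System.out.println(st.render());
            // s1 -> s3 [label="'b'&&{p1}?", arrowhead=normal];
        }
    }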
@@ -321,14 +323,12 @@ public class DOTGenerator {
         if ( s.isAcceptState ) {
             if ( s instanceof LexerState ) {
                 buf.append("=>");
-                for (Rule r : ((LexerState)s).matchesRules) {
-                    buf.append(" "+r.name);
-                }
+                buf.append(((LexerState)s).predictsRule.name);
             }
             else {
                 buf.append("=>"+s.getUniquelyPredictedAlt());
             }
         }
     }
     if ( Tool.internalOption_ShowNFAConfigsInDFA ) {
         Set<Integer> alts = ((DFAState)s).getAltSet();
         if ( alts!=null ) {
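This hunk tracks the model change from a lexer accept state listing every rule it could match (`matchesRules`) to predicting exactly one rule (`predictsRule`); the test expectations below change from `:s4=> IF ID` to `:s4=>IF` accordingly. The label construction reduces to a sketch like:

    // Accept-state label after the change: one predicted rule,
    // no space-separated list. Illustration, not the tool's code.
    public class AcceptLabel {
        // e.g. acceptLabel(4, "IF") -> ":s4=>IF"
        static String acceptLabel(int stateNumber, String predictedRule) {
            return ":s" + stateNumber + "=>" + predictedRule;
        }
    }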
@@ -226,6 +226,15 @@ public class Grammar implements AttributeResolver {
             r.index = ruleNumber++;
         }
 
+//    public int getNumRules() {
+//        int n = rules.size();
+//        List<Grammar> imports = getAllImportedGrammars();
+//        if ( imports!=null ) {
+//            for (Grammar g : imports) n += g.getNumRules();
+//        }
+//        return n;
+//    }
+
     public Rule getRule(String name) {
         Rule r = rules.get(name);
         if ( r!=null ) return r;
@@ -2,6 +2,7 @@ package org.antlr.v4.tool;
 
 import org.antlr.runtime.Token;
 import org.antlr.runtime.tree.Tree;
+import org.antlr.v4.parse.ANTLRParser;
 
 public class RuleAST extends GrammarASTWithOptions {
     public RuleAST(GrammarAST node) {
@@ -14,4 +15,15 @@ public class RuleAST extends GrammarASTWithOptions {
     @Override
     public Tree dupNode() { return new RuleAST(this); }
 
+    public ActionAST getLexerAction() {
+        Tree blk = getFirstChildWithType(ANTLRParser.BLOCK);
+        if ( blk.getChildCount()==1 ) {
+            Tree onlyAlt = blk.getChild(0);
+            Tree lastChild = onlyAlt.getChild(onlyAlt.getChildCount()-1);
+            if ( lastChild.getType()==ANTLRParser.ACTION ) {
+                return (ActionAST)lastChild;
+            }
+        }
+        return null;
+    }
 }
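The new helper assumes the rule AST shape `(RULE name (BLOCK (ALT ...)))` and returns the alternative's trailing ACTION node, or null when the rule has more than one alternative or no trailing action. A hypothetical caller, for a rule such as `WS : ' '+ {skip();} ;`:

    // Hypothetical usage sketch, not code from this commit; RuleAST and
    // ActionAST are the tool classes shown above.
    class LexerActionProbe {
        void attach(RuleAST r) {
            ActionAST action = r.getLexerAction();
            if ( action != null ) {
                // a lexer supporting actions/preds would hook this onto the
                // rule's accept so it runs when the token is matched
                System.out.println("trailing action: " + action.getText());
            }
        }
    }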
@@ -7,15 +7,15 @@ public class TestLexerDFAConstruction extends BaseTest {
     @Test public void unicode() throws Exception {
         String g =
             "lexer grammar L;\n" +
-            "A : '\\u0030'..'\\u8000'+ 'a' ;\n" + // TODO: FAILS; \\u not converted
+            "A : '\\u0030'..'\\u8000'+ 'a' ;\n" +
             "B : '\\u0020' ;";
         String expecting =
             "s0-{'0'..'\\u8000'}->s1\n" +
-            "s0-' '->:s2=> B\n" +
-            "s1-'a'->:s3=> A\n" +
+            "s0-' '->:s2=>B\n" +
+            "s1-'a'->:s3=>A\n" +
             "s1-{'0'..'`', 'b'..'\\u8000'}->s1\n" +
-            ":s3=> A-'a'->:s3=> A\n" +
-            ":s3=> A-{'0'..'`', 'b'..'\\u8000'}->s1\n";
+            ":s3=>A-'a'->:s3=>A\n" +
+            ":s3=>A-{'0'..'`', 'b'..'\\u8000'}->s1\n";
         checkLexerDFA(g, expecting);
     }
 
@@ -28,14 +28,14 @@ public class TestLexerDFAConstruction extends BaseTest {
             "public fragment\n" +
             "DIGIT : '0'..'9' ;";
         String expecting =
-            "s0-'i'->:s1=> ID\n" +
-            "s0-{'a'..'h', 'j'..'z'}->:s2=> ID\n" +
-            "s0-{'0'..'9'}->:s3=> INT\n" +
-            ":s1=> ID-'f'->:s4=> IF ID\n" +
-            ":s1=> ID-{'a'..'e', 'g'..'z'}->:s2=> ID\n" +
-            ":s2=> ID-{'a'..'z'}->:s2=> ID\n" +
-            ":s3=> INT-{'0'..'9'}->:s3=> INT\n" +
-            ":s4=> IF ID-{'a'..'z'}->:s2=> ID\n";
+            "s0-'i'->:s1=>ID\n" +
+            "s0-{'a'..'h', 'j'..'z'}->:s2=>ID\n" +
+            "s0-{'0'..'9'}->:s3=>INT\n" +
+            ":s1=>ID-'f'->:s4=>IF\n" +
+            ":s1=>ID-{'a'..'e', 'g'..'z'}->:s2=>ID\n" +
+            ":s2=>ID-{'a'..'z'}->:s2=>ID\n" +
+            ":s3=>INT-{'0'..'9'}->:s3=>INT\n" +
+            ":s4=>IF-{'a'..'z'}->:s2=>ID\n";
         checkLexerDFA(g, expecting);
     }
 
@@ -62,11 +62,54 @@ public class TestLexerDFAConstruction extends BaseTest {
             "B : 'b' ;\n" +
             "C : 'c' ;\n";
         String expecting =
-            "s0-'b'->:s1=> B\n" +
-            "s0-'c'->:s2=> C\n";
+            "s0-'b'->:s1=>B\n" +
+            "s0-'c'->:s2=>C\n";
         checkLexerDFA(g, "FOO", expecting);
     }
 
+    @Test public void pred() throws Exception {
+        String g =
+            "lexer grammar L;\n" +
+            "A : {p1}? 'a' 'b' ;\n" +
+            "B : 'a' 'b' ;";
+        String expecting =
+            "s0-'a'->s1\n" +
+            "s1-'b'->s2\n" +
+            "s2-{p1}?->:s3=>A\n" +
+            "s2-true->:s4=>B\n";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void gated_pred() throws Exception {
+        String g =
+            "lexer grammar pred;\n" +
+            "A : {p1}?=> 'a' 'b'\n" +
+            "  | 'a' 'c' \n" +
+            "  | 'b'\n" +
+            "  ;";
+        String expecting =
+            "s0-'a'->s1\n" +
+            "s0-'b'->:s2=>A\n" +
+            "s1-'b'&&{p1}?->:s3=>A\n" +
+            "s1-'c'->:s4=>A\n";
+        checkLexerDFA(g, expecting);
+    }
+
+    @Test public void gated_pred2() throws Exception {
+        String g =
+            "lexer grammar T;\n" +
+            "A : {p1}?=> 'a' 'b'\n" +
+            "  | 'b'\n" +
+            "  ;\n" +
+            "B : 'a' 'c' ;";
+        String expecting =
+            "s0-'a'->s1\n" +
+            "s0-'b'->:s2=>A\n" +
+            "s1-'b'&&{p1}?->:s3=>A\n" +
+            "s1-'c'->:s4=>B\n";
+        checkLexerDFA(g, expecting);
+    }
+
 
     public void _template() throws Exception {
         String g =
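In the new tests, `s2-{p1}?->:s3=>A` and `s2-true->:s4=>B` say that after matching `ab` the accept decision itself is predicated: token type A if p1 holds, otherwise the default `true` edge yields B. A small hand-written illustration of that ordered, first-true-wins resolution (not the runtime's implementation):

    import java.util.List;
    import java.util.function.BooleanSupplier;

    // Ordered predicate edges out of one DFA accept decision; the first
    // true predicate wins, and a constant-true edge is the fallback.
    public class PredAccept {
        record PredEdge(BooleanSupplier pred, String token) {}

        static String resolve(List<PredEdge> edges) {
            for (PredEdge e : edges) {
                if (e.pred().getAsBoolean()) return e.token;
            }
            throw new IllegalStateException("no viable accept edge");
        }

        public static void main(String[] args) {
            boolean p1 = false; // pretend the {p1}? predicate failed
            List<PredEdge> s2 = List.of(
                new PredEdge(() -> p1, "A"),     // s2-{p1}?->:s3=>A
                new PredEdge(() -> true, "B"));  // s2-true->:s4=>B
            System.out.println(resolve(s2));     // B
        }
    }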