diff --git a/build.xml b/build.xml
deleted file mode 100644
index 8f309edb7..000000000
--- a/build.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-
-
-
diff --git a/gunit/resources/org/antlr/v4/gunit/jUnit.stg b/gunit/resources/org/antlr/v4/gunit/jUnit.stg
deleted file mode 100644
index 9aeb8d874..000000000
--- a/gunit/resources/org/antlr/v4/gunit/jUnit.stg
+++ /dev/null
@@ -1,43 +0,0 @@
-group jUnit;
-
-jUnitClass(className, header, options, suites) ::= <<
-
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.*;
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.*;
-
-public class  extends org.antlr.v4.gunit.gUnitBase {
-    @Before public void setup() {
-        lexerClassName = "";
-        parserClassName = "";
-
-        adaptorClassName = "";
-
-    }
-
-}
->>
-
-header(action) ::= ""
-
-testSuite(name,cases) ::= <<
-}; separator="\n\n"> !>
->>
-
-parserRuleTestSuccess(input,expecting) ::= <<
->>
-
-parserRuleTestAST(ruleName,scriptLine,input,expecting) ::= <<
-@Test public void test_() throws Exception {
-    // gunit test on line
-    RuleReturnScope rstruct = (RuleReturnScope)execParser("", "", );
-    Object actual = ((Tree)rstruct.getTree()).toStringTree();
-    Object expecting = "";
-    assertEquals("testing rule ", expecting, actual);
-}
->>
-
-string(s) ::= ""
diff --git a/gunit/src/org/antlr/v4/gunit/ASTVerifier.g b/gunit/src/org/antlr/v4/gunit/ASTVerifier.g
deleted file mode 100644
index 876deeb02..000000000
--- a/gunit/src/org/antlr/v4/gunit/ASTVerifier.g
+++ /dev/null
@@ -1,46 +0,0 @@
-tree grammar ASTVerifier;
-
-options {
-    ASTLabelType=CommonTree;
-    tokenVocab = gUnit;
-}
-
-@header {
-package org.antlr.v4.gunit;
-}
-
-gUnitDef
-    : ^('gunit' ID DOC_COMMENT? (optionsSpec|header)* testsuite+)
-    ;
-
-optionsSpec
-    : ^(OPTIONS option+)
-    ;
-
-option
-    : ^('=' ID ID)
-    | ^('=' ID STRING)
-    ;
-
-header : ^('@header' ACTION);
-
-testsuite
-    : ^(SUITE ID ID DOC_COMMENT? testcase+)
-    | ^(SUITE ID DOC_COMMENT? testcase+)
-    ;
-
-testcase
-    : ^(TEST_OK DOC_COMMENT? input)
-    | ^(TEST_FAIL DOC_COMMENT? input)
-    | ^(TEST_RETVAL DOC_COMMENT? input RETVAL)
-    | ^(TEST_STDOUT DOC_COMMENT? input STRING)
-    | ^(TEST_STDOUT DOC_COMMENT? input ML_STRING)
-    | ^(TEST_TREE DOC_COMMENT? input TREE)
-    | ^(TEST_ACTION DOC_COMMENT?
input ACTION) - ; - -input - : STRING - | ML_STRING - | FILENAME - ; \ No newline at end of file diff --git a/gunit/src/org/antlr/v4/gunit/ASTVerifier.java b/gunit/src/org/antlr/v4/gunit/ASTVerifier.java deleted file mode 100644 index ef722bbc7..000000000 --- a/gunit/src/org/antlr/v4/gunit/ASTVerifier.java +++ /dev/null @@ -1,981 +0,0 @@ -// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 ASTVerifier.g 2010-01-27 17:03:31 - -package org.antlr.v4.gunit; - - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.*;import java.util.Stack; -import java.util.List; -import java.util.ArrayList; - -public class ASTVerifier extends TreeParser { - public static final String[] tokenNames = new String[] { - "", "", "", "", "SUITE", "TEST_OK", "TEST_FAIL", "TEST_RETVAL", "TEST_STDOUT", "TEST_TREE", "TEST_ACTION", "DOC_COMMENT", "ID", "OPTIONS", "STRING", "ACTION", "RETVAL", "ML_STRING", "TREE", "FILENAME", "NESTED_RETVAL", "NESTED_AST", "STRING_", "WS", "ID_", "SL_COMMENT", "ML_COMMENT", "XDIGIT", "'gunit'", "';'", "'}'", "'='", "'@header'", "'walks'", "':'", "'OK'", "'FAIL'", "'returns'", "'->'" - }; - public static final int T__29=29; - public static final int T__28=28; - public static final int RETVAL=16; - public static final int TEST_TREE=9; - public static final int STRING_=22; - public static final int NESTED_AST=21; - public static final int ML_STRING=17; - public static final int TEST_FAIL=6; - public static final int ID=12; - public static final int EOF=-1; - public static final int NESTED_RETVAL=20; - public static final int TEST_RETVAL=7; - public static final int TEST_STDOUT=8; - public static final int ACTION=15; - public static final int TEST_OK=5; - public static final int ML_COMMENT=26; - public static final int T__30=30; - public static final int T__31=31; - public static final int T__32=32; - public static final int T__33=33; - public static final int WS=23; - public static final int T__34=34; - public static final int T__35=35; - public static final int T__36=36; - public static final int TREE=18; - public static final int T__37=37; - public static final int T__38=38; - public static final int FILENAME=19; - public static final int ID_=24; - public static final int XDIGIT=27; - public static final int SL_COMMENT=25; - public static final int DOC_COMMENT=11; - public static final int TEST_ACTION=10; - public static final int SUITE=4; - public static final int OPTIONS=13; - public static final int STRING=14; - - // delegates - // delegators - - - public ASTVerifier(TreeNodeStream input) { - this(input, new RecognizerSharedState()); - } - public ASTVerifier(TreeNodeStream input, RecognizerSharedState state) { - super(input, state); - - } - - - public String[] getTokenNames() { return ASTVerifier.tokenNames; } - public String getGrammarFileName() { return "ASTVerifier.g"; } - - - - // $ANTLR start "gUnitDef" - // ASTVerifier.g:12:1: gUnitDef : ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec | header )* ( testsuite )+ ) ; - public final void gUnitDef() throws RecognitionException { - try { - // ASTVerifier.g:13:2: ( ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec | header )* ( testsuite )+ ) ) - // ASTVerifier.g:13:4: ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec | header )* ( testsuite )+ ) - { - match(input,28,FOLLOW_28_in_gUnitDef39); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_gUnitDef41); - // ASTVerifier.g:13:17: ( DOC_COMMENT )? 
- int alt1=2; - int LA1_0 = input.LA(1); - - if ( (LA1_0==DOC_COMMENT) ) { - alt1=1; - } - switch (alt1) { - case 1 : - // ASTVerifier.g:13:17: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_gUnitDef43); - - } - break; - - } - - // ASTVerifier.g:13:30: ( optionsSpec | header )* - loop2: - do { - int alt2=3; - int LA2_0 = input.LA(1); - - if ( (LA2_0==OPTIONS) ) { - alt2=1; - } - else if ( (LA2_0==32) ) { - alt2=2; - } - - - switch (alt2) { - case 1 : - // ASTVerifier.g:13:31: optionsSpec - { - pushFollow(FOLLOW_optionsSpec_in_gUnitDef47); - optionsSpec(); - - state._fsp--; - - - } - break; - case 2 : - // ASTVerifier.g:13:43: header - { - pushFollow(FOLLOW_header_in_gUnitDef49); - header(); - - state._fsp--; - - - } - break; - - default : - break loop2; - } - } while (true); - - // ASTVerifier.g:13:52: ( testsuite )+ - int cnt3=0; - loop3: - do { - int alt3=2; - int LA3_0 = input.LA(1); - - if ( (LA3_0==SUITE) ) { - alt3=1; - } - - - switch (alt3) { - case 1 : - // ASTVerifier.g:13:52: testsuite - { - pushFollow(FOLLOW_testsuite_in_gUnitDef53); - testsuite(); - - state._fsp--; - - - } - break; - - default : - if ( cnt3 >= 1 ) break loop3; - EarlyExitException eee = - new EarlyExitException(3, input); - throw eee; - } - cnt3++; - } while (true); - - - match(input, Token.UP, null); - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "gUnitDef" - - - // $ANTLR start "optionsSpec" - // ASTVerifier.g:16:1: optionsSpec : ^( OPTIONS ( option )+ ) ; - public final void optionsSpec() throws RecognitionException { - try { - // ASTVerifier.g:17:2: ( ^( OPTIONS ( option )+ ) ) - // ASTVerifier.g:17:4: ^( OPTIONS ( option )+ ) - { - match(input,OPTIONS,FOLLOW_OPTIONS_in_optionsSpec67); - - match(input, Token.DOWN, null); - // ASTVerifier.g:17:14: ( option )+ - int cnt4=0; - loop4: - do { - int alt4=2; - int LA4_0 = input.LA(1); - - if ( (LA4_0==31) ) { - alt4=1; - } - - - switch (alt4) { - case 1 : - // ASTVerifier.g:17:14: option - { - pushFollow(FOLLOW_option_in_optionsSpec69); - option(); - - state._fsp--; - - - } - break; - - default : - if ( cnt4 >= 1 ) break loop4; - EarlyExitException eee = - new EarlyExitException(4, input); - throw eee; - } - cnt4++; - } while (true); - - - match(input, Token.UP, null); - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "optionsSpec" - - - // $ANTLR start "option" - // ASTVerifier.g:20:1: option : ( ^( '=' ID ID ) | ^( '=' ID STRING ) ); - public final void option() throws RecognitionException { - try { - // ASTVerifier.g:21:5: ( ^( '=' ID ID ) | ^( '=' ID STRING ) ) - int alt5=2; - int LA5_0 = input.LA(1); - - if ( (LA5_0==31) ) { - int LA5_1 = input.LA(2); - - if ( (LA5_1==DOWN) ) { - int LA5_2 = input.LA(3); - - if ( (LA5_2==ID) ) { - int LA5_3 = input.LA(4); - - if ( (LA5_3==ID) ) { - alt5=1; - } - else if ( (LA5_3==STRING) ) { - alt5=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 3, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 2, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 1, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 0, input); - - throw nvae; - } - switch (alt5) { - case 1 : - // ASTVerifier.g:21:9: ^( '=' ID ID ) - { - 
match(input,31,FOLLOW_31_in_option88); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_option90); - match(input,ID,FOLLOW_ID_in_option92); - - match(input, Token.UP, null); - - } - break; - case 2 : - // ASTVerifier.g:22:9: ^( '=' ID STRING ) - { - match(input,31,FOLLOW_31_in_option104); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_option106); - match(input,STRING,FOLLOW_STRING_in_option108); - - match(input, Token.UP, null); - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "option" - - - // $ANTLR start "header" - // ASTVerifier.g:25:1: header : ^( '@header' ACTION ) ; - public final void header() throws RecognitionException { - try { - // ASTVerifier.g:25:8: ( ^( '@header' ACTION ) ) - // ASTVerifier.g:25:10: ^( '@header' ACTION ) - { - match(input,32,FOLLOW_32_in_header125); - - match(input, Token.DOWN, null); - match(input,ACTION,FOLLOW_ACTION_in_header127); - - match(input, Token.UP, null); - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "header" - - - // $ANTLR start "testsuite" - // ASTVerifier.g:27:1: testsuite : ( ^( SUITE ID ID ( DOC_COMMENT )? ( testcase )+ ) | ^( SUITE ID ( DOC_COMMENT )? ( testcase )+ ) ); - public final void testsuite() throws RecognitionException { - try { - // ASTVerifier.g:28:2: ( ^( SUITE ID ID ( DOC_COMMENT )? ( testcase )+ ) | ^( SUITE ID ( DOC_COMMENT )? ( testcase )+ ) ) - int alt10=2; - int LA10_0 = input.LA(1); - - if ( (LA10_0==SUITE) ) { - int LA10_1 = input.LA(2); - - if ( (LA10_1==DOWN) ) { - int LA10_2 = input.LA(3); - - if ( (LA10_2==ID) ) { - int LA10_3 = input.LA(4); - - if ( (LA10_3==ID) ) { - alt10=1; - } - else if ( ((LA10_3>=TEST_OK && LA10_3<=DOC_COMMENT)) ) { - alt10=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 3, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 2, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 1, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 0, input); - - throw nvae; - } - switch (alt10) { - case 1 : - // ASTVerifier.g:28:4: ^( SUITE ID ID ( DOC_COMMENT )? ( testcase )+ ) - { - match(input,SUITE,FOLLOW_SUITE_in_testsuite138); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_testsuite140); - match(input,ID,FOLLOW_ID_in_testsuite142); - // ASTVerifier.g:28:18: ( DOC_COMMENT )? - int alt6=2; - int LA6_0 = input.LA(1); - - if ( (LA6_0==DOC_COMMENT) ) { - alt6=1; - } - switch (alt6) { - case 1 : - // ASTVerifier.g:28:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testsuite144); - - } - break; - - } - - // ASTVerifier.g:28:31: ( testcase )+ - int cnt7=0; - loop7: - do { - int alt7=2; - int LA7_0 = input.LA(1); - - if ( ((LA7_0>=TEST_OK && LA7_0<=TEST_ACTION)) ) { - alt7=1; - } - - - switch (alt7) { - case 1 : - // ASTVerifier.g:28:31: testcase - { - pushFollow(FOLLOW_testcase_in_testsuite147); - testcase(); - - state._fsp--; - - - } - break; - - default : - if ( cnt7 >= 1 ) break loop7; - EarlyExitException eee = - new EarlyExitException(7, input); - throw eee; - } - cnt7++; - } while (true); - - - match(input, Token.UP, null); - - } - break; - case 2 : - // ASTVerifier.g:29:4: ^( SUITE ID ( DOC_COMMENT )? 
( testcase )+ ) - { - match(input,SUITE,FOLLOW_SUITE_in_testsuite155); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_testsuite157); - // ASTVerifier.g:29:15: ( DOC_COMMENT )? - int alt8=2; - int LA8_0 = input.LA(1); - - if ( (LA8_0==DOC_COMMENT) ) { - alt8=1; - } - switch (alt8) { - case 1 : - // ASTVerifier.g:29:15: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testsuite159); - - } - break; - - } - - // ASTVerifier.g:29:28: ( testcase )+ - int cnt9=0; - loop9: - do { - int alt9=2; - int LA9_0 = input.LA(1); - - if ( ((LA9_0>=TEST_OK && LA9_0<=TEST_ACTION)) ) { - alt9=1; - } - - - switch (alt9) { - case 1 : - // ASTVerifier.g:29:28: testcase - { - pushFollow(FOLLOW_testcase_in_testsuite162); - testcase(); - - state._fsp--; - - - } - break; - - default : - if ( cnt9 >= 1 ) break loop9; - EarlyExitException eee = - new EarlyExitException(9, input); - throw eee; - } - cnt9++; - } while (true); - - - match(input, Token.UP, null); - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "testsuite" - - - // $ANTLR start "testcase" - // ASTVerifier.g:32:1: testcase : ( ^( TEST_OK ( DOC_COMMENT )? input ) | ^( TEST_FAIL ( DOC_COMMENT )? input ) | ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ^( TEST_TREE ( DOC_COMMENT )? input TREE ) | ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) ); - public final void testcase() throws RecognitionException { - try { - // ASTVerifier.g:33:2: ( ^( TEST_OK ( DOC_COMMENT )? input ) | ^( TEST_FAIL ( DOC_COMMENT )? input ) | ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ^( TEST_TREE ( DOC_COMMENT )? input TREE ) | ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) ) - int alt18=7; - alt18 = dfa18.predict(input); - switch (alt18) { - case 1 : - // ASTVerifier.g:33:4: ^( TEST_OK ( DOC_COMMENT )? input ) - { - match(input,TEST_OK,FOLLOW_TEST_OK_in_testcase176); - - match(input, Token.DOWN, null); - // ASTVerifier.g:33:14: ( DOC_COMMENT )? - int alt11=2; - int LA11_0 = input.LA(1); - - if ( (LA11_0==DOC_COMMENT) ) { - alt11=1; - } - switch (alt11) { - case 1 : - // ASTVerifier.g:33:14: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase178); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase181); - input(); - - state._fsp--; - - - match(input, Token.UP, null); - - } - break; - case 2 : - // ASTVerifier.g:34:4: ^( TEST_FAIL ( DOC_COMMENT )? input ) - { - match(input,TEST_FAIL,FOLLOW_TEST_FAIL_in_testcase188); - - match(input, Token.DOWN, null); - // ASTVerifier.g:34:16: ( DOC_COMMENT )? - int alt12=2; - int LA12_0 = input.LA(1); - - if ( (LA12_0==DOC_COMMENT) ) { - alt12=1; - } - switch (alt12) { - case 1 : - // ASTVerifier.g:34:16: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase190); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase193); - input(); - - state._fsp--; - - - match(input, Token.UP, null); - - } - break; - case 3 : - // ASTVerifier.g:35:4: ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) - { - match(input,TEST_RETVAL,FOLLOW_TEST_RETVAL_in_testcase200); - - match(input, Token.DOWN, null); - // ASTVerifier.g:35:18: ( DOC_COMMENT )? 
- int alt13=2; - int LA13_0 = input.LA(1); - - if ( (LA13_0==DOC_COMMENT) ) { - alt13=1; - } - switch (alt13) { - case 1 : - // ASTVerifier.g:35:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase202); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase205); - input(); - - state._fsp--; - - match(input,RETVAL,FOLLOW_RETVAL_in_testcase207); - - match(input, Token.UP, null); - - } - break; - case 4 : - // ASTVerifier.g:36:4: ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) - { - match(input,TEST_STDOUT,FOLLOW_TEST_STDOUT_in_testcase214); - - match(input, Token.DOWN, null); - // ASTVerifier.g:36:18: ( DOC_COMMENT )? - int alt14=2; - int LA14_0 = input.LA(1); - - if ( (LA14_0==DOC_COMMENT) ) { - alt14=1; - } - switch (alt14) { - case 1 : - // ASTVerifier.g:36:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase216); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase219); - input(); - - state._fsp--; - - match(input,STRING,FOLLOW_STRING_in_testcase221); - - match(input, Token.UP, null); - - } - break; - case 5 : - // ASTVerifier.g:37:4: ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) - { - match(input,TEST_STDOUT,FOLLOW_TEST_STDOUT_in_testcase228); - - match(input, Token.DOWN, null); - // ASTVerifier.g:37:18: ( DOC_COMMENT )? - int alt15=2; - int LA15_0 = input.LA(1); - - if ( (LA15_0==DOC_COMMENT) ) { - alt15=1; - } - switch (alt15) { - case 1 : - // ASTVerifier.g:37:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase230); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase233); - input(); - - state._fsp--; - - match(input,ML_STRING,FOLLOW_ML_STRING_in_testcase235); - - match(input, Token.UP, null); - - } - break; - case 6 : - // ASTVerifier.g:38:4: ^( TEST_TREE ( DOC_COMMENT )? input TREE ) - { - match(input,TEST_TREE,FOLLOW_TEST_TREE_in_testcase242); - - match(input, Token.DOWN, null); - // ASTVerifier.g:38:16: ( DOC_COMMENT )? - int alt16=2; - int LA16_0 = input.LA(1); - - if ( (LA16_0==DOC_COMMENT) ) { - alt16=1; - } - switch (alt16) { - case 1 : - // ASTVerifier.g:38:16: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase244); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase247); - input(); - - state._fsp--; - - match(input,TREE,FOLLOW_TREE_in_testcase249); - - match(input, Token.UP, null); - - } - break; - case 7 : - // ASTVerifier.g:39:4: ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) - { - match(input,TEST_ACTION,FOLLOW_TEST_ACTION_in_testcase256); - - match(input, Token.DOWN, null); - // ASTVerifier.g:39:18: ( DOC_COMMENT )? 
- int alt17=2; - int LA17_0 = input.LA(1); - - if ( (LA17_0==DOC_COMMENT) ) { - alt17=1; - } - switch (alt17) { - case 1 : - // ASTVerifier.g:39:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase258); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase261); - input(); - - state._fsp--; - - match(input,ACTION,FOLLOW_ACTION_in_testcase263); - - match(input, Token.UP, null); - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "testcase" - - - // $ANTLR start "input" - // ASTVerifier.g:42:1: input : ( STRING | ML_STRING | FILENAME ); - public final void input() throws RecognitionException { - try { - // ASTVerifier.g:43:2: ( STRING | ML_STRING | FILENAME ) - // ASTVerifier.g: - { - if ( input.LA(1)==STRING||input.LA(1)==ML_STRING||input.LA(1)==FILENAME ) { - input.consume(); - state.errorRecovery=false; - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - throw mse; - } - - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "input" - - // Delegated rules - - - protected DFA18 dfa18 = new DFA18(this); - static final String DFA18_eotS = - "\14\uffff"; - static final String DFA18_eofS = - "\14\uffff"; - static final String DFA18_minS = - "\1\5\3\uffff\1\2\2\uffff\1\13\2\16\2\uffff"; - static final String DFA18_maxS = - "\1\12\3\uffff\1\2\2\uffff\2\23\1\21\2\uffff"; - static final String DFA18_acceptS = - "\1\uffff\1\1\1\2\1\3\1\uffff\1\6\1\7\3\uffff\1\4\1\5"; - static final String DFA18_specialS = - "\14\uffff}>"; - static final String[] DFA18_transitionS = { - "\1\1\1\2\1\3\1\4\1\5\1\6", - "", - "", - "", - "\1\7", - "", - "", - "\1\10\2\uffff\1\11\2\uffff\1\11\1\uffff\1\11", - "\1\11\2\uffff\1\11\1\uffff\1\11", - "\1\12\2\uffff\1\13", - "", - "" - }; - - static final short[] DFA18_eot = DFA.unpackEncodedString(DFA18_eotS); - static final short[] DFA18_eof = DFA.unpackEncodedString(DFA18_eofS); - static final char[] DFA18_min = DFA.unpackEncodedStringToUnsignedChars(DFA18_minS); - static final char[] DFA18_max = DFA.unpackEncodedStringToUnsignedChars(DFA18_maxS); - static final short[] DFA18_accept = DFA.unpackEncodedString(DFA18_acceptS); - static final short[] DFA18_special = DFA.unpackEncodedString(DFA18_specialS); - static final short[][] DFA18_transition; - - static { - int numStates = DFA18_transitionS.length; - DFA18_transition = new short[numStates][]; - for (int i=0; i words = new ArrayList(); - int i = 0; - StringBuilder word = new StringBuilder(); - while ( i0 ) { - words.add(word.toString()); - word.setLength(0); - } - words.add(String.valueOf(t.charAt(i))); - i++; - continue; - } - if ( Character.isWhitespace(t.charAt(i)) ) { - // upon WS, save word - if ( word.length()>0 ) { - words.add(word.toString()); - word.setLength(0); - } - i++; - continue; - } - - // ... 
"x" or ...("x" - if ( t.charAt(i)=='"' && (i-1)>=0 && - (t.charAt(i-1)=='(' || Character.isWhitespace(t.charAt(i-1))) ) - { - i++; - while ( i0 ) { - words.add(word.toString()); - } - //System.out.println("words="+words); - StringBuilder buf = new StringBuilder(); - for (int j=0; j0 && !words.get(j).equals(")") && - !words.get(j-1).equals("(") ) { - buf.append(' '); - } - buf.append(words.get(j)); - } - return buf.toString(); - } - - public static void help() { - System.err.println("org.antlr.v4.gunit.Gen [-o output-dir] gunit-file"); - } -} diff --git a/gunit/src/org/antlr/v4/gunit/Interp.java b/gunit/src/org/antlr/v4/gunit/Interp.java deleted file mode 100644 index 6cff54d29..000000000 --- a/gunit/src/org/antlr/v4/gunit/Interp.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.antlr.v4.gunit; - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.BufferedTreeNodeStream; -import org.antlr.runtime.tree.Tree; - -public class Interp { - public static void main(String[] args) throws Exception { - String fileName = args[0]; - ANTLRFileStream fs = new ANTLRFileStream(fileName); - gUnitLexer lexer = new gUnitLexer(fs); - CommonTokenStream tokens = new CommonTokenStream(lexer); - gUnitParser parser = new gUnitParser(tokens); - RuleReturnScope r = parser.gUnitDef(); - System.out.println(((Tree)r.getTree()).toStringTree()); - - BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(r.getTree()); - ASTVerifier verifier = new ASTVerifier(nodes); - verifier.gUnitDef(); - } -} diff --git a/gunit/src/org/antlr/v4/gunit/Semantics.g b/gunit/src/org/antlr/v4/gunit/Semantics.g deleted file mode 100644 index 7dbb0d4c8..000000000 --- a/gunit/src/org/antlr/v4/gunit/Semantics.g +++ /dev/null @@ -1,36 +0,0 @@ -tree grammar Semantics; - -options { - filter=true; - ASTLabelType=CommonTree; - tokenVocab = gUnit; -} - -@header { -package org.antlr.v4.gunit; -import java.util.Map; -import java.util.HashMap; -} - -@members { - public String name; - public Map options = new HashMap(); -} - -topdown - : optionsSpec - | gUnitDef - ; - -gUnitDef - : ^('gunit' ID .*) {name = $ID.text;} - ; - -optionsSpec - : ^(OPTIONS option+) - ; - -option - : ^('=' o=ID v=ID) {options.put($o.text, $v.text);} - | ^('=' o=ID v=STRING) {options.put($o.text, $v.text);} - ; diff --git a/gunit/src/org/antlr/v4/gunit/Semantics.java b/gunit/src/org/antlr/v4/gunit/Semantics.java deleted file mode 100644 index bad3452c3..000000000 --- a/gunit/src/org/antlr/v4/gunit/Semantics.java +++ /dev/null @@ -1,379 +0,0 @@ -// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 Semantics.g 2010-01-27 17:03:31 - -package org.antlr.v4.gunit; -import java.util.Map; -import java.util.HashMap; - - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.*;import java.util.Stack; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -public class Semantics extends TreeFilter { - public static final String[] tokenNames = new String[] { - "", "", "", "", "SUITE", "TEST_OK", "TEST_FAIL", "TEST_RETVAL", "TEST_STDOUT", "TEST_TREE", "TEST_ACTION", "DOC_COMMENT", "ID", "OPTIONS", "STRING", "ACTION", "RETVAL", "ML_STRING", "TREE", "FILENAME", "NESTED_RETVAL", "NESTED_AST", "STRING_", "WS", "ID_", "SL_COMMENT", "ML_COMMENT", "XDIGIT", "'gunit'", "';'", "'}'", "'='", "'@header'", "'walks'", "':'", "'OK'", "'FAIL'", "'returns'", "'->'" - }; - public static final int T__29=29; - public static final int T__28=28; - public static final int RETVAL=16; - public static final int TEST_TREE=9; - public static final int STRING_=22; - 
public static final int NESTED_AST=21; - public static final int ML_STRING=17; - public static final int TEST_FAIL=6; - public static final int ID=12; - public static final int EOF=-1; - public static final int NESTED_RETVAL=20; - public static final int TEST_RETVAL=7; - public static final int TEST_STDOUT=8; - public static final int ACTION=15; - public static final int TEST_OK=5; - public static final int ML_COMMENT=26; - public static final int T__30=30; - public static final int T__31=31; - public static final int T__32=32; - public static final int T__33=33; - public static final int WS=23; - public static final int T__34=34; - public static final int T__35=35; - public static final int T__36=36; - public static final int TREE=18; - public static final int T__37=37; - public static final int T__38=38; - public static final int FILENAME=19; - public static final int ID_=24; - public static final int XDIGIT=27; - public static final int SL_COMMENT=25; - public static final int DOC_COMMENT=11; - public static final int TEST_ACTION=10; - public static final int SUITE=4; - public static final int OPTIONS=13; - public static final int STRING=14; - - // delegates - // delegators - - - public Semantics(TreeNodeStream input) { - this(input, new RecognizerSharedState()); - } - public Semantics(TreeNodeStream input, RecognizerSharedState state) { - super(input, state); - - } - - - public String[] getTokenNames() { return Semantics.tokenNames; } - public String getGrammarFileName() { return "Semantics.g"; } - - - public String name; - public Map options = new HashMap(); - - - - // $ANTLR start "topdown" - // Semantics.g:20:1: topdown : ( optionsSpec | gUnitDef ); - public final void topdown() throws RecognitionException { - try { - // Semantics.g:21:2: ( optionsSpec | gUnitDef ) - int alt1=2; - int LA1_0 = input.LA(1); - - if ( (LA1_0==OPTIONS) ) { - alt1=1; - } - else if ( (LA1_0==28) ) { - alt1=2; - } - else { - if (state.backtracking>0) {state.failed=true; return ;} - NoViableAltException nvae = - new NoViableAltException("", 1, 0, input); - - throw nvae; - } - switch (alt1) { - case 1 : - // Semantics.g:21:4: optionsSpec - { - pushFollow(FOLLOW_optionsSpec_in_topdown50); - optionsSpec(); - - state._fsp--; - if (state.failed) return ; - - } - break; - case 2 : - // Semantics.g:22:4: gUnitDef - { - pushFollow(FOLLOW_gUnitDef_in_topdown55); - gUnitDef(); - - state._fsp--; - if (state.failed) return ; - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "topdown" - - - // $ANTLR start "gUnitDef" - // Semantics.g:25:1: gUnitDef : ^( 'gunit' ID ( . )* ) ; - public final void gUnitDef() throws RecognitionException { - CommonTree ID1=null; - - try { - // Semantics.g:26:2: ( ^( 'gunit' ID ( . )* ) ) - // Semantics.g:26:4: ^( 'gunit' ID ( . )* ) - { - match(input,28,FOLLOW_28_in_gUnitDef67); if (state.failed) return ; - - match(input, Token.DOWN, null); if (state.failed) return ; - ID1=(CommonTree)match(input,ID,FOLLOW_ID_in_gUnitDef69); if (state.failed) return ; - // Semantics.g:26:17: ( . )* - loop2: - do { - int alt2=2; - int LA2_0 = input.LA(1); - - if ( ((LA2_0>=SUITE && LA2_0<=38)) ) { - alt2=1; - } - else if ( (LA2_0==UP) ) { - alt2=2; - } - - - switch (alt2) { - case 1 : - // Semantics.g:26:17: . 
- { - matchAny(input); if (state.failed) return ; - - } - break; - - default : - break loop2; - } - } while (true); - - - match(input, Token.UP, null); if (state.failed) return ; - if ( state.backtracking==1 ) { - name = (ID1!=null?ID1.getText():null); - } - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "gUnitDef" - - - // $ANTLR start "optionsSpec" - // Semantics.g:29:1: optionsSpec : ^( OPTIONS ( option )+ ) ; - public final void optionsSpec() throws RecognitionException { - try { - // Semantics.g:30:2: ( ^( OPTIONS ( option )+ ) ) - // Semantics.g:30:4: ^( OPTIONS ( option )+ ) - { - match(input,OPTIONS,FOLLOW_OPTIONS_in_optionsSpec88); if (state.failed) return ; - - match(input, Token.DOWN, null); if (state.failed) return ; - // Semantics.g:30:14: ( option )+ - int cnt3=0; - loop3: - do { - int alt3=2; - int LA3_0 = input.LA(1); - - if ( (LA3_0==31) ) { - alt3=1; - } - - - switch (alt3) { - case 1 : - // Semantics.g:30:14: option - { - pushFollow(FOLLOW_option_in_optionsSpec90); - option(); - - state._fsp--; - if (state.failed) return ; - - } - break; - - default : - if ( cnt3 >= 1 ) break loop3; - if (state.backtracking>0) {state.failed=true; return ;} - EarlyExitException eee = - new EarlyExitException(3, input); - throw eee; - } - cnt3++; - } while (true); - - - match(input, Token.UP, null); if (state.failed) return ; - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "optionsSpec" - - - // $ANTLR start "option" - // Semantics.g:33:1: option : ( ^( '=' o= ID v= ID ) | ^( '=' o= ID v= STRING ) ); - public final void option() throws RecognitionException { - CommonTree o=null; - CommonTree v=null; - - try { - // Semantics.g:34:5: ( ^( '=' o= ID v= ID ) | ^( '=' o= ID v= STRING ) ) - int alt4=2; - int LA4_0 = input.LA(1); - - if ( (LA4_0==31) ) { - int LA4_1 = input.LA(2); - - if ( (LA4_1==DOWN) ) { - int LA4_2 = input.LA(3); - - if ( (LA4_2==ID) ) { - int LA4_3 = input.LA(4); - - if ( (LA4_3==ID) ) { - alt4=1; - } - else if ( (LA4_3==STRING) ) { - alt4=2; - } - else { - if (state.backtracking>0) {state.failed=true; return ;} - NoViableAltException nvae = - new NoViableAltException("", 4, 3, input); - - throw nvae; - } - } - else { - if (state.backtracking>0) {state.failed=true; return ;} - NoViableAltException nvae = - new NoViableAltException("", 4, 2, input); - - throw nvae; - } - } - else { - if (state.backtracking>0) {state.failed=true; return ;} - NoViableAltException nvae = - new NoViableAltException("", 4, 1, input); - - throw nvae; - } - } - else { - if (state.backtracking>0) {state.failed=true; return ;} - NoViableAltException nvae = - new NoViableAltException("", 4, 0, input); - - throw nvae; - } - switch (alt4) { - case 1 : - // Semantics.g:34:9: ^( '=' o= ID v= ID ) - { - match(input,31,FOLLOW_31_in_option109); if (state.failed) return ; - - match(input, Token.DOWN, null); if (state.failed) return ; - o=(CommonTree)match(input,ID,FOLLOW_ID_in_option113); if (state.failed) return ; - v=(CommonTree)match(input,ID,FOLLOW_ID_in_option117); if (state.failed) return ; - - match(input, Token.UP, null); if (state.failed) return ; - if ( state.backtracking==1 ) { - options.put((o!=null?o.getText():null), (v!=null?v.getText():null)); - } - - } - break; - case 2 : - // Semantics.g:35:9: ^( '=' o= ID v= STRING ) - { - match(input,31,FOLLOW_31_in_option132); if (state.failed) return ; - - match(input, 
Token.DOWN, null); if (state.failed) return ; - o=(CommonTree)match(input,ID,FOLLOW_ID_in_option136); if (state.failed) return ; - v=(CommonTree)match(input,STRING,FOLLOW_STRING_in_option140); if (state.failed) return ; - - match(input, Token.UP, null); if (state.failed) return ; - if ( state.backtracking==1 ) { - options.put((o!=null?o.getText():null), (v!=null?v.getText():null)); - } - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return ; - } - // $ANTLR end "option" - - // Delegated rules - - - - - public static final BitSet FOLLOW_optionsSpec_in_topdown50 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_gUnitDef_in_topdown55 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_28_in_gUnitDef67 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_gUnitDef69 = new BitSet(new long[]{0x0000007FFFFFFFF8L}); - public static final BitSet FOLLOW_OPTIONS_in_optionsSpec88 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_option_in_optionsSpec90 = new BitSet(new long[]{0x0000000080000008L}); - public static final BitSet FOLLOW_31_in_option109 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_option113 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_option117 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_31_in_option132 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_option136 = new BitSet(new long[]{0x0000000000004000L}); - public static final BitSet FOLLOW_STRING_in_option140 = new BitSet(new long[]{0x0000000000000008L}); - -} \ No newline at end of file diff --git a/gunit/src/org/antlr/v4/gunit/gUnit.g b/gunit/src/org/antlr/v4/gunit/gUnit.g deleted file mode 100644 index 1c313788a..000000000 --- a/gunit/src/org/antlr/v4/gunit/gUnit.g +++ /dev/null @@ -1,155 +0,0 @@ -grammar gUnit; -options { - output=AST; - ASTLabelType=CommonTree; -} - -tokens { SUITE; TEST_OK; TEST_FAIL; TEST_RETVAL; TEST_STDOUT; TEST_TREE; TEST_ACTION; } - -@header { -package org.antlr.v4.gunit; -} -@lexer::header { -package org.antlr.v4.gunit; -} - -gUnitDef - : DOC_COMMENT? 'gunit' ID ';' (optionsSpec|header)* testsuite+ - -> ^('gunit' ID DOC_COMMENT? optionsSpec? header? testsuite+) - ; - -optionsSpec - : OPTIONS (option ';')+ '}' -> ^(OPTIONS option+) - ; - -option - : ID '=' optionValue -> ^('=' ID optionValue) - ; - -optionValue - : ID - | STRING - ; - -header : '@header' ACTION -> ^('@header' ACTION); - -testsuite - : DOC_COMMENT? treeRule=ID 'walks' parserRule=ID ':' testcase+ - -> ^(SUITE $treeRule $parserRule DOC_COMMENT? testcase+) - | DOC_COMMENT? ID ':' testcase+ -> ^(SUITE ID DOC_COMMENT? testcase+) - ; - -testcase - : DOC_COMMENT? input 'OK' -> ^(TEST_OK DOC_COMMENT? input) - | DOC_COMMENT? input 'FAIL' -> ^(TEST_FAIL DOC_COMMENT? input) - | DOC_COMMENT? input 'returns' RETVAL -> ^(TEST_RETVAL DOC_COMMENT? input RETVAL) - | DOC_COMMENT? input '->' STRING -> ^(TEST_STDOUT DOC_COMMENT? input STRING) - | DOC_COMMENT? input '->' ML_STRING -> ^(TEST_STDOUT DOC_COMMENT? input ML_STRING) - | DOC_COMMENT? input '->' TREE -> ^(TEST_TREE DOC_COMMENT? input TREE) - | DOC_COMMENT? input '->' ACTION -> ^(TEST_ACTION DOC_COMMENT? 
input ACTION) - ; - -input - : STRING - | ML_STRING - | FILENAME - ; - -ACTION - : '{' ('\\}'|'\\' ~'}'|~('\\'|'}'))* '}' {setText(getText().substring(1, getText().length()-1));} - ; - -RETVAL - : NESTED_RETVAL {setText(getText().substring(1, getText().length()-1));} - ; - -fragment -NESTED_RETVAL : - '[' - ( options {greedy=false;} - : NESTED_RETVAL - | . - )* - ']' - ; - -TREE : NESTED_AST (' '? NESTED_AST)*; - -fragment -NESTED_AST - : '(' - ( NESTED_AST - | STRING_ - | ~('('|')'|'"') - )* - ')' - ; - -OPTIONS : 'options' WS* '{' ; - -ID : ID_ ('.' ID_)* ; - -fragment -ID_ : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - ) {$channel=HIDDEN;} - ; - -SL_COMMENT - : '//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;} - ; - -DOC_COMMENT - : '/**' (options {greedy=false;}:.)* '*/' - ; - -ML_COMMENT - : '/*' ~'*' (options {greedy=false;}:.)* '*/' {$channel=HIDDEN;} - ; - -STRING : STRING_ {setText(getText().substring(1, getText().length()-1));} ; - -fragment -STRING_ - : '"' ('\\"'|'\\' ~'"'|~('\\'|'"'))+ '"' - ; - -ML_STRING - : '<<' .* '>>' {setText(getText().substring(2, getText().length()-2));} - ; - -FILENAME - : '/' ID ('/' ID)* - | ID ('/' ID)+ - ; - -/* -fragment -ESC : '\\' - ( 'n' - | 'r' - | 't' - | 'b' - | 'f' - | '"' - | '\'' - | '\\' - | '>' - | 'u' XDIGIT XDIGIT XDIGIT XDIGIT - | . // unknown, leave as it is - ) - ; -*/ - -fragment -XDIGIT : - '0' .. '9' - | 'a' .. 'f' - | 'A' .. 'F' - ; - diff --git a/gunit/src/org/antlr/v4/gunit/gUnitBase.java b/gunit/src/org/antlr/v4/gunit/gUnitBase.java deleted file mode 100644 index 8d92509d3..000000000 --- a/gunit/src/org/antlr/v4/gunit/gUnitBase.java +++ /dev/null @@ -1,49 +0,0 @@ -package org.antlr.v4.gunit; - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.TreeAdaptor; - -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; - -public class gUnitBase { - public String lexerClassName; - public String parserClassName; - public String adaptorClassName; - - public Object execParser( - String ruleName, - String input, - int scriptLine) - throws Exception - { - ANTLRStringStream is = new ANTLRStringStream(input); - Class lexerClass = Class.forName(lexerClassName); - Class[] lexArgTypes = new Class[]{CharStream.class}; - Constructor lexConstructor = lexerClass.getConstructor(lexArgTypes); - Object[] lexArgs = new Object[]{is}; - TokenSource lexer = (TokenSource)lexConstructor.newInstance(lexArgs); - is.setLine(scriptLine); - - CommonTokenStream tokens = new CommonTokenStream(lexer); - - Class parserClass = Class.forName(parserClassName); - Class[] parArgTypes = new Class[]{TokenStream.class}; - Constructor parConstructor = parserClass.getConstructor(parArgTypes); - Object[] parArgs = new Object[]{tokens}; - Parser parser = (Parser)parConstructor.newInstance(parArgs); - - // set up customized tree adaptor if necessary - if ( adaptorClassName!=null ) { - parArgTypes = new Class[]{TreeAdaptor.class}; - Method m = parserClass.getMethod("setTreeAdaptor", parArgTypes); - Class adaptorClass = Class.forName(adaptorClassName); - m.invoke(parser, adaptorClass.newInstance()); - } - - Method ruleMethod = parserClass.getMethod(ruleName); - - // INVOKE RULE - return ruleMethod.invoke(parser); - } -} diff --git a/gunit/src/org/antlr/v4/gunit/gUnitLexer.java b/gunit/src/org/antlr/v4/gunit/gUnitLexer.java deleted file mode 100644 index 14babe8c2..000000000 --- a/gunit/src/org/antlr/v4/gunit/gUnitLexer.java +++ /dev/null @@ -1,1615 +0,0 @@ -// $ANTLR 3.2.1-SNAPSHOT Jan 26, 
2010 15:12:28 gUnit.g 2010-01-27 17:03:31 - -package org.antlr.v4.gunit; - - -import org.antlr.runtime.*; -import java.util.Stack; -import java.util.List; -import java.util.ArrayList; - -public class gUnitLexer extends Lexer { - public static final int T__29=29; - public static final int T__28=28; - public static final int RETVAL=16; - public static final int TEST_TREE=9; - public static final int STRING_=22; - public static final int NESTED_AST=21; - public static final int ML_STRING=17; - public static final int TEST_FAIL=6; - public static final int ID=12; - public static final int EOF=-1; - public static final int NESTED_RETVAL=20; - public static final int TEST_RETVAL=7; - public static final int TEST_STDOUT=8; - public static final int ACTION=15; - public static final int TEST_OK=5; - public static final int ML_COMMENT=26; - public static final int T__30=30; - public static final int T__31=31; - public static final int T__32=32; - public static final int T__33=33; - public static final int WS=23; - public static final int T__34=34; - public static final int T__35=35; - public static final int T__36=36; - public static final int TREE=18; - public static final int T__37=37; - public static final int T__38=38; - public static final int FILENAME=19; - public static final int ID_=24; - public static final int XDIGIT=27; - public static final int SL_COMMENT=25; - public static final int DOC_COMMENT=11; - public static final int TEST_ACTION=10; - public static final int SUITE=4; - public static final int OPTIONS=13; - public static final int STRING=14; - - // delegates - // delegators - - public gUnitLexer() {;} - public gUnitLexer(CharStream input) { - this(input, new RecognizerSharedState()); - } - public gUnitLexer(CharStream input, RecognizerSharedState state) { - super(input,state); - - } - public String getGrammarFileName() { return "gUnit.g"; } - - // $ANTLR start "T__28" - public final void mT__28() throws RecognitionException { - try { - int _type = T__28; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:7:7: ( 'gunit' ) - // gUnit.g:7:9: 'gunit' - { - match("gunit"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__28" - - // $ANTLR start "T__29" - public final void mT__29() throws RecognitionException { - try { - int _type = T__29; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:8:7: ( ';' ) - // gUnit.g:8:9: ';' - { - match(';'); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__29" - - // $ANTLR start "T__30" - public final void mT__30() throws RecognitionException { - try { - int _type = T__30; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:9:7: ( '}' ) - // gUnit.g:9:9: '}' - { - match('}'); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__30" - - // $ANTLR start "T__31" - public final void mT__31() throws RecognitionException { - try { - int _type = T__31; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:10:7: ( '=' ) - // gUnit.g:10:9: '=' - { - match('='); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__31" - - // $ANTLR start "T__32" - public final void mT__32() throws RecognitionException { - try { - int _type = T__32; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:11:7: ( '@header' ) - // gUnit.g:11:9: '@header' - { - match("@header"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end 
"T__32" - - // $ANTLR start "T__33" - public final void mT__33() throws RecognitionException { - try { - int _type = T__33; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:12:7: ( 'walks' ) - // gUnit.g:12:9: 'walks' - { - match("walks"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__33" - - // $ANTLR start "T__34" - public final void mT__34() throws RecognitionException { - try { - int _type = T__34; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:13:7: ( ':' ) - // gUnit.g:13:9: ':' - { - match(':'); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__34" - - // $ANTLR start "T__35" - public final void mT__35() throws RecognitionException { - try { - int _type = T__35; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:14:7: ( 'OK' ) - // gUnit.g:14:9: 'OK' - { - match("OK"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__35" - - // $ANTLR start "T__36" - public final void mT__36() throws RecognitionException { - try { - int _type = T__36; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:15:7: ( 'FAIL' ) - // gUnit.g:15:9: 'FAIL' - { - match("FAIL"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__36" - - // $ANTLR start "T__37" - public final void mT__37() throws RecognitionException { - try { - int _type = T__37; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:16:7: ( 'returns' ) - // gUnit.g:16:9: 'returns' - { - match("returns"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__37" - - // $ANTLR start "T__38" - public final void mT__38() throws RecognitionException { - try { - int _type = T__38; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:17:7: ( '->' ) - // gUnit.g:17:9: '->' - { - match("->"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "T__38" - - // $ANTLR start "ACTION" - public final void mACTION() throws RecognitionException { - try { - int _type = ACTION; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:59:2: ( '{' ( '\\\\}' | '\\\\' ~ '}' | ~ ( '\\\\' | '}' ) )* '}' ) - // gUnit.g:59:4: '{' ( '\\\\}' | '\\\\' ~ '}' | ~ ( '\\\\' | '}' ) )* '}' - { - match('{'); - // gUnit.g:59:8: ( '\\\\}' | '\\\\' ~ '}' | ~ ( '\\\\' | '}' ) )* - loop1: - do { - int alt1=4; - int LA1_0 = input.LA(1); - - if ( (LA1_0=='\\') ) { - int LA1_2 = input.LA(2); - - if ( (LA1_2=='}') ) { - alt1=1; - } - else if ( ((LA1_2>='\u0000' && LA1_2<='|')||(LA1_2>='~' && LA1_2<='\uFFFF')) ) { - alt1=2; - } - - - } - else if ( ((LA1_0>='\u0000' && LA1_0<='[')||(LA1_0>=']' && LA1_0<='|')||(LA1_0>='~' && LA1_0<='\uFFFF')) ) { - alt1=3; - } - - - switch (alt1) { - case 1 : - // gUnit.g:59:9: '\\\\}' - { - match("\\}"); - - - } - break; - case 2 : - // gUnit.g:59:15: '\\\\' ~ '}' - { - match('\\'); - if ( (input.LA(1)>='\u0000' && input.LA(1)<='|')||(input.LA(1)>='~' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - break; - case 3 : - // gUnit.g:59:25: ~ ( '\\\\' | '}' ) - { - if ( (input.LA(1)>='\u0000' && input.LA(1)<='[')||(input.LA(1)>=']' && input.LA(1)<='|')||(input.LA(1)>='~' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - 
throw mse;} - - - } - break; - - default : - break loop1; - } - } while (true); - - match('}'); - setText(getText().substring(1, getText().length()-1)); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "ACTION" - - // $ANTLR start "RETVAL" - public final void mRETVAL() throws RecognitionException { - try { - int _type = RETVAL; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:63:2: ( NESTED_RETVAL ) - // gUnit.g:63:4: NESTED_RETVAL - { - mNESTED_RETVAL(); - setText(getText().substring(1, getText().length()-1)); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "RETVAL" - - // $ANTLR start "NESTED_RETVAL" - public final void mNESTED_RETVAL() throws RecognitionException { - try { - // gUnit.g:67:15: ( '[' ( options {greedy=false; } : NESTED_RETVAL | . )* ']' ) - // gUnit.g:68:2: '[' ( options {greedy=false; } : NESTED_RETVAL | . )* ']' - { - match('['); - // gUnit.g:69:2: ( options {greedy=false; } : NESTED_RETVAL | . )* - loop2: - do { - int alt2=3; - int LA2_0 = input.LA(1); - - if ( (LA2_0==']') ) { - alt2=3; - } - else if ( (LA2_0=='[') ) { - alt2=1; - } - else if ( ((LA2_0>='\u0000' && LA2_0<='Z')||LA2_0=='\\'||(LA2_0>='^' && LA2_0<='\uFFFF')) ) { - alt2=2; - } - - - switch (alt2) { - case 1 : - // gUnit.g:70:4: NESTED_RETVAL - { - mNESTED_RETVAL(); - - } - break; - case 2 : - // gUnit.g:71:4: . - { - matchAny(); - - } - break; - - default : - break loop2; - } - } while (true); - - match(']'); - - } - - } - finally { - } - } - // $ANTLR end "NESTED_RETVAL" - - // $ANTLR start "TREE" - public final void mTREE() throws RecognitionException { - try { - int _type = TREE; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:76:6: ( NESTED_AST ( ( ' ' )? NESTED_AST )* ) - // gUnit.g:76:8: NESTED_AST ( ( ' ' )? NESTED_AST )* - { - mNESTED_AST(); - // gUnit.g:76:19: ( ( ' ' )? NESTED_AST )* - loop4: - do { - int alt4=2; - int LA4_0 = input.LA(1); - - if ( (LA4_0==' '||LA4_0=='(') ) { - alt4=1; - } - - - switch (alt4) { - case 1 : - // gUnit.g:76:20: ( ' ' )? NESTED_AST - { - // gUnit.g:76:20: ( ' ' )? 
- int alt3=2; - int LA3_0 = input.LA(1); - - if ( (LA3_0==' ') ) { - alt3=1; - } - switch (alt3) { - case 1 : - // gUnit.g:76:20: ' ' - { - match(' '); - - } - break; - - } - - mNESTED_AST(); - - } - break; - - default : - break loop4; - } - } while (true); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "TREE" - - // $ANTLR start "NESTED_AST" - public final void mNESTED_AST() throws RecognitionException { - try { - // gUnit.g:80:2: ( '(' ( NESTED_AST | STRING_ | ~ ( '(' | ')' | '\"' ) )* ')' ) - // gUnit.g:80:4: '(' ( NESTED_AST | STRING_ | ~ ( '(' | ')' | '\"' ) )* ')' - { - match('('); - // gUnit.g:81:3: ( NESTED_AST | STRING_ | ~ ( '(' | ')' | '\"' ) )* - loop5: - do { - int alt5=4; - int LA5_0 = input.LA(1); - - if ( (LA5_0=='(') ) { - alt5=1; - } - else if ( (LA5_0=='\"') ) { - alt5=2; - } - else if ( ((LA5_0>='\u0000' && LA5_0<='!')||(LA5_0>='#' && LA5_0<='\'')||(LA5_0>='*' && LA5_0<='\uFFFF')) ) { - alt5=3; - } - - - switch (alt5) { - case 1 : - // gUnit.g:81:5: NESTED_AST - { - mNESTED_AST(); - - } - break; - case 2 : - // gUnit.g:82:7: STRING_ - { - mSTRING_(); - - } - break; - case 3 : - // gUnit.g:83:5: ~ ( '(' | ')' | '\"' ) - { - if ( (input.LA(1)>='\u0000' && input.LA(1)<='!')||(input.LA(1)>='#' && input.LA(1)<='\'')||(input.LA(1)>='*' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - break; - - default : - break loop5; - } - } while (true); - - match(')'); - - } - - } - finally { - } - } - // $ANTLR end "NESTED_AST" - - // $ANTLR start "OPTIONS" - public final void mOPTIONS() throws RecognitionException { - try { - int _type = OPTIONS; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:88:9: ( 'options' ( WS )* '{' ) - // gUnit.g:88:11: 'options' ( WS )* '{' - { - match("options"); - - // gUnit.g:88:21: ( WS )* - loop6: - do { - int alt6=2; - int LA6_0 = input.LA(1); - - if ( ((LA6_0>='\t' && LA6_0<='\n')||LA6_0=='\r'||LA6_0==' ') ) { - alt6=1; - } - - - switch (alt6) { - case 1 : - // gUnit.g:88:21: WS - { - mWS(); - - } - break; - - default : - break loop6; - } - } while (true); - - match('{'); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "OPTIONS" - - // $ANTLR start "ID" - public final void mID() throws RecognitionException { - try { - int _type = ID; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:90:4: ( ID_ ( '.' ID_ )* ) - // gUnit.g:90:6: ID_ ( '.' ID_ )* - { - mID_(); - // gUnit.g:90:10: ( '.' ID_ )* - loop7: - do { - int alt7=2; - int LA7_0 = input.LA(1); - - if ( (LA7_0=='.') ) { - alt7=1; - } - - - switch (alt7) { - case 1 : - // gUnit.g:90:11: '.' ID_ - { - match('.'); - mID_(); - - } - break; - - default : - break loop7; - } - } while (true); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "ID" - - // $ANTLR start "ID_" - public final void mID_() throws RecognitionException { - try { - // gUnit.g:93:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) - // gUnit.g:93:7: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* - { - if ( (input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - // gUnit.g:93:31: ( 'a' .. 'z' | 'A' .. 
'Z' | '0' .. '9' | '_' )* - loop8: - do { - int alt8=2; - int LA8_0 = input.LA(1); - - if ( ((LA8_0>='0' && LA8_0<='9')||(LA8_0>='A' && LA8_0<='Z')||LA8_0=='_'||(LA8_0>='a' && LA8_0<='z')) ) { - alt8=1; - } - - - switch (alt8) { - case 1 : - // gUnit.g: - { - if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - break; - - default : - break loop8; - } - } while (true); - - - } - - } - finally { - } - } - // $ANTLR end "ID_" - - // $ANTLR start "WS" - public final void mWS() throws RecognitionException { - try { - int _type = WS; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:96:5: ( ( ' ' | '\\t' | '\\r' | '\\n' ) ) - // gUnit.g:96:9: ( ' ' | '\\t' | '\\r' | '\\n' ) - { - if ( (input.LA(1)>='\t' && input.LA(1)<='\n')||input.LA(1)=='\r'||input.LA(1)==' ' ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - _channel=HIDDEN; - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "WS" - - // $ANTLR start "SL_COMMENT" - public final void mSL_COMMENT() throws RecognitionException { - try { - int _type = SL_COMMENT; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:104:3: ( '//' (~ ( '\\r' | '\\n' ) )* ( '\\r' )? '\\n' ) - // gUnit.g:104:5: '//' (~ ( '\\r' | '\\n' ) )* ( '\\r' )? '\\n' - { - match("//"); - - // gUnit.g:104:10: (~ ( '\\r' | '\\n' ) )* - loop9: - do { - int alt9=2; - int LA9_0 = input.LA(1); - - if ( ((LA9_0>='\u0000' && LA9_0<='\t')||(LA9_0>='\u000B' && LA9_0<='\f')||(LA9_0>='\u000E' && LA9_0<='\uFFFF')) ) { - alt9=1; - } - - - switch (alt9) { - case 1 : - // gUnit.g:104:10: ~ ( '\\r' | '\\n' ) - { - if ( (input.LA(1)>='\u0000' && input.LA(1)<='\t')||(input.LA(1)>='\u000B' && input.LA(1)<='\f')||(input.LA(1)>='\u000E' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - break; - - default : - break loop9; - } - } while (true); - - // gUnit.g:104:24: ( '\\r' )? - int alt10=2; - int LA10_0 = input.LA(1); - - if ( (LA10_0=='\r') ) { - alt10=1; - } - switch (alt10) { - case 1 : - // gUnit.g:104:24: '\\r' - { - match('\r'); - - } - break; - - } - - match('\n'); - _channel=HIDDEN; - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "SL_COMMENT" - - // $ANTLR start "DOC_COMMENT" - public final void mDOC_COMMENT() throws RecognitionException { - try { - int _type = DOC_COMMENT; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:108:2: ( '/**' ( options {greedy=false; } : . )* '*/' ) - // gUnit.g:108:4: '/**' ( options {greedy=false; } : . )* '*/' - { - match("/**"); - - // gUnit.g:108:10: ( options {greedy=false; } : . )* - loop11: - do { - int alt11=2; - int LA11_0 = input.LA(1); - - if ( (LA11_0=='*') ) { - int LA11_1 = input.LA(2); - - if ( (LA11_1=='/') ) { - alt11=2; - } - else if ( ((LA11_1>='\u0000' && LA11_1<='.')||(LA11_1>='0' && LA11_1<='\uFFFF')) ) { - alt11=1; - } - - - } - else if ( ((LA11_0>='\u0000' && LA11_0<=')')||(LA11_0>='+' && LA11_0<='\uFFFF')) ) { - alt11=1; - } - - - switch (alt11) { - case 1 : - // gUnit.g:108:35: . 
- { - matchAny(); - - } - break; - - default : - break loop11; - } - } while (true); - - match("*/"); - - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "DOC_COMMENT" - - // $ANTLR start "ML_COMMENT" - public final void mML_COMMENT() throws RecognitionException { - try { - int _type = ML_COMMENT; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:112:2: ( '/*' ~ '*' ( options {greedy=false; } : . )* '*/' ) - // gUnit.g:112:4: '/*' ~ '*' ( options {greedy=false; } : . )* '*/' - { - match("/*"); - - if ( (input.LA(1)>='\u0000' && input.LA(1)<=')')||(input.LA(1)>='+' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - // gUnit.g:112:14: ( options {greedy=false; } : . )* - loop12: - do { - int alt12=2; - int LA12_0 = input.LA(1); - - if ( (LA12_0=='*') ) { - int LA12_1 = input.LA(2); - - if ( (LA12_1=='/') ) { - alt12=2; - } - else if ( ((LA12_1>='\u0000' && LA12_1<='.')||(LA12_1>='0' && LA12_1<='\uFFFF')) ) { - alt12=1; - } - - - } - else if ( ((LA12_0>='\u0000' && LA12_0<=')')||(LA12_0>='+' && LA12_0<='\uFFFF')) ) { - alt12=1; - } - - - switch (alt12) { - case 1 : - // gUnit.g:112:39: . - { - matchAny(); - - } - break; - - default : - break loop12; - } - } while (true); - - match("*/"); - - _channel=HIDDEN; - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "ML_COMMENT" - - // $ANTLR start "STRING" - public final void mSTRING() throws RecognitionException { - try { - int _type = STRING; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:115:8: ( STRING_ ) - // gUnit.g:115:10: STRING_ - { - mSTRING_(); - setText(getText().substring(1, getText().length()-1)); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "STRING" - - // $ANTLR start "STRING_" - public final void mSTRING_() throws RecognitionException { - try { - // gUnit.g:119:2: ( '\"' ( '\\\\\"' | '\\\\' ~ '\"' | ~ ( '\\\\' | '\"' ) )+ '\"' ) - // gUnit.g:119:4: '\"' ( '\\\\\"' | '\\\\' ~ '\"' | ~ ( '\\\\' | '\"' ) )+ '\"' - { - match('\"'); - // gUnit.g:119:8: ( '\\\\\"' | '\\\\' ~ '\"' | ~ ( '\\\\' | '\"' ) )+ - int cnt13=0; - loop13: - do { - int alt13=4; - int LA13_0 = input.LA(1); - - if ( (LA13_0=='\\') ) { - int LA13_2 = input.LA(2); - - if ( (LA13_2=='\"') ) { - alt13=1; - } - else if ( ((LA13_2>='\u0000' && LA13_2<='!')||(LA13_2>='#' && LA13_2<='\uFFFF')) ) { - alt13=2; - } - - - } - else if ( ((LA13_0>='\u0000' && LA13_0<='!')||(LA13_0>='#' && LA13_0<='[')||(LA13_0>=']' && LA13_0<='\uFFFF')) ) { - alt13=3; - } - - - switch (alt13) { - case 1 : - // gUnit.g:119:9: '\\\\\"' - { - match("\\\""); - - - } - break; - case 2 : - // gUnit.g:119:15: '\\\\' ~ '\"' - { - match('\\'); - if ( (input.LA(1)>='\u0000' && input.LA(1)<='!')||(input.LA(1)>='#' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - break; - case 3 : - // gUnit.g:119:25: ~ ( '\\\\' | '\"' ) - { - if ( (input.LA(1)>='\u0000' && input.LA(1)<='!')||(input.LA(1)>='#' && input.LA(1)<='[')||(input.LA(1)>=']' && input.LA(1)<='\uFFFF') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - break; - - default : - if ( cnt13 >= 1 ) break loop13; - EarlyExitException eee = - new EarlyExitException(13, 
input); - throw eee; - } - cnt13++; - } while (true); - - match('\"'); - - } - - } - finally { - } - } - // $ANTLR end "STRING_" - - // $ANTLR start "ML_STRING" - public final void mML_STRING() throws RecognitionException { - try { - int _type = ML_STRING; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:123:2: ( '<<' ( . )* '>>' ) - // gUnit.g:123:4: '<<' ( . )* '>>' - { - match("<<"); - - // gUnit.g:123:9: ( . )* - loop14: - do { - int alt14=2; - int LA14_0 = input.LA(1); - - if ( (LA14_0=='>') ) { - int LA14_1 = input.LA(2); - - if ( (LA14_1=='>') ) { - alt14=2; - } - else if ( ((LA14_1>='\u0000' && LA14_1<='=')||(LA14_1>='?' && LA14_1<='\uFFFF')) ) { - alt14=1; - } - - - } - else if ( ((LA14_0>='\u0000' && LA14_0<='=')||(LA14_0>='?' && LA14_0<='\uFFFF')) ) { - alt14=1; - } - - - switch (alt14) { - case 1 : - // gUnit.g:123:9: . - { - matchAny(); - - } - break; - - default : - break loop14; - } - } while (true); - - match(">>"); - - setText(getText().substring(2, getText().length()-2)); - - } - - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "ML_STRING" - - // $ANTLR start "FILENAME" - public final void mFILENAME() throws RecognitionException { - try { - int _type = FILENAME; - int _channel = DEFAULT_TOKEN_CHANNEL; - // gUnit.g:127:2: ( '/' ID ( '/' ID )* | ID ( '/' ID )+ ) - int alt17=2; - int LA17_0 = input.LA(1); - - if ( (LA17_0=='/') ) { - alt17=1; - } - else if ( ((LA17_0>='A' && LA17_0<='Z')||LA17_0=='_'||(LA17_0>='a' && LA17_0<='z')) ) { - alt17=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 17, 0, input); - - throw nvae; - } - switch (alt17) { - case 1 : - // gUnit.g:127:4: '/' ID ( '/' ID )* - { - match('/'); - mID(); - // gUnit.g:127:11: ( '/' ID )* - loop15: - do { - int alt15=2; - int LA15_0 = input.LA(1); - - if ( (LA15_0=='/') ) { - alt15=1; - } - - - switch (alt15) { - case 1 : - // gUnit.g:127:12: '/' ID - { - match('/'); - mID(); - - } - break; - - default : - break loop15; - } - } while (true); - - - } - break; - case 2 : - // gUnit.g:128:4: ID ( '/' ID )+ - { - mID(); - // gUnit.g:128:7: ( '/' ID )+ - int cnt16=0; - loop16: - do { - int alt16=2; - int LA16_0 = input.LA(1); - - if ( (LA16_0=='/') ) { - alt16=1; - } - - - switch (alt16) { - case 1 : - // gUnit.g:128:8: '/' ID - { - match('/'); - mID(); - - } - break; - - default : - if ( cnt16 >= 1 ) break loop16; - EarlyExitException eee = - new EarlyExitException(16, input); - throw eee; - } - cnt16++; - } while (true); - - - } - break; - - } - state.type = _type; - state.channel = _channel; - } - finally { - } - } - // $ANTLR end "FILENAME" - - // $ANTLR start "XDIGIT" - public final void mXDIGIT() throws RecognitionException { - try { - // gUnit.g:150:8: ( '0' .. '9' | 'a' .. 'f' | 'A' .. 
'F' ) - // gUnit.g: - { - if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='F')||(input.LA(1)>='a' && input.LA(1)<='f') ) { - input.consume(); - - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - recover(mse); - throw mse;} - - - } - - } - finally { - } - } - // $ANTLR end "XDIGIT" - - public void mTokens() throws RecognitionException { - // gUnit.g:1:8: ( T__28 | T__29 | T__30 | T__31 | T__32 | T__33 | T__34 | T__35 | T__36 | T__37 | T__38 | ACTION | RETVAL | TREE | OPTIONS | ID | WS | SL_COMMENT | DOC_COMMENT | ML_COMMENT | STRING | ML_STRING | FILENAME ) - int alt18=23; - alt18 = dfa18.predict(input); - switch (alt18) { - case 1 : - // gUnit.g:1:10: T__28 - { - mT__28(); - - } - break; - case 2 : - // gUnit.g:1:16: T__29 - { - mT__29(); - - } - break; - case 3 : - // gUnit.g:1:22: T__30 - { - mT__30(); - - } - break; - case 4 : - // gUnit.g:1:28: T__31 - { - mT__31(); - - } - break; - case 5 : - // gUnit.g:1:34: T__32 - { - mT__32(); - - } - break; - case 6 : - // gUnit.g:1:40: T__33 - { - mT__33(); - - } - break; - case 7 : - // gUnit.g:1:46: T__34 - { - mT__34(); - - } - break; - case 8 : - // gUnit.g:1:52: T__35 - { - mT__35(); - - } - break; - case 9 : - // gUnit.g:1:58: T__36 - { - mT__36(); - - } - break; - case 10 : - // gUnit.g:1:64: T__37 - { - mT__37(); - - } - break; - case 11 : - // gUnit.g:1:70: T__38 - { - mT__38(); - - } - break; - case 12 : - // gUnit.g:1:76: ACTION - { - mACTION(); - - } - break; - case 13 : - // gUnit.g:1:83: RETVAL - { - mRETVAL(); - - } - break; - case 14 : - // gUnit.g:1:90: TREE - { - mTREE(); - - } - break; - case 15 : - // gUnit.g:1:95: OPTIONS - { - mOPTIONS(); - - } - break; - case 16 : - // gUnit.g:1:103: ID - { - mID(); - - } - break; - case 17 : - // gUnit.g:1:106: WS - { - mWS(); - - } - break; - case 18 : - // gUnit.g:1:109: SL_COMMENT - { - mSL_COMMENT(); - - } - break; - case 19 : - // gUnit.g:1:120: DOC_COMMENT - { - mDOC_COMMENT(); - - } - break; - case 20 : - // gUnit.g:1:132: ML_COMMENT - { - mML_COMMENT(); - - } - break; - case 21 : - // gUnit.g:1:143: STRING - { - mSTRING(); - - } - break; - case 22 : - // gUnit.g:1:150: ML_STRING - { - mML_STRING(); - - } - break; - case 23 : - // gUnit.g:1:160: FILENAME - { - mFILENAME(); - - } - break; - - } - - } - - - protected DFA18 dfa18 = new DFA18(this); - static final String DFA18_eotS = - "\1\uffff\1\27\4\uffff\1\27\1\uffff\3\27\4\uffff\2\27\4\uffff\2\27"+ - "\3\uffff\1\27\1\44\3\27\2\uffff\3\27\1\uffff\3\27\2\uffff\3\27\1"+ - "\62\2\27\1\65\1\66\1\uffff\2\27\2\uffff\2\27\1\73\1\27\2\uffff"; - static final String DFA18_eofS = - "\75\uffff"; - static final String DFA18_minS = - "\1\11\1\56\4\uffff\1\56\1\uffff\3\56\4\uffff\2\56\1\uffff\1\52\2"+ - "\uffff\2\56\1\uffff\1\101\1\uffff\5\56\1\uffff\1\0\3\56\1\uffff"+ - "\3\56\2\uffff\10\56\1\uffff\2\56\2\uffff\3\56\1\11\2\uffff"; - static final String DFA18_maxS = - "\1\175\1\172\4\uffff\1\172\1\uffff\3\172\4\uffff\2\172\1\uffff\1"+ - "\172\2\uffff\2\172\1\uffff\1\172\1\uffff\5\172\1\uffff\1\uffff\3"+ - "\172\1\uffff\3\172\2\uffff\10\172\1\uffff\2\172\2\uffff\3\172\1"+ - "\173\2\uffff"; - static final String DFA18_acceptS = - "\2\uffff\1\2\1\3\1\4\1\5\1\uffff\1\7\3\uffff\1\13\1\14\1\15\1\16"+ - "\2\uffff\1\21\1\uffff\1\25\1\26\2\uffff\1\20\1\uffff\1\27\5\uffff"+ - "\1\22\4\uffff\1\10\3\uffff\1\23\1\24\10\uffff\1\11\2\uffff\1\1\1"+ - "\6\4\uffff\1\12\1\17"; - static final String DFA18_specialS = - "\40\uffff\1\0\34\uffff}>"; - static final String[] DFA18_transitionS = { - 
"\2\21\2\uffff\1\21\22\uffff\1\21\1\uffff\1\23\5\uffff\1\16\4"+ - "\uffff\1\13\1\uffff\1\22\12\uffff\1\7\1\2\1\24\1\4\2\uffff\1"+ - "\5\5\20\1\11\10\20\1\10\13\20\1\15\3\uffff\1\20\1\uffff\6\20"+ - "\1\1\7\20\1\17\2\20\1\12\4\20\1\6\3\20\1\14\1\uffff\1\3", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\24\26\1"+ - "\25\5\26", - "", - "", - "", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\1\32\31"+ - "\26", - "", - "\1\30\1\31\12\26\7\uffff\12\26\1\33\17\26\4\uffff\1\26\1\uffff"+ - "\32\26", - "\1\30\1\31\12\26\7\uffff\1\34\31\26\4\uffff\1\26\1\uffff\32"+ - "\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\4\26\1"+ - "\35\25\26", - "", - "", - "", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\17\26\1"+ - "\36\12\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "", - "\1\40\4\uffff\1\37\21\uffff\32\31\4\uffff\1\31\1\uffff\32\31", - "", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\15\26\1"+ - "\41\14\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "", - "\32\42\4\uffff\1\42\1\uffff\32\42", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\13\26\1"+ - "\43\16\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "\1\30\1\31\12\26\7\uffff\10\26\1\45\21\26\4\uffff\1\26\1\uffff"+ - "\32\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\23\26\1"+ - "\46\6\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\23\26\1"+ - "\47\6\26", - "", - "\52\51\1\50\uffd5\51", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\10\26\1"+ - "\52\21\26", - "\1\30\1\31\12\53\7\uffff\32\53\4\uffff\1\53\1\uffff\32\53", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\12\26\1"+ - "\54\17\26", - "", - "\1\30\1\31\12\26\7\uffff\13\26\1\55\16\26\4\uffff\1\26\1\uffff"+ - "\32\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\24\26\1"+ - "\56\5\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\10\26\1"+ - "\57\21\26", - "", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\23\26\1"+ - "\60\6\26", - "\1\30\1\31\12\53\7\uffff\32\53\4\uffff\1\53\1\uffff\32\53", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\22\26\1"+ - "\61\7\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\21\26\1"+ - "\63\10\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\16\26\1"+ - "\64\13\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\15\26\1"+ - "\67\14\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\15\26\1"+ - "\70\14\26", - "", - "", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\22\26\1"+ - "\71\7\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\22\26\1"+ - "\72\7\26", - "\1\30\1\31\12\26\7\uffff\32\26\4\uffff\1\26\1\uffff\32\26", - "\2\74\2\uffff\1\74\22\uffff\1\74\15\uffff\1\30\1\31\12\26\7"+ - "\uffff\32\26\4\uffff\1\26\1\uffff\32\26\1\74", - "", - "" - }; - - static final short[] DFA18_eot = DFA.unpackEncodedString(DFA18_eotS); - static final short[] DFA18_eof = DFA.unpackEncodedString(DFA18_eofS); - static final char[] DFA18_min = DFA.unpackEncodedStringToUnsignedChars(DFA18_minS); - static final char[] DFA18_max = DFA.unpackEncodedStringToUnsignedChars(DFA18_maxS); - static final short[] DFA18_accept = DFA.unpackEncodedString(DFA18_acceptS); - static final short[] DFA18_special = 
DFA.unpackEncodedString(DFA18_specialS); - static final short[][] DFA18_transition; - - static { - int numStates = DFA18_transitionS.length; - DFA18_transition = new short[numStates][]; - for (int i=0; i='\u0000' && LA18_32<=')')||(LA18_32>='+' && LA18_32<='\uFFFF')) ) {s = 41;} - - if ( s>=0 ) return s; - break; - } - NoViableAltException nvae = - new NoViableAltException(getDescription(), 18, _s, input); - error(nvae); - throw nvae; - } - } - - -} \ No newline at end of file diff --git a/gunit/src/org/antlr/v4/gunit/gUnitParser.java b/gunit/src/org/antlr/v4/gunit/gUnitParser.java deleted file mode 100644 index 2878d923c..000000000 --- a/gunit/src/org/antlr/v4/gunit/gUnitParser.java +++ /dev/null @@ -1,1782 +0,0 @@ -// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 gUnit.g 2010-01-27 17:03:30 - -package org.antlr.v4.gunit; - - -import org.antlr.runtime.*; -import java.util.Stack; -import java.util.List; -import java.util.ArrayList; - - -import org.antlr.runtime.tree.*; - -public class gUnitParser extends Parser { - public static final String[] tokenNames = new String[] { - "", "", "", "", "SUITE", "TEST_OK", "TEST_FAIL", "TEST_RETVAL", "TEST_STDOUT", "TEST_TREE", "TEST_ACTION", "DOC_COMMENT", "ID", "OPTIONS", "STRING", "ACTION", "RETVAL", "ML_STRING", "TREE", "FILENAME", "NESTED_RETVAL", "NESTED_AST", "STRING_", "WS", "ID_", "SL_COMMENT", "ML_COMMENT", "XDIGIT", "'gunit'", "';'", "'}'", "'='", "'@header'", "'walks'", "':'", "'OK'", "'FAIL'", "'returns'", "'->'" - }; - public static final int T__29=29; - public static final int RETVAL=16; - public static final int T__28=28; - public static final int TEST_TREE=9; - public static final int STRING_=22; - public static final int NESTED_AST=21; - public static final int ML_STRING=17; - public static final int TEST_FAIL=6; - public static final int ID=12; - public static final int EOF=-1; - public static final int NESTED_RETVAL=20; - public static final int TEST_RETVAL=7; - public static final int TEST_STDOUT=8; - public static final int ACTION=15; - public static final int TEST_OK=5; - public static final int ML_COMMENT=26; - public static final int T__30=30; - public static final int T__31=31; - public static final int T__32=32; - public static final int WS=23; - public static final int T__33=33; - public static final int T__34=34; - public static final int T__35=35; - public static final int TREE=18; - public static final int T__36=36; - public static final int T__37=37; - public static final int FILENAME=19; - public static final int T__38=38; - public static final int ID_=24; - public static final int XDIGIT=27; - public static final int SL_COMMENT=25; - public static final int DOC_COMMENT=11; - public static final int TEST_ACTION=10; - public static final int SUITE=4; - public static final int OPTIONS=13; - public static final int STRING=14; - - // delegates - // delegators - - - public gUnitParser(TokenStream input) { - this(input, new RecognizerSharedState()); - } - public gUnitParser(TokenStream input, RecognizerSharedState state) { - super(input, state); - - } - - protected TreeAdaptor adaptor = new CommonTreeAdaptor(); - - public void setTreeAdaptor(TreeAdaptor adaptor) { - this.adaptor = adaptor; - } - public TreeAdaptor getTreeAdaptor() { - return adaptor; - } - - public String[] getTokenNames() { return gUnitParser.tokenNames; } - public String getGrammarFileName() { return "gUnit.g"; } - - - public static class gUnitDef_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // 
$ANTLR start "gUnitDef" - // gUnit.g:16:1: gUnitDef : ( DOC_COMMENT )? 'gunit' ID ';' ( optionsSpec | header )* ( testsuite )+ -> ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec )? ( header )? ( testsuite )+ ) ; - public final gUnitParser.gUnitDef_return gUnitDef() throws RecognitionException { - gUnitParser.gUnitDef_return retval = new gUnitParser.gUnitDef_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token DOC_COMMENT1=null; - Token string_literal2=null; - Token ID3=null; - Token char_literal4=null; - gUnitParser.optionsSpec_return optionsSpec5 = null; - - gUnitParser.header_return header6 = null; - - gUnitParser.testsuite_return testsuite7 = null; - - - CommonTree DOC_COMMENT1_tree=null; - CommonTree string_literal2_tree=null; - CommonTree ID3_tree=null; - CommonTree char_literal4_tree=null; - RewriteRuleTokenStream stream_DOC_COMMENT=new RewriteRuleTokenStream(adaptor,"token DOC_COMMENT"); - RewriteRuleTokenStream stream_ID=new RewriteRuleTokenStream(adaptor,"token ID"); - RewriteRuleTokenStream stream_28=new RewriteRuleTokenStream(adaptor,"token 28"); - RewriteRuleTokenStream stream_29=new RewriteRuleTokenStream(adaptor,"token 29"); - RewriteRuleSubtreeStream stream_optionsSpec=new RewriteRuleSubtreeStream(adaptor,"rule optionsSpec"); - RewriteRuleSubtreeStream stream_testsuite=new RewriteRuleSubtreeStream(adaptor,"rule testsuite"); - RewriteRuleSubtreeStream stream_header=new RewriteRuleSubtreeStream(adaptor,"rule header"); - try { - // gUnit.g:17:2: ( ( DOC_COMMENT )? 'gunit' ID ';' ( optionsSpec | header )* ( testsuite )+ -> ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec )? ( header )? ( testsuite )+ ) ) - // gUnit.g:17:4: ( DOC_COMMENT )? 'gunit' ID ';' ( optionsSpec | header )* ( testsuite )+ - { - // gUnit.g:17:4: ( DOC_COMMENT )? 
- int alt1=2; - int LA1_0 = input.LA(1); - - if ( (LA1_0==DOC_COMMENT) ) { - alt1=1; - } - switch (alt1) { - case 1 : - // gUnit.g:17:4: DOC_COMMENT - { - DOC_COMMENT1=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_gUnitDef67); - stream_DOC_COMMENT.add(DOC_COMMENT1); - - - } - break; - - } - - string_literal2=(Token)match(input,28,FOLLOW_28_in_gUnitDef70); - stream_28.add(string_literal2); - - ID3=(Token)match(input,ID,FOLLOW_ID_in_gUnitDef72); - stream_ID.add(ID3); - - char_literal4=(Token)match(input,29,FOLLOW_29_in_gUnitDef74); - stream_29.add(char_literal4); - - // gUnit.g:17:32: ( optionsSpec | header )* - loop2: - do { - int alt2=3; - int LA2_0 = input.LA(1); - - if ( (LA2_0==OPTIONS) ) { - alt2=1; - } - else if ( (LA2_0==32) ) { - alt2=2; - } - - - switch (alt2) { - case 1 : - // gUnit.g:17:33: optionsSpec - { - pushFollow(FOLLOW_optionsSpec_in_gUnitDef77); - optionsSpec5=optionsSpec(); - - state._fsp--; - - stream_optionsSpec.add(optionsSpec5.getTree()); - - } - break; - case 2 : - // gUnit.g:17:45: header - { - pushFollow(FOLLOW_header_in_gUnitDef79); - header6=header(); - - state._fsp--; - - stream_header.add(header6.getTree()); - - } - break; - - default : - break loop2; - } - } while (true); - - // gUnit.g:17:54: ( testsuite )+ - int cnt3=0; - loop3: - do { - int alt3=2; - int LA3_0 = input.LA(1); - - if ( ((LA3_0>=DOC_COMMENT && LA3_0<=ID)) ) { - alt3=1; - } - - - switch (alt3) { - case 1 : - // gUnit.g:17:54: testsuite - { - pushFollow(FOLLOW_testsuite_in_gUnitDef83); - testsuite7=testsuite(); - - state._fsp--; - - stream_testsuite.add(testsuite7.getTree()); - - } - break; - - default : - if ( cnt3 >= 1 ) break loop3; - EarlyExitException eee = - new EarlyExitException(3, input); - throw eee; - } - cnt3++; - } while (true); - - - - // AST REWRITE - // elements: header, optionsSpec, 28, testsuite, DOC_COMMENT, ID - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 18:6: -> ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec )? ( header )? ( testsuite )+ ) - { - // gUnit.g:18:9: ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec )? ( header )? ( testsuite )+ ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot(stream_28.nextNode(), root_1); - - adaptor.addChild(root_1, stream_ID.nextNode()); - // gUnit.g:18:22: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - // gUnit.g:18:35: ( optionsSpec )? - if ( stream_optionsSpec.hasNext() ) { - adaptor.addChild(root_1, stream_optionsSpec.nextTree()); - - } - stream_optionsSpec.reset(); - // gUnit.g:18:48: ( header )? 
- if ( stream_header.hasNext() ) { - adaptor.addChild(root_1, stream_header.nextTree()); - - } - stream_header.reset(); - if ( !(stream_testsuite.hasNext()) ) { - throw new RewriteEarlyExitException(); - } - while ( stream_testsuite.hasNext() ) { - adaptor.addChild(root_1, stream_testsuite.nextTree()); - - } - stream_testsuite.reset(); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "gUnitDef" - - public static class optionsSpec_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "optionsSpec" - // gUnit.g:21:1: optionsSpec : OPTIONS ( option ';' )+ '}' -> ^( OPTIONS ( option )+ ) ; - public final gUnitParser.optionsSpec_return optionsSpec() throws RecognitionException { - gUnitParser.optionsSpec_return retval = new gUnitParser.optionsSpec_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token OPTIONS8=null; - Token char_literal10=null; - Token char_literal11=null; - gUnitParser.option_return option9 = null; - - - CommonTree OPTIONS8_tree=null; - CommonTree char_literal10_tree=null; - CommonTree char_literal11_tree=null; - RewriteRuleTokenStream stream_30=new RewriteRuleTokenStream(adaptor,"token 30"); - RewriteRuleTokenStream stream_OPTIONS=new RewriteRuleTokenStream(adaptor,"token OPTIONS"); - RewriteRuleTokenStream stream_29=new RewriteRuleTokenStream(adaptor,"token 29"); - RewriteRuleSubtreeStream stream_option=new RewriteRuleSubtreeStream(adaptor,"rule option"); - try { - // gUnit.g:22:2: ( OPTIONS ( option ';' )+ '}' -> ^( OPTIONS ( option )+ ) ) - // gUnit.g:22:4: OPTIONS ( option ';' )+ '}' - { - OPTIONS8=(Token)match(input,OPTIONS,FOLLOW_OPTIONS_in_optionsSpec120); - stream_OPTIONS.add(OPTIONS8); - - // gUnit.g:22:12: ( option ';' )+ - int cnt4=0; - loop4: - do { - int alt4=2; - int LA4_0 = input.LA(1); - - if ( (LA4_0==ID) ) { - alt4=1; - } - - - switch (alt4) { - case 1 : - // gUnit.g:22:13: option ';' - { - pushFollow(FOLLOW_option_in_optionsSpec123); - option9=option(); - - state._fsp--; - - stream_option.add(option9.getTree()); - char_literal10=(Token)match(input,29,FOLLOW_29_in_optionsSpec125); - stream_29.add(char_literal10); - - - } - break; - - default : - if ( cnt4 >= 1 ) break loop4; - EarlyExitException eee = - new EarlyExitException(4, input); - throw eee; - } - cnt4++; - } while (true); - - char_literal11=(Token)match(input,30,FOLLOW_30_in_optionsSpec129); - stream_30.add(char_literal11); - - - - // AST REWRITE - // elements: OPTIONS, option - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 22:30: -> ^( OPTIONS ( option )+ ) - { - // gUnit.g:22:33: ^( OPTIONS ( option )+ ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot(stream_OPTIONS.nextNode(), root_1); - - if ( !(stream_option.hasNext()) ) { - throw new RewriteEarlyExitException(); - } - while ( 
stream_option.hasNext() ) { - adaptor.addChild(root_1, stream_option.nextTree()); - - } - stream_option.reset(); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "optionsSpec" - - public static class option_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "option" - // gUnit.g:25:1: option : ID '=' optionValue -> ^( '=' ID optionValue ) ; - public final gUnitParser.option_return option() throws RecognitionException { - gUnitParser.option_return retval = new gUnitParser.option_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token ID12=null; - Token char_literal13=null; - gUnitParser.optionValue_return optionValue14 = null; - - - CommonTree ID12_tree=null; - CommonTree char_literal13_tree=null; - RewriteRuleTokenStream stream_31=new RewriteRuleTokenStream(adaptor,"token 31"); - RewriteRuleTokenStream stream_ID=new RewriteRuleTokenStream(adaptor,"token ID"); - RewriteRuleSubtreeStream stream_optionValue=new RewriteRuleSubtreeStream(adaptor,"rule optionValue"); - try { - // gUnit.g:26:5: ( ID '=' optionValue -> ^( '=' ID optionValue ) ) - // gUnit.g:26:9: ID '=' optionValue - { - ID12=(Token)match(input,ID,FOLLOW_ID_in_option154); - stream_ID.add(ID12); - - char_literal13=(Token)match(input,31,FOLLOW_31_in_option156); - stream_31.add(char_literal13); - - pushFollow(FOLLOW_optionValue_in_option158); - optionValue14=optionValue(); - - state._fsp--; - - stream_optionValue.add(optionValue14.getTree()); - - - // AST REWRITE - // elements: ID, optionValue, 31 - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 26:28: -> ^( '=' ID optionValue ) - { - // gUnit.g:26:31: ^( '=' ID optionValue ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot(stream_31.nextNode(), root_1); - - adaptor.addChild(root_1, stream_ID.nextNode()); - adaptor.addChild(root_1, stream_optionValue.nextTree()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "option" - - public static class optionValue_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "optionValue" - // gUnit.g:29:1: optionValue : ( ID | STRING ); - public final gUnitParser.optionValue_return optionValue() throws RecognitionException { - gUnitParser.optionValue_return retval = new gUnitParser.optionValue_return(); - retval.start = input.LT(1); - - CommonTree 
root_0 = null; - - Token set15=null; - - CommonTree set15_tree=null; - - try { - // gUnit.g:30:5: ( ID | STRING ) - // gUnit.g: - { - root_0 = (CommonTree)adaptor.nil(); - - set15=(Token)input.LT(1); - if ( input.LA(1)==ID||input.LA(1)==STRING ) { - input.consume(); - adaptor.addChild(root_0, (CommonTree)adaptor.create(set15)); - state.errorRecovery=false; - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - throw mse; - } - - - } - - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "optionValue" - - public static class header_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "header" - // gUnit.g:34:1: header : '@header' ACTION -> ^( '@header' ACTION ) ; - public final gUnitParser.header_return header() throws RecognitionException { - gUnitParser.header_return retval = new gUnitParser.header_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token string_literal16=null; - Token ACTION17=null; - - CommonTree string_literal16_tree=null; - CommonTree ACTION17_tree=null; - RewriteRuleTokenStream stream_32=new RewriteRuleTokenStream(adaptor,"token 32"); - RewriteRuleTokenStream stream_ACTION=new RewriteRuleTokenStream(adaptor,"token ACTION"); - - try { - // gUnit.g:34:8: ( '@header' ACTION -> ^( '@header' ACTION ) ) - // gUnit.g:34:10: '@header' ACTION - { - string_literal16=(Token)match(input,32,FOLLOW_32_in_header215); - stream_32.add(string_literal16); - - ACTION17=(Token)match(input,ACTION,FOLLOW_ACTION_in_header217); - stream_ACTION.add(ACTION17); - - - - // AST REWRITE - // elements: ACTION, 32 - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 34:27: -> ^( '@header' ACTION ) - { - // gUnit.g:34:30: ^( '@header' ACTION ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot(stream_32.nextNode(), root_1); - - adaptor.addChild(root_1, stream_ACTION.nextNode()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "header" - - public static class testsuite_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "testsuite" - // gUnit.g:36:1: testsuite : ( ( DOC_COMMENT )? treeRule= ID 'walks' parserRule= ID ':' ( testcase )+ -> ^( SUITE $treeRule $parserRule ( DOC_COMMENT )? ( testcase )+ ) | ( DOC_COMMENT )? ID ':' ( testcase )+ -> ^( SUITE ID ( DOC_COMMENT )? 
( testcase )+ ) ); - public final gUnitParser.testsuite_return testsuite() throws RecognitionException { - gUnitParser.testsuite_return retval = new gUnitParser.testsuite_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token treeRule=null; - Token parserRule=null; - Token DOC_COMMENT18=null; - Token string_literal19=null; - Token char_literal20=null; - Token DOC_COMMENT22=null; - Token ID23=null; - Token char_literal24=null; - gUnitParser.testcase_return testcase21 = null; - - gUnitParser.testcase_return testcase25 = null; - - - CommonTree treeRule_tree=null; - CommonTree parserRule_tree=null; - CommonTree DOC_COMMENT18_tree=null; - CommonTree string_literal19_tree=null; - CommonTree char_literal20_tree=null; - CommonTree DOC_COMMENT22_tree=null; - CommonTree ID23_tree=null; - CommonTree char_literal24_tree=null; - RewriteRuleTokenStream stream_DOC_COMMENT=new RewriteRuleTokenStream(adaptor,"token DOC_COMMENT"); - RewriteRuleTokenStream stream_ID=new RewriteRuleTokenStream(adaptor,"token ID"); - RewriteRuleTokenStream stream_33=new RewriteRuleTokenStream(adaptor,"token 33"); - RewriteRuleTokenStream stream_34=new RewriteRuleTokenStream(adaptor,"token 34"); - RewriteRuleSubtreeStream stream_testcase=new RewriteRuleSubtreeStream(adaptor,"rule testcase"); - try { - // gUnit.g:37:2: ( ( DOC_COMMENT )? treeRule= ID 'walks' parserRule= ID ':' ( testcase )+ -> ^( SUITE $treeRule $parserRule ( DOC_COMMENT )? ( testcase )+ ) | ( DOC_COMMENT )? ID ':' ( testcase )+ -> ^( SUITE ID ( DOC_COMMENT )? ( testcase )+ ) ) - int alt9=2; - int LA9_0 = input.LA(1); - - if ( (LA9_0==DOC_COMMENT) ) { - int LA9_1 = input.LA(2); - - if ( (LA9_1==ID) ) { - int LA9_2 = input.LA(3); - - if ( (LA9_2==33) ) { - alt9=1; - } - else if ( (LA9_2==34) ) { - alt9=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 9, 2, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 9, 1, input); - - throw nvae; - } - } - else if ( (LA9_0==ID) ) { - int LA9_2 = input.LA(2); - - if ( (LA9_2==33) ) { - alt9=1; - } - else if ( (LA9_2==34) ) { - alt9=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 9, 2, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 9, 0, input); - - throw nvae; - } - switch (alt9) { - case 1 : - // gUnit.g:37:4: ( DOC_COMMENT )? treeRule= ID 'walks' parserRule= ID ':' ( testcase )+ - { - // gUnit.g:37:4: ( DOC_COMMENT )? 
- int alt5=2; - int LA5_0 = input.LA(1); - - if ( (LA5_0==DOC_COMMENT) ) { - alt5=1; - } - switch (alt5) { - case 1 : - // gUnit.g:37:4: DOC_COMMENT - { - DOC_COMMENT18=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testsuite234); - stream_DOC_COMMENT.add(DOC_COMMENT18); - - - } - break; - - } - - treeRule=(Token)match(input,ID,FOLLOW_ID_in_testsuite239); - stream_ID.add(treeRule); - - string_literal19=(Token)match(input,33,FOLLOW_33_in_testsuite241); - stream_33.add(string_literal19); - - parserRule=(Token)match(input,ID,FOLLOW_ID_in_testsuite245); - stream_ID.add(parserRule); - - char_literal20=(Token)match(input,34,FOLLOW_34_in_testsuite247); - stream_34.add(char_literal20); - - // gUnit.g:37:55: ( testcase )+ - int cnt6=0; - loop6: - do { - int alt6=2; - int LA6_0 = input.LA(1); - - if ( (LA6_0==DOC_COMMENT) ) { - int LA6_2 = input.LA(2); - - if ( (LA6_2==STRING||LA6_2==ML_STRING||LA6_2==FILENAME) ) { - alt6=1; - } - - - } - else if ( (LA6_0==STRING||LA6_0==ML_STRING||LA6_0==FILENAME) ) { - alt6=1; - } - - - switch (alt6) { - case 1 : - // gUnit.g:37:55: testcase - { - pushFollow(FOLLOW_testcase_in_testsuite249); - testcase21=testcase(); - - state._fsp--; - - stream_testcase.add(testcase21.getTree()); - - } - break; - - default : - if ( cnt6 >= 1 ) break loop6; - EarlyExitException eee = - new EarlyExitException(6, input); - throw eee; - } - cnt6++; - } while (true); - - - - // AST REWRITE - // elements: treeRule, DOC_COMMENT, testcase, parserRule - // token labels: parserRule, treeRule - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleTokenStream stream_parserRule=new RewriteRuleTokenStream(adaptor,"token parserRule",parserRule); - RewriteRuleTokenStream stream_treeRule=new RewriteRuleTokenStream(adaptor,"token treeRule",treeRule); - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 38:3: -> ^( SUITE $treeRule $parserRule ( DOC_COMMENT )? ( testcase )+ ) - { - // gUnit.g:38:6: ^( SUITE $treeRule $parserRule ( DOC_COMMENT )? ( testcase )+ ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(SUITE, "SUITE"), root_1); - - adaptor.addChild(root_1, stream_treeRule.nextNode()); - adaptor.addChild(root_1, stream_parserRule.nextNode()); - // gUnit.g:38:36: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - if ( !(stream_testcase.hasNext()) ) { - throw new RewriteEarlyExitException(); - } - while ( stream_testcase.hasNext() ) { - adaptor.addChild(root_1, stream_testcase.nextTree()); - - } - stream_testcase.reset(); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 2 : - // gUnit.g:39:4: ( DOC_COMMENT )? ID ':' ( testcase )+ - { - // gUnit.g:39:4: ( DOC_COMMENT )? 
- int alt7=2; - int LA7_0 = input.LA(1); - - if ( (LA7_0==DOC_COMMENT) ) { - alt7=1; - } - switch (alt7) { - case 1 : - // gUnit.g:39:4: DOC_COMMENT - { - DOC_COMMENT22=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testsuite275); - stream_DOC_COMMENT.add(DOC_COMMENT22); - - - } - break; - - } - - ID23=(Token)match(input,ID,FOLLOW_ID_in_testsuite278); - stream_ID.add(ID23); - - char_literal24=(Token)match(input,34,FOLLOW_34_in_testsuite280); - stream_34.add(char_literal24); - - // gUnit.g:39:24: ( testcase )+ - int cnt8=0; - loop8: - do { - int alt8=2; - int LA8_0 = input.LA(1); - - if ( (LA8_0==DOC_COMMENT) ) { - int LA8_2 = input.LA(2); - - if ( (LA8_2==STRING||LA8_2==ML_STRING||LA8_2==FILENAME) ) { - alt8=1; - } - - - } - else if ( (LA8_0==STRING||LA8_0==ML_STRING||LA8_0==FILENAME) ) { - alt8=1; - } - - - switch (alt8) { - case 1 : - // gUnit.g:39:24: testcase - { - pushFollow(FOLLOW_testcase_in_testsuite282); - testcase25=testcase(); - - state._fsp--; - - stream_testcase.add(testcase25.getTree()); - - } - break; - - default : - if ( cnt8 >= 1 ) break loop8; - EarlyExitException eee = - new EarlyExitException(8, input); - throw eee; - } - cnt8++; - } while (true); - - - - // AST REWRITE - // elements: ID, DOC_COMMENT, testcase - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 39:34: -> ^( SUITE ID ( DOC_COMMENT )? ( testcase )+ ) - { - // gUnit.g:39:37: ^( SUITE ID ( DOC_COMMENT )? ( testcase )+ ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(SUITE, "SUITE"), root_1); - - adaptor.addChild(root_1, stream_ID.nextNode()); - // gUnit.g:39:48: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - if ( !(stream_testcase.hasNext()) ) { - throw new RewriteEarlyExitException(); - } - while ( stream_testcase.hasNext() ) { - adaptor.addChild(root_1, stream_testcase.nextTree()); - - } - stream_testcase.reset(); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - - } - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "testsuite" - - public static class testcase_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "testcase" - // gUnit.g:42:1: testcase : ( ( DOC_COMMENT )? input 'OK' -> ^( TEST_OK ( DOC_COMMENT )? input ) | ( DOC_COMMENT )? input 'FAIL' -> ^( TEST_FAIL ( DOC_COMMENT )? input ) | ( DOC_COMMENT )? input 'returns' RETVAL -> ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ( DOC_COMMENT )? input '->' STRING -> ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ( DOC_COMMENT )? input '->' ML_STRING -> ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ( DOC_COMMENT )? input '->' TREE -> ^( TEST_TREE ( DOC_COMMENT )? input TREE ) | ( DOC_COMMENT )? input '->' ACTION -> ^( TEST_ACTION ( DOC_COMMENT )? 
input ACTION ) ); - public final gUnitParser.testcase_return testcase() throws RecognitionException { - gUnitParser.testcase_return retval = new gUnitParser.testcase_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token DOC_COMMENT26=null; - Token string_literal28=null; - Token DOC_COMMENT29=null; - Token string_literal31=null; - Token DOC_COMMENT32=null; - Token string_literal34=null; - Token RETVAL35=null; - Token DOC_COMMENT36=null; - Token string_literal38=null; - Token STRING39=null; - Token DOC_COMMENT40=null; - Token string_literal42=null; - Token ML_STRING43=null; - Token DOC_COMMENT44=null; - Token string_literal46=null; - Token TREE47=null; - Token DOC_COMMENT48=null; - Token string_literal50=null; - Token ACTION51=null; - gUnitParser.input_return input27 = null; - - gUnitParser.input_return input30 = null; - - gUnitParser.input_return input33 = null; - - gUnitParser.input_return input37 = null; - - gUnitParser.input_return input41 = null; - - gUnitParser.input_return input45 = null; - - gUnitParser.input_return input49 = null; - - - CommonTree DOC_COMMENT26_tree=null; - CommonTree string_literal28_tree=null; - CommonTree DOC_COMMENT29_tree=null; - CommonTree string_literal31_tree=null; - CommonTree DOC_COMMENT32_tree=null; - CommonTree string_literal34_tree=null; - CommonTree RETVAL35_tree=null; - CommonTree DOC_COMMENT36_tree=null; - CommonTree string_literal38_tree=null; - CommonTree STRING39_tree=null; - CommonTree DOC_COMMENT40_tree=null; - CommonTree string_literal42_tree=null; - CommonTree ML_STRING43_tree=null; - CommonTree DOC_COMMENT44_tree=null; - CommonTree string_literal46_tree=null; - CommonTree TREE47_tree=null; - CommonTree DOC_COMMENT48_tree=null; - CommonTree string_literal50_tree=null; - CommonTree ACTION51_tree=null; - RewriteRuleTokenStream stream_DOC_COMMENT=new RewriteRuleTokenStream(adaptor,"token DOC_COMMENT"); - RewriteRuleTokenStream stream_RETVAL=new RewriteRuleTokenStream(adaptor,"token RETVAL"); - RewriteRuleTokenStream stream_TREE=new RewriteRuleTokenStream(adaptor,"token TREE"); - RewriteRuleTokenStream stream_35=new RewriteRuleTokenStream(adaptor,"token 35"); - RewriteRuleTokenStream stream_36=new RewriteRuleTokenStream(adaptor,"token 36"); - RewriteRuleTokenStream stream_ML_STRING=new RewriteRuleTokenStream(adaptor,"token ML_STRING"); - RewriteRuleTokenStream stream_ACTION=new RewriteRuleTokenStream(adaptor,"token ACTION"); - RewriteRuleTokenStream stream_STRING=new RewriteRuleTokenStream(adaptor,"token STRING"); - RewriteRuleTokenStream stream_37=new RewriteRuleTokenStream(adaptor,"token 37"); - RewriteRuleTokenStream stream_38=new RewriteRuleTokenStream(adaptor,"token 38"); - RewriteRuleSubtreeStream stream_input=new RewriteRuleSubtreeStream(adaptor,"rule input"); - try { - // gUnit.g:43:2: ( ( DOC_COMMENT )? input 'OK' -> ^( TEST_OK ( DOC_COMMENT )? input ) | ( DOC_COMMENT )? input 'FAIL' -> ^( TEST_FAIL ( DOC_COMMENT )? input ) | ( DOC_COMMENT )? input 'returns' RETVAL -> ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ( DOC_COMMENT )? input '->' STRING -> ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ( DOC_COMMENT )? input '->' ML_STRING -> ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ( DOC_COMMENT )? input '->' TREE -> ^( TEST_TREE ( DOC_COMMENT )? input TREE ) | ( DOC_COMMENT )? input '->' ACTION -> ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) ) - int alt17=7; - alt17 = dfa17.predict(input); - switch (alt17) { - case 1 : - // gUnit.g:43:4: ( DOC_COMMENT )? 
input 'OK' - { - // gUnit.g:43:4: ( DOC_COMMENT )? - int alt10=2; - int LA10_0 = input.LA(1); - - if ( (LA10_0==DOC_COMMENT) ) { - alt10=1; - } - switch (alt10) { - case 1 : - // gUnit.g:43:4: DOC_COMMENT - { - DOC_COMMENT26=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase308); - stream_DOC_COMMENT.add(DOC_COMMENT26); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase311); - input27=input(); - - state._fsp--; - - stream_input.add(input27.getTree()); - string_literal28=(Token)match(input,35,FOLLOW_35_in_testcase313); - stream_35.add(string_literal28); - - - - // AST REWRITE - // elements: input, DOC_COMMENT - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 43:31: -> ^( TEST_OK ( DOC_COMMENT )? input ) - { - // gUnit.g:43:34: ^( TEST_OK ( DOC_COMMENT )? input ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_OK, "TEST_OK"), root_1); - - // gUnit.g:43:44: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 2 : - // gUnit.g:44:4: ( DOC_COMMENT )? input 'FAIL' - { - // gUnit.g:44:4: ( DOC_COMMENT )? - int alt11=2; - int LA11_0 = input.LA(1); - - if ( (LA11_0==DOC_COMMENT) ) { - alt11=1; - } - switch (alt11) { - case 1 : - // gUnit.g:44:4: DOC_COMMENT - { - DOC_COMMENT29=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase332); - stream_DOC_COMMENT.add(DOC_COMMENT29); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase335); - input30=input(); - - state._fsp--; - - stream_input.add(input30.getTree()); - string_literal31=(Token)match(input,36,FOLLOW_36_in_testcase337); - stream_36.add(string_literal31); - - - - // AST REWRITE - // elements: input, DOC_COMMENT - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 44:33: -> ^( TEST_FAIL ( DOC_COMMENT )? input ) - { - // gUnit.g:44:36: ^( TEST_FAIL ( DOC_COMMENT )? input ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_FAIL, "TEST_FAIL"), root_1); - - // gUnit.g:44:48: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 3 : - // gUnit.g:45:4: ( DOC_COMMENT )? input 'returns' RETVAL - { - // gUnit.g:45:4: ( DOC_COMMENT )? 
- int alt12=2; - int LA12_0 = input.LA(1); - - if ( (LA12_0==DOC_COMMENT) ) { - alt12=1; - } - switch (alt12) { - case 1 : - // gUnit.g:45:4: DOC_COMMENT - { - DOC_COMMENT32=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase356); - stream_DOC_COMMENT.add(DOC_COMMENT32); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase359); - input33=input(); - - state._fsp--; - - stream_input.add(input33.getTree()); - string_literal34=(Token)match(input,37,FOLLOW_37_in_testcase361); - stream_37.add(string_literal34); - - RETVAL35=(Token)match(input,RETVAL,FOLLOW_RETVAL_in_testcase363); - stream_RETVAL.add(RETVAL35); - - - - // AST REWRITE - // elements: DOC_COMMENT, RETVAL, input - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 45:40: -> ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) - { - // gUnit.g:45:43: ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_RETVAL, "TEST_RETVAL"), root_1); - - // gUnit.g:45:57: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - adaptor.addChild(root_1, stream_RETVAL.nextNode()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 4 : - // gUnit.g:46:4: ( DOC_COMMENT )? input '->' STRING - { - // gUnit.g:46:4: ( DOC_COMMENT )? - int alt13=2; - int LA13_0 = input.LA(1); - - if ( (LA13_0==DOC_COMMENT) ) { - alt13=1; - } - switch (alt13) { - case 1 : - // gUnit.g:46:4: DOC_COMMENT - { - DOC_COMMENT36=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase381); - stream_DOC_COMMENT.add(DOC_COMMENT36); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase384); - input37=input(); - - state._fsp--; - - stream_input.add(input37.getTree()); - string_literal38=(Token)match(input,38,FOLLOW_38_in_testcase386); - stream_38.add(string_literal38); - - STRING39=(Token)match(input,STRING,FOLLOW_STRING_in_testcase388); - stream_STRING.add(STRING39); - - - - // AST REWRITE - // elements: STRING, input, DOC_COMMENT - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 46:36: -> ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) - { - // gUnit.g:46:39: ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_STDOUT, "TEST_STDOUT"), root_1); - - // gUnit.g:46:53: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - adaptor.addChild(root_1, stream_STRING.nextNode()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 5 : - // gUnit.g:47:4: ( DOC_COMMENT )? input '->' ML_STRING - { - // gUnit.g:47:4: ( DOC_COMMENT )? 
- int alt14=2; - int LA14_0 = input.LA(1); - - if ( (LA14_0==DOC_COMMENT) ) { - alt14=1; - } - switch (alt14) { - case 1 : - // gUnit.g:47:4: DOC_COMMENT - { - DOC_COMMENT40=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase407); - stream_DOC_COMMENT.add(DOC_COMMENT40); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase410); - input41=input(); - - state._fsp--; - - stream_input.add(input41.getTree()); - string_literal42=(Token)match(input,38,FOLLOW_38_in_testcase412); - stream_38.add(string_literal42); - - ML_STRING43=(Token)match(input,ML_STRING,FOLLOW_ML_STRING_in_testcase414); - stream_ML_STRING.add(ML_STRING43); - - - - // AST REWRITE - // elements: input, ML_STRING, DOC_COMMENT - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 47:38: -> ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) - { - // gUnit.g:47:41: ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_STDOUT, "TEST_STDOUT"), root_1); - - // gUnit.g:47:55: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - adaptor.addChild(root_1, stream_ML_STRING.nextNode()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 6 : - // gUnit.g:48:4: ( DOC_COMMENT )? input '->' TREE - { - // gUnit.g:48:4: ( DOC_COMMENT )? - int alt15=2; - int LA15_0 = input.LA(1); - - if ( (LA15_0==DOC_COMMENT) ) { - alt15=1; - } - switch (alt15) { - case 1 : - // gUnit.g:48:4: DOC_COMMENT - { - DOC_COMMENT44=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase432); - stream_DOC_COMMENT.add(DOC_COMMENT44); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase435); - input45=input(); - - state._fsp--; - - stream_input.add(input45.getTree()); - string_literal46=(Token)match(input,38,FOLLOW_38_in_testcase437); - stream_38.add(string_literal46); - - TREE47=(Token)match(input,TREE,FOLLOW_TREE_in_testcase439); - stream_TREE.add(TREE47); - - - - // AST REWRITE - // elements: DOC_COMMENT, input, TREE - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 48:34: -> ^( TEST_TREE ( DOC_COMMENT )? input TREE ) - { - // gUnit.g:48:37: ^( TEST_TREE ( DOC_COMMENT )? input TREE ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_TREE, "TEST_TREE"), root_1); - - // gUnit.g:48:49: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - adaptor.addChild(root_1, stream_TREE.nextNode()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - case 7 : - // gUnit.g:49:4: ( DOC_COMMENT )? input '->' ACTION - { - // gUnit.g:49:4: ( DOC_COMMENT )? 
- int alt16=2; - int LA16_0 = input.LA(1); - - if ( (LA16_0==DOC_COMMENT) ) { - alt16=1; - } - switch (alt16) { - case 1 : - // gUnit.g:49:4: DOC_COMMENT - { - DOC_COMMENT48=(Token)match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase458); - stream_DOC_COMMENT.add(DOC_COMMENT48); - - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase461); - input49=input(); - - state._fsp--; - - stream_input.add(input49.getTree()); - string_literal50=(Token)match(input,38,FOLLOW_38_in_testcase463); - stream_38.add(string_literal50); - - ACTION51=(Token)match(input,ACTION,FOLLOW_ACTION_in_testcase465); - stream_ACTION.add(ACTION51); - - - - // AST REWRITE - // elements: ACTION, input, DOC_COMMENT - // token labels: - // rule labels: retval - // token list labels: - // rule list labels: - // wildcard labels: - retval.tree = root_0; - RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null); - - root_0 = (CommonTree)adaptor.nil(); - // 49:36: -> ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) - { - // gUnit.g:49:39: ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) - { - CommonTree root_1 = (CommonTree)adaptor.nil(); - root_1 = (CommonTree)adaptor.becomeRoot((CommonTree)adaptor.create(TEST_ACTION, "TEST_ACTION"), root_1); - - // gUnit.g:49:53: ( DOC_COMMENT )? - if ( stream_DOC_COMMENT.hasNext() ) { - adaptor.addChild(root_1, stream_DOC_COMMENT.nextNode()); - - } - stream_DOC_COMMENT.reset(); - adaptor.addChild(root_1, stream_input.nextTree()); - adaptor.addChild(root_1, stream_ACTION.nextNode()); - - adaptor.addChild(root_0, root_1); - } - - } - - retval.tree = root_0; - } - break; - - } - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "testcase" - - public static class input_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - - // $ANTLR start "input" - // gUnit.g:52:1: input : ( STRING | ML_STRING | FILENAME ); - public final gUnitParser.input_return input() throws RecognitionException { - gUnitParser.input_return retval = new gUnitParser.input_return(); - retval.start = input.LT(1); - - CommonTree root_0 = null; - - Token set52=null; - - CommonTree set52_tree=null; - - try { - // gUnit.g:53:2: ( STRING | ML_STRING | FILENAME ) - // gUnit.g: - { - root_0 = (CommonTree)adaptor.nil(); - - set52=(Token)input.LT(1); - if ( input.LA(1)==STRING||input.LA(1)==ML_STRING||input.LA(1)==FILENAME ) { - input.consume(); - adaptor.addChild(root_0, (CommonTree)adaptor.create(set52)); - state.errorRecovery=false; - } - else { - MismatchedSetException mse = new MismatchedSetException(null,input); - throw mse; - } - - - } - - retval.stop = input.LT(-1); - - retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0); - adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop); - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re); - - } - finally { - } - return retval; - } - // $ANTLR end "input" - - // Delegated rules - - - protected DFA17 dfa17 = new DFA17(this); - static final String DFA17_eotS = - "\13\uffff"; - static final String 
DFA17_eofS = - "\13\uffff"; - static final String DFA17_minS = - "\1\13\1\16\1\43\2\uffff\1\16\5\uffff"; - static final String DFA17_maxS = - "\2\23\1\46\2\uffff\1\22\5\uffff"; - static final String DFA17_acceptS = - "\3\uffff\1\3\1\1\1\uffff\1\2\1\5\1\4\1\6\1\7"; - static final String DFA17_specialS = - "\13\uffff}>"; - static final String[] DFA17_transitionS = { - "\1\1\2\uffff\1\2\2\uffff\1\2\1\uffff\1\2", - "\1\2\2\uffff\1\2\1\uffff\1\2", - "\1\4\1\6\1\3\1\5", - "", - "", - "\1\10\1\12\1\uffff\1\7\1\11", - "", - "", - "", - "", - "" - }; - - static final short[] DFA17_eot = DFA.unpackEncodedString(DFA17_eotS); - static final short[] DFA17_eof = DFA.unpackEncodedString(DFA17_eofS); - static final char[] DFA17_min = DFA.unpackEncodedStringToUnsignedChars(DFA17_minS); - static final char[] DFA17_max = DFA.unpackEncodedStringToUnsignedChars(DFA17_maxS); - static final short[] DFA17_accept = DFA.unpackEncodedString(DFA17_acceptS); - static final short[] DFA17_special = DFA.unpackEncodedString(DFA17_specialS); - static final short[][] DFA17_transition; - - static { - int numStates = DFA17_transitionS.length; - DFA17_transition = new short[numStates][]; - for (int i=0; i ^( TEST_OK ( DOC_COMMENT )? input ) | ( DOC_COMMENT )? input 'FAIL' -> ^( TEST_FAIL ( DOC_COMMENT )? input ) | ( DOC_COMMENT )? input 'returns' RETVAL -> ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ( DOC_COMMENT )? input '->' STRING -> ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ( DOC_COMMENT )? input '->' ML_STRING -> ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ( DOC_COMMENT )? input '->' TREE -> ^( TEST_TREE ( DOC_COMMENT )? input TREE ) | ( DOC_COMMENT )? input '->' ACTION -> ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) );"; - } - } - - - public static final BitSet FOLLOW_DOC_COMMENT_in_gUnitDef67 = new BitSet(new long[]{0x0000000010000000L}); - public static final BitSet FOLLOW_28_in_gUnitDef70 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_gUnitDef72 = new BitSet(new long[]{0x0000000020000000L}); - public static final BitSet FOLLOW_29_in_gUnitDef74 = new BitSet(new long[]{0x0000000100003800L}); - public static final BitSet FOLLOW_optionsSpec_in_gUnitDef77 = new BitSet(new long[]{0x0000000100003800L}); - public static final BitSet FOLLOW_header_in_gUnitDef79 = new BitSet(new long[]{0x0000000100003800L}); - public static final BitSet FOLLOW_testsuite_in_gUnitDef83 = new BitSet(new long[]{0x0000000100003802L}); - public static final BitSet FOLLOW_OPTIONS_in_optionsSpec120 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_option_in_optionsSpec123 = new BitSet(new long[]{0x0000000020000000L}); - public static final BitSet FOLLOW_29_in_optionsSpec125 = new BitSet(new long[]{0x0000000040001000L}); - public static final BitSet FOLLOW_30_in_optionsSpec129 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_ID_in_option154 = new BitSet(new long[]{0x0000000080000000L}); - public static final BitSet FOLLOW_31_in_option156 = new BitSet(new long[]{0x0000000000005000L}); - public static final BitSet FOLLOW_optionValue_in_option158 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_set_in_optionValue0 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_32_in_header215 = new BitSet(new long[]{0x0000000000008000L}); - public static final BitSet FOLLOW_ACTION_in_header217 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet 
FOLLOW_DOC_COMMENT_in_testsuite234 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_testsuite239 = new BitSet(new long[]{0x0000000200000000L}); - public static final BitSet FOLLOW_33_in_testsuite241 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_testsuite245 = new BitSet(new long[]{0x0000000400000000L}); - public static final BitSet FOLLOW_34_in_testsuite247 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_testcase_in_testsuite249 = new BitSet(new long[]{0x00000000000A4802L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testsuite275 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_testsuite278 = new BitSet(new long[]{0x0000000400000000L}); - public static final BitSet FOLLOW_34_in_testsuite280 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_testcase_in_testsuite282 = new BitSet(new long[]{0x00000000000A4802L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase308 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase311 = new BitSet(new long[]{0x0000000800000000L}); - public static final BitSet FOLLOW_35_in_testcase313 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase332 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase335 = new BitSet(new long[]{0x0000001000000000L}); - public static final BitSet FOLLOW_36_in_testcase337 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase356 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase359 = new BitSet(new long[]{0x0000002000000000L}); - public static final BitSet FOLLOW_37_in_testcase361 = new BitSet(new long[]{0x0000000000010000L}); - public static final BitSet FOLLOW_RETVAL_in_testcase363 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase381 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase384 = new BitSet(new long[]{0x0000004000000000L}); - public static final BitSet FOLLOW_38_in_testcase386 = new BitSet(new long[]{0x0000000000004000L}); - public static final BitSet FOLLOW_STRING_in_testcase388 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase407 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase410 = new BitSet(new long[]{0x0000004000000000L}); - public static final BitSet FOLLOW_38_in_testcase412 = new BitSet(new long[]{0x0000000000020000L}); - public static final BitSet FOLLOW_ML_STRING_in_testcase414 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase432 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase435 = new BitSet(new long[]{0x0000004000000000L}); - public static final BitSet FOLLOW_38_in_testcase437 = new BitSet(new long[]{0x0000000000040000L}); - public static final BitSet FOLLOW_TREE_in_testcase439 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase458 = new BitSet(new long[]{0x00000000000A4800L}); - public static final BitSet FOLLOW_input_in_testcase461 = new BitSet(new long[]{0x0000004000000000L}); - public 
static final BitSet FOLLOW_38_in_testcase463 = new BitSet(new long[]{0x0000000000008000L}); - public static final BitSet FOLLOW_ACTION_in_testcase465 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_set_in_input0 = new BitSet(new long[]{0x0000000000000002L}); - -} \ No newline at end of file diff --git a/gunit/src/org/antlr/v4/gunit/jUnitGen.g b/gunit/src/org/antlr/v4/gunit/jUnitGen.g deleted file mode 100644 index adb40d0a0..000000000 --- a/gunit/src/org/antlr/v4/gunit/jUnitGen.g +++ /dev/null @@ -1,53 +0,0 @@ -tree grammar jUnitGen; - -options { - output=template; - ASTLabelType=CommonTree; - tokenVocab = gUnit; -} - -@header { -package org.antlr.v4.gunit; -} - -gUnitDef - : ^('gunit' ID DOC_COMMENT? (optionsSpec|header)* suites+=testsuite+) - -> jUnitClass(className={$ID.text}, header={$header.st}, suites={$suites}) - ; - -optionsSpec - : ^(OPTIONS option+) - ; - -option - : ^('=' ID ID) - | ^('=' ID STRING) - ; - -header : ^('@header' ACTION) -> header(action={$ACTION.text}); - -testsuite - : ^(SUITE rule=ID ID DOC_COMMENT? cases+=testcase[$rule.text]+) - | ^(SUITE rule=ID DOC_COMMENT? cases+=testcase[$rule.text]+) - -> testSuite(name={$rule.text}, cases={$cases}) - ; - -testcase[String ruleName] - : ^(TEST_OK DOC_COMMENT? input) - | ^(TEST_FAIL DOC_COMMENT? input) - | ^(TEST_RETVAL DOC_COMMENT? input RETVAL) - | ^(TEST_STDOUT DOC_COMMENT? input STRING) - | ^(TEST_STDOUT DOC_COMMENT? input ML_STRING) - | ^(TEST_TREE DOC_COMMENT? input TREE) - -> parserRuleTestAST(ruleName={$ruleName}, - input={$input.st}, - expecting={Gen.normalizeTreeSpec($TREE.text)}, - scriptLine={$input.start.getLine()}) - | ^(TEST_ACTION DOC_COMMENT? input ACTION) - ; - -input - : STRING -> string(s={Gen.escapeForJava($STRING.text)}) - | ML_STRING -> string(s={Gen.escapeForJava($ML_STRING.text)}) - | FILENAME - ; \ No newline at end of file diff --git a/gunit/src/org/antlr/v4/gunit/jUnitGen.java b/gunit/src/org/antlr/v4/gunit/jUnitGen.java deleted file mode 100644 index 89f4539ad..000000000 --- a/gunit/src/org/antlr/v4/gunit/jUnitGen.java +++ /dev/null @@ -1,1180 +0,0 @@ -// $ANTLR 3.2.1-SNAPSHOT Jan 26, 2010 15:12:28 jUnitGen.g 2010-01-27 17:03:31 - -package org.antlr.v4.gunit; - - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.*;import java.util.Stack; -import java.util.List; -import java.util.ArrayList; - -import org.antlr.stringtemplate.*; -import org.antlr.stringtemplate.language.*; -import java.util.HashMap; -public class jUnitGen extends TreeParser { - public static final String[] tokenNames = new String[] { - "", "", "", "", "SUITE", "TEST_OK", "TEST_FAIL", "TEST_RETVAL", "TEST_STDOUT", "TEST_TREE", "TEST_ACTION", "DOC_COMMENT", "ID", "OPTIONS", "STRING", "ACTION", "RETVAL", "ML_STRING", "TREE", "FILENAME", "NESTED_RETVAL", "NESTED_AST", "STRING_", "WS", "ID_", "SL_COMMENT", "ML_COMMENT", "XDIGIT", "'gunit'", "';'", "'}'", "'='", "'@header'", "'walks'", "':'", "'OK'", "'FAIL'", "'returns'", "'->'" - }; - public static final int T__29=29; - public static final int T__28=28; - public static final int RETVAL=16; - public static final int TEST_TREE=9; - public static final int STRING_=22; - public static final int NESTED_AST=21; - public static final int ML_STRING=17; - public static final int TEST_FAIL=6; - public static final int ID=12; - public static final int EOF=-1; - public static final int NESTED_RETVAL=20; - public static final int TEST_RETVAL=7; - public static final int TEST_STDOUT=8; - public static final int ACTION=15; - public static final int 
TEST_OK=5; - public static final int ML_COMMENT=26; - public static final int T__30=30; - public static final int T__31=31; - public static final int T__32=32; - public static final int T__33=33; - public static final int WS=23; - public static final int T__34=34; - public static final int T__35=35; - public static final int T__36=36; - public static final int TREE=18; - public static final int T__37=37; - public static final int T__38=38; - public static final int FILENAME=19; - public static final int ID_=24; - public static final int XDIGIT=27; - public static final int SL_COMMENT=25; - public static final int DOC_COMMENT=11; - public static final int TEST_ACTION=10; - public static final int SUITE=4; - public static final int OPTIONS=13; - public static final int STRING=14; - - // delegates - // delegators - - - public jUnitGen(TreeNodeStream input) { - this(input, new RecognizerSharedState()); - } - public jUnitGen(TreeNodeStream input, RecognizerSharedState state) { - super(input, state); - - } - - protected StringTemplateGroup templateLib = - new StringTemplateGroup("jUnitGenTemplates", AngleBracketTemplateLexer.class); - - public void setTemplateLib(StringTemplateGroup templateLib) { - this.templateLib = templateLib; - } - public StringTemplateGroup getTemplateLib() { - return templateLib; - } - /** allows convenient multi-value initialization: - * "new STAttrMap().put(...).put(...)" - */ - public static class STAttrMap extends HashMap { - public STAttrMap put(String attrName, Object value) { - super.put(attrName, value); - return this; - } - public STAttrMap put(String attrName, int value) { - super.put(attrName, new Integer(value)); - return this; - } - } - - public String[] getTokenNames() { return jUnitGen.tokenNames; } - public String getGrammarFileName() { return "jUnitGen.g"; } - - - public static class gUnitDef_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "gUnitDef" - // jUnitGen.g:13:1: gUnitDef : ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec | header )* (suites+= testsuite )+ ) -> jUnitClass(className=$ID.textheader=$header.stsuites=$suites); - public final jUnitGen.gUnitDef_return gUnitDef() throws RecognitionException { - jUnitGen.gUnitDef_return retval = new jUnitGen.gUnitDef_return(); - retval.start = input.LT(1); - - CommonTree ID1=null; - List list_suites=null; - jUnitGen.header_return header2 = null; - - RuleReturnScope suites = null; - try { - // jUnitGen.g:14:2: ( ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec | header )* (suites+= testsuite )+ ) -> jUnitClass(className=$ID.textheader=$header.stsuites=$suites)) - // jUnitGen.g:14:4: ^( 'gunit' ID ( DOC_COMMENT )? ( optionsSpec | header )* (suites+= testsuite )+ ) - { - match(input,28,FOLLOW_28_in_gUnitDef45); - - match(input, Token.DOWN, null); - ID1=(CommonTree)match(input,ID,FOLLOW_ID_in_gUnitDef47); - // jUnitGen.g:14:17: ( DOC_COMMENT )? 
- int alt1=2; - int LA1_0 = input.LA(1); - - if ( (LA1_0==DOC_COMMENT) ) { - alt1=1; - } - switch (alt1) { - case 1 : - // jUnitGen.g:14:17: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_gUnitDef49); - - } - break; - - } - - // jUnitGen.g:14:30: ( optionsSpec | header )* - loop2: - do { - int alt2=3; - int LA2_0 = input.LA(1); - - if ( (LA2_0==OPTIONS) ) { - alt2=1; - } - else if ( (LA2_0==32) ) { - alt2=2; - } - - - switch (alt2) { - case 1 : - // jUnitGen.g:14:31: optionsSpec - { - pushFollow(FOLLOW_optionsSpec_in_gUnitDef53); - optionsSpec(); - - state._fsp--; - - - } - break; - case 2 : - // jUnitGen.g:14:43: header - { - pushFollow(FOLLOW_header_in_gUnitDef55); - header2=header(); - - state._fsp--; - - - } - break; - - default : - break loop2; - } - } while (true); - - // jUnitGen.g:14:58: (suites+= testsuite )+ - int cnt3=0; - loop3: - do { - int alt3=2; - int LA3_0 = input.LA(1); - - if ( (LA3_0==SUITE) ) { - alt3=1; - } - - - switch (alt3) { - case 1 : - // jUnitGen.g:14:58: suites+= testsuite - { - pushFollow(FOLLOW_testsuite_in_gUnitDef61); - suites=testsuite(); - - state._fsp--; - - if (list_suites==null) list_suites=new ArrayList(); - list_suites.add(suites.getTemplate()); - - - } - break; - - default : - if ( cnt3 >= 1 ) break loop3; - EarlyExitException eee = - new EarlyExitException(3, input); - throw eee; - } - cnt3++; - } while (true); - - - match(input, Token.UP, null); - - - // TEMPLATE REWRITE - // 15:3: -> jUnitClass(className=$ID.textheader=$header.stsuites=$suites) - { - retval.st = templateLib.getInstanceOf("jUnitClass", - new STAttrMap().put("className", (ID1!=null?ID1.getText():null)).put("header", (header2!=null?header2.st:null)).put("suites", list_suites)); - } - - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "gUnitDef" - - public static class optionsSpec_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "optionsSpec" - // jUnitGen.g:18:1: optionsSpec : ^( OPTIONS ( option )+ ) ; - public final jUnitGen.optionsSpec_return optionsSpec() throws RecognitionException { - jUnitGen.optionsSpec_return retval = new jUnitGen.optionsSpec_return(); - retval.start = input.LT(1); - - try { - // jUnitGen.g:19:2: ( ^( OPTIONS ( option )+ ) ) - // jUnitGen.g:19:4: ^( OPTIONS ( option )+ ) - { - match(input,OPTIONS,FOLLOW_OPTIONS_in_optionsSpec96); - - match(input, Token.DOWN, null); - // jUnitGen.g:19:14: ( option )+ - int cnt4=0; - loop4: - do { - int alt4=2; - int LA4_0 = input.LA(1); - - if ( (LA4_0==31) ) { - alt4=1; - } - - - switch (alt4) { - case 1 : - // jUnitGen.g:19:14: option - { - pushFollow(FOLLOW_option_in_optionsSpec98); - option(); - - state._fsp--; - - - } - break; - - default : - if ( cnt4 >= 1 ) break loop4; - EarlyExitException eee = - new EarlyExitException(4, input); - throw eee; - } - cnt4++; - } while (true); - - - match(input, Token.UP, null); - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "optionsSpec" - - public static class option_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "option" - // jUnitGen.g:22:1: option : ( ^( '=' ID ID ) | ^( 
'=' ID STRING ) ); - public final jUnitGen.option_return option() throws RecognitionException { - jUnitGen.option_return retval = new jUnitGen.option_return(); - retval.start = input.LT(1); - - try { - // jUnitGen.g:23:5: ( ^( '=' ID ID ) | ^( '=' ID STRING ) ) - int alt5=2; - int LA5_0 = input.LA(1); - - if ( (LA5_0==31) ) { - int LA5_1 = input.LA(2); - - if ( (LA5_1==DOWN) ) { - int LA5_2 = input.LA(3); - - if ( (LA5_2==ID) ) { - int LA5_3 = input.LA(4); - - if ( (LA5_3==ID) ) { - alt5=1; - } - else if ( (LA5_3==STRING) ) { - alt5=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 3, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 2, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 1, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 5, 0, input); - - throw nvae; - } - switch (alt5) { - case 1 : - // jUnitGen.g:23:9: ^( '=' ID ID ) - { - match(input,31,FOLLOW_31_in_option117); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_option119); - match(input,ID,FOLLOW_ID_in_option121); - - match(input, Token.UP, null); - - } - break; - case 2 : - // jUnitGen.g:24:9: ^( '=' ID STRING ) - { - match(input,31,FOLLOW_31_in_option133); - - match(input, Token.DOWN, null); - match(input,ID,FOLLOW_ID_in_option135); - match(input,STRING,FOLLOW_STRING_in_option137); - - match(input, Token.UP, null); - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "option" - - public static class header_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "header" - // jUnitGen.g:27:1: header : ^( '@header' ACTION ) -> header(action=$ACTION.text); - public final jUnitGen.header_return header() throws RecognitionException { - jUnitGen.header_return retval = new jUnitGen.header_return(); - retval.start = input.LT(1); - - CommonTree ACTION3=null; - - try { - // jUnitGen.g:27:8: ( ^( '@header' ACTION ) -> header(action=$ACTION.text)) - // jUnitGen.g:27:10: ^( '@header' ACTION ) - { - match(input,32,FOLLOW_32_in_header154); - - match(input, Token.DOWN, null); - ACTION3=(CommonTree)match(input,ACTION,FOLLOW_ACTION_in_header156); - - match(input, Token.UP, null); - - - // TEMPLATE REWRITE - // 27:30: -> header(action=$ACTION.text) - { - retval.st = templateLib.getInstanceOf("header", - new STAttrMap().put("action", (ACTION3!=null?ACTION3.getText():null))); - } - - - } - - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "header" - - public static class testsuite_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "testsuite" - // jUnitGen.g:29:1: testsuite : ( ^( SUITE rule= ID ID ( DOC_COMMENT )? (cases+= testcase[$rule.text] )+ ) | ^( SUITE rule= ID ( DOC_COMMENT )? 
(cases+= testcase[$rule.text] )+ ) -> testSuite(name=$rule.textcases=$cases)); - public final jUnitGen.testsuite_return testsuite() throws RecognitionException { - jUnitGen.testsuite_return retval = new jUnitGen.testsuite_return(); - retval.start = input.LT(1); - - CommonTree rule=null; - List list_cases=null; - RuleReturnScope cases = null; - try { - // jUnitGen.g:30:2: ( ^( SUITE rule= ID ID ( DOC_COMMENT )? (cases+= testcase[$rule.text] )+ ) | ^( SUITE rule= ID ( DOC_COMMENT )? (cases+= testcase[$rule.text] )+ ) -> testSuite(name=$rule.textcases=$cases)) - int alt10=2; - int LA10_0 = input.LA(1); - - if ( (LA10_0==SUITE) ) { - int LA10_1 = input.LA(2); - - if ( (LA10_1==DOWN) ) { - int LA10_2 = input.LA(3); - - if ( (LA10_2==ID) ) { - int LA10_3 = input.LA(4); - - if ( (LA10_3==ID) ) { - alt10=1; - } - else if ( ((LA10_3>=TEST_OK && LA10_3<=DOC_COMMENT)) ) { - alt10=2; - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 3, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 2, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 1, input); - - throw nvae; - } - } - else { - NoViableAltException nvae = - new NoViableAltException("", 10, 0, input); - - throw nvae; - } - switch (alt10) { - case 1 : - // jUnitGen.g:30:4: ^( SUITE rule= ID ID ( DOC_COMMENT )? (cases+= testcase[$rule.text] )+ ) - { - match(input,SUITE,FOLLOW_SUITE_in_testsuite176); - - match(input, Token.DOWN, null); - rule=(CommonTree)match(input,ID,FOLLOW_ID_in_testsuite180); - match(input,ID,FOLLOW_ID_in_testsuite182); - // jUnitGen.g:30:23: ( DOC_COMMENT )? - int alt6=2; - int LA6_0 = input.LA(1); - - if ( (LA6_0==DOC_COMMENT) ) { - alt6=1; - } - switch (alt6) { - case 1 : - // jUnitGen.g:30:23: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testsuite184); - - } - break; - - } - - // jUnitGen.g:30:41: (cases+= testcase[$rule.text] )+ - int cnt7=0; - loop7: - do { - int alt7=2; - int LA7_0 = input.LA(1); - - if ( ((LA7_0>=TEST_OK && LA7_0<=TEST_ACTION)) ) { - alt7=1; - } - - - switch (alt7) { - case 1 : - // jUnitGen.g:30:41: cases+= testcase[$rule.text] - { - pushFollow(FOLLOW_testcase_in_testsuite189); - cases=testcase((rule!=null?rule.getText():null)); - - state._fsp--; - - if (list_cases==null) list_cases=new ArrayList(); - list_cases.add(cases.getTemplate()); - - - } - break; - - default : - if ( cnt7 >= 1 ) break loop7; - EarlyExitException eee = - new EarlyExitException(7, input); - throw eee; - } - cnt7++; - } while (true); - - - match(input, Token.UP, null); - - } - break; - case 2 : - // jUnitGen.g:31:4: ^( SUITE rule= ID ( DOC_COMMENT )? (cases+= testcase[$rule.text] )+ ) - { - match(input,SUITE,FOLLOW_SUITE_in_testsuite198); - - match(input, Token.DOWN, null); - rule=(CommonTree)match(input,ID,FOLLOW_ID_in_testsuite202); - // jUnitGen.g:31:23: ( DOC_COMMENT )? 
- int alt8=2; - int LA8_0 = input.LA(1); - - if ( (LA8_0==DOC_COMMENT) ) { - alt8=1; - } - switch (alt8) { - case 1 : - // jUnitGen.g:31:23: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testsuite207); - - } - break; - - } - - // jUnitGen.g:31:41: (cases+= testcase[$rule.text] )+ - int cnt9=0; - loop9: - do { - int alt9=2; - int LA9_0 = input.LA(1); - - if ( ((LA9_0>=TEST_OK && LA9_0<=TEST_ACTION)) ) { - alt9=1; - } - - - switch (alt9) { - case 1 : - // jUnitGen.g:31:41: cases+= testcase[$rule.text] - { - pushFollow(FOLLOW_testcase_in_testsuite212); - cases=testcase((rule!=null?rule.getText():null)); - - state._fsp--; - - if (list_cases==null) list_cases=new ArrayList(); - list_cases.add(cases.getTemplate()); - - - } - break; - - default : - if ( cnt9 >= 1 ) break loop9; - EarlyExitException eee = - new EarlyExitException(9, input); - throw eee; - } - cnt9++; - } while (true); - - - match(input, Token.UP, null); - - - // TEMPLATE REWRITE - // 32:3: -> testSuite(name=$rule.textcases=$cases) - { - retval.st = templateLib.getInstanceOf("testSuite", - new STAttrMap().put("name", (rule!=null?rule.getText():null)).put("cases", list_cases)); - } - - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "testsuite" - - public static class testcase_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "testcase" - // jUnitGen.g:35:1: testcase[String ruleName] : ( ^( TEST_OK ( DOC_COMMENT )? input ) | ^( TEST_FAIL ( DOC_COMMENT )? input ) | ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ^( TEST_TREE ( DOC_COMMENT )? input TREE ) -> parserRuleTestAST(ruleName=$ruleNameinput=$input.stexpecting=Gen.normalizeTreeSpec($TREE.text)scriptLine=$input.start.getLine()) | ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) ); - public final jUnitGen.testcase_return testcase(String ruleName) throws RecognitionException { - jUnitGen.testcase_return retval = new jUnitGen.testcase_return(); - retval.start = input.LT(1); - - CommonTree TREE5=null; - jUnitGen.input_return input4 = null; - - - try { - // jUnitGen.g:36:2: ( ^( TEST_OK ( DOC_COMMENT )? input ) | ^( TEST_FAIL ( DOC_COMMENT )? input ) | ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) | ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) | ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) | ^( TEST_TREE ( DOC_COMMENT )? input TREE ) -> parserRuleTestAST(ruleName=$ruleNameinput=$input.stexpecting=Gen.normalizeTreeSpec($TREE.text)scriptLine=$input.start.getLine()) | ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) ) - int alt18=7; - alt18 = dfa18.predict(input); - switch (alt18) { - case 1 : - // jUnitGen.g:36:4: ^( TEST_OK ( DOC_COMMENT )? input ) - { - match(input,TEST_OK,FOLLOW_TEST_OK_in_testcase244); - - match(input, Token.DOWN, null); - // jUnitGen.g:36:14: ( DOC_COMMENT )? - int alt11=2; - int LA11_0 = input.LA(1); - - if ( (LA11_0==DOC_COMMENT) ) { - alt11=1; - } - switch (alt11) { - case 1 : - // jUnitGen.g:36:14: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase246); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase249); - input(); - - state._fsp--; - - - match(input, Token.UP, null); - - } - break; - case 2 : - // jUnitGen.g:37:4: ^( TEST_FAIL ( DOC_COMMENT )? 
input ) - { - match(input,TEST_FAIL,FOLLOW_TEST_FAIL_in_testcase256); - - match(input, Token.DOWN, null); - // jUnitGen.g:37:16: ( DOC_COMMENT )? - int alt12=2; - int LA12_0 = input.LA(1); - - if ( (LA12_0==DOC_COMMENT) ) { - alt12=1; - } - switch (alt12) { - case 1 : - // jUnitGen.g:37:16: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase258); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase261); - input(); - - state._fsp--; - - - match(input, Token.UP, null); - - } - break; - case 3 : - // jUnitGen.g:38:4: ^( TEST_RETVAL ( DOC_COMMENT )? input RETVAL ) - { - match(input,TEST_RETVAL,FOLLOW_TEST_RETVAL_in_testcase268); - - match(input, Token.DOWN, null); - // jUnitGen.g:38:18: ( DOC_COMMENT )? - int alt13=2; - int LA13_0 = input.LA(1); - - if ( (LA13_0==DOC_COMMENT) ) { - alt13=1; - } - switch (alt13) { - case 1 : - // jUnitGen.g:38:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase270); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase273); - input(); - - state._fsp--; - - match(input,RETVAL,FOLLOW_RETVAL_in_testcase275); - - match(input, Token.UP, null); - - } - break; - case 4 : - // jUnitGen.g:39:4: ^( TEST_STDOUT ( DOC_COMMENT )? input STRING ) - { - match(input,TEST_STDOUT,FOLLOW_TEST_STDOUT_in_testcase282); - - match(input, Token.DOWN, null); - // jUnitGen.g:39:18: ( DOC_COMMENT )? - int alt14=2; - int LA14_0 = input.LA(1); - - if ( (LA14_0==DOC_COMMENT) ) { - alt14=1; - } - switch (alt14) { - case 1 : - // jUnitGen.g:39:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase284); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase287); - input(); - - state._fsp--; - - match(input,STRING,FOLLOW_STRING_in_testcase289); - - match(input, Token.UP, null); - - } - break; - case 5 : - // jUnitGen.g:40:4: ^( TEST_STDOUT ( DOC_COMMENT )? input ML_STRING ) - { - match(input,TEST_STDOUT,FOLLOW_TEST_STDOUT_in_testcase296); - - match(input, Token.DOWN, null); - // jUnitGen.g:40:18: ( DOC_COMMENT )? - int alt15=2; - int LA15_0 = input.LA(1); - - if ( (LA15_0==DOC_COMMENT) ) { - alt15=1; - } - switch (alt15) { - case 1 : - // jUnitGen.g:40:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase298); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase301); - input(); - - state._fsp--; - - match(input,ML_STRING,FOLLOW_ML_STRING_in_testcase303); - - match(input, Token.UP, null); - - } - break; - case 6 : - // jUnitGen.g:41:4: ^( TEST_TREE ( DOC_COMMENT )? input TREE ) - { - match(input,TEST_TREE,FOLLOW_TEST_TREE_in_testcase310); - - match(input, Token.DOWN, null); - // jUnitGen.g:41:16: ( DOC_COMMENT )? 
- int alt16=2; - int LA16_0 = input.LA(1); - - if ( (LA16_0==DOC_COMMENT) ) { - alt16=1; - } - switch (alt16) { - case 1 : - // jUnitGen.g:41:16: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase312); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase315); - input4=input(); - - state._fsp--; - - TREE5=(CommonTree)match(input,TREE,FOLLOW_TREE_in_testcase317); - - match(input, Token.UP, null); - - - // TEMPLATE REWRITE - // 42:4: -> parserRuleTestAST(ruleName=$ruleNameinput=$input.stexpecting=Gen.normalizeTreeSpec($TREE.text)scriptLine=$input.start.getLine()) - { - retval.st = templateLib.getInstanceOf("parserRuleTestAST", - new STAttrMap().put("ruleName", ruleName).put("input", (input4!=null?input4.st:null)).put("expecting", Gen.normalizeTreeSpec((TREE5!=null?TREE5.getText():null))).put("scriptLine", (input4!=null?((CommonTree)input4.start):null).getLine())); - } - - - } - break; - case 7 : - // jUnitGen.g:46:4: ^( TEST_ACTION ( DOC_COMMENT )? input ACTION ) - { - match(input,TEST_ACTION,FOLLOW_TEST_ACTION_in_testcase387); - - match(input, Token.DOWN, null); - // jUnitGen.g:46:18: ( DOC_COMMENT )? - int alt17=2; - int LA17_0 = input.LA(1); - - if ( (LA17_0==DOC_COMMENT) ) { - alt17=1; - } - switch (alt17) { - case 1 : - // jUnitGen.g:46:18: DOC_COMMENT - { - match(input,DOC_COMMENT,FOLLOW_DOC_COMMENT_in_testcase389); - - } - break; - - } - - pushFollow(FOLLOW_input_in_testcase392); - input(); - - state._fsp--; - - match(input,ACTION,FOLLOW_ACTION_in_testcase394); - - match(input, Token.UP, null); - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "testcase" - - public static class input_return extends TreeRuleReturnScope { - public StringTemplate st; - public Object getTemplate() { return st; } - public String toString() { return st==null?null:st.toString(); } - }; - - // $ANTLR start "input" - // jUnitGen.g:49:1: input : ( STRING -> string(s=Gen.escapeForJava($STRING.text)) | ML_STRING -> string(s=Gen.escapeForJava($ML_STRING.text)) | FILENAME ); - public final jUnitGen.input_return input() throws RecognitionException { - jUnitGen.input_return retval = new jUnitGen.input_return(); - retval.start = input.LT(1); - - CommonTree STRING6=null; - CommonTree ML_STRING7=null; - - try { - // jUnitGen.g:50:2: ( STRING -> string(s=Gen.escapeForJava($STRING.text)) | ML_STRING -> string(s=Gen.escapeForJava($ML_STRING.text)) | FILENAME ) - int alt19=3; - switch ( input.LA(1) ) { - case STRING: - { - alt19=1; - } - break; - case ML_STRING: - { - alt19=2; - } - break; - case FILENAME: - { - alt19=3; - } - break; - default: - NoViableAltException nvae = - new NoViableAltException("", 19, 0, input); - - throw nvae; - } - - switch (alt19) { - case 1 : - // jUnitGen.g:50:4: STRING - { - STRING6=(CommonTree)match(input,STRING,FOLLOW_STRING_in_input406); - - - // TEMPLATE REWRITE - // 50:12: -> string(s=Gen.escapeForJava($STRING.text)) - { - retval.st = templateLib.getInstanceOf("string", - new STAttrMap().put("s", Gen.escapeForJava((STRING6!=null?STRING6.getText():null)))); - } - - - } - break; - case 2 : - // jUnitGen.g:51:4: ML_STRING - { - ML_STRING7=(CommonTree)match(input,ML_STRING,FOLLOW_ML_STRING_in_input421); - - - // TEMPLATE REWRITE - // 51:14: -> string(s=Gen.escapeForJava($ML_STRING.text)) - { - retval.st = templateLib.getInstanceOf("string", - new STAttrMap().put("s", Gen.escapeForJava((ML_STRING7!=null?ML_STRING7.getText():null)))); - } - - - } - break; - 
case 3 : - // jUnitGen.g:52:4: FILENAME - { - match(input,FILENAME,FOLLOW_FILENAME_in_input435); - - } - break; - - } - } - catch (RecognitionException re) { - reportError(re); - recover(input,re); - } - finally { - } - return retval; - } - // $ANTLR end "input" - - // Delegated rules - - - protected DFA18 dfa18 = new DFA18(this); - static final String DFA18_eotS = - "\16\uffff"; - static final String DFA18_eofS = - "\16\uffff"; - static final String DFA18_minS = - "\1\5\3\uffff\1\2\2\uffff\1\13\4\16\2\uffff"; - static final String DFA18_maxS = - "\1\12\3\uffff\1\2\2\uffff\2\23\3\21\2\uffff"; - static final String DFA18_acceptS = - "\1\uffff\1\1\1\2\1\3\1\uffff\1\6\1\7\5\uffff\1\4\1\5"; - static final String DFA18_specialS = - "\16\uffff}>"; - static final String[] DFA18_transitionS = { - "\1\1\1\2\1\3\1\4\1\5\1\6", - "", - "", - "", - "\1\7", - "", - "", - "\1\10\2\uffff\1\11\2\uffff\1\12\1\uffff\1\13", - "\1\11\2\uffff\1\12\1\uffff\1\13", - "\1\14\2\uffff\1\15", - "\1\14\2\uffff\1\15", - "\1\14\2\uffff\1\15", - "", - "" - }; - - static final short[] DFA18_eot = DFA.unpackEncodedString(DFA18_eotS); - static final short[] DFA18_eof = DFA.unpackEncodedString(DFA18_eofS); - static final char[] DFA18_min = DFA.unpackEncodedStringToUnsignedChars(DFA18_minS); - static final char[] DFA18_max = DFA.unpackEncodedStringToUnsignedChars(DFA18_maxS); - static final short[] DFA18_accept = DFA.unpackEncodedString(DFA18_acceptS); - static final short[] DFA18_special = DFA.unpackEncodedString(DFA18_specialS); - static final short[][] DFA18_transition; - - static { - int numStates = DFA18_transitionS.length; - DFA18_transition = new short[numStates][]; - for (int i=0; i parserRuleTestAST(ruleName=$ruleNameinput=$input.stexpecting=Gen.normalizeTreeSpec($TREE.text)scriptLine=$input.start.getLine()) | ^( TEST_ACTION ( DOC_COMMENT )? 
input ACTION ) );"; - } - } - - - public static final BitSet FOLLOW_28_in_gUnitDef45 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_gUnitDef47 = new BitSet(new long[]{0x0000000100002810L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_gUnitDef49 = new BitSet(new long[]{0x0000000100002810L}); - public static final BitSet FOLLOW_optionsSpec_in_gUnitDef53 = new BitSet(new long[]{0x0000000100002810L}); - public static final BitSet FOLLOW_header_in_gUnitDef55 = new BitSet(new long[]{0x0000000100002810L}); - public static final BitSet FOLLOW_testsuite_in_gUnitDef61 = new BitSet(new long[]{0x0000000100002818L}); - public static final BitSet FOLLOW_OPTIONS_in_optionsSpec96 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_option_in_optionsSpec98 = new BitSet(new long[]{0x0000000080000008L}); - public static final BitSet FOLLOW_31_in_option117 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_option119 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_option121 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_31_in_option133 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_option135 = new BitSet(new long[]{0x0000000000004000L}); - public static final BitSet FOLLOW_STRING_in_option137 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_32_in_header154 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ACTION_in_header156 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_SUITE_in_testsuite176 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_testsuite180 = new BitSet(new long[]{0x0000000000001000L}); - public static final BitSet FOLLOW_ID_in_testsuite182 = new BitSet(new long[]{0x0000000000000FE0L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testsuite184 = new BitSet(new long[]{0x0000000000000FE0L}); - public static final BitSet FOLLOW_testcase_in_testsuite189 = new BitSet(new long[]{0x0000000000000FE8L}); - public static final BitSet FOLLOW_SUITE_in_testsuite198 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_ID_in_testsuite202 = new BitSet(new long[]{0x0000000000000FE0L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testsuite207 = new BitSet(new long[]{0x0000000000000FE0L}); - public static final BitSet FOLLOW_testcase_in_testsuite212 = new BitSet(new long[]{0x0000000000000FE8L}); - public static final BitSet FOLLOW_TEST_OK_in_testcase244 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase246 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase249 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_TEST_FAIL_in_testcase256 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase258 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase261 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_TEST_RETVAL_in_testcase268 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase270 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase273 = new 
BitSet(new long[]{0x0000000000010000L}); - public static final BitSet FOLLOW_RETVAL_in_testcase275 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_TEST_STDOUT_in_testcase282 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase284 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase287 = new BitSet(new long[]{0x0000000000004000L}); - public static final BitSet FOLLOW_STRING_in_testcase289 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_TEST_STDOUT_in_testcase296 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase298 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase301 = new BitSet(new long[]{0x0000000000020000L}); - public static final BitSet FOLLOW_ML_STRING_in_testcase303 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_TEST_TREE_in_testcase310 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase312 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase315 = new BitSet(new long[]{0x0000000000040000L}); - public static final BitSet FOLLOW_TREE_in_testcase317 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_TEST_ACTION_in_testcase387 = new BitSet(new long[]{0x0000000000000004L}); - public static final BitSet FOLLOW_DOC_COMMENT_in_testcase389 = new BitSet(new long[]{0x00000000000A4000L}); - public static final BitSet FOLLOW_input_in_testcase392 = new BitSet(new long[]{0x0000000000008000L}); - public static final BitSet FOLLOW_ACTION_in_testcase394 = new BitSet(new long[]{0x0000000000000008L}); - public static final BitSet FOLLOW_STRING_in_input406 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_ML_STRING_in_input421 = new BitSet(new long[]{0x0000000000000002L}); - public static final BitSet FOLLOW_FILENAME_in_input435 = new BitSet(new long[]{0x0000000000000002L}); - -} \ No newline at end of file diff --git a/runtime/Java/src/org/antlr/v4/runtime/ANTLRParserListener.java b/runtime/Java/src/org/antlr/v4/runtime/ANTLRParserListener.java deleted file mode 100644 index efbd9cea9..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/ANTLRParserListener.java +++ /dev/null @@ -1,6 +0,0 @@ -package org.antlr.v4.runtime; - -/** */ -public interface ANTLRParserListener { - public void error(RecognitionException msg); -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/BaseRecognizer.java b/runtime/Java/src/org/antlr/v4/runtime/BaseRecognizer.java deleted file mode 100644 index dc936f2e8..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/BaseRecognizer.java +++ /dev/null @@ -1,901 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.IntStream; -import org.antlr.runtime.Token; -import org.antlr.runtime.TokenStream; -import org.antlr.v4.runtime.misc.LABitSet; - -import java.util.*; - -/** A generic recognizer that can handle recognizers generated from - * parser and tree grammars. This is all the parsing - * support code essentially; most of it is error recovery stuff and - * backtracking. - * - * TODO: rename since lexer not under. or reorg parser/treeparser; treeparser under parser? - */ -public abstract class BaseRecognizer { - public static final int EOF=-1; - - public static final int MEMO_RULE_FAILED = -2; - public static final int MEMO_RULE_UNKNOWN = -1; - - public static final String NEXT_TOKEN_RULE_NAME = "nextToken"; - - /** State of a lexer, parser, or tree parser are collected into a state - * object so the state can be shared. This sharing is needed to - * have one grammar import others and share same error variables - * and other state variables. It's a kind of explicit multiple - * inheritance via delegation of methods and shared state. - */ - public ParserSharedState state; - - public BaseRecognizer(IntStream input) { - this(input, new ParserSharedState()); - } - - public BaseRecognizer(IntStream input, ParserSharedState state) { - if ( state==null ) { - state = new ParserSharedState(); - } - this.state = state; - state.input = input; - } - - /** reset the parser's state; subclasses must rewinds the input stream */ - public void reset() { - // wack everything related to error recovery - if ( state==null ) { - return; // no shared state work to do - } - state.ctx.clear(); - state.errorRecovery = false; - state.lastErrorIndex = -1; - state.failed = false; - state.syntaxErrors = 0; - // wack everything related to backtracking and memoization - state.backtracking = 0; - for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) { // wipe cache - state.ruleMemo[i] = null; - } - } - - /** Match current input symbol against ttype. Attempt - * single token insertion or deletion error recovery. If - * that fails, throw MismatchedTokenException. - * - * To turn off single token insertion or deletion error - * recovery, override recoverFromMismatchedToken() and have it - * throw an exception. See TreeParser.recoverFromMismatchedToken(). - * This way any error in a rule will cause an exception and - * immediate exit from rule. Rule would recover by resynchronizing - * to the set of symbols that can follow rule ref. 
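// A minimal sketch, not part of the deleted sources: the comment above notes that
// single-token insertion/deletion recovery can be switched off by overriding
// recoverFromMismatchedToken() to throw instead of repairing. The class name is
// hypothetical; it stays abstract so getSourceName() remains with a concrete
// generated subclass. Types and signatures mirror the ones declared in this diff.
package org.antlr.v4.runtime; // assumption: placed beside the runtime classes in this diff

import org.antlr.runtime.IntStream;
import org.antlr.v4.runtime.misc.LABitSet;

public abstract class BailRecognizer extends BaseRecognizer {
    public BailRecognizer(IntStream input) { super(input); }

    @Override
    protected Object recoverFromMismatchedToken(int ttype, LABitSet follow)
        throws RecognitionException
    {
        // No repair attempt: any mismatch aborts the current rule immediately,
        // leaving resynchronization to the caller's follow set.
        throw new MismatchedTokenException(this, ttype);
    }
}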
- */ - public Object match(int ttype, LABitSet follow) - throws RecognitionException - { - System.out.println("match "+((TokenStream)state.input).LT(1)+" vs expected "+ttype); - Object matchedSymbol = getCurrentInputSymbol(); - if ( state.input.LA(1)==ttype ) { - state.input.consume(); - state.errorRecovery = false; - state.failed = false; - return matchedSymbol; - } -// if ( state.backtracking>0 ) { -// state.failed = true; -// return matchedSymbol; -// } - matchedSymbol = recoverFromMismatchedToken(ttype, follow); - System.out.println("rsync'd to "+matchedSymbol); - return matchedSymbol; - } - - // like matchSet but w/o consume; error checking routine. - public void sync(LABitSet expecting) { - if ( expecting.member(state.input.LA(1)) ) return; - System.out.println("failed sync to "+expecting); - LABitSet followSet = computeErrorRecoverySet(); - followSet.orInPlace(expecting); - NoViableAltException e = new NoViableAltException(this, followSet); - recoverFromMismatchedSet(e, followSet); - } - - /** Match the wildcard: in a symbol */ - public void matchAny() { - state.errorRecovery = false; - state.failed = false; - state.input.consume(); - } - - public boolean mismatchIsUnwantedToken(int ttype) { - return state.input.LA(2)==ttype; - } - - public boolean mismatchIsMissingToken(LABitSet follow) { - if ( follow==null ) { - // we have no information about the follow; we can only consume - // a single token and hope for the best - return false; - } - // compute what can follow this grammar element reference - if ( follow.member(Token.EOR_TOKEN_TYPE) ) { - LABitSet viableTokensFollowingThisRule = computeNextViableTokenSet(); - follow = follow.or(viableTokensFollowingThisRule); - if ( state.ctx.sp>=0 ) { // remove EOR if we're not the start symbol - follow.remove(Token.EOR_TOKEN_TYPE); - } - } - // if current token is consistent with what could come after set - // then we know we're missing a token; error recovery is free to - // "insert" the missing token - - //System.out.println("viable tokens="+follow.toString(getTokenNames())); - //System.out.println("LT(1)="+((TokenStream)state.input).LT(1)); - - // LABitSet cannot handle negative numbers like -1 (EOF) so I leave EOR - // in follow set to indicate that the fall of the start symbol is - // in the set (EOF can follow). - if ( follow.member(state.input.LA(1)) || follow.member(Token.EOR_TOKEN_TYPE) ) { - //System.out.println("LT(1)=="+((TokenStream)state.input).LT(1)+" is consistent with what follows; inserting..."); - return true; - } - return false; - } - - /** Report a recognition problem. - * - * This method sets errorRecovery to indicate the parser is recovering - * not parsing. Once in recovery mode, no errors are generated. - * To get out of recovery mode, the parser must successfully match - * a token (after a resync). So it will go: - * - * 1. error occurs - * 2. enter recovery mode, report error - * 3. consume until token found in resynch set - * 4. try to resume parsing - * 5. next match() will reset errorRecovery mode - */ - public void reportError(RecognitionException e) { - // if we've already reported an error and have not matched a token - // yet successfully, don't report any errors. - if ( state.errorRecovery ) { - //System.err.print("[SPURIOUS] "); - return; - } - state.syntaxErrors++; // don't count spurious - state.errorRecovery = true; - - notifyListeners(e); - } - - /** What error message should be generated for the various - * exception types? 
- * - * Not very object-oriented code, but I like having all error message - * generation within one method rather than spread among all of the - * exception classes. This also makes it much easier for the exception - * handling because the exception classes do not have to have pointers back - * to this object to access utility routines and so on. Also, changing - * the message for an exception type would be difficult because you - * would have to subclassing exception, but then somehow get ANTLR - * to make those kinds of exception objects instead of the default. - * This looks weird, but trust me--it makes the most sense in terms - * of flexibility. - * - * For grammar debugging, you will want to override this to add - * more information such as the stack frame with - * getRuleInvocationStack(e, this.getClass().getName()) and, - * for no viable alts, the decision description and state etc... - * - * Override this to change the message generated for one or more - * exception types. - */ - public String getErrorMessage(RecognitionException e) { - String[] tokenNames = getTokenNames(); - String msg = e.getMessage(); - if ( e instanceof UnwantedTokenException ) { - UnwantedTokenException ute = (UnwantedTokenException)e; - String tokenName=""; - if ( ute.expecting.member(Token.EOF) ) { - tokenName = "EOF"; - } - else { - tokenName = tokenNames[ute.expecting.getSingleElement()]; - } - msg = "extraneous input "+getTokenErrorDisplay(ute.getUnexpectedToken())+ - " expecting "+tokenName; - } - else if ( e instanceof MissingTokenException ) { - MissingTokenException mte = (MissingTokenException)e; - String tokenName=""; - if ( mte.expecting.member(Token.EOF) ) { - tokenName = "EOF"; - } - else { - tokenName = tokenNames[mte.expecting.getSingleElement()]; - } - msg = "missing "+tokenName+" at "+getTokenErrorDisplay(e.token); - } - else if ( e instanceof MismatchedTokenException ) { - MismatchedTokenException mte = (MismatchedTokenException)e; - String tokenName=""; - if ( mte.expecting.member(Token.EOF) ) { - tokenName = "EOF"; - } - else { - tokenName = tokenNames[mte.expecting.getSingleElement()]; - } - msg = "mismatched input "+getTokenErrorDisplay(e.token)+ - " expecting "+tokenName; - } - else if ( e instanceof MismatchedTreeNodeException ) { - MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e; - String tokenName=""; - if ( mtne.expecting.member(Token.EOF) ) { - tokenName = "EOF"; - } - else { - tokenName = tokenNames[mtne.expecting.getSingleElement()]; - } - msg = "mismatched tree node: "+mtne.node+ - " expecting "+tokenName; - } - else if ( e instanceof NoViableAltException ) { - //NoViableAltException nvae = (NoViableAltException)e; - // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>" - // and "(decision="+nvae.decisionNumber+") and - // "state "+nvae.stateNumber - msg = "no viable alternative at input "+getTokenErrorDisplay(e.token); - } - else if ( e instanceof EarlyExitException ) { - //EarlyExitException eee = (EarlyExitException)e; - // for development, can add "(decision="+eee.decisionNumber+")" - msg = "required (...)+ loop did not match anything at input "+ - getTokenErrorDisplay(e.token); - } - else if ( e instanceof MismatchedSetException ) { - MismatchedSetException mse = (MismatchedSetException)e; - msg = "mismatched input "+getTokenErrorDisplay(e.token)+ - " expecting set "+mse.expecting; - } - else if ( e instanceof MismatchedNotSetException ) { - MismatchedNotSetException mse = (MismatchedNotSetException)e; - msg = "mismatched input 
"+getTokenErrorDisplay(e.token)+ - " expecting set "+mse.expecting; - } - else if ( e instanceof FailedPredicateException ) { - FailedPredicateException fpe = (FailedPredicateException)e; - msg = "rule "+fpe.ruleName+" failed predicate: {"+ - fpe.predicateText+"}?"; - } - return msg; - } - - /** Get number of recognition errors (lexer, parser, tree parser). Each - * recognizer tracks its own number. So parser and lexer each have - * separate count. Does not count the spurious errors found between - * an error and next valid token match - * - * See also reportError() - */ - public int getNumberOfSyntaxErrors() { - return state.syntaxErrors; - } - - /** What is the error header, normally line/character position information? */ - public String getErrorHeader(RecognitionException e) { - return "line "+e.line+":"+e.charPositionInLine; - } - - /** How should a token be displayed in an error message? The default - * is to display just the text, but during development you might - * want to have a lot of information spit out. Override in that case - * to use t.toString() (which, for CommonToken, dumps everything about - * the token). This is better than forcing you to override a method in - * your token objects because you don't have to go modify your lexer - * so that it creates a new Java type. - */ - public String getTokenErrorDisplay(Token t) { - String s = t.getText(); - if ( s==null ) { - if ( t.getType()==Token.EOF ) { - s = ""; - } - else { - s = "<"+t.getType()+">"; - } - } - s = s.replaceAll("\n","\\\\n"); - s = s.replaceAll("\r","\\\\r"); - s = s.replaceAll("\t","\\\\t"); - return "'"+s+"'"; - } - - /** Recover from an error found on the input stream. This is - * for NoViableAlt and mismatched symbol exceptions. If you enable - * single token insertion and deletion, this will usually not - * handle mismatched symbol exceptions but there could be a mismatched - * token that the match() routine could not recover from. - */ - public void recover() { - if ( state.lastErrorIndex==state.input.index() ) { - // uh oh, another error at same token index; must be a case - // where LT(1) is in the recovery token set so nothing is - // consumed; consume a single token so at least to prevent - // an infinite loop; this is a failsafe. - state.input.consume(); - } - state.lastErrorIndex = state.input.index(); - LABitSet followSet = computeErrorRecoverySet(); - beginResync(); - consumeUntil(followSet); - endResync(); - } - - /** A hook to listen in on the token consumption during error recovery. - * The DebugParser subclasses this to fire events to the listenter. - */ - public void beginResync() { - } - - public void endResync() { - } - - /* Compute the error recovery set for the current rule. During - * rule invocation, the parser pushes the set of tokens that can - * follow that rule reference on the stack; this amounts to - * computing FIRST of what follows the rule reference in the - * enclosing rule. See LinearApproximator.FIRST(). - * This local follow set only includes tokens - * from within the rule; i.e., the FIRST computation done by - * ANTLR stops at the end of a rule. - * - * EXAMPLE - * - * When you find a "no viable alt exception", the input is not - * consistent with any of the alternatives for rule r. The best - * thing to do is to consume tokens until you see something that - * can legally follow a call to r *or* any rule that called r. 
- * You don't want the exact set of viable next tokens because the - * input might just be missing a token--you might consume the - * rest of the input looking for one of the missing tokens. - * - * Consider grammar: - * - * a : '[' b ']' - * | '(' b ')' - * ; - * b : c '^' INT ; - * c : ID - * | INT - * ; - * - * At each rule invocation, the set of tokens that could follow - * that rule is pushed on a stack. Here are the various - * context-sensitive follow sets: - * - * FOLLOW(b1_in_a) = FIRST(']') = ']' - * FOLLOW(b2_in_a) = FIRST(')') = ')' - * FOLLOW(c_in_b) = FIRST('^') = '^' - * - * Upon erroneous input "[]", the call chain is - * - * a -> b -> c - * - * and, hence, the follow context stack is: - * - * depth follow set start of rule execution - * 0 a (from main()) - * 1 ']' b - * 2 '^' c - * - * Notice that ')' is not included, because b would have to have - * been called from a different context in rule a for ')' to be - * included. - * - * For error recovery, we cannot consider FOLLOW(c) - * (context-sensitive or otherwise). We need the combined set of - * all context-sensitive FOLLOW sets--the set of all tokens that - * could follow any reference in the call chain. We need to - * resync to one of those tokens. Note that FOLLOW(c)='^' and if - * we resync'd to that token, we'd consume until EOF. We need to - * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. - * In this case, for input "[]", LA(1) is ']' and in the set, so we would - * not consume anything. After printing an error, rule c would - * return normally. Rule b would not find the required '^' though. - * At this point, it gets a mismatched token error and throws an - * exception (since LA(1) is not in the viable following token - * set). The rule exception handler tries to recover, but finds - * the same recovery set and doesn't consume anything. Rule b - * exits normally returning to rule a. Now it finds the ']' (and - * with the successful match exits errorRecovery mode). - * - * So, you can see that the parser walks up the call chain looking - * for the token that was a member of the recovery set. - * - * Errors are not generated in errorRecovery mode. - * - * ANTLR's error recovery mechanism is based upon original ideas: - * - * "Algorithms + Data Structures = Programs" by Niklaus Wirth - * - * and - * - * "A note on error recovery in recursive descent parsers": - * http://portal.acm.org/citation.cfm?id=947902.947905 - * - * Later, Josef Grosch had some good ideas: - * - * "Efficient and Comfortable Error Recovery in Recursive Descent - * Parsers": - * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip - * - * Like Grosch I implement context-sensitive FOLLOW sets that are combined - * at run-time upon error to avoid overhead during parsing. - */ - protected LABitSet computeErrorRecoverySet() { - int top = state.ctx.sp; - LABitSet followSet = new LABitSet(); - for (int i=top; i>=0; i--) { // i==0 is EOF context for start rule invocation - LABitSet f = (LABitSet)state.ctx.get(i).follow; - followSet.orInPlace(f); - } - return followSet; - } - - /** Compute the context-sensitive FOLLOW set for current rule. - * This is set of token types that can follow a specific rule - * reference given a specific call chain. You get the set of - * viable tokens that can possibly come next (lookahead depth 1) - * given the current call chain. 
Contrast this with the - * definition of plain FOLLOW for rule r: - * - * FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)} - * - * where x in T* and alpha, beta in V*; T is set of terminals and - * V is the set of terminals and nonterminals. In other words, - * FOLLOW(r) is the set of all tokens that can possibly follow - * references to r in *any* sentential form (context). At - * runtime, however, we know precisely which context applies as - * we have the call chain. We may compute the exact (rather - * than covering superset) set of following tokens. - * - * For example, consider grammar: - * - * stat : ID '=' expr ';' // FOLLOW(stat)=={EOF} - * | "return" expr '.' - * ; - * expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'} - * atom : INT // FOLLOW(atom)=={'+',')',';','.'} - * | '(' expr ')' - * ; - * - * The FOLLOW sets are all inclusive whereas context-sensitive - * FOLLOW sets are precisely what could follow a rule reference. - * For input input "i=(3);", here is the derivation: - * - * stat => ID '=' expr ';' - * => ID '=' atom ('+' atom)* ';' - * => ID '=' '(' expr ')' ('+' atom)* ';' - * => ID '=' '(' atom ')' ('+' atom)* ';' - * => ID '=' '(' INT ')' ('+' atom)* ';' - * => ID '=' '(' INT ')' ';' - * - * At the "3" token, you'd have a call chain of - * - * stat -> expr -> atom -> expr -> atom - * - * What can follow that specific nested ref to atom? Exactly ')' - * as you can see by looking at the derivation of this specific - * input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}. - * - * You want the exact viable token set when recovering from a - * token mismatch. Upon token mismatch, if LA(1) is member of - * the viable next token set, then you know there is most likely - * a missing token in the input stream. "Insert" one by just not - * throwing an exception. - */ - public LABitSet computeNextViableTokenSet() { - int top = state.ctx.sp; - LABitSet followSet = new LABitSet(); - for (int i=top; i>=0; i--) { // i==0 is EOF context for start rule invocation - LABitSet f = (LABitSet)state.ctx.get(i).follow; - followSet.orInPlace(f); - // can we see end of rule? if not, don't include follow of this rule - if ( !f.member(Token.EOR_TOKEN_TYPE) ) break; - // else combine with tokens that can follow this rule (rm EOR also) - // EOR indicates we have to include follow(start rule); i.e., EOF - followSet.remove(Token.EOR_TOKEN_TYPE); - } - return followSet; - } - - /** Attempt to recover from a single missing or extra token. - * - * EXTRA TOKEN - * - * LA(1) is not what we are looking for. If LA(2) has the right token, - * however, then assume LA(1) is some extra spurious token. Delete it - * and LA(2) as if we were doing a normal match(), which advances the - * input. - * - * MISSING TOKEN - * - * If current token is consistent with what could come after - * ttype then it is ok to "insert" the missing token, else throw - * exception For example, Input "i=(3;" is clearly missing the - * ')'. When the parser returns from the nested call to expr, it - * will have call chain: - * - * stat -> expr -> atom - * - * and it will be trying to match the ')' at this point in the - * derivation: - * - * => ID '=' '(' INT ')' ('+' atom)* ';' - * ^ - * match() will see that ';' doesn't match ')' and report a - * mismatched token error. To recover, it sees that LA(1)==';' - * is in the set of tokens that can follow the ')' token - * reference in rule atom. It can assume that you forgot the ')'. 
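// A minimal sketch, not part of the deleted sources: the recovery described above
// tries token deletion first (mismatchIsUnwantedToken: LA(2) already matches),
// then token insertion (mismatchIsMissingToken: LA(1) is in the follow set), and
// only then throws. This hypothetical subclass merely reports which repair the
// base class is about to apply before delegating to it.
package org.antlr.v4.runtime; // assumption: placed beside the runtime classes in this diff

import org.antlr.runtime.IntStream;
import org.antlr.v4.runtime.misc.LABitSet;

public abstract class TracingRecognizer extends BaseRecognizer {
    public TracingRecognizer(IntStream input) { super(input); }

    @Override
    protected Object recoverFromMismatchedToken(int ttype, LABitSet follow)
        throws RecognitionException
    {
        if ( mismatchIsUnwantedToken(ttype) ) {
            // next token already matches ttype, so the current one will be deleted
            System.err.println("recover: deleting extra token type "+state.input.LA(1));
        }
        else if ( mismatchIsMissingToken(follow) ) {
            // current token can follow ttype, so a missing token will be "inserted"
            System.err.println("recover: inserting missing token type "+ttype);
        }
        return super.recoverFromMismatchedToken(ttype, follow);
    }
}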
- */ - protected Object recoverFromMismatchedToken(int ttype, LABitSet follow) - throws RecognitionException - { - RecognitionException e = null; - // if next token is what we are looking for then "delete" this token - if ( mismatchIsUnwantedToken(ttype) ) { - e = new UnwantedTokenException(this, ttype); - /* - System.err.println("recoverFromMismatchedToken deleting "+ - ((TokenStream)state.input).LT(1)+ - " since "+((TokenStream)state.input).LT(2)+" is what we want"); - */ - beginResync(); - state.input.consume(); // simply delete extra token - endResync(); - reportError(e); // report after consuming so AW sees the token in the exception - // we want to return the token we're actually matching - Object matchedSymbol = getCurrentInputSymbol(); - state.input.consume(); // move past ttype token as if all were ok - return matchedSymbol; - } - // can't recover with single token deletion, try insertion - if ( mismatchIsMissingToken(follow) ) { - Object inserted = getMissingSymbol(e, ttype, follow); - e = new MissingTokenException(this, ttype, inserted); - reportError(e); // report after inserting so AW sees the token in the exception - return inserted; - } - // even that didn't work; must throw the exception - e = new MismatchedTokenException(this, ttype); - throw e; - } - - public Object recoverFromMismatchedSet(RecognitionException e, - LABitSet follow) - throws RecognitionException - { - if ( mismatchIsMissingToken(follow) ) { - // System.out.println("missing token"); - reportError(e); - // we don't know how to conjure up a token for sets yet - return getMissingSymbol(e, Token.INVALID_TOKEN_TYPE, follow); - } - // TODO do single token deletion like above for Token mismatch - throw e; - } - - /** Match needs to return the current input symbol, which gets put - * into the label for the associated token ref; e.g., x=ID. Token - * and tree parsers need to return different objects. Rather than test - * for input stream type or change the IntStream interface, I use - * a simple method to ask the recognizer to tell me what the current - * input symbol is. - */ - protected Object getCurrentInputSymbol() { return null; } - - /** Conjure up a missing token during error recovery. - * - * The recognizer attempts to recover from single missing - * symbols. But, actions might refer to that missing symbol. - * For example, x=ID {f($x);}. The action clearly assumes - * that there has been an identifier matched previously and that - * $x points at that token. If that token is missing, but - * the next token in the stream is what we want we assume that - * this token is missing and we keep going. Because we - * have to return some token to replace the missing token, - * we have to conjure one up. This method gives the user control - * over the tokens returned for missing tokens. Mostly, - * you will want to create something special for identifier - * tokens. For literals such as '{' and ',', the default - * action in the parser or tree parser works. It simply creates - * a CommonToken of the appropriate type. The text will be the token. - * If you change what tokens must be created by the lexer, - * override this method to create the appropriate tokens. 
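As that comment suggests, the override that usually matters is the one that conjures identifier-like tokens. A hypothetical, stand-alone sketch of such a factory; SimpleToken and the token-type constants are invented for the demo and are not the runtime's types:

// Illustrative only: shows the shape of a custom missing-token factory.
class MissingTokenFactorySketch {
    static final int ID = 4, RPAREN = 7;

    static class SimpleToken {
        final int type; final String text; final int line, charPositionInLine;
        SimpleToken(int type, String text, int line, int col) {
            this.type = type; this.text = text; this.line = line; this.charPositionInLine = col;
        }
        public String toString() { return "<" + type + ",'" + text + "' @" + line + ":" + charPositionInLine + ">"; }
    }

    /** Conjure a token to stand in for one the user forgot to type. */
    static SimpleToken getMissingSymbol(int expectedType, SimpleToken current) {
        String text = (expectedType == ID) ? "<missing ID>" : "<missing type " + expectedType + ">";
        // Place the conjured token where the error was noticed so actions such as $x.line still work.
        return new SimpleToken(expectedType, text, current.line, current.charPositionInLine);
    }

    public static void main(String[] args) {
        SimpleToken semi = new SimpleToken(9, ";", 1, 4);      // the parser is looking at ';'
        System.out.println(getMissingSymbol(RPAREN, semi));    // but wanted ')': conjure one
    }
}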
- */ - protected Object getMissingSymbol(RecognitionException e, - int expectedTokenType, - LABitSet follow) - { - return null; - } - - public void consumeUntil(int tokenType) { - //System.out.println("consumeUntil "+tokenType); - int ttype = state.input.LA(1); - while (ttype != Token.EOF && ttype != tokenType) { - state.input.consume(); - ttype = state.input.LA(1); - } - } - - /** Consume tokens until one matches the given token set */ - public void consumeUntil(LABitSet set) { - //System.out.println("consumeUntil("+set.toString(getTokenNames())+")"); - int ttype = state.input.LA(1); - while (ttype != Token.EOF && !set.member(ttype) ) { - //System.out.println("consume during recover LA(1)="+getTokenNames()[state.input.LA(1)]); - state.input.consume(); - ttype = state.input.LA(1); - } - } - - /** Return List of the rules in your parser instance - * leading up to a call to this method. You could override if - * you want more details such as the file/line info of where - * in the parser java code a rule is invoked. - * - * This is very useful for error messages and for context-sensitive - * error recovery. - */ - public List getRuleInvocationStack() { - String parserClassName = getClass().getName(); - return getRuleInvocationStack(new Throwable(), parserClassName); - } - - /** A more general version of getRuleInvocationStack where you can - * pass in, for example, a RecognitionException to get it's rule - * stack trace. This routine is shared with all recognizers, hence, - * static. - * - * TODO: move to a utility class or something; weird having lexer call this - */ - public static List getRuleInvocationStack(Throwable e, - String recognizerClassName) - { - List rules = new ArrayList(); - StackTraceElement[] stack = e.getStackTrace(); - int i = 0; - for (i=stack.length-1; i>=0; i--) { - StackTraceElement t = stack[i]; - if ( t.getClassName().startsWith("org.antlr.v4.runtime.") ) { - continue; // skip support code such as this method - } - if ( t.getMethodName().equals(NEXT_TOKEN_RULE_NAME) ) { - continue; - } - if ( !t.getClassName().equals(recognizerClassName) ) { - continue; // must not be part of this parser - } - rules.add(t.getMethodName()); - } - return rules; - } - - public int getBacktrackingLevel() { return state.backtracking; } - - public void setBacktrackingLevel(int n) { state.backtracking = n; } - - /** Return whether or not a backtracking attempt failed. */ - public boolean failed() { return state.failed; } - - /** Used to print out token names like ID during debugging and - * error reporting. The generated parsers implement a method - * that overrides this to point to their String[] tokenNames. - */ - public String[] getTokenNames() { - return null; - } - - /** For debugging and other purposes, might want the grammar name. - * Have ANTLR generate an implementation for this method. - */ - public String getGrammarFileName() { - return null; - } - - public abstract String getSourceName(); - - /** A convenience method for use most often with template rewrites. - * Convert a List to List - */ - public List toStrings(List tokens) { - if ( tokens==null ) return null; - List strings = new ArrayList(tokens.size()); - for (int i=0; i= state.ruleMemo.length ) { - System.err.println("!!!!!!!!! memo size is "+state.ruleMemo.length+", but rule index is "+ruleIndex); - } - if ( state.ruleMemo[ruleIndex]!=null ) { - state.ruleMemo[ruleIndex].put( - new Integer(ruleStartIndex), new Integer(stopTokenIndex) - ); - } - } - - /** return how many rule/input-index pairs there are in total. 
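getRuleInvocationStack above works purely by filtering a Java stack trace: every generated rule is a method, so the frames that belong to the recognizer class spell out the rule chain. A self-contained sketch of the same idea; the fake parser and its rule names are invented for the demo:

import java.util.ArrayList;
import java.util.List;

// Stand-alone illustration; the real method additionally skips runtime support frames.
class RuleStackSketch {
    /** Walk the trace bottom-up and keep only methods declared by recognizerClassName. */
    static List<String> getRuleInvocationStack(Throwable e, String recognizerClassName) {
        List<String> rules = new ArrayList<>();
        StackTraceElement[] stack = e.getStackTrace();
        for (int i = stack.length - 1; i >= 0; i--) {
            StackTraceElement t = stack[i];
            if (!t.getClassName().equals(recognizerClassName)) continue;   // not part of this parser
            rules.add(t.getMethodName());   // each rule is a method, so this is the rule name
        }
        return rules;
    }

    // A fake "generated parser": each rule is a method, so the call chain shows up in the trace.
    static List<String> stat() { return expr(); }
    static List<String> expr() { return atom(); }
    static List<String> atom() {
        return getRuleInvocationStack(new Throwable(), RuleStackSketch.class.getName());
    }

    public static void main(String[] args) {
        System.out.println(stat());   // e.g. [main, stat, expr, atom]
    }
}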
- * TODO: this includes synpreds. :( - */ - public int getRuleMemoizationCacheSize() { - int n = 0; - for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) { - Map ruleMap = state.ruleMemo[i]; - if ( ruleMap!=null ) { - n += ruleMap.size(); // how many input indexes are recorded? - } - } - return n; - } - - public void traceIn(String ruleName, int ruleIndex, Object inputSymbol) { - System.out.print("enter "+ruleName+" "+inputSymbol); - if ( state.backtracking>0 ) { - System.out.print(" backtracking="+state.backtracking); - } - System.out.println(); - } - - public void traceOut(String ruleName, - int ruleIndex, - Object inputSymbol) - { - System.out.print("exit "+ruleName+" "+inputSymbol); - if ( state.backtracking>0 ) { - System.out.print(" backtracking="+state.backtracking); - if ( state.failed ) System.out.print(" failed"); - else System.out.print(" succeeded"); - } - System.out.println(); - } - - /* In v3, programmers altered error messages by overriding - displayRecognitionError() and possibly getTokenErrorDisplay(). - They overrode emitErrorMessage(String) to change where the output goes. - - Now, in v4, we're going to use a listener mechanism. This makes it - easier for language applications to have parsers notify them - upon error without having to override the parsers. If you don't specify - a listener, ANTLR calls the v3 legacy displayRecognitionError() - method. All that does is format a message and call emitErrorMessage(). - Otherwise, your listener will receive RecognitionException - exceptions and you can do what ever you want with them including - reproducing the same behavior by calling the legacy methods. - (In v4, RecognitionException includes the recognizer object). - - Grammar tools can have a listeners without having to worry about - messing up the programmers' error handling. - */ - - public void displayRecognitionError(RecognitionException e) { - String hdr = getErrorHeader(e); - String msg = getErrorMessage(e); - emitErrorMessage(hdr+" "+msg); - } - - /** Override this method to change where error messages go */ - public void emitErrorMessage(String msg) { - System.err.println(msg); - } - - public void addListener(ANTLRParserListener pl) { - if ( state.listeners==null ) { - state.listeners = - Collections.synchronizedList(new ArrayList(2)); - } - if ( pl!=null ) state.listeners.add(pl); - } - public void removeListener(ANTLRParserListener pl) { state.listeners.remove(pl); } - public void removeListeners() { state.listeners.clear(); } - public List getListeners() { return state.listeners; } - - public void notifyListeners(RecognitionException re) { - if ( state.listeners==null || state.listeners.size()==0 ) { - // call legacy v3 func; this calls emitErrorMessage(String msg) - displayRecognitionError(re); - return; - } - for (ANTLRParserListener pl : state.listeners) pl.error(re); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/CharStreamState.java b/runtime/Java/src/org/antlr/v4/runtime/CharStreamState.java deleted file mode 100644 index 250970398..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/CharStreamState.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -/** When walking ahead with cyclic DFA or for syntactic predicates, - * we need to record the state of the input stream (char index, - * line, etc...) so that we can rewind the state after scanning ahead. - * - * This is the complete state of a stream. - */ -public class CharStreamState { - /** Index into the char stream of next lookahead char */ - public int p; - - /** What line number is the scanner at before processing buffer[p]? */ - public int line; - - /** What char position 0..n-1 in line is scanner before processing buffer[p]? */ - public int charPositionInLine; -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/CommonToken.java b/runtime/Java/src/org/antlr/v4/runtime/CommonToken.java deleted file mode 100644 index 1aabb98d9..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/CommonToken.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
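CharStreamState above exists so that scanning ahead (cyclic DFA prediction, syntactic predicates) can be undone. A minimal, hypothetical mark/rewind sketch over a plain String; the field names mirror the class above, the stream type itself is a stand-in:

// Illustrative only: capture the complete scanner position, scan ahead, then restore it.
class CharStreamStateSketch {
    /** Snapshot of the scanner position: char index, line, char position in line. */
    static class State { int p; int line; int charPositionInLine; }

    static class StringCharStream {
        final String data; int p; int line = 1; int charPositionInLine;
        StringCharStream(String data) { this.data = data; }

        int LA(int i) { int idx = p + i - 1; return idx < data.length() ? data.charAt(idx) : -1; }

        void consume() {
            if (p >= data.length()) return;
            if (data.charAt(p) == '\n') { line++; charPositionInLine = 0; }
            else charPositionInLine++;
            p++;
        }

        /** Record the complete state before speculative scanning ... */
        State mark() {
            State s = new State();
            s.p = p; s.line = line; s.charPositionInLine = charPositionInLine;
            return s;
        }

        /** ... and rewind to it afterwards. */
        void rewind(State s) { p = s.p; line = s.line; charPositionInLine = s.charPositionInLine; }
    }

    public static void main(String[] args) {
        StringCharStream in = new StringCharStream("ab\ncd");
        State saved = in.mark();
        while (in.LA(1) != -1) in.consume();   // scan ahead, e.g. to evaluate a predicate
        in.rewind(saved);                      // back to where we started
        System.out.println((char) in.LA(1) + " @ line " + in.line);   // a @ line 1
    }
}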
- */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.CharStream; -import org.antlr.runtime.Token; - -import java.io.Serializable; - -public class CommonToken implements Token, Serializable { - protected int type; - protected int line; - protected int charPositionInLine = -1; // set to invalid position - protected int channel=DEFAULT_CHANNEL; - protected transient CharStream input; - - /** We need to be able to change the text once in a while. If - * this is non-null, then getText should return this. Note that - * start/stop are not affected by changing this. - */ - protected String text; - - /** What token number is this from 0..n-1 tokens; < 0 implies invalid index */ - protected int index = -1; - - /** The char position into the input buffer where this token starts */ - protected int start; - - /** The char position into the input buffer where this token stops */ - protected int stop; - - public CommonToken(int type) { - this.type = type; - } - - public CommonToken(CharStream input, int type, int channel, int start, int stop) { - this.input = input; - this.type = type; - this.channel = channel; - this.start = start; - this.stop = stop; - this.line = input.getLine(); - this.charPositionInLine = input.getCharPositionInLine(); - } - - public CommonToken(int type, String text) { - this.type = type; - this.channel = DEFAULT_CHANNEL; - this.text = text; - } - - public CommonToken(Token oldToken) { - text = oldToken.getText(); - type = oldToken.getType(); - line = oldToken.getLine(); - index = oldToken.getTokenIndex(); - charPositionInLine = oldToken.getCharPositionInLine(); - channel = oldToken.getChannel(); - input = oldToken.getInputStream(); - if ( oldToken instanceof CommonToken ) { - start = ((CommonToken)oldToken).start; - stop = ((CommonToken)oldToken).stop; - } - } - - public int getType() { - return type; - } - - public void setLine(int line) { - this.line = line; - } - - public String getText() { - if ( text!=null ) { - return text; - } - if ( input==null ) { - return null; - } - if ( start0 ) { - channelStr=",channel="+channel; - } - String txt = getText(); - if ( txt!=null ) { - txt = txt.replaceAll("\n","\\\\n"); - txt = txt.replaceAll("\r","\\\\r"); - txt = txt.replaceAll("\t","\\\\t"); - } - else { - txt = ""; - } - return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/EarlyExitException.java b/runtime/Java/src/org/antlr/v4/runtime/EarlyExitException.java deleted file mode 100644 index 30cda98df..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/EarlyExitException.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. 
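The text-override behaviour in CommonToken above is easy to misread: setText never moves start/stop, it only shadows the characters that would otherwise be pulled out of the input buffer. A small stand-alone sketch of that rule with illustrative types:

// Simplified illustration; not the runtime token class.
class TokenTextSketch {
    static class Tok {
        final String input;      // the character stream the token points into
        final int start, stop;   // inclusive char indexes into that stream
        String text;             // optional override; start/stop are untouched by it

        Tok(String input, int start, int stop) { this.input = input; this.start = start; this.stop = stop; }

        String getText() {
            if (text != null) return text;                        // the override wins
            if (stop < input.length()) return input.substring(start, stop + 1);
            return "<EOF>";
        }
    }

    public static void main(String[] args) {
        Tok id = new Tok("x = 42;", 4, 5);   // points at "42"
        System.out.println(id.getText());    // 42
        id.text = "FORTY_TWO";               // imitate setText(); indexes stay 4..5
        System.out.println(id.getText());    // FORTY_TWO
    }
}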
- - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.LABitSet; - -/** The recognizer did not match anything for a (..)+ loop. */ -public class EarlyExitException extends RecognitionException { - /** Used for remote debugger deserialization */ - public EarlyExitException() {;} - - public EarlyExitException(BaseRecognizer recognizer, LABitSet firstSet) { - super(recognizer, firstSet); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/FailedPredicateException.java b/runtime/Java/src/org/antlr/v4/runtime/FailedPredicateException.java deleted file mode 100644 index 5e0b6d25f..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/FailedPredicateException.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -/** A semantic predicate failed during validation. Validation of predicates - * occurs when normally parsing the alternative just like matching a token. - * Disambiguating predicate evaluation occurs when we hoist a predicate into - * a prediction decision. 
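A validating predicate, as described above, is checked inline while the alternative is parsed; when it is false the rule gives up just as if a token had failed to match. A hypothetical sketch with a stand-in exception type (the grammar fragment and version flag are invented for the demo):

// Illustrative only: how a generated rule method might validate a semantic predicate.
class ValidatingPredicateSketch {
    static class FailedPredicate extends RuntimeException {
        FailedPredicate(String ruleName, String predicateText) {
            super("FailedPredicateException(" + ruleName + ",{" + predicateText + "}?)");
        }
    }

    // Pretend grammar: decl : {version >= 2}? 'enum' ID ;  -- the predicate guards the alternative.
    static int version = 1;

    static void decl() {
        if (!(version >= 2)) throw new FailedPredicate("decl", "version >= 2");
        // ... match 'enum' ID ...
    }

    public static void main(String[] args) {
        try { decl(); } catch (FailedPredicate e) { System.out.println(e.getMessage()); }
    }
}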
- */ -public class FailedPredicateException extends RecognitionException { - public String ruleName; - public String predicateText; - - /** Used for remote debugger deserialization */ - public FailedPredicateException() {;} - - public FailedPredicateException(BaseRecognizer recognizer, - String ruleName, - String predicateText) - { - super(recognizer); - this.ruleName = ruleName; - this.predicateText = predicateText; - } - - public String toString() { - return "FailedPredicateException("+ruleName+",{"+predicateText+"}?)"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/Lexer.java b/runtime/Java/src/org/antlr/v4/runtime/Lexer.java deleted file mode 100644 index 8c125d06a..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/Lexer.java +++ /dev/null @@ -1,359 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.CharStream; -import org.antlr.runtime.Token; -import org.antlr.runtime.TokenSource; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.QStack; -import org.antlr.v4.runtime.pda.Bytecode; -import org.antlr.v4.runtime.pda.PDA; - -import java.util.EmptyStackException; - -/** A lexer is recognizer that draws input symbols from a character stream. - * lexer grammars result in a subclass of this object. A Lexer object - * uses simplified match() and error recovery mechanisms in the interest - * of speed. 
- */ -public abstract class Lexer implements TokenSource { - public static final int DEFAULT_MODE = 0; - public static final int MORE = -2; - public static final int SKIP = -3; - - public static final int DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL; - public static final int HIDDEN = Token.HIDDEN_CHANNEL; - - public LexerSharedState state; - - public static PDA[] modeToPDA; - public static DFA[] modeToDFA; - - public Lexer(CharStream input) { - this(input, new LexerSharedState()); - } - - public Lexer(CharStream input, LexerSharedState state) { - if ( state==null ) { - state = new LexerSharedState(); - } - this.state = state; - state.input = input; - } - - public void reset() { - // wack Lexer state variables - if ( state.input!=null ) { - state.input.seek(0); // rewind the input - } - if ( state==null ) { - return; // no shared state work to do - } - state.token = null; - state.type = Token.INVALID_TOKEN_TYPE; - state.channel = Token.DEFAULT_CHANNEL; - state.tokenStartCharIndex = -1; - state.tokenStartCharPositionInLine = -1; - state.tokenStartLine = -1; - state.text = null; - } - - /** Return a token from this source; i.e., match a token on the char - * stream. - */ - public Token nextToken() { - outer: - while (true) { - state.token = null; - state.channel = Token.DEFAULT_CHANNEL; - state.tokenStartCharIndex = state.input.index(); - state.tokenStartCharPositionInLine = state.input.getCharPositionInLine(); - state.tokenStartLine = state.input.getLine(); - state.text = null; - do { - state.type = Token.INVALID_TOKEN_TYPE; - if ( state.input.LA(1)==CharStream.EOF ) { - Token eof = new org.antlr.runtime.CommonToken(state.input,Token.EOF, - Token.DEFAULT_CHANNEL, - state.input.index(),state.input.index()); - eof.setLine(getLine()); - eof.setCharPositionInLine(getCharPositionInLine()); - return eof; - } - System.err.println("predict mode "+state.mode+" at index "+state.input.index()); - int ttype = modeToDFA[state.mode].predict(state.input); - System.err.println("returns "+ttype); - if ( state.type == Token.INVALID_TOKEN_TYPE ) state.type = ttype; - if ( state.type==SKIP ) { - continue outer; - } - } while ( state.type==MORE ); - if ( state.token==null ) emit(); - return state.token; - } - } - - public Token nextToken_PDA() { - outer: - while (true) { - state.token = null; - state.channel = Token.DEFAULT_CHANNEL; - state.tokenStartCharIndex = state.input.index(); - state.tokenStartCharPositionInLine = ((CharStream)state.input).getCharPositionInLine(); - state.tokenStartLine = ((CharStream)state.input).getLine(); - state.text = null; - do { - state.type = Token.INVALID_TOKEN_TYPE; - if ( state.input.LA(1)==CharStream.EOF ) { - Token eof = new CommonToken((CharStream)state.input,Token.EOF, - Token.DEFAULT_CHANNEL, - state.input.index(),state.input.index()); - eof.setLine(getLine()); - eof.setCharPositionInLine(getCharPositionInLine()); - return eof; - } - int ttype = 0; - try { - ttype = modeToPDA[state.mode].execThompson(state.input); - } - catch (PDA.InvalidElement re) { - CharStream cs = (CharStream)state.input; - System.err.println("!!!!! 
no match for char "+ - Bytecode.quotedCharLiteral(state.input.LA(1))+ - " at "+state.input.index()+ - " line "+cs.getLine()+":"+cs.getCharPositionInLine()); - state.input.consume(); - continue; - } - if ( state.type == Token.INVALID_TOKEN_TYPE ) state.type = ttype; - if ( state.type==SKIP ) { - continue outer; - } - } while ( state.type==MORE ); - if ( state.token==null ) emit(); - return state.token; - } - } - - /** Instruct the lexer to skip creating a token for current lexer rule - * and look for another token. nextToken() knows to keep looking when - * a lexer rule finishes with token set to SKIP_TOKEN. Recall that - * if token==null at end of any token rule, it creates one for you - * and emits it. - */ - public void skip() { - state.type = SKIP; - } - - public void more() { - state.type = MORE; - } - - public void mode(int m) { - state.mode = m; - } - - public void pushMode(int m) { - if ( state.modeStack==null ) state.modeStack = new QStack(); - state.modeStack.push(state.mode); - mode(m); - } - - public int popMode() { - if ( state.modeStack==null ) throw new EmptyStackException(); - mode( state.modeStack.pop() ); - return state.mode; - } - - /** Set the char stream and reset the lexer */ - public void setCharStream(CharStream input) { - this.state.input = null; - reset(); - this.state.input = input; - } - - public CharStream getCharStream() { - return ((CharStream)state.input); - } - - public String getSourceName() { - return state.input.getSourceName(); - } - - /** Currently does not support multiple emits per nextToken invocation - * for efficiency reasons. Subclass and override this method and - * nextToken (to push tokens into a list and pull from that list rather - * than a single variable as this implementation does). - */ - public void emit(Token token) { - //System.err.println("emit "+token); - state.token = token; - } - - /** The standard method called to automatically emit a token at the - * outermost lexical rule. The token object should point into the - * char buffer start..stop. If there is a text override in 'text', - * use that to set the token's text. Override this method to emit - * custom Token objects. - * - * If you are building trees, then you should also override - * Parser or TreeParser.getMissingSymbol(). - */ - public Token emit() { - Token t = new CommonToken(((CharStream)state.input), state.type, - state.channel, state.tokenStartCharIndex, - getCharIndex()-1); - t.setLine(state.tokenStartLine); - t.setText(state.text); - t.setCharPositionInLine(state.tokenStartCharPositionInLine); - emit(t); - return t; - } - - public int getLine() { - return ((CharStream)state.input).getLine(); - } - - public int getCharPositionInLine() { - return ((CharStream)state.input).getCharPositionInLine(); - } - - /** What is the index of the current character of lookahead? */ - public int getCharIndex() { - return state.input.index(); - } - - /** Return the text matched so far for the current token or any - * text override. - */ - public String getText() { - if ( state.text!=null ) { - return state.text; - } - return ((CharStream)state.input).substring(state.tokenStartCharIndex,getCharIndex()-1); - } - - /** Set the complete text of this token; it wipes any previous - * changes to the text. - */ - public void setText(String text) { - state.text = text; - } - - public void reportError(RecognitionException e) { - /** TODO: not thought about recovery in lexer yet. 
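mode/pushMode/popMode above amount to a current-mode register plus an ordinary stack of integers, which is what makes island constructs (strings, comments, embedded languages) easy to lex. A self-contained sketch using java.util.ArrayDeque; the mode constants are invented for the demo:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.NoSuchElementException;

// Illustrative only: the mode-stack bookkeeping, without any actual scanning.
class LexerModeSketch {
    static final int DEFAULT_MODE = 0, STRING_MODE = 1, COMMENT_MODE = 2;

    int currentMode = DEFAULT_MODE;
    final Deque<Integer> modeStack = new ArrayDeque<>();

    void mode(int m) { currentMode = m; }

    void pushMode(int m) { modeStack.push(currentMode); mode(m); }   // remember where to come back to

    int popMode() {
        if (modeStack.isEmpty()) throw new NoSuchElementException("mode stack is empty");
        mode(modeStack.pop());
        return currentMode;
    }

    public static void main(String[] args) {
        LexerModeSketch lexer = new LexerModeSketch();
        lexer.pushMode(STRING_MODE);    // e.g. on seeing an opening quote
        lexer.pushMode(COMMENT_MODE);   // a nested island inside the string mode
        System.out.println(lexer.popMode());   // 1  (back in STRING_MODE)
        System.out.println(lexer.popMode());   // 0  (back in DEFAULT_MODE)
    }
}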
- * - // if we've already reported an error and have not matched a token - // yet successfully, don't report any errors. - if ( errorRecovery ) { - //System.err.print("[SPURIOUS] "); - return; - } - errorRecovery = true; - */ - - //displayRecognitionError(this.getTokenNames(), e); - } - - /** Used to print out token names like ID during debugging and - * error reporting. The generated parsers implement a method - * that overrides this to point to their String[] tokenNames. - */ - public String[] getTokenNames() { - return null; - } - - public String getErrorMessage(RecognitionException e) { - String msg = null; - if ( e instanceof MismatchedTokenException ) { - MismatchedTokenException mte = (MismatchedTokenException)e; - msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting "+ - getCharErrorDisplay(mte.expecting.getSingleElement()); - } - else if ( e instanceof NoViableAltException ) { - NoViableAltException nvae = (NoViableAltException)e; - // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>" - // and "(decision="+nvae.decisionNumber+") and - // "state "+nvae.stateNumber - msg = "no viable alternative at character "+getCharErrorDisplay(e.c); - } - else if ( e instanceof EarlyExitException ) { - EarlyExitException eee = (EarlyExitException)e; - // for development, can add "(decision="+eee.decisionNumber+")" - msg = "required (...)+ loop did not match anything at character "+getCharErrorDisplay(e.c); - } - else if ( e instanceof MismatchedNotSetException ) { - MismatchedNotSetException mse = (MismatchedNotSetException)e; - msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting; - } - else if ( e instanceof MismatchedSetException ) { - MismatchedSetException mse = (MismatchedSetException)e; - msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting; - } - else if ( e instanceof MismatchedRangeException ) { - MismatchedRangeException mre = (MismatchedRangeException)e; - msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+ - getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b); - } - else { - //msg = super.getErrorMessage(e, tokenNames); - } - return msg; - } - - public String getCharErrorDisplay(int c) { - String s = String.valueOf((char)c); - switch ( c ) { - case Token.EOF : - s = ""; - break; - case '\n' : - s = "\\n"; - break; - case '\t' : - s = "\\t"; - break; - case '\r' : - s = "\\r"; - break; - } - return "'"+s+"'"; - } - - /** Lexers can normally match any char in it's vocabulary after matching - * a token, so do the easy thing and just kill a character and hope - * it all works out. You can instead use the rule invocation stack - * to do sophisticated error recovery if you are in a fragment rule. - */ - public void recover(RecognitionException re) { - //System.out.println("consuming char "+(char)state.input.LA(1)+" during recovery"); - //re.printStackTrace(); - state.input.consume(); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/LexerSharedState.java b/runtime/Java/src/org/antlr/v4/runtime/LexerSharedState.java deleted file mode 100644 index ecb79f8c7..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/LexerSharedState.java +++ /dev/null @@ -1,58 +0,0 @@ -package org.antlr.v4.runtime; - -import org.antlr.runtime.CharStream; -import org.antlr.runtime.Token; -import org.antlr.v4.runtime.misc.QStack; - -public class LexerSharedState { - public CharStream input; - - /** The goal of all lexer rules/methods is to create a token object. 
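getCharErrorDisplay in the Lexer above is the one piece of its error reporting that needs care: EOF and whitespace have to be made visible before they can appear in a message. A tiny stand-alone version of the same escaping (the EOF label is a placeholder chosen for the demo):

// Simplified illustration of printable character display for error messages.
class CharErrorDisplaySketch {
    static final int EOF = -1;

    static String getCharErrorDisplay(int c) {
        String s;
        switch (c) {
            case EOF:  s = "<EOF>"; break;
            case '\n': s = "\\n";   break;
            case '\t': s = "\\t";   break;
            case '\r': s = "\\r";   break;
            default:   s = String.valueOf((char) c);
        }
        return "'" + s + "'";
    }

    public static void main(String[] args) {
        System.out.println("mismatched character " + getCharErrorDisplay('\n')
                           + " expecting " + getCharErrorDisplay(';'));   // '\n' vs ';'
    }
}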
- * This is an instance variable as multiple rules may collaborate to - * create a single token. nextToken will return this object after - * matching lexer rule(s). If you subclass to allow multiple token - * emissions, then set this to the last token to be matched or - * something nonnull so that the auto token emit mechanism will not - * emit another token. - */ - public Token token; - - /** What character index in the stream did the current token start at? - * Needed, for example, to get the text for current token. Set at - * the start of nextToken. - */ - public int tokenStartCharIndex = -1; - - /** The line on which the first character of the token resides */ - public int tokenStartLine; - - /** The character position of first character within the line */ - public int tokenStartCharPositionInLine; - - /** The channel number for the current token */ - public int channel; - - /** The token type for the current token */ - public int type; - - public QStack modeStack; - public int mode = Lexer.DEFAULT_MODE; - - /** You can set the text for the current token to override what is in - * the input char buffer. Use setText() or can set this instance var. - */ - public String text; - - public LexerSharedState() { - } - - public LexerSharedState(LexerSharedState state) { - this.token = state.token; - this.tokenStartCharIndex = state.tokenStartCharIndex; - this.tokenStartLine = state.tokenStartLine; - this.tokenStartCharPositionInLine = state.tokenStartCharPositionInLine; - this.channel = state.channel; - this.type = state.type; - this.text = state.text; - } -} \ No newline at end of file diff --git a/runtime/Java/src/org/antlr/v4/runtime/MismatchedNotSetException.java b/runtime/Java/src/org/antlr/v4/runtime/MismatchedNotSetException.java deleted file mode 100644 index 9600295e7..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/MismatchedNotSetException.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.LABitSet; - -public class MismatchedNotSetException extends MismatchedSetException { - /** Used for remote debugger deserialization */ - public MismatchedNotSetException() {;} - - public MismatchedNotSetException(BaseRecognizer recognizer, LABitSet expecting) { - super(recognizer, expecting); - } - - public String toString() { - return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/MismatchedRangeException.java b/runtime/Java/src/org/antlr/v4/runtime/MismatchedRangeException.java deleted file mode 100644 index 1c49fd35c..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/MismatchedRangeException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -public class MismatchedRangeException extends RecognitionException { - public int a,b; - - /** Used for remote debugger deserialization */ - public MismatchedRangeException() {;} - - public MismatchedRangeException(BaseRecognizer recognizer, int a, int b) { - super(recognizer); - this.a = a; - this.b = b; - } - - public String toString() { - return "MismatchedNotSetException("+getUnexpectedType()+" not in ["+a+","+b+"])"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/MismatchedSetException.java b/runtime/Java/src/org/antlr/v4/runtime/MismatchedSetException.java deleted file mode 100644 index 1f5662364..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/MismatchedSetException.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.LABitSet; - -public class MismatchedSetException extends RecognitionException { - /** Used for remote debugger deserialization */ - public MismatchedSetException() {;} - - public MismatchedSetException(BaseRecognizer recognizer, LABitSet firstSet) { - super(recognizer, firstSet); - } - - public String toString() { - return "MismatchedSetException("+getUnexpectedType()+"!="+expecting+")"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/MismatchedTokenException.java b/runtime/Java/src/org/antlr/v4/runtime/MismatchedTokenException.java deleted file mode 100644 index 67c40c193..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/MismatchedTokenException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.LABitSet; - -/** A mismatched char or Token or tree node */ -public class MismatchedTokenException extends RecognitionException { - /** Used for remote debugger deserialization */ - public MismatchedTokenException() {;} - - public MismatchedTokenException(BaseRecognizer recognizer, int firstSet) { - super(recognizer, LABitSet.of(firstSet)); - } - - public String toString() { - return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/MismatchedTreeNodeException.java b/runtime/Java/src/org/antlr/v4/runtime/MismatchedTreeNodeException.java deleted file mode 100644 index 40f5cb3e7..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/MismatchedTreeNodeException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.LABitSet; - -/** - */ -public class MismatchedTreeNodeException extends RecognitionException { - public MismatchedTreeNodeException() { - } - - public MismatchedTreeNodeException(BaseRecognizer recognizer, - int firstSet) - { - super(recognizer, LABitSet.of(firstSet)); - } - - public String toString() { - return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/MissingTokenException.java b/runtime/Java/src/org/antlr/v4/runtime/MissingTokenException.java deleted file mode 100644 index 96b255f59..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/MissingTokenException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -/** We were expecting a token but it's not found. The current token - * is actually what we wanted next. Used for tree node errors too. - */ -public class MissingTokenException extends MismatchedTokenException { - public Object inserted; - /** Used for remote debugger deserialization */ - public MissingTokenException() {;} - - public MissingTokenException(BaseRecognizer recognizer, int expecting, Object inserted) { - super(recognizer,expecting); - this.inserted = inserted; - } - - public int getMissingType() { - return expecting.getSingleElement(); - } - - public String toString() { - if ( inserted!=null && token!=null ) { - return "MissingTokenException(inserted "+inserted+" at "+token.getText()+")"; - } - if ( token!=null ) { - return "MissingTokenException(at "+token.getText()+")"; - } - return "MissingTokenException"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/NoViableAltException.java b/runtime/Java/src/org/antlr/v4/runtime/NoViableAltException.java deleted file mode 100644 index b6d2d81d3..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/NoViableAltException.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.CharStream; -import org.antlr.v4.runtime.misc.LABitSet; - -public class NoViableAltException extends RecognitionException { - /** Used for remote debugger deserialization */ - public NoViableAltException() {;} - - public NoViableAltException(BaseRecognizer recognizer, - LABitSet firstSet) - { - super(recognizer, firstSet); - } - - public String toString() { - if ( recognizer.state.input instanceof CharStream) { - return "NoViableAltException('"+(char)getUnexpectedType()+", expecting "+expecting+")"; - } - else { - return "NoViableAltException('"+getUnexpectedType()+", expecting "+expecting+")"; - } - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/Parser.java b/runtime/Java/src/org/antlr/v4/runtime/Parser.java deleted file mode 100644 index 2b24d599b..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/Parser.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.RecognitionException; -import org.antlr.runtime.Token; -import org.antlr.runtime.TokenStream; -import org.antlr.v4.runtime.misc.LABitSet; - -/** A parser for TokenStreams. "parser grammars" result in a subclass - * of this. 
- */ -public class Parser extends BaseRecognizer { - public Parser(TokenStream input) { - super(input); - } - - public Parser(TokenStream input, ParserSharedState state) { - super(input, state); // share the state object with another parser - } - - public void reset() { - super.reset(); // reset all recognizer state variables - if ( state.input!=null ) { - state.input.seek(0); // rewind the input - } - } - - protected Object getCurrentInputSymbol() { - return ((TokenStream)state.input).LT(1); - } - - protected Object getMissingSymbol(RecognitionException e, - int expectedTokenType, - LABitSet follow) - { - String tokenText = null; - if ( expectedTokenType== Token.EOF ) tokenText = ""; - else tokenText = ""; - CommonToken t = new CommonToken(expectedTokenType, tokenText); - Token current = ((TokenStream)state.input).LT(1); - if ( current.getType() == Token.EOF ) { - current = ((TokenStream)state.input).LT(-1); - } - t.line = current.getLine(); - t.charPositionInLine = current.getCharPositionInLine(); - t.channel = Token.DEFAULT_CHANNEL; - return t; - } - - /** Set the token stream and reset the parser */ - public void setTokenStream(TokenStream input) { - this.state.input = null; - reset(); - this.state.input = input; - } - - public TokenStream getTokenStream() { - return (TokenStream)state.input; - } - - public String getSourceName() { - return state.input.getSourceName(); - } - - public void traceIn(String ruleName, int ruleIndex) { - super.traceIn(ruleName, ruleIndex, ((TokenStream)state.input).LT(1)); - } - - public void traceOut(String ruleName, int ruleIndex) { - super.traceOut(ruleName, ruleIndex, ((TokenStream)state.input).LT(1)); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java b/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java deleted file mode 100644 index b1f4b5b18..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - [BSD] - Copyright (c) 2010 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.Token; -import org.antlr.v4.runtime.misc.LABitSet; - -/** Rules return values in an object containing all the values. - * Besides the properties defined in - * RuleLabelScope.predefinedRulePropertiesScope there may be user-defined - * return values. This class simply defines the minimum properties that - * are always defined and methods to access the others that might be - * available depending on output option such as template and tree. - * - * Note text is not an actual property of the return value, it is computed - * from start and stop using the input stream's toString() method. I - * could add a ctor to this so that we can pass in and store the input - * stream, but I'm not sure we want to do that. It would seem to be undefined - * to get the .text property anyway if the rule matches tokens from multiple - * input streams. - * - * I do not use getters for fields of objects that are used simply to - * group values such as this aggregate. The getters/setters are there to - * satisfy the superclass interface. - */ -public class ParserRuleContext extends RuleContext { - public Token start, stop; - public Object getStart() { return start; } - public Object getStop() { return stop; } - public ParserRuleContext() { super(); } - public ParserRuleContext(LABitSet follow) { super(follow); } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/ParserSharedState.java b/runtime/Java/src/org/antlr/v4/runtime/ParserSharedState.java deleted file mode 100644 index b6ab24c08..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/ParserSharedState.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.IntStream; -import org.antlr.v4.runtime.misc.QStack; - -import java.util.List; -import java.util.Map; - -/** The set of fields needed by an abstract recognizer to recognize input - * and recover from errors etc... As a separate state object, it can be - * shared among multiple grammars; e.g., when one grammar imports another. 
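The point made above, that a rule's text is derived from its start and stop tokens rather than stored, takes only a few lines to show. A hypothetical sketch in which tokens carry char indexes into the original source and the rule aggregate keeps nothing but the two boundary tokens:

// Illustrative only: deriving rule text from start/stop instead of storing it.
class RuleTextSketch {
    static class Tok {
        final String text; final int start, stop;   // char indexes into the source
        Tok(String text, int start, int stop) { this.text = text; this.start = start; this.stop = stop; }
    }

    /** Minimal rule-return aggregate: only start/stop are kept, text is computed on demand. */
    static class RuleReturn {
        Tok start, stop;
        String getText(String source) { return source.substring(start.start, stop.stop + 1); }
    }

    public static void main(String[] args) {
        String source = "x = (3 + 4);";
        RuleReturn expr = new RuleReturn();
        expr.start = new Tok("(", 4, 4);
        expr.stop  = new Tok(")", 10, 10);
        System.out.println(expr.getText(source));   // (3 + 4)
    }
}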
- */ -public class ParserSharedState { - public IntStream input; - - /** First on stack is fake a call to start rule from S' : S EOF ; - * Generated start rule does this. - */ - public QStack ctx; - - /** This is true when we see an error and before having successfully - * matched a token. Prevents generation of more than one error message - * per error. - */ - public boolean errorRecovery = false; - - /** The index into the input stream where the last error occurred. - * This is used to prevent infinite loops where an error is found - * but no token is consumed during recovery...another error is found, - * ad naseum. This is a failsafe mechanism to guarantee that at least - * one token/tree node is consumed for two errors. - */ - public int lastErrorIndex = -1; - - /** In lieu of a return value, this indicates that a rule or token - * has failed to match. Reset to false upon valid token match. - */ - public boolean failed = false; - - /** Did the recognizer encounter a syntax error? Track how many. */ - public int syntaxErrors = 0; - - /** If 0, no backtracking is going on. Safe to exec actions etc... - * If >0 then it's the level of backtracking. - */ - public int backtracking = 0; - - /** An array[size num rules] of Map that tracks - * the stop token index for each rule. ruleMemo[ruleIndex] is - * the memoization table for ruleIndex. For key ruleStartIndex, you - * get back the stop token for associated rule or MEMO_RULE_FAILED. - * - * This is only used if rule memoization is on (which it is by default). - */ - public Map[] ruleMemo; - - List listeners; - - public ParserSharedState() { - ctx = new QStack(); - } - -// public RecognizerSharedState(RecognizerSharedState state) { -// this.ctx = state.ctx; -// this.errorRecovery = state.errorRecovery; -// this.lastErrorIndex = state.lastErrorIndex; -// this.failed = state.failed; -// this.syntaxErrors = state.syntaxErrors; -// this.backtracking = state.backtracking; -// if ( state.ruleMemo!=null ) { -// this.ruleMemo = new Map[state.ruleMemo.length]; -// System.arraycopy(state.ruleMemo, 0, this.ruleMemo, 0, state.ruleMemo.length); -// } -// } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/RecognitionException.java b/runtime/Java/src/org/antlr/v4/runtime/RecognitionException.java deleted file mode 100644 index dca775404..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/RecognitionException.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
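The ruleMemo field removed above is documented as an array of per-rule maps from a rule's start token index to its stop token index (or a failure marker). A sketch of how such a table is typically consulted and updated; it mirrors the ANTLR 3 memoization scheme, and the sentinel values and method names here are assumptions rather than this file's API.

import java.util.HashMap;
import java.util.Map;

class MemoSketch {
    static final int MEMO_RULE_FAILED = -2;   // assumed sentinel: rule failed at this start index
    static final int MEMO_RULE_UNKNOWN = -1;  // assumed sentinel: no memo entry yet

    final Map<Integer, Integer>[] ruleMemo;   // one start->stop map per rule index

    @SuppressWarnings("unchecked")
    MemoSketch(int numRules) { ruleMemo = new Map[numRules]; }

    /** Return the memoized stop index for (ruleIndex, ruleStartIndex), or MEMO_RULE_UNKNOWN. */
    int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
        if (ruleMemo[ruleIndex] == null) ruleMemo[ruleIndex] = new HashMap<Integer, Integer>();
        Integer stop = ruleMemo[ruleIndex].get(ruleStartIndex);
        return stop == null ? MEMO_RULE_UNKNOWN : stop;
    }

    /** Record where the rule stopped, or that it failed, for this start index. */
    void memoize(int ruleIndex, int ruleStartIndex, int ruleStopIndex, boolean failed) {
        ruleMemo[ruleIndex].put(ruleStartIndex, failed ? MEMO_RULE_FAILED : ruleStopIndex);
    }
}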
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.CharStream; -import org.antlr.runtime.IntStream; -import org.antlr.runtime.Token; -import org.antlr.runtime.TokenStream; -import org.antlr.runtime.tree.CommonTree; -import org.antlr.runtime.tree.Tree; -import org.antlr.runtime.tree.TreeAdaptor; -import org.antlr.runtime.tree.TreeNodeStream; -import org.antlr.v4.runtime.misc.LABitSet; - -/** The root of the ANTLR exception hierarchy. - * - * To avoid English-only error messages and to generally make things - * as flexible as possible, these exceptions are not created with strings, - * but rather the information necessary to generate an error. Then - * the various reporting methods in Parser and Lexer can be overridden - * to generate a localized error message. For example, MismatchedToken - * exceptions are built with the expected token type. - * So, don't expect getMessage() to return anything. - * - * Note that as of Java 1.4, you can access the stack trace, which means - * that you can compute the complete trace of rules from the start symbol. - * This gives you considerable context information with which to generate - * useful error messages. - * - * ANTLR generates code that throws exceptions upon recognition error and - * also generates code to catch these exceptions in each rule. If you - * want to quit upon first error, you can turn off the automatic error - * handling mechanism using rulecatch action, but you still need to - * override methods mismatch and recoverFromMismatchSet. - * - * In general, the recognition exceptions can track where in a grammar a - * problem occurred and/or what was the expected input. While the parser - * knows its state (such as current input symbol and line info) that - * state can change before the exception is reported so current token index - * is computed and stored at exception time. From this info, you can - * perhaps print an entire line of input not just a single token, for example. - * Better to just say the recognizer had a problem and then let the parser - * figure out a fancy report. - */ -public class RecognitionException extends RuntimeException { - /** Who threw the exception? */ - public BaseRecognizer recognizer; - - public LABitSet expecting; - - /** What is index of token/char were we looking at when the error occurred? */ - public int index; - - /** The current Token when an error occurred. Since not all streams - * can retrieve the ith Token, we have to track the Token object. - * For parsers. Even when it's a tree parser, token might be set. - */ - public Token token; - - /** If this is a tree parser exception, node is set to the node with - * the problem. - */ - public Object node; - - /** The current char when an error occurred. For lexers. */ - public int c; - - /** Track the line at which the error occurred in case this is - * generated from a lexer. We need to track this since the - * unexpected char doesn't carry the line info. 
- */ - public int line; - - public int charPositionInLine; - - /** If you are parsing a tree node stream, you will encounter som - * imaginary nodes w/o line/col info. We now search backwards looking - * for most recent token with line/col info, but notify getErrorHeader() - * that info is approximate. - */ - public boolean approximateLineInfo; - - /** Used for remote debugger deserialization */ - public RecognitionException() { - } - - public RecognitionException(BaseRecognizer recognizer) { - this(recognizer, null); - } - - public RecognitionException(BaseRecognizer recognizer, LABitSet firstSet) { - this.recognizer = recognizer; - // firstSet is what can we're expecting within rule that calls this ctor. - // must combine with context-sensitive FOLLOW of that rule. - LABitSet viableTokensFollowingThisRule = recognizer.computeNextViableTokenSet(); - this.expecting = viableTokensFollowingThisRule.or(firstSet); - IntStream input = recognizer.state.input; - this.index = input.index(); - if ( input instanceof TokenStream ) { - this.token = ((TokenStream)input).LT(1); - this.line = token.getLine(); - this.charPositionInLine = token.getCharPositionInLine(); - } - if ( input instanceof TreeNodeStream ) { - extractInformationFromTreeNodeStream(input); - } - else if ( input instanceof CharStream) { - this.c = input.LA(1); - this.line = ((CharStream)input).getLine(); - this.charPositionInLine = ((CharStream)input).getCharPositionInLine(); - } - else { - this.c = input.LA(1); - } - } - - protected void extractInformationFromTreeNodeStream(IntStream input) { - TreeNodeStream nodes = (TreeNodeStream)input; - this.node = nodes.LT(1); - TreeAdaptor adaptor = nodes.getTreeAdaptor(); - Token payload = adaptor.getToken(node); - if ( payload!=null ) { - this.token = payload; - if ( payload.getLine()<= 0 ) { - // imaginary node; no line/pos info; scan backwards - int i = -1; - Object priorNode = nodes.LT(i); - while ( priorNode!=null ) { - Token priorPayload = adaptor.getToken(priorNode); - if ( priorPayload!=null && priorPayload.getLine()>0 ) { - // we found the most recent real line / pos info - this.line = priorPayload.getLine(); - this.charPositionInLine = priorPayload.getCharPositionInLine(); - this.approximateLineInfo = true; - break; - } - --i; - priorNode = nodes.LT(i); - } - } - else { // node created from real token - this.line = payload.getLine(); - this.charPositionInLine = payload.getCharPositionInLine(); - } - } - else if ( this.node instanceof Tree) { - this.line = ((Tree)this.node).getLine(); - this.charPositionInLine = ((Tree)this.node).getCharPositionInLine(); - if ( this.node instanceof CommonTree) { - this.token = ((CommonTree)this.node).token; - } - } - else { - int type = adaptor.getType(this.node); - String text = adaptor.getText(this.node); - this.token = new CommonToken(type, text); - } - } - - /** Return the token type or char of the unexpected input element */ - public int getUnexpectedType() { - if ( recognizer.state.input instanceof TokenStream) { - return token.getType(); - } - else if ( recognizer.state.input instanceof TreeNodeStream ) { - TreeNodeStream nodes = (TreeNodeStream)recognizer.state.input; - TreeAdaptor adaptor = nodes.getTreeAdaptor(); - return adaptor.getType(node); - } - else { - return c; - } - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java b/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java deleted file mode 100644 index f6d2b9d44..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java +++ /dev/null @@ -1,55 +0,0 
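The RecognitionException javadoc above stresses that exceptions carry data (token, line, charPositionInLine, expecting) rather than an English message, so reporting can be overridden and localized. A sketch of a message renderer built only from those public fields; the class and method names are ours, and the formatting is arbitrary.

import org.antlr.v4.runtime.RecognitionException;

class ErrorRenderSketch {
    /** Build a report from the exception's fields instead of relying on getMessage(). */
    static String describe(RecognitionException e) {
        String where = "line " + e.line + ":" + e.charPositionInLine
                     + (e.approximateLineInfo ? " (approximate)" : "");
        // token is set for token/tree streams; for lexers only the char code c is available
        String found = e.token != null ? "'" + e.token.getText() + "'" : "char code " + e.c;
        String expected = e.expecting != null ? ", expecting " + e.expecting : "";
        return where + ": unexpected " + found + expected;
    }
}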
@@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.v4.runtime.misc.LABitSet; - -/** Rules can return start/stop info as well as possible trees and templates. - * Each context must have a FOLLOW context. It's EOF if none is specified. - */ -public class RuleContext { - /** Track the set of token types that can follow any rule invocation. */ - public LABitSet follow; - - /** Return the start token or tree */ - public Object getStart() { return null; } - - /** Return the stop token or tree */ - public Object getStop() { return null; } - - /** Has a value potentially if output=AST; */ - public Object getTree() { return null; } - - /** Has a value potentially if output=template; Don't use StringTemplate - * type as it then causes a dependency with ST lib. - */ - public Object getTemplate() { return null; } - - public RuleContext() { this(LABitSet.EOF_SET); } - public RuleContext(LABitSet follow) { this.follow = follow; } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/UnwantedTokenException.java b/runtime/Java/src/org/antlr/v4/runtime/UnwantedTokenException.java deleted file mode 100644 index 276fe3182..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/UnwantedTokenException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. 
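RuleContext above requires every rule invocation to carry a FOLLOW set, defaulting to an EOF-only set when none is given. A tiny sketch showing both cases with the LABitSet factory removed later in this same change; the token type constant is hypothetical.

import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.misc.LABitSet;

class FollowSetSketch {
    public static void main(String[] args) {
        RuleContext outermost = new RuleContext();              // follow defaults to LABitSet.EOF_SET
        int SEMI = 5;                                           // hypothetical token type
        ParserRuleContext stat = new ParserRuleContext(LABitSet.of(SEMI));
        System.out.println(stat.follow);                        // {5}: what may follow this invocation
        System.out.println(outermost.follow);                   // the EOF-only default
    }
}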
- - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime; - -import org.antlr.runtime.Token; - -/** An extra token while parsing a TokenStream */ -public class UnwantedTokenException extends MismatchedTokenException { - /** Used for remote debugger deserialization */ - public UnwantedTokenException() {;} - - public UnwantedTokenException(BaseRecognizer recognizer, int expecting) { - super(recognizer, expecting); - } - - public Token getUnexpectedToken() { - return token; - } - - public String toString() { - String exp = ", expected "+expecting; - if ( token==null ) { - return "UnwantedTokenException(found="+null+exp+")"; - } - return "UnwantedTokenException(found="+token.getText()+exp+")"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java b/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java deleted file mode 100644 index f182dd828..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - [BSD] - Copyright (c) 2010 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime.dfa; - -import org.antlr.runtime.IntStream; -import org.antlr.runtime.Token; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.RecognitionException; - -/** A DFA implemented as a set of transition tables. 
- * - * Any state that has a semantic predicate edge is special; those states - * are generated with if-then-else structures in a specialStateTransition() - * which is generated by cyclicDFA template. - * - * There are at most 32767 states (16-bit signed short). - * Could get away with byte sometimes but would have to generate different - * types and the simulation code too. For a point of reference, the Java - * lexer's Tokens rule DFA has 326 states roughly. - */ -public class DFA { - public short[] eof; - public char[] max; - public short[] accept; - /** { target1, npairs1, range-pairs1, - target2, npairs2, range-pairs2, ... } - */ - public int[][] set_edges; - public int[][] pred_edges; // 'a'&&{p1}? - public short[][] transition; - public short[] action_index; - - public int decisionNumber; - - /** Which recognizer encloses this DFA? Needed to check backtracking */ - //public BaseRecognizer recognizer; - - public static final boolean debug = false; - - /** From the input stream, predict what alternative will succeed - * using this DFA (representing the covering regular approximation - * to the underlying CFL). Return an alternative number 1..n. Throw - * an exception upon error. - */ - public int predict(IntStream input) - throws RecognitionException - { - if ( debug ) { - System.err.println("Enter DFA.predict for decision "+decisionNumber); - } - //int mark = input.mark(); // remember where decision started in input - int prevAcceptMarker = -1; - int prevAcceptState = -1; - int s = 0; // we always start at s0 - try { - while ( true ) { - if ( debug ) System.err.println("DFA "+decisionNumber+" state "+s+" LA(1)="+(char)input.LA(1)+"("+input.LA(1)+ - "), index="+input.index()); - if ( accept[s] >= 1 ) { - // TODO: have to keep going and then backtrack if we fail!!!! - if ( debug ) System.err.println("accept; predict "+accept[s]+" from state "+s); - prevAcceptMarker = input.mark(); - prevAcceptState = s; - // keep going - } - // look for a normal char transition - char c = (char)input.LA(1); // -1 == \uFFFF, all types fit in 64k space - if ( c<=max[s] ) { - int snext = transition[s][c]; // move to next state - if ( snext < 0 ) { - // was in range but not valid transition - // TODO: check if eof[s]>=0, indicating that EOF goes to another - // state. - // TODO: refactor this common fail code - if ( prevAcceptMarker<0 ) noViableAlt(s,input); - input.rewind(prevAcceptMarker); - s = prevAcceptState; - if ( action_index[s]>=0 ) action(action_index[s]); - System.err.println("accept state "+s+" with ttype "+accept[s]+" at index "+input.index()); - return accept[s]; - } - s = snext; - input.consume(); - continue; - } - if ( set_edges[s]!=null ) { - // TODO: unicode - } - if ( pred_edges[s]!=null ) { - // TODO: gated or disambiguating sem - } - if ( c==(char)Token.EOF && eof[s]>=0 ) { // EOF Transition to accept state? - if ( debug ) System.err.println("accept via EOF; predict "+accept[eof[s]]+" from "+eof[s]); - // TODO: have to keep going and then backtrack if we fail?? 
- return accept[eof[s]]; - } - // not in range and not EOF/EOT, must be invalid symbol - if ( debug ) { - System.err.println("max["+s+"]="+max[s]); - System.err.println("eof["+s+"]="+eof[s]); - if ( transition[s]!=null ) { - System.err.print("transitions="); - for (int p=0; p=0 ) action(action_index[s]); - System.err.println("accept state "+s+" with ttype "+accept[s]+" at index "+input.index()); - return accept[s]; - } - } - finally { -// input.rewind(mark); - } - } - - // subclass needs to override these if there are sempreds or actions in lexer rules - - public boolean sempred(int predIndex) { - return true; - } - - public void action(int actionIndex) { - } - - public void noViableAlt(int s, IntStream input) throws NoViableAltException { - NoViableAltException nvae = new NoViableAltException(); -// new NoViableAltException(getDescription(), -// decisionNumber, -// s, -// input); - error(nvae); - throw nvae; - } - - /** A hook for debugging interface */ - public void error(NoViableAltException nvae) { ; } - - public int specialStateTransition(int s, IntStream input) - throws NoViableAltException - { - return -1; - } - - public String getDescription() { - return "n/a"; - } - - /** Given a String that has a run-length-encoding of some unsigned shorts - * like "\1\2\3\9", convert to short[] {2,9,9,9}. We do this to avoid - * static short[] which generates so much init code that the class won't - * compile. :( - */ - public static short[] unpackEncodedString(String encodedString) { - // walk first to find how big it is. - int size = 0; - for (int i=0; i> LOG_BITS) + 1]; - } - - /** Construction from a static array of longs */ - public LABitSet(long[] bits_) { - if ( bits_==null || bits_.length==0 ) bits = new long[1]; - else bits = bits_; - } - - /** Construction from a static array of longs */ - public LABitSet(long[] bits_, boolean EOF) { - this(bits_); - this.EOF = EOF; - } - - public static LABitSet of(int el) { - LABitSet s = new LABitSet(el + 1); - s.add(el); - return s; - } - - /** or this element into this set (grow as necessary to accommodate) */ - public void add(int el) { - //System.out.println("add("+el+")"); - if ( el==Token.EOF ) { EOF = true; return; } - int n = wordNumber(el); - //System.out.println("word number is "+n); - //System.out.println("bits.length "+bits.length); - if (n >= bits.length) { - growToInclude(el); - } - bits[n] |= bitMask(el); - } - - public boolean member(int el) { - if ( el == Token.EOF ) return EOF; - int n = wordNumber(el); - if (n >= bits.length) return false; - return (bits[n] & bitMask(el)) != 0; - } - - /** return this | a in a new set */ - public LABitSet or(LABitSet a) { - if ( a==null ) { - return this; - } - LABitSet s = (LABitSet)this.clone(); - s.orInPlace((LABitSet)a); - return s; - } - - public void orInPlace(LABitSet a) { - if ( a==null ) { - return; - } - // If this is smaller than a, grow this first - if (a.bits.length > bits.length) { - setSize(a.bits.length); - } - int min = Math.min(bits.length, a.bits.length); - for (int i = min - 1; i >= 0; i--) { - bits[i] |= a.bits[i]; - } - EOF = EOF | a.EOF; - } - - // remove this element from this set - public void remove(int el) { - if ( el==Token.EOF ) { EOF = false; return; } - int n = wordNumber(el); - if (n >= bits.length) { - throw new IllegalArgumentException(el+" is outside set range of "+bits.length+" words"); - } - bits[n] &= ~bitMask(el); - } - - public Object clone() { - LABitSet s; - try { - s = (LABitSet)super.clone(); - s.bits = new long[bits.length]; - System.arraycopy(bits, 0, 
s.bits, 0, bits.length); - s.EOF = EOF; - return s; - } - catch (CloneNotSupportedException e) { - e.printStackTrace(System.err); - } - return null; - } - - /** - * Sets the size of a set. - * @param nwords how many words the new set should be - */ - void setSize(int nwords) { - long newbits[] = new long[nwords]; - int n = Math.min(nwords, bits.length); - System.arraycopy(bits, 0, newbits, 0, n); - bits = newbits; - } - - /** Get the first element you find and return it. */ - public int getSingleElement() { - for (int i = 0; i < (bits.length << LOG_BITS); i++) { - if (member(i)) { - return i; - } - } - return Token.INVALID_TOKEN_TYPE; - } - - /** Transform a bit set into a string by formatting each element as an integer - * separator The string to put in between elements - * @return A commma-separated list of values - */ - public String toString() { - StringBuffer buf = new StringBuffer(); - String separator = ","; - boolean havePrintedAnElement = false; - buf.append('{'); - if ( EOF ) { buf.append("EOF"); havePrintedAnElement=true; } - - for (int i = 0; i < (bits.length << LOG_BITS); i++) { - if (member(i)) { - if ( havePrintedAnElement ) { - buf.append(separator); - } - buf.append(i); - havePrintedAnElement = true; - } - } - buf.append('}'); - return buf.toString(); - } - -// /**Create a string representation where instead of integer elements, the -// * ith element of vocabulary is displayed instead. Vocabulary is a Vector -// * of Strings. -// * separator The string to put in between elements -// * @return A commma-separated list of character constants. -// */ -// public String toString(String separator, List vocabulary) { -// String str = ""; -// for (int i = 0; i < (bits.length << LOG_BITS); i++) { -// if (member(i)) { -// if (str.length() > 0) { -// str += separator; -// } -// if (i >= vocabulary.size()) { -// str += "'" + (char)i + "'"; -// } -// else if (vocabulary.get(i) == null) { -// str += "'" + (char)i + "'"; -// } -// else { -// str += (String)vocabulary.get(i); -// } -// } -// } -// return str; -// } - - /** - * Grows the set to a larger number of bits. 
- * @param bit element that must fit in set - */ - public void growToInclude(int bit) { - int newSize = Math.max(bits.length << 1, numWordsToHold(bit)); - long newbits[] = new long[newSize]; - System.arraycopy(bits, 0, newbits, 0, bits.length); - bits = newbits; - } - - static long bitMask(int bitNumber) { - int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS - return 1L << bitPosition; - } - - static int numWordsToHold(int el) { - return (el >> LOG_BITS) + 1; - } - - static int wordNumber(int bit) { - return bit >> LOG_BITS; // bit / BITS - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/misc/QStack.java b/runtime/Java/src/org/antlr/v4/runtime/misc/QStack.java deleted file mode 100644 index 21ce9ec4f..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/misc/QStack.java +++ /dev/null @@ -1,46 +0,0 @@ -package org.antlr.v4.runtime.misc; - -import java.util.EmptyStackException; - -/** A quicker stack than Stack */ -public class QStack { - Object[] elements; - public int sp = -1; - - public QStack() { - elements = new Object[10]; - } - - public QStack(QStack s) { - elements = new Object[s.elements.length]; - System.arraycopy(s.elements, 0, elements, 0, s.elements.length); - this.sp = s.sp; - } - - public void push(T fset) { - if ( (sp+1)>=elements.length ) { - Object[] f = new Object[elements.length*2]; - System.arraycopy(elements, 0, f, 0, elements.length); - elements = f; - } - elements[++sp] = fset; - } - - public T peek() { - if ( sp<0 ) throw new EmptyStackException(); - return (T)elements[sp]; - } - - public T get(int i) { - if ( i<0 ) throw new IllegalArgumentException("i<0"); - if ( i>sp ) throw new IllegalArgumentException("i>"+sp); - return (T)elements[sp]; - } - - public T pop() { - if ( sp<0 ) throw new EmptyStackException(); - return (T)elements[sp--]; - } - - public void clear() { sp = -1; } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/pda/Bytecode.java b/runtime/Java/src/org/antlr/v4/runtime/pda/Bytecode.java deleted file mode 100644 index e4e74a5a4..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/pda/Bytecode.java +++ /dev/null @@ -1,222 +0,0 @@ -package org.antlr.v4.runtime.pda; - -import org.antlr.runtime.Token; - -import java.util.ArrayList; -import java.util.List; - -/** */ -public class Bytecode { - public static final int MAX_OPNDS = 3; // Or single opnd indicating variable number - public static final int ADDR_SIZE = 2; - public enum OperandType { - NONE(0), BYTE(1), CHAR(2), ADDR(ADDR_SIZE), SHORT(2), INT(4), VARARGS(0); - public int sizeInBytes; - OperandType(int sizeInBytes) { this.sizeInBytes = sizeInBytes; } - } - - public static class Instruction { - String name; // E.g., "load_str", "new" - OperandType[] type = new OperandType[MAX_OPNDS]; - int n = 0; - public Instruction(String name) { - this(name,OperandType.NONE,OperandType.NONE,OperandType.NONE); n=0; - } - public Instruction(String name, OperandType a) { - this(name,a,OperandType.NONE,OperandType.NONE); n=1; - } - public Instruction(String name, OperandType a, OperandType b) { - this(name,a,b,OperandType.NONE); n=2; - } - public Instruction(String name, OperandType a, OperandType b, OperandType c) { - this.name = name; - type[0] = a; - type[1] = b; - type[2] = c; - n = MAX_OPNDS; - } - } - - // don't use enum for efficiency; don't want code block to - // be an array of objects (Bytecode[]). We want it to be byte[]. 
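LABitSet above is a plain long[]-backed set that keeps EOF in a separate boolean rather than in the bit words. A short usage sketch exercising only the operations defined in the removed class; the expected output in the comments follows its toString().

import org.antlr.runtime.Token;
import org.antlr.v4.runtime.misc.LABitSet;

class LABitSetDemo {
    public static void main(String[] args) {
        LABitSet follow = LABitSet.of(4);        // {4}
        follow.add(70);                          // grows the backing word array as needed
        follow.add(Token.EOF);                   // stored in the EOF flag, not the bits
        System.out.println(follow);              // {EOF,4,70}
        System.out.println(follow.member(70));   // true
        LABitSet merged = follow.or(LABitSet.of(5));
        System.out.println(merged.member(5));    // true; or() clones, so follow is unchanged
    }
}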
- - // INSTRUCTION BYTECODES (byte is signed; use a short to keep 0..255) - public static final short ACCEPT = 1; - public static final short JMP = 2; - public static final short SPLIT = 3; - public static final short MATCH8 = 4; - public static final short MATCH16 = 5; - public static final short RANGE8 = 6; - public static final short RANGE16 = 7; - public static final short WILDCARD = 8; - public static final short SET = 9; - public static final short CALL = 10; // JMP with a push - public static final short RET = 11; // an accept instr for fragment rules - public static final short LABEL = 12; - public static final short SAVE = 13; - public static final short SEMPRED = 14; - public static final short ACTION = 15; - public static final short NOT = 16; // not next match instr - public static final short SWITCH = 17; - - /** Used for disassembly; describes instruction set */ - public static Instruction[] instructions = new Instruction[] { - null, // - new Instruction("accept", OperandType.SHORT), // index is the opcode - new Instruction("jmp", OperandType.ADDR), - new Instruction("split", OperandType.VARARGS), - new Instruction("match8", OperandType.BYTE), - new Instruction("match16", OperandType.CHAR), - new Instruction("range8", OperandType.BYTE, OperandType.BYTE), - new Instruction("range16", OperandType.CHAR, OperandType.CHAR), - new Instruction("wildcard"), - new Instruction("set", OperandType.SHORT), - new Instruction("call", OperandType.ADDR), - new Instruction("ret"), - new Instruction("label", OperandType.SHORT), - new Instruction("save", OperandType.SHORT), - new Instruction("sempred", OperandType.SHORT, OperandType.SHORT), // sempred ruleIndex, predIndex - new Instruction("action", OperandType.SHORT, OperandType.SHORT), // action ruleIndex, actionIndex - new Instruction("not"), - new Instruction("switch", OperandType.SHORT), - }; - - public static String disassemble(byte[] code, int start, boolean operandsAreChars) { - StringBuilder buf = new StringBuilder(); - int i=start; - while (i=code.length ) { - throw new IllegalArgumentException("ip out of range: "+ip); - } - Bytecode.Instruction I = - Bytecode.instructions[opcode]; - if ( I==null ) { - throw new IllegalArgumentException("no such instruction "+opcode+ - " at address "+ip); - } - String instrName = I.name; - buf.append( String.format("%04d:\t%-14s", ip, instrName) ); - ip++; - if ( I.n==0 ) { - buf.append(" "); - return ip; - } - List operands = new ArrayList(); - if ( I.n==1 && I.type[0]==OperandType.VARARGS) { // get n (opnd) operands - int n = getShort(code, ip); - ip += 2; - // operands.add(String.valueOf(n)); don't show n in varargs - for (int j=1; j<=n; j++) { - operands.add(String.valueOf(getShort(code, ip))); - ip += ADDR_SIZE; // VARARGS only works on address for now - } - } - else { - for (int i=0; i0 ) buf.append(", "); - buf.append( s ); - } - return ip; - } - - public static int getInt(byte[] memory, int index) { - int b1 = memory[index++]&0xFF; // high byte - int b2 = memory[index++]&0xFF; - int b3 = memory[index++]&0xFF; - int b4 = memory[index++]&0xFF; // low byte - return b1<<(8*3) | b2<<(8*2) | b3<<(8*1) | b4; - } - - public static int getShort(byte[] memory, int index) { - int b1 = memory[index++]&0xFF; // mask off sign-extended bits - int b2 = memory[index++]&0xFF; - return b1<<(8*1) | b2; - } - - public static String LiteralCharValueEscape[] = new String[255]; - - static { - LiteralCharValueEscape['\n'] = "\\n"; - LiteralCharValueEscape['\r'] = "\\r"; - LiteralCharValueEscape['\t'] = "\\t"; - 
LiteralCharValueEscape['\b'] = "\\b"; - LiteralCharValueEscape['\f'] = "\\f"; - LiteralCharValueEscape['\\'] = "\\\\"; - LiteralCharValueEscape['\''] = "\\'"; - } - - /** Return a string representing the escaped char for code c. E.g., If c - * has value 0x100, you will get "\u0100". ASCII gets the usual - * char (non-hex) representation. Control characters are spit out - * as unicode. - */ - public static String quotedCharLiteral(int c) { - if ( c== Token.EOF ) return "''"; - if ( c= 0 ) { - this.cachedHashCode = returnAddr; - } - if ( parent!=null ) { - this.cachedHashCode += parent.cachedHashCode; - } - } - - public int hashCode() { return cachedHashCode; } - - /** Two contexts are equals() if both have - * same call stack; walk upwards to the root. - * Recall that the root sentinel node has no parent. - * Note that you may be comparing contextsv in different alt trees. - */ - public boolean equals(Object o) { - NFAStack other = ((NFAStack)o); - if ( this.cachedHashCode != other.cachedHashCode ) { - return false; // can't be same if hash is different - } - if ( this==other ) return true; - - // System.out.println("comparing "+this+" with "+other); - NFAStack sp = this; - while ( sp.parent!=null && other.parent!=null ) { - if ( sp.returnAddr != other.returnAddr) return false; - sp = sp.parent; - other = other.parent; - } - if ( !(sp.parent==null && other.parent==null) ) { - return false; // both pointers must be at their roots after walk - } - return true; - } - - public String toString() { - StringBuffer buf = new StringBuffer(); - NFAStack sp = this; - buf.append("["); - while ( sp.parent!=null ) { - buf.append(sp.returnAddr); - buf.append(" "); - sp = sp.parent; - } - buf.append("$]"); - return buf.toString(); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/pda/PDA.java b/runtime/Java/src/org/antlr/v4/runtime/pda/PDA.java deleted file mode 100644 index a5ce058d8..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/pda/PDA.java +++ /dev/null @@ -1,654 +0,0 @@ -package org.antlr.v4.runtime.pda; - -import org.antlr.runtime.*; -import org.antlr.v4.runtime.CommonToken; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** A (nondeterministic) pushdown bytecode machine for lexing and LL prediction. - * Derived partially from Cox' description of Thompson's 1960s work: - * http://swtch.com/~rsc/regexp/regexp2.html - * - * Primary difference is that I've extended to have actions, semantic predicates - * and a stack for rule invocation. 
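The PDA interpreter described in the comment above runs split/match bytecodes with Thompson-style lockstep simulation: keep a list of live threads ("closure"), compute the states reachable on the next input character ("reach"), then swap the two lists and continue. A stripped-down sketch of just that loop over a hand-built transition function, with no bytecode, rule stack, labels, or longest-match bookkeeping; the (a|b)*c machine is hypothetical.

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class ThompsonSketch {
    // hypothetical machine for (a|b)*c: state 0 loops on a/b, and c moves to the accept state 1
    static final int START = 0, ACCEPT = 1;

    static List<Integer> move(int state, char c) {
        if (state == START && (c == 'a' || c == 'b')) return Collections.singletonList(START);
        if (state == START && c == 'c') return Collections.singletonList(ACCEPT);
        return Collections.emptyList();
    }

    static boolean matches(String input) {
        Set<Integer> closure = new HashSet<Integer>(Collections.singleton(START));
        for (char c : input.toCharArray()) {
            Set<Integer> reach = new HashSet<Integer>();
            for (int s : closure) reach.addAll(move(s, c));  // advance every live thread one step
            closure = reach;                                 // swap lists, as execThompson does
            if (closure.isEmpty()) return false;             // no viable thread left
        }
        return closure.contains(ACCEPT);
    }

    public static void main(String[] args) {
        System.out.println(matches("abbac"));  // true
        System.out.println(matches("abca"));   // false: nothing may follow the accepting 'c'
    }
}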
- */ -public class PDA { - public static class InvalidElement extends RuntimeException {} - public static final InvalidElement INVALID_ELEMENT = new InvalidElement(); - - public interface action_fptr { void exec(int action); } - public interface sempred_fptr { boolean eval(int predIndex); } - - public byte[] code; - //public Map ruleToAddr; - public int[] altToAddr; // either token type (in lexer) or alt num for DFA in parser - public CommonToken[] labelValues; - public int nLabels; - - public int[][] charToAddr; - - /** If we hit an action, we'll have to rewind and do the winning rule again */ - boolean bypassedAction; - - boolean notNextMatch; - - List s0_closure; - List[] closure_cache; - - public PDA(byte[] code, int[] altToAddr, int nLabels) { - //System.out.println("code="+Arrays.toString(code)); - this.code = code; - this.altToAddr = altToAddr; - this.nLabels = nLabels; - labelValues = new CommonToken[nLabels]; - closure_cache = new ArrayList[255+1]; - } - - public int execThompson(IntStream input) { - int m = input.mark(); - Arrays.fill(labelValues, null); - int ttype = execThompson(input, 0, false); -// System.out.println("first attempt ttype="+ttype); - if ( bypassedAction ) { - input.rewind(m); - //System.out.println("Bypassed action; rewinding to "+input.index()+" doing with feeling"); - bypassedAction = false; - Arrays.fill(labelValues, null); - int ttype2 = execThompson(input, altToAddr[ttype], true); - if ( ttype!=ttype2 ) { - System.err.println("eh? token diff with action(s)"); - } - //else System.out.println("types are same"); - } - else input.release(m); - return ttype; - } - - public int execThompson(IntStream input, int ip, boolean doActions) { - int c = input.LA(1); - if ( c==Token.EOF ) return Token.EOF; - -// List closure = null; -// int[] x = charToAddr[c]; -// //System.out.println("list for "+Bytecode.quotedCharLiteral(c)+" is "+Arrays.toString(x)); -// if ( closure_cache[c] != null ) { -// closure = new ArrayList(); -// closure.addAll(closure_cache[c]); -// } -// else { -// if ( x!=null ) { -// closure = new ArrayList(); -// int i = 1; -// for (int v : x) { -// //ThreadState t = new ThreadState(v, i, NFAStack.EMPTY); -// addToClosure(closure, v, i, NFAStack.EMPTY); -// //closure.add(t); -// i++; -// } -// closure_cache[c] = new ArrayList(); -// closure_cache[c].addAll(closure); -// //System.out.println("caching "+closure); -// } -// else { -// System.err.println("invalid char: "+Bytecode.quotedCharLiteral(c)); -// } -// } - - List closure = null; - if ( s0_closure == null ) { - s0_closure = computeStartState(ip); - } - closure = new ArrayList(); - closure.addAll(s0_closure); - - List reach = new ArrayList(); - ThreadState prevAccept = new ThreadState(Integer.MAX_VALUE, -1, NFAStack.EMPTY); - ThreadState firstAccept = null; - - int firstCharIndex = input.index(); // use when creating Token - - do { // while more work - c = input.LA(1); - int i = 0; - boolean accepted = false; -// System.out.println("input["+input.index()+"]=="+Bytecode.quotedCharLiteral(c)+ -// " closure="+closure); -processOneChar: - while ( i= code[ip] && c <= code[ip + 1]; - if ( (!notNextMatch && matched) || (notNextMatch && matched && c != Token.EOF) ) { - addToClosure(reach, ip+2, alt, context); - } - notNextMatch = false; - break; - case Bytecode.RANGE16 : - matched = c >= getShort(code, ip) && c <= getShort(code, ip + 2); - if ( (!notNextMatch && matched) || (notNextMatch && matched && c != Token.EOF) ) { - addToClosure(reach, ip+4, alt, context); - } - notNextMatch = false; - break; - case 
Bytecode.WILDCARD : - if ( c!=Token.EOF ) { - addToClosure(reach, ip, alt, context); - } - break; - case Bytecode.LABEL : // lexers only - int labelIndex = getShort(code, ip); - labelValues[labelIndex] = - new CommonToken(((CharStream)input), 0, 0, input.index(), -1); - break; - case Bytecode.SAVE : - labelIndex = getShort(code, ip); - labelValues[labelIndex].setStopIndex(input.index()-1); - break; - case Bytecode.ACTION : - bypassedAction = true; - if ( doActions ) { - int ruleIndex = getShort(code, ip); - int actionIndex = getShort(code, ip+2); - action(ruleIndex, actionIndex); - } - break; - case Bytecode.ACCEPT : - if ( context != NFAStack.EMPTY ) break; // only do accept for outermost rule - accepted = true; - int tokenLastCharIndex = input.index() - 1; - int ttype = getShort(code, ip); - ANTLRStringStream is = (ANTLRStringStream)input; -// System.out.println("ACCEPT "+is.substring(firstCharIndex,tokenLastCharIndex)+" as type "+ttype); - if ( tokenLastCharIndex > prevAccept.inputIndex ) { - prevAccept.inputIndex = tokenLastCharIndex; - // choose longest match so far regardless of rule priority -// System.out.println("replacing old best match @ "+prevAccept.addr); - prevAccept.addr = ip-1; - prevAccept.inputMarker = input.mark(); - if ( firstAccept==null ) firstAccept = prevAccept; - } - else if ( tokenLastCharIndex == prevAccept.inputIndex ) { - // choose first rule matched if match is of same length - if ( ip-1 < prevAccept.addr ) { // it will see both accepts for ambig rules -// System.out.println("replacing old best match @ "+prevAccept.addr); - prevAccept.addr = ip-1; - prevAccept.inputMarker = input.mark(); - } - } - // if we reach accept state, toss out any addresses in rest - // of work list associated with accept's rule; that rule is done - int j=i+1; - while ( j0 ) { // if we reached other states, consume and process them - input.consume(); - } - else if ( !accepted && c!=Token.EOF ) { - throw INVALID_ELEMENT; - } - // else reach.size==0 && matched, don't consume: accepted - - // swap to avoid reallocating space - List tmp = reach; - reach = closure; - closure = tmp; - reach.clear(); - } while ( closure.size()>0 ); - - if ( prevAccept.addr >= code.length ) return Token.INVALID_TOKEN_TYPE; - int ttype = getShort(code, prevAccept.addr+1); - input.rewind(prevAccept.inputMarker); // does nothing if we accept'd at input.index() but might need to rewind - if ( firstAccept.inputMarker < prevAccept.inputMarker ) { - System.out.println("done at index "+input.index()); - System.out.println("accept marker="+prevAccept.inputMarker); - input.release(firstAccept.inputMarker); // kill any other markers in stream we made - System.out.println("leaving with index "+input.index()); - } - return ttype; - } - - void addToClosure(List closure, int ip, int alt, NFAStack context) { - ThreadState t = new ThreadState(ip, alt, context); - //System.out.println("add to closure "+ip+" "+closure); - if ( closure.contains(t) ) return; - short opcode = code[ip]; - ip++; // move to next instruction or first byte of operand - switch (opcode) { - case Bytecode.NOT : // see thru NOT but include in closure so we exec during reach - closure.add(t); // add to closure; need to execute during reach - // add NOT and next instruction since reach only looks at - // what's in closure (it doesn't jump to ip after NOT) - addToClosure(closure, ip, alt, context); - break; - case Bytecode.JMP : - addToClosure(closure, getShort(code, ip), alt, context); - break; - case Bytecode.ACTION : - ip += 2; // has 2 more bytes than 
LABEL/SAVE - case Bytecode.LABEL : - case Bytecode.SAVE : - // see through them for closure ops - closure.add(t); // add to closure; need to execute during reach - ip += 2; - addToClosure(closure, ip, alt, context); // do closure past SAVE - break; - case Bytecode.SPLIT : - int nopnds = getShort(code, ip); - ip += 2; - // add split addresses to work queue in reverse order ('cept first one) - for (int i=0; i computeStartState(int ip) { - // if we're starting at a SPLIT, add closure of all SPLIT targets - // else just add closure of ip - List closure = new ArrayList(); - if ( code[ip]!=Bytecode.SPLIT ) { - addToClosure(closure, ip, 1, NFAStack.EMPTY); - return closure; - } - ip++; - int nalts = getShort(code, ip); - ip += 2; - // add split addresses to work queue in reverse order ('cept first one) - for (int i=1; i<=nalts; i++) { - addToClosure(closure, getShort(code, ip), i, NFAStack.EMPTY); - ip += Bytecode.ADDR_SIZE; - } - return closure; - } - - // --------------------------------------------------------------------- - - // this stuff below can't do SAVE nor CALL/RET but faster. (nor preds) - -/* - public int execThompson_no_stack(CharStream input, int ip) { - int c = input.LA(1); - if ( c==Token.EOF ) return Token.EOF; - - List closure = new ArrayList(); - List reach = new ArrayList(); - int prevAcceptAddr = Integer.MAX_VALUE; - int prevAcceptLastCharIndex = -1; - int prevAcceptInputMarker = -1; - int firstAcceptInputMarker = -1; - addToClosure_no_stack(closure, ip); - do { // while more work - c = input.LA(1); - int i = 0; -processOneChar: - while ( i prevAcceptLastCharIndex ) { - prevAcceptLastCharIndex = tokenLastCharIndex; - // choose longest match so far regardless of rule priority - System.out.println("replacing old best match @ "+prevAcceptAddr); - prevAcceptAddr = ip-1; - prevAcceptInputMarker = input.mark(); - firstAcceptInputMarker = prevAcceptInputMarker; - } - else if ( tokenLastCharIndex == prevAcceptLastCharIndex ) { - // choose first rule matched if match is of same length - if ( ip-1 < prevAcceptAddr ) { // it will see both accepts for ambig rules - System.out.println("replacing old best match @ "+prevAcceptAddr); - prevAcceptAddr = ip-1; - prevAcceptInputMarker = input.mark(); - } - } - // if we reach accept state, toss out any addresses in rest - // of work list associated with accept's rule; that rule is done - int ruleStart = altToAddr[ttype]; - int ruleStop = code.length; - if ( ttype+1 < altToAddr.length ) { - ruleStop = altToAddr[ttype+1]-1; - } - System.out.println("kill range "+ruleStart+".."+ruleStop); - int j=i+1; - while ( j=ruleStart || cl<=ruleStop ) closure.remove(j); - else j++; - } - // then, move to next char, looking for longer match - // (we continue processing if there are states in reach) - break; - //break processOneChar; - case Bytecode.JMP : // ignore - case Bytecode.SPLIT : - break; - default : - throw new RuntimeException("invalid instruction @ "+ip+": "+opcode); - } - i++; - } - if ( reach.size()>0 ) { // if we reached other states, consume and process them - input.consume(); - } - // swap to avoid reallocating space - List tmp = reach; - reach = closure; - closure = tmp; - reach.clear(); - } while ( closure.size()>0 ); - - if ( prevAcceptAddr >= code.length ) return Token.INVALID_TOKEN_TYPE; - int ttype = getShort(code, prevAcceptAddr+1); - System.out.println("done at index "+input.index()); - System.out.println("accept marker="+prevAcceptInputMarker); - input.rewind(prevAcceptInputMarker); // does nothing if we accept'd at input.index() but 
might need to rewind - input.release(firstAcceptInputMarker); // kill any other markers in stream we made - System.out.println("leaving with index "+input.index()); - return ttype; - } - - void addToClosure_no_stack(List closure, int ip) { - //System.out.println("add to closure "+ip+" "+closure); - if ( closure.contains(ip) ) return; // TODO: VERY INEFFICIENT! use int[num-states] as set test - closure.add(ip); - short opcode = code[ip]; - ip++; // move to next instruction or first byte of operand - switch (opcode) { - case Bytecode.JMP : - addToClosure_no_stack(closure, getShort(code, ip)); - break; - case Bytecode.SAVE : - int labelIndex = getShort(code, ip); - ip += 2; - addToClosure_no_stack(closure, ip); // do closure pass SAVE - // TODO: impl - break; - case Bytecode.SPLIT : - int nopnds = getShort(code, ip); - ip += 2; - // add split addresses to work queue in reverse order ('cept first one) - for (int i=0; i reach) { - int ip = t.addr; - String instr = Bytecode.disassembleInstruction(code, ip, true); - System.out.println(instr+"\t\t reach="+reach); - } - - void traceDFA(int ip) { - String instr = Bytecode.disassembleInstruction(code, ip, false); - System.out.println(instr); - } - - public static int getShort(byte[] memory, int index) { - return (memory[index]&0xFF) <<(8*1) | (memory[index+1]&0xFF); // prevent sign extension with mask - } - - public static class Context { - public int ip; - public int inputMarker; - public Context(int ip, int inputMarker) { - this.ip = ip; - this.inputMarker = inputMarker; - } - } - - public int execNoRecursion(TokenStream input, int ip) { - System.out.println("execNoRecursion @"+ip); - List work = new ArrayList(); - work.add(new Context(ip, input.mark())); -workLoop: - while ( work.size()>0 ) { - Context ctx = work.remove(work.size()-1); // treat like stack - ip = ctx.ip; - input.rewind(ctx.inputMarker); - while ( ip < code.length ) { - int c = input.LA(1); - traceDFA(ip); - short opcode = code[ip]; - ip++; // move to next instruction or first byte of operand - switch (opcode) { - case Bytecode.MATCH8 : - if ( c != code[ip] ) continue workLoop; - ip++; - input.consume(); - break; - case Bytecode.MATCH16 : - if ( c != getShort(code, ip) ) continue workLoop; - ip += 2; - input.consume(); - break; - case Bytecode.RANGE8 : - if ( ccode[ip+1] ) continue workLoop; - ip += 2; - input.consume(); - break; - case Bytecode.RANGE16 : - if ( cgetShort(code, ip+2) ) continue workLoop; - ip += 4; - input.consume(); - break; - case Bytecode.ACCEPT : - int altIndex = getShort(code, ip); - ip += 2; - System.out.println("accept "+altIndex); - // returning gives first match not longest; i.e., like PEG - return altIndex; - case Bytecode.JMP : - int target = getShort(code, ip); - ip = target; - continue; - case Bytecode.SPLIT : - int nopnds = getShort(code, ip); - ip += 2; - // add split addresses to work queue in reverse order ('cept first one) - for (int i=nopnds-1; i>=1; i--) { - int addr = getShort(code, ip+i*2); - //System.out.println("try alt "+i+" at "+addr); - work.add(new Context(addr, input.mark())); - } - // try first alternative (w/o adding to work list) - int addr = getShort(code, ip); - ip = addr; - //System.out.println("try alt "+nopnds+" at "+addr); - continue; - default : - throw new RuntimeException("invalid instruction @ "+ip+": "+opcode); - } - } - } - return 0; - } - -/* - public int exec(CharStream input, String ruleName) { - return exec(input, ruleToAddr.get(ruleName)); - } - - public int exec(CharStream input) { return exec(input, 0); } - - public 
int exec(CharStream input, int ip) { - while ( ip < code.length ) { - int c = input.LA(1); - trace(ip); - short opcode = code[ip]; - ip++; // move to next instruction or first byte of operand - switch (opcode) { - case Bytecode.MATCH8 : - if ( c != code[ip] ) return 0; - ip++; - input.consume(); - break; - case Bytecode.MATCH16 : - if ( c != getShort(code, ip) ) return 0; - ip += 2; - input.consume(); - break; - case Bytecode.RANGE8 : - if ( ccode[ip+1] ) return 0; - ip += 2; - input.consume(); - break; - case Bytecode.RANGE16 : - if ( cgetShort(code, ip+2) ) return 0; - ip += 4; - input.consume(); - break; - case Bytecode.ACCEPT : - int ruleIndex = getShort(code, ip); - ip += 2; - System.out.println("accept "+ruleIndex); - return ruleIndex; - case Bytecode.JMP : - int target = getShort(code, ip); - ip = target; - continue; - case Bytecode.SPLIT : - int nopnds = getShort(code, ip); - ip += 2; - for (int i=1; i<=nopnds-1; i++) { - int addr = getShort(code, ip); - ip += 2; - //System.out.println("try alt "+i+" at "+addr); - int m = input.mark(); - int r = exec(input, addr); - if ( r>0 ) { input.release(m); return r; } - input.rewind(m); - } - // try final alternative (w/o recursion) - int addr = getShort(code, ip); - ip = addr; - //System.out.println("try alt "+nopnds+" at "+addr); - continue; - default : - throw new RuntimeException("invalid instruction @ "+ip+": "+opcode); - } - } - return 0; - } - -*/ - -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/pda/ThreadState.java b/runtime/Java/src/org/antlr/v4/runtime/pda/ThreadState.java deleted file mode 100644 index 0e2e3f34a..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/pda/ThreadState.java +++ /dev/null @@ -1,41 +0,0 @@ -package org.antlr.v4.runtime.pda; - -/** NFA simulation thread state */ -public class ThreadState { - public int addr; - public int alt; // or speculatively matched token type for lexers - public NFAStack context; - public int inputIndex = -1; // char (or token?) index from 0 - public int inputMarker = -1; // accept states track input markers in case we need to rewind - - public ThreadState(int addr, int alt, NFAStack context) { - this.addr = addr; - this.alt = alt; - this.context = context; - } - - public ThreadState(ThreadState t) { - this.addr = t.addr; - this.alt = t.alt; - this.context = t.context; - this.inputIndex = t.inputIndex; - } - - public boolean equals(Object o) { - if ( o==null ) return false; - if ( this==o ) return true; - ThreadState other = (ThreadState)o; - return this.addr==other.addr && - this.alt==other.alt && - this.context.equals(other.context); - } - - public int hashCode() { return addr + context.hashCode(); } - - public String toString() { - if ( context.parent==null ) { - return "("+addr+","+alt+")"; - } - return "("+addr+","+alt+","+context+")"; - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/CommonTree.java b/runtime/Java/src/org/antlr/v4/runtime/tree/CommonTree.java deleted file mode 100644 index 164a66343..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/CommonTree.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime.tree; - -import org.antlr.runtime.BitSet; -import org.antlr.runtime.Token; -import org.antlr.runtime.tree.BaseTree; -import org.antlr.runtime.tree.Tree; -import org.antlr.v4.runtime.tree.gui.ASTViewer; - -/** A tree node that is wrapper for a Token object. After 3.0 release - * while building tree rewrite stuff, it became clear that computing - * parent and child index is very difficult and cumbersome. Better to - * spend the space in every tree node. If you don't want these extra - * fields, it's easy to cut them out in your own BaseTree subclass. - */ -public class CommonTree extends BaseTree { - /** A single token is the payload */ - public Token token; - - /** What token indexes bracket all tokens associated with this node - * and below? - */ - protected int startIndex=-1, stopIndex=-1; - - /** Who is the parent node of this node; if null, implies node is root */ - public CommonTree parent; - - /** What index is this node in the child list? 
Range: 0..n-1 */ - public int childIndex = -1; - - public CommonTree() { } - - public CommonTree(CommonTree node) { - super(node); - this.token = node.token; - this.startIndex = node.startIndex; - this.stopIndex = node.stopIndex; - } - - public CommonTree(Token t) { - this.token = t; - } - - public Token getToken() { - return token; - } - - public Tree dupNode() { - return new CommonTree(this); - } - - public boolean isNil() { - return token==null; - } - - public int getType() { - if ( token==null ) { - return Token.INVALID_TOKEN_TYPE; - } - return token.getType(); - } - - public String getText() { - if ( token==null ) { - return null; - } - return token.getText(); - } - - public int getLine() { - if ( token==null || token.getLine()==0 ) { - if ( getChildCount()>0 ) { - return getChild(0).getLine(); - } - return 0; - } - return token.getLine(); - } - - public int getCharPositionInLine() { - if ( token==null || token.getCharPositionInLine()==-1 ) { - if ( getChildCount()>0 ) { - return getChild(0).getCharPositionInLine(); - } - return 0; - } - return token.getCharPositionInLine(); - } - - public int getTokenStartIndex() { - if ( startIndex==-1 && token!=null ) { - return token.getTokenIndex(); - } - return startIndex; - } - - public void setTokenStartIndex(int index) { - startIndex = index; - } - - public int getTokenStopIndex() { - if ( stopIndex==-1 && token!=null ) { - return token.getTokenIndex(); - } - return stopIndex; - } - - public void setTokenStopIndex(int index) { - stopIndex = index; - } - - /** For every node in this subtree, make sure it's start/stop token's - * are set. Walk depth first, visit bottom up. Only updates nodes - * with at least one token index < 0. - */ - public void setUnknownTokenBoundaries() { - if ( children==null ) { - if ( startIndex<0 || stopIndex<0 ) { - startIndex = stopIndex = token.getTokenIndex(); - } - return; - } - for (int i=0; i=0 && stopIndex>=0 ) return; // already set - if ( children.size() > 0 ) { - CommonTree firstChild = (CommonTree)children.get(0); - CommonTree lastChild = (CommonTree)children.get(children.size()-1); - startIndex = firstChild.getTokenStartIndex(); - stopIndex = lastChild.getTokenStopIndex(); - } - } - - public int getChildIndex() { - return childIndex; - } - - public Tree getParent() { - return parent; - } - - public void setParent(Tree t) { - this.parent = (CommonTree)t; - } - - public void setChildIndex(int index) { - this.childIndex = index; - } - - // TODO: move to basetree when i settle on how runtime works - public void inspect() { - ASTViewer viewer = new ASTViewer(this); - viewer.open(); - } - - // TODO: move to basetree when i settle on how runtime works - // TODO: don't include this node!! - // TODO: reuse other method - public CommonTree getFirstDescendantWithType(int type) { - if ( getType()==type ) return this; - if ( children==null ) return null; - for (Object c : children) { - CommonTree t = (CommonTree)c; - if ( t.getType()==type ) return t; - CommonTree d = t.getFirstDescendantWithType(type); - if ( d!=null ) return d; - } - return null; - } - - // TODO: don't include this node!! 
- public CommonTree getFirstDescendantWithType(BitSet types) { - if ( types.member(getType()) ) return this; - if ( children==null ) return null; - for (Object c : children) { - CommonTree t = (CommonTree)c; - if ( types.member(t.getType()) ) return t; - CommonTree d = t.getFirstDescendantWithType(types); - if ( d!=null ) return d; - } - return null; - } - - public String toString() { - if ( isNil() ) { - return "nil"; - } - if ( getType()==Token.INVALID_TOKEN_TYPE ) { - return ""; - } - if ( token==null ) { - return null; - } - return token.getText(); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/TreeFilter.java b/runtime/Java/src/org/antlr/v4/runtime/tree/TreeFilter.java deleted file mode 100644 index 2919f9544..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/TreeFilter.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime.tree; - -import org.antlr.runtime.RecognitionException; -import org.antlr.runtime.RecognizerSharedState; -import org.antlr.runtime.TokenStream; -import org.antlr.runtime.tree.*; - -/** - Cut-n-paste from material I'm not using in the book anymore (edit later - to make sense): - - Now, how are we going to test these tree patterns against every -subtree in our original tree? In what order should we visit nodes? -For this application, it turns out we need a simple ``apply once'' -rule application strategy and a ``down then up'' tree traversal -strategy. Let's look at rule application first. - -As we visit each node, we need to see if any of our patterns match. If -a pattern matches, we execute the associated tree rewrite and move on -to the next node. In other words, we only look for a single rule -application opportunity (we'll see below that we sometimes need to -repeatedly apply rules). The following method applies a rule in a @cl -TreeParser (derived from a tree grammar) to a tree: - -here is where weReferenced code/walking/patterns/TreePatternMatcher.java - -It uses reflection to lookup the appropriate rule within the generated -tree parser class (@cl Simplify in this case). 
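(As a sketch of how a generated filter such as Simplify is typically driven,
assuming it was generated from a tree grammar with filter=true and the usual
ANTLR runtime classes:

    CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree); // tree = AST to rewrite
    Simplify filter = new Simplify(nodes);
    filter.downup(tree);  // applyOnce() per node on the way down, then again on the way up

The downup() method defined below performs exactly this down-then-up walk.)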
Most of the time, the -rule will not match the tree. To avoid issuing syntax errors and -attempting error recovery, it bumps up the backtracking level. Upon -failure, the invoked rule immediately returns. If you don't plan on -using this technique in your own ANTLR-based application, don't sweat -the details. This method boils down to ``call a rule to match a tree, -executing any embedded actions and rewrite rules.'' - -At this point, we know how to define tree grammar rules and how to -apply them to a particular subtree. The final piece of the tree -pattern matcher is the actual tree traversal. We have to get the -correct node visitation order. In particular, we need to perform the -scalar-vector multiply transformation on the way down (preorder) and -we need to reduce multiply-by-zero subtrees on the way up (postorder). - -To implement a top-down visitor, we do a depth first walk of the tree, -executing an action in the preorder position. To get a bottom-up -visitor, we execute an action in the postorder position. ANTLR -provides a standard @cl TreeVisitor class with a depth first search @v -visit method. That method executes either a @m pre or @m post method -or both. In our case, we need to call @m applyOnce in both. On the way -down, we'll look for @r vmult patterns. On the way up, -we'll look for @r mult0 patterns. - */ -public class TreeFilter extends TreeParser { - public interface fptr { - public void rule() throws RecognitionException; - } - - protected TokenStream originalTokenStream; - protected TreeAdaptor originalAdaptor; - - public TreeFilter(TreeNodeStream input) { - this(input, new RecognizerSharedState()); - } - public TreeFilter(TreeNodeStream input, RecognizerSharedState state) { - super(input, state); - originalAdaptor = (TreeAdaptor) input.getTreeAdaptor(); - originalTokenStream = input.getTokenStream(); - } - - public void applyOnce(Object t, fptr whichRule) { - if ( t==null ) return; - try { - // share TreeParser object but not parsing-related state - state = new RecognizerSharedState(); - input = new CommonTreeNodeStream(originalAdaptor, t); - ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream); - setBacktrackingLevel(1); - whichRule.rule(); - setBacktrackingLevel(0); - } - catch (RecognitionException e) { ; } - } - - public void downup(Object t) { - TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor()); - TreeVisitorAction actions = new TreeVisitorAction() { - public Object pre(Object t) { applyOnce(t, topdown_fptr); return t; } - public Object post(Object t) { applyOnce(t, bottomup_fptr); return t; } - }; - v.visit(t, actions); - } - - fptr topdown_fptr = new fptr() { - public void rule() throws RecognitionException { - topdown(); - } - }; - - fptr bottomup_fptr = new fptr() { - public void rule() throws RecognitionException { - bottomup(); - } - }; - - // methods the downup strategy uses to do the up and down rules. - // to override, just define tree grammar rule topdown and turn on - // filter=true. - public void topdown() throws RecognitionException {;} - public void bottomup() throws RecognitionException {;} -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/TreeParser.java b/runtime/Java/src/org/antlr/v4/runtime/tree/TreeParser.java deleted file mode 100644 index 7003a98fd..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/TreeParser.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.runtime.tree; - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.TreeAdaptor; -import org.antlr.runtime.tree.TreeNodeStream; - -import java.util.regex.Pattern; -import java.util.regex.Matcher; - -/** A parser for a stream of tree nodes. "tree grammars" result in a subclass - * of this. All the error reporting and recovery is shared with Parser via - * the BaseRecognizer superclass. -*/ -public class TreeParser extends BaseRecognizer { - public static final int DOWN = Token.DOWN; - public static final int UP = Token.UP; - - // precompiled regex used by inContext - static String dotdot = ".*[^.]\\.\\.[^.].*"; - static String doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*"; - static Pattern dotdotPattern = Pattern.compile(dotdot); - static Pattern doubleEtcPattern = Pattern.compile(doubleEtc); - - protected TreeNodeStream input; - - public TreeParser(TreeNodeStream input) { - super(); // highlight that we go to super to set state object - setTreeNodeStream(input); - } - - public TreeParser(TreeNodeStream input, RecognizerSharedState state) { - super(state); // share the state object with another parser - setTreeNodeStream(input); - } - - public void reset() { - super.reset(); // reset all recognizer state variables - if ( input!=null ) { - input.seek(0); // rewind the input - } - } - - /** Set the input stream */ - public void setTreeNodeStream(TreeNodeStream input) { - this.input = input; - } - - public TreeNodeStream getTreeNodeStream() { - return input; - } - - public String getSourceName() { - return input.getSourceName(); - } - - protected Object getCurrentInputSymbol(IntStream input) { - return ((TreeNodeStream)input).LT(1); - } - - protected Object getMissingSymbol(IntStream input, - RecognitionException e, - int expectedTokenType, - BitSet follow) - { - String tokenText = - ""; - TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor(); - return adaptor.create(new CommonToken(expectedTokenType, tokenText)); - } - - /** Match '.' in tree parser has special meaning. Skip node or - * entire tree if node has children. If children, scan until - * corresponding UP node. 
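 * For example, the subtree (+ (* 2 3) 4) appears in the flat node stream as
 * + DOWN * DOWN 2 3 UP 4 UP, so skipping the whole subtree means consuming
 * nodes until we reach the UP that balances the first DOWN (nesting level
 * back to zero).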
- */ - public void matchAny(IntStream ignore) { // ignore stream, copy of input - state.errorRecovery = false; - state.failed = false; - Object look = input.LT(1); - if ( input.getTreeAdaptor().getChildCount(look)==0 ) { - input.consume(); // not subtree, consume 1 node and return - return; - } - // current node is a subtree, skip to corresponding UP. - // must count nesting level to get right UP - int level=0; - int tokenType = input.getTreeAdaptor().getType(look); - while ( tokenType!=Token.EOF && !(tokenType==UP && level==0) ) { - input.consume(); - look = input.LT(1); - tokenType = input.getTreeAdaptor().getType(look); - if ( tokenType == DOWN ) { - level++; - } - else if ( tokenType == UP ) { - level--; - } - } - input.consume(); // consume UP - } - - /** We have DOWN/UP nodes in the stream that have no line info; override. - * plus we want to alter the exception type. Don't try to recover - * from tree parser errors inline... - */ - protected Object recoverFromMismatchedToken(IntStream input, - int ttype, - BitSet follow) - throws RecognitionException - { - throw new MismatchedTreeNodeException(ttype, (TreeNodeStream)input); - } - - /** Prefix error message with the grammar name because message is - * always intended for the programmer because the parser built - * the input tree not the user. - */ - public String getErrorHeader(RecognitionException e) { - return getGrammarFileName()+": node from "+ - (e.approximateLineInfo?"after ":"")+"line "+e.line+":"+e.charPositionInLine; - } - - /** Tree parsers parse nodes they usually have a token object as - * payload. Set the exception token and do the default behavior. - */ - public String getErrorMessage(RecognitionException e, String[] tokenNames) { - if ( this instanceof TreeParser ) { - TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor(); - e.token = adaptor.getToken(e.node); - if ( e.token==null ) { // could be an UP/DOWN node - e.token = new CommonToken(adaptor.getType(e.node), - adaptor.getText(e.node)); - } - } - return super.getErrorMessage(e, tokenNames); - } - - /** Check if current node in input has a context. Context means sequence - * of nodes towards root of tree. For example, you might say context - * is "MULT" which means my parent must be MULT. "CLASS VARDEF" says - * current node must be child of a VARDEF and whose parent is a CLASS node. - * You can use "..." to mean zero-or-more nodes. "METHOD ... VARDEF" - * means my parent is VARDEF and somewhere above that is a METHOD node. - * The first node in the context is not necessarily the root. The context - * matcher stops matching and returns true when it runs out of context. - * There is no way to force the first node to be the root. - */ - public boolean inContext(String context) { - return inContext(input.getTreeAdaptor(), getTokenNames(), input.LT(1), context); - } - - /** The worker for inContext. It's static and full of parameters for - * testing purposes. - */ - public static boolean inContext(TreeAdaptor adaptor, - String[] tokenNames, - Object t, - String context) - { - Matcher dotdotMatcher = dotdotPattern.matcher(context); - Matcher doubleEtcMatcher = doubleEtcPattern.matcher(context); - if ( dotdotMatcher.find() ) { // don't allow "..", must be "..." - throw new IllegalArgumentException("invalid syntax: .."); - } - if ( doubleEtcMatcher.find() ) { // don't allow double "..." - throw new IllegalArgumentException("invalid syntax: ... ..."); - } - context = context.replaceAll("\\.\\.\\.", " ... "); // ensure spaces around ... 
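// Examples of contexts this matcher accepts (node names are hypothetical):
//   "VARDEF"              the parent of the current node must be a VARDEF
//   "CLASS VARDEF"        the parent is a VARDEF whose own parent is a CLASS
//   "METHOD ... VARDEF"   the parent is a VARDEF with a METHOD somewhere above it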
- context = context.trim(); - String[] nodes = context.split("\\s+"); - int ni = nodes.length-1; - t = adaptor.getParent(t); - while ( ni>=0 && t!=null ) { - if ( nodes[ni].equals("...") ) { - // walk upwards until we see nodes[ni-1] then continue walking - if ( ni==0 ) return true; // ... at start is no-op - String goal = nodes[ni-1]; - Object ancestor = getAncestor(adaptor, tokenNames, t, goal); - if ( ancestor==null ) return false; - t = ancestor; - ni--; - } - String name = tokenNames[adaptor.getType(t)]; - if ( !name.equals(nodes[ni]) ) { - //System.err.println("not matched: "+nodes[ni]+" at "+t); - return false; - } - // advance to parent and to previous element in context node list - ni--; - t = adaptor.getParent(t); - } - - if ( t==null && ni>=0 ) return false; // at root but more nodes to match - return true; - } - - /** Helper for static inContext */ - protected static Object getAncestor(TreeAdaptor adaptor, String[] tokenNames, Object t, String goal) { - while ( t!=null ) { - String name = tokenNames[adaptor.getType(t)]; - if ( name.equals(goal) ) return t; - t = adaptor.getParent(t); - } - return null; - } - - public void traceIn(String ruleName, int ruleIndex) { - super.traceIn(ruleName, ruleIndex, input.LT(1)); - } - - public void traceOut(String ruleName, int ruleIndex) { - super.traceOut(ruleName, ruleIndex, input.LT(1)); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ANTLRASTViewer.jfdproj b/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ANTLRASTViewer.jfdproj deleted file mode 100644 index 8b06df3b6..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ANTLRASTViewer.jfdproj +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewFrame.java b/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewFrame.java deleted file mode 100644 index 064c69513..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewFrame.java +++ /dev/null @@ -1,43 +0,0 @@ -package org.antlr.v4.runtime.tree.gui; - -import javax.swing.*; -import java.awt.*; -/* - * Created by JFormDesigner on Mon Jan 18 14:54:16 PST 2010 - */ - -/** - * @author Terence Parr - */ -public class ASTViewFrame extends JFrame { - public ASTViewFrame() { - initComponents(); - } - - private void initComponents() { - // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents - // Generated using JFormDesigner non-commercial license - scrollPane1 = new JScrollPane(); - tree = new JTree(); - - //======== this ======== - setTitle("ANTLR AST Viewer"); - Container contentPane = getContentPane(); - contentPane.setLayout(new GridLayout(1, 1)); - - //======== scrollPane1 ======== - { - scrollPane1.setViewportView(tree); - } - contentPane.add(scrollPane1); - pack(); - setLocationRelativeTo(getOwner()); - // JFormDesigner - End of component initialization //GEN-END:initComponents - } - - // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables - // Generated using JFormDesigner non-commercial license - private JScrollPane scrollPane1; - public JTree tree; - // JFormDesigner - End of variables declaration //GEN-END:variables -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewFrame.jfd b/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewFrame.jfd deleted file mode 100644 index 5cddad9ac..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewFrame.jfd +++ /dev/null @@ -1,73 +0,0 @@ - - - - - form/swing - - - - - - javax.swing.JFrame - - java.awt.GridLayout - - 
columns - 1 - - - - title - ANTLR AST Viewer - - - - javax.swing.JScrollPane - - javax.swing.JScrollPane - - - scrollPane1 - - - - javax.swing.JTree - - tree - - - - JavaCodeGenerator.variableModifiers - 1 - - - - - - - - this - - - - - - location - - 0 - 0 - - - - size - - 400 - 300 - - - - - - - - diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewer.java b/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewer.java deleted file mode 100644 index 58c49195e..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/ASTViewer.java +++ /dev/null @@ -1,28 +0,0 @@ -package org.antlr.v4.runtime.tree.gui; - -import org.antlr.runtime.tree.CommonTreeAdaptor; -import org.antlr.runtime.tree.TreeAdaptor; - -/** */ -public class ASTViewer { - TreeAdaptor adaptor; - Object root; - - public ASTViewer(TreeAdaptor adaptor, Object root) { - this.adaptor = adaptor; - this.root = root; - } - - public ASTViewer(Object root) { - this.adaptor = new CommonTreeAdaptor(); - this.root = root; - } - - public void open() { - ASTViewFrame m = new ASTViewFrame(); - m.tree.setModel(new JTreeASTModel(adaptor, root)); - m.pack(); - m.setSize(800,600); - m.setVisible(true); - } -} diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/JTreeASTModel.java b/runtime/Java/src/org/antlr/v4/runtime/tree/gui/JTreeASTModel.java deleted file mode 100644 index ce623feb6..000000000 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/gui/JTreeASTModel.java +++ /dev/null @@ -1,51 +0,0 @@ -package org.antlr.v4.runtime.tree.gui; - -import org.antlr.runtime.tree.CommonTreeAdaptor; -import org.antlr.runtime.tree.TreeAdaptor; - -import javax.swing.event.TreeModelListener; -import javax.swing.tree.TreeModel; -import javax.swing.tree.TreePath; - -public class JTreeASTModel implements TreeModel { - TreeAdaptor adaptor; - Object root; - - public JTreeASTModel(TreeAdaptor adaptor, Object root) { - this.adaptor = adaptor; - this.root = root; - } - - public JTreeASTModel(Object root) { - this.adaptor = new CommonTreeAdaptor(); - this.root = root; - } - - public int getChildCount(Object parent) { - return adaptor.getChildCount(parent); - } - - public int getIndexOfChild(Object parent, Object child){ - if ( parent==null ) return -1; - return adaptor.getChildIndex(child); - } - - public Object getChild(Object parent, int index){ - return adaptor.getChild(parent, index); - } - - public boolean isLeaf(Object node) { - return getChildCount(node)==0; - } - - public Object getRoot() { return root; } - - public void valueForPathChanged(TreePath treePath, Object o) { - } - - public void addTreeModelListener(TreeModelListener treeModelListener) { - } - - public void removeTreeModelListener(TreeModelListener treeModelListener) { - } -} diff --git a/tool/build.properties b/tool/build.properties deleted file mode 100644 index e69de29bb..000000000 diff --git a/tool/build.xml b/tool/build.xml deleted file mode 100644 index 9b7c3c37f..000000000 --- a/tool/build.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg deleted file mode 100644 index 491712920..000000000 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg +++ /dev/null @@ -1,456 +0,0 @@ -javaTypeInitMap ::= [ - "int":"0", - "long":"0", - "float":"0.0f", - "double":"0.0", - "boolean":"false", - "byte":"0", - "short":"0", - "char":"0", - default:"null" // anything other than an atomic type -] - -// args must be , - 
-ParserFile(file, parser, dfaDecls, bitSetDecls, namedActions) ::= << -// $ANTLR ANTLRVersion> generatedTimestamp> - -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.EarlyExitException; -import org.antlr.v4.runtime.ParserSharedState; -import org.antlr.v4.runtime.RecognitionException; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.misc.*; -import org.antlr.runtime.*; - - ->> - -Parser(parser, scopes, funcs) ::= << -public class extends Parser { -()}; separator=", "> - ; - public int type; - TokenType(int type) { this.type = type; } - } -!> - =;}; separator="\n"> - - - - - - -} ->> - -ctor(p) ::= << -public (TokenStream input) { - this(input, new ParserSharedState()); -} -public (TokenStream input, ParserSharedState state) { - super(input, state); -} ->> - -/* - // S.g:5:1: b returns [String q, float x] : A ; - public final S.b_return b() throws RecognitionException { - b_stack.push(new b_scope()); - S.b_return retval = new S.b_return(); -*/ - -RuleFunction(f,code,decls,context,scope,namedActions,finallyAction) ::= << - - - - }>public final ( _ctx) throws RecognitionException { - state.ctx.push(_ctx); - - _stack.push(new ()); - - _stack.push(new ());}; separator="\n"> - - - try { - - } - catch (RecognitionException re) { - reportError(re); - recover(); - } - finally { - - _stack.pop();}; separator="\n"> - _stack.pop(); - - return ()state.ctx.pop(); - } -} ->> - -/** Convenience method to call from outside */ -StartRuleFunction(f) ::= << - }>public final () throws RecognitionException { - return (new (, }>LABitSet.EOF_SET)); -} ->> - -CodeBlock(c, ops) ::= << - ->> - -LL1AltBlock(choice, alts, error) ::= << -switch ( state.input.LA(1) ) { - - - break;}; separator="\n"> - default : - -} ->> - -// follow set included as choice by analysis -LL1OptionalBlock ::= LL1AltBlock - -LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= << - -if ( ) { - -} -else if ( !() ) ->> - -LL1StarBlock(choice, alts, sync) ::= << -: -while (true) { - switch ( state.input.LA(1) ) { - - - break;}; separator="\n"> - - break ; - } - -} ->> - -LL1StarBlockSingleAlt(choice, expr, alts, preamble, iteration, sync) ::= << - -while ( ) { - - - -} ->> - -LL1PlusBlock(choice, alts, earlyExitError, sync, iterationSync) ::= << - -int = 0; -: -while (true) { - switch ( state.input.LA(1) ) { - - - break;}; separator="\n"> - - if ( >= 1 ) break ; - else - } - ++; - -} ->> - -LL1PlusBlockSingleAlt(choice, expr, alts, preamble, iteration, - earlyExitError, sync, iterationSync) ::= -<< - - -do { - - - -} while ( ); ->> - -Sync(s) ::= "sync();" - -ThrowNoViableAlt(t) ::= "throw new NoViableAltException(this, );" -ThrowEarlyExitException(t) ::= "throw new EarlyExitException(this, );" - -TestSet(s) ::= << -.member(state.input.LA(1)) ->> - -TestSetInline(s) ::= << -==}; separator=" || "> ->> - -cases(ttypes) ::= << -:}; separator="\n"> ->> - -InvokeRule(r) ::= << - = }>(new (,}>)); ->> - -MatchToken(m) ::= << - = }>(Token)match(, ); ->> - -// ACTION STUFF - -Action(a, chunks) ::= "" - -SemPred(p) ::= << -if (!()) throw new FailedPredicateException(this,"", ""); ->> - -ActionText(t) ::= "" -ArgRef(a) ::= "_ctx." -RetValueRef(a) ::= "_ctx." -QRetValueRef(a) ::= "." -/** How to translate $tokenLabel */ -TokenRef(t) ::= "" -SetAttr(s,rhsChunks) ::= "_ctx. = ;" -SetQAttr(s,rhsChunks) ::= ". 
= ;" - -TokenPropertyRef_text(t) ::= "(!=null?.getText():null)" -TokenPropertyRef_type(t) ::= "(!=null?.getType():0)" -TokenPropertyRef_line(t) ::= "(!=null?.getLine():0)" -TokenPropertyRef_pos(t) ::= "(!=null?.getCharPositionInLine():0)" -TokenPropertyRef_channel(t) ::= "(!=null?.getChannel():0)" -TokenPropertyRef_index(t) ::= "(!=null?.getTokenIndex():0)" -TokenPropertyRef_tree(t) ::= "_tree" -TokenPropertyRef_int(t) ::= "(!=null?Integer.valueOf(.getText()):0)" - -RulePropertyRef_start(r) ::= "(!=null?(().start):null)" -RulePropertyRef_stop(r) ::= "(!=null?(().stop):null)" -RulePropertyRef_tree(r) ::= "(!=null?(().tree):null)" -RulePropertyRef_text(r) ::= "(!=null?((TokenStream)state.input).toString(.start,.stop):null)" -RulePropertyRef_st(r) ::= "(!=null?.st:null)" - -DynScopeRef(s) ::= "_stack" -DynScopeAttrRef(s) ::= "_stack.peek()." -DynScopeAttrRef_negIndex(s, indexChunks) ::= - "_stack.get(_stack.size()--1)." -DynScopeAttrRef_index(s, indexChunks) ::= - "_stack.get()." -SetDynScopeAttr(s, rhsChunks) ::= - "_stack.peek(). =;" -SetDynScopeAttr_negIndex(s, indexChunks, rhsChunks) ::= - "_stack.get(_stack.size()--1). =;" -SetDynScopeAttr_index(s, indexChunks, rhsChunks) ::= - "_stack.get(). =;" - -AddToList(a) ::= ".add();" - -TokenDecl(t) ::= "Token ;" -TokenTypeDecl(t) ::= "int ;" -TokenListDecl(t) ::= "List\ = new ArrayList\();" -RuleContextDecl(r) ::= " ;" - -CaptureNextToken(d) ::= " = state.input.LT(1);" -CaptureNextTokenType(d) ::= " = state.input.LA(1);" - -StructDecl(s,attrs) ::= << -public static class extends ParserRuleContext { - ;}; separator="\n"> - - public (,}> LABitSet follow) { - super(follow); - = ;}; separator="\n"> - } - -}; ->> - -DynamicScopeStruct(d,attrs) ::= << -public static class { - ;}; separator="\n"> -}; -public QStack\<\> _stack = new QStack\<\>(); ->> - -AttributeDecl(d) ::= "" - -DFADecl(dfa) ::= << -// define ->> - -BitSetDecl(b) ::= << -public static final LABitSet =new LABitSet(new long[]{L};separator=",">}, true); ->> - -LexerFile(fileName, lexer) ::= << -// $ANTLR ANTLRVersion> generatedTimestamp> -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.LexerSharedState; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.pda.*; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.*; -import org.antlr.runtime.*; - - ->> - -Lexer(lexerName, modes, dfas, pdas, tokens, actions, sempreds, namedActions) ::= << -public class extends Lexer { - =;}; separator="\n"> - = ;}; separator="\n"> - - public (CharStream input) { - this(input, new LexerSharedState()); - } - public (CharStream input, LexerSharedState state) { - super(input,state); -// modeToPDA = new PDA[] { _PDA()}; separator=", "> }; - modeToDFA = new DFA[] { _DFA()}; separator=", "> }; - } - public String getGrammarFileName() { return ""; } - - - - - - - -} ->> - -DFA(name, model) ::= << -public static final short[] _accept = { - -}; -public static final short[] _eof = { - -}; -public static final char[] _max = { - -}; -public static final short[][] _transition = { - \}}; separator=",\n", null="null"> -}; -public static final int[][] _set_edges = { - \}}; separator=",\n", null="null"> -}; -public static final int[][] _pred_edges = { - \}}; separator=",\n", null="null"> -}; -public static final short[] _action_index = { - -}; - -public final class _DFA extends DFA { - - public void action(int action) { - switch ( action ) { - : - - break; - }> - } - } - - - public boolean sempred(int sempred) { - switch ( sempred ) { - : - return

; - break; - }> - } - return false; - } - - public _DFA() { - this.eof = _eof; - this.max = _max; - this.accept = _accept; - this.transition = _transition; - this.set_edges = _set_edges; - this.pred_edges = _pred_edges; - this.action_index = _action_index; - } -} ->> - -PDA(name, model, actions, sempreds) ::= << -public static final byte[] _code = { - -}; -public static final int[] _tokenTypeToAddr = { - -}; - -public final class _PDA extends PDA { - - public void action(int r, int a) { - switch ( r ) { - : _actions(a); break; - }> - } - } - - - public void sempred(int r, int a) { - switch ( r ) { - : return _sempreds(a); - }> - } - } - - public _PDA() { - super(_code, _tokenTypeToAddr, ); - } -}<\n> ->> - -actionMethod(name, actions) ::= << -public void _actions(int action) { - System.out.println("exec action "+action); - switch ( action ) { - : - - break; - }> - } -} ->> - -sempredMethod(name, preds) ::= << -public boolean _sempreds(int pred) { - switch ( pred ) { - : - return

; - }> - default : return false; - } -} ->> - -/** Using a type to init value map, try to init a type; if not in table - * must be an object, default value is "null". - */ -initValue(typeName) ::= << - ->> - -codeFileExtension() ::= ".java" - -true() ::= "true" -false() ::= "false" \ No newline at end of file diff --git a/tool/resources/org/antlr/v4/tool/templates/depend.stg b/tool/resources/org/antlr/v4/tool/templates/depend.stg deleted file mode 100644 index c093054eb..000000000 --- a/tool/resources/org/antlr/v4/tool/templates/depend.stg +++ /dev/null @@ -1,12 +0,0 @@ -/** templates used to generate make-compatible dependencies */ -group depend; - -/** Generate "f : x, y, z" dependencies for input - * dependencies and generated files. in and out - * are File objects. For example, you can say - * - */ -dependencies(grammarFileName,in,out) ::= << -: - : }; separator="\n"> ->> diff --git a/tool/resources/org/antlr/v4/tool/templates/dot/action-edge.st b/tool/resources/org/antlr/v4/tool/templates/dot/action-edge.st deleted file mode 100644 index 210278de7..000000000 --- a/tool/resources/org/antlr/v4/tool/templates/dot/action-edge.st +++ /dev/null @@ -1,3 +0,0 @@ -action-edge() ::= << - -> [fontsize=11, fontname="Courier", arrowsize=.7, label = "Errors Reported by the ANTLR Tool. - * - * TODO: add notion of single issuance of an error; some don't need to be repeated; AST_OP_IN_ALT_WITH_REWRITE and option issues - * - * @author Jim Idle - * @since 4.0 - */ -public enum ErrorType { - INVALID(ErrorSeverity.ERROR,true,true), - - // TODO: set all of the true, true appropriately - CANNOT_WRITE_FILE(ErrorSeverity.ERROR, true, true), - CANNOT_CLOSE_FILE(ErrorSeverity.ERROR, true, true), - CANNOT_FIND_TOKENS_FILE(ErrorSeverity.ERROR, true, true), - ERROR_READING_TOKENS_FILE(ErrorSeverity.ERROR, true, true), - DIR_NOT_FOUND(ErrorSeverity.ERROR, true, true), - OUTPUT_DIR_IS_FILE(ErrorSeverity.ERROR, true, true), - CANNOT_OPEN_FILE(ErrorSeverity.ERROR, true, true), - FILE_AND_GRAMMAR_NAME_DIFFER(ErrorSeverity.ERROR, true, true), - FILENAME_EXTENSION_ERROR(ErrorSeverity.ERROR, true, true), - - INTERNAL_ERROR(ErrorSeverity.ERROR, true, true), - INTERNAL_WARNING(ErrorSeverity.ERROR, true, true), - TOKENS_FILE_SYNTAX_ERROR(ErrorSeverity.ERROR, true, true), - CANNOT_GEN_DOT_FILE(ErrorSeverity.ERROR, true, true), - - // Code generation errors - MISSING_CODE_GEN_TEMPLATES(ErrorSeverity.ERROR, false, true), - CANNOT_CREATE_TARGET_GENERATOR(ErrorSeverity.ERROR, false, true), - CODE_TEMPLATE_ARG_ISSUE(ErrorSeverity.ERROR, false, true), - CODE_GEN_TEMPLATES_INCOMPLETE(ErrorSeverity.ERROR, false, true), - NO_MODEL_TO_TEMPLATE_MAPPING(ErrorSeverity.ERROR, false, true), - - // Grammar errors - SYNTAX_ERROR(ErrorSeverity.ERROR, true, true), - RULE_REDEFINITION(ErrorSeverity.ERROR, true, true), - LEXER_RULES_NOT_ALLOWED(ErrorSeverity.ERROR, true, true), - PARSER_RULES_NOT_ALLOWED(ErrorSeverity.ERROR, true, true), - REPEATED_PREQUEL(ErrorSeverity.ERROR, true, true), - NO_TOKEN_DEFINITION(ErrorSeverity.ERROR, true, true), - UNDEFINED_RULE_REF(ErrorSeverity.ERROR, true, true), - LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(ErrorSeverity.ERROR, true, true), - CANNOT_ALIAS_TOKENS(ErrorSeverity.ERROR, true, true), - TOKEN_NAMES_MUST_START_UPPER(ErrorSeverity.ERROR, true, true), - ATTRIBUTE_REF_NOT_IN_RULE(ErrorSeverity.ERROR, true, true), - INVALID_RULE_SCOPE_ATTRIBUTE_REF(ErrorSeverity.ERROR, true, true), - UNKNOWN_SIMPLE_ATTRIBUTE(ErrorSeverity.ERROR, true, true), - INVALID_RULE_PARAMETER_REF(ErrorSeverity.ERROR, true, true), - 
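// Each constant is declared as NAME(severity, abortsAnalysis, abortsCodegen);
// see the constructor at the bottom of this enum for the meaning of the flags.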
UNKNOWN_RULE_ATTRIBUTE(ErrorSeverity.ERROR, true, true), - UNKNOWN_ATTRIBUTE_IN_SCOPE(ErrorSeverity.ERROR, true, true), - ISOLATED_RULE_REF(ErrorSeverity.ERROR, true, true), - SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE(ErrorSeverity.ERROR, true, true), - LABEL_CONFLICTS_WITH_RULE(ErrorSeverity.ERROR, true, true), - LABEL_CONFLICTS_WITH_TOKEN(ErrorSeverity.ERROR, true, true), - LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE(ErrorSeverity.ERROR, true, true), - LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL(ErrorSeverity.ERROR, true, true), - ATTRIBUTE_CONFLICTS_WITH_RULE(ErrorSeverity.ERROR, true, true), - ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL(ErrorSeverity.ERROR, true, true), - LABEL_TYPE_CONFLICT(ErrorSeverity.ERROR, true, true), - ARG_RETVAL_CONFLICT(ErrorSeverity.ERROR, true, true), - NONUNIQUE_REF(ErrorSeverity.ERROR, true, true), - FORWARD_ELEMENT_REF(ErrorSeverity.ERROR, true, true), - MISSING_RULE_ARGS(ErrorSeverity.ERROR, true, true), - RULE_HAS_NO_ARGS(ErrorSeverity.ERROR, true, true), - ARGS_ON_TOKEN_REF(ErrorSeverity.ERROR, true, true), - RULE_REF_AMBIG_WITH_RULE_IN_ALT(ErrorSeverity.ERROR, true, true), - ILLEGAL_OPTION(ErrorSeverity.ERROR, true, true), - LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT(ErrorSeverity.ERROR, true, true), - REWRITE_ELEMENT_NOT_PRESENT_ON_LHS(ErrorSeverity.ERROR, true, true), - //UNDEFINED_TOKEN_REF_IN_REWRITE(ErrorSeverity.ERROR, true, true), - ///UNDEFINED_LABEL_REF_IN_REWRITE(ErrorSeverity.ERROR, true, true), use previous - NO_GRAMMAR_START_RULE(ErrorSeverity.ERROR, true, true), - EMPTY_COMPLEMENT(ErrorSeverity.ERROR, true, true), - UNKNOWN_DYNAMIC_SCOPE(ErrorSeverity.ERROR, true, true), - UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE(ErrorSeverity.ERROR, true, true), - ISOLATED_RULE_ATTRIBUTE(ErrorSeverity.ERROR, true, true), - INVALID_ACTION_SCOPE(ErrorSeverity.ERROR, true, true), - ACTION_REDEFINITION(ErrorSeverity.ERROR, true, true), - SCOPE_REDEFINITION(ErrorSeverity.ERROR, true, true), - INVALID_TEMPLATE_ACTION(ErrorSeverity.ERROR, true, true), - ARG_INIT_VALUES_ILLEGAL(ErrorSeverity.ERROR, true, true), - REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION(ErrorSeverity.ERROR, true, true), - NO_RULES(ErrorSeverity.ERROR, true, true), - WRITE_TO_READONLY_ATTR(ErrorSeverity.ERROR, true, true), - MISSING_AST_TYPE_IN_TREE_GRAMMAR(ErrorSeverity.ERROR, true, true), - REWRITE_FOR_MULTI_ELEMENT_ALT(ErrorSeverity.ERROR, true, true), - RULE_INVALID_SET(ErrorSeverity.ERROR, true, true), - HETERO_ILLEGAL_IN_REWRITE_ALT(ErrorSeverity.ERROR, true, true), - NO_SUCH_GRAMMAR_SCOPE(ErrorSeverity.ERROR, true, true), - NO_SUCH_RULE_IN_SCOPE(ErrorSeverity.ERROR, true, true), - TOKEN_ALIAS_CONFLICT(ErrorSeverity.ERROR, true, true), - TOKEN_ALIAS_REASSIGNMENT(ErrorSeverity.ERROR, true, true), - TOKEN_VOCAB_IN_DELEGATE(ErrorSeverity.ERROR, true, true), - TOKEN_ALIAS_IN_DELEGATE(ErrorSeverity.ERROR, true, true), - INVALID_IMPORT(ErrorSeverity.ERROR, true, true), - IMPORTED_TOKENS_RULE_EMPTY(ErrorSeverity.ERROR, true, true), - IMPORT_NAME_CLASH(ErrorSeverity.ERROR, true, true), - AST_OP_WITH_NON_AST_OUTPUT_OPTION(ErrorSeverity.ERROR, true, true), - AST_OP_IN_ALT_WITH_REWRITE(ErrorSeverity.ERROR, true, true), - WILDCARD_AS_ROOT(ErrorSeverity.ERROR, true, true), - CONFLICTING_OPTION_IN_TREE_FILTER(ErrorSeverity.ERROR, true, true), - - AMBIGUITY(ErrorSeverity.ERROR, true, true), - UNREACHABLE_ALTS(ErrorSeverity.ERROR, true, true), - //MULTIPLE_RECURSIVE_ALTS(ErrorSeverity.ERROR, true, true), - INSUFFICIENT_PREDICATES(ErrorSeverity.ERROR, true, true), - - // these next 3 can happen in recursion-limited LL(*) - 
//RECURSION_OVERFLOW(ErrorSeverity.ERROR, true, true), - LEFT_RECURSION_CYCLES(ErrorSeverity.ERROR, true, true), - ANALYSIS_TIMEOUT(ErrorSeverity.ERROR, true, true), - - MODE_NOT_IN_LEXER(ErrorSeverity.ERROR, true, true), - - /** Documentation comment is unterminated */ - //UNTERMINATED_DOC_COMMENT(ErrorSeverity.ERROR, true, true), - - // Dependency sorting errors - // - /** t1.g -> t2.g -> t3.g ->t1.g */ - CIRCULAR_DEPENDENCY(ErrorSeverity.ERROR, true, true), - - // Simple informational messages - // - /** A standby generic message that jsut spits out the arguments it is given */ -// GENERIC_INFO(ErrorSeverity.INFO, false, false), -// /** How to print out the version of the ANTLR tool that we are */ -// ANTLR_VERSION(ErrorSeverity.INFO, false, false), -// -// // Command line tool errors/warnings -// /** -fo option was incorrectly formed */ -// MISSING_OUTPUT_FO(ErrorSeverity.WARNING, false, false), -// /** -lib option is missing a directory argument */ -// MISSING_LIBDIR(ErrorSeverity.WARNING, false, false), -// /** -format option was not given the name of a message format */ -// MISSING_FORMAT(ErrorSeverity.WARNING, false, false), -// /** Max state count missing from the option */ -// MISSING_MAXSTATES(ErrorSeverity.WARNING, false, false), -// /** Max labels in a switch argument is missing */ -// MISSING_MAXSWITCH(ErrorSeverity.WARNING, false, false), -// /** Min labels in a switch argument is missing */ -// MISSING_MINSWITCH(ErrorSeverity.WARNING, false, false), -// /** Missing recursion limit argument */ -// MISSING_MAXRECUR(ErrorSeverity.WARNING, false, false), -// /** Missing max edges argument */ -// MISSING_MAXEDGE(ErrorSeverity.WARNING, false, false), -// /** Misng ms timeout argument */ -// MISSING_MAXTIME(ErrorSeverity.WARNING, false, false), -// -// // Help messages -// HELP_USAGE(ErrorSeverity.INFO, false, false), -// HELP_EXTENDED(ErrorSeverity.INFO, false, false), - - ; - - /** - * Local storage for the severity level of the message - */ - private ErrorSeverity severity; - - /** - * Returns the severity level of this message - * @return - */ - public ErrorSeverity getSeverity() { - return severity; - } - - /** - * Internal storage for the flag that indicates whether this particular message - * should abort the analysis phase or not. - */ - private Boolean abortsAnalysis; - - /** - * Indicates whether the raising of this error messsage should abort the - * analysis phase (or prevent it from starting). - * - * @return true if this message should abort the analysis phase - */ - public Boolean abortsAnalysis() { - return abortsAnalysis; - } - - /** - * Indicates whether the raising of this error message aborts code - * generation or not. - */ - private Boolean abortsCodegen; - - /** - * Indicates whether the raising of this error message aborts code - * generation or not. 
- * - * @return true if this message should abort code generation - */ - public Boolean abortsCodegen() { - return abortsCodegen; - } - - /** - * Local constructor produces an instance of the entries in this Enum - */ - private ErrorType(ErrorSeverity severity, boolean abortsAnalysis, boolean abortsCodegen) { - this.severity = severity; - this.abortsAnalysis = abortsAnalysis; - - } -} diff --git a/tool/src/org/antlr/v4/tool/Grammar.java b/tool/src/org/antlr/v4/tool/Grammar.java deleted file mode 100644 index fde473fae..000000000 --- a/tool/src/org/antlr/v4/tool/Grammar.java +++ /dev/null @@ -1,615 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.TreeWizard; -import org.antlr.v4.Tool; -import org.antlr.v4.automata.DFA; -import org.antlr.v4.automata.Label; -import org.antlr.v4.automata.NFA; -import org.antlr.v4.misc.CharSupport; -import org.antlr.v4.misc.IntSet; -import org.antlr.v4.misc.IntervalSet; -import org.antlr.v4.misc.Utils; -import org.antlr.v4.parse.ANTLRLexer; -import org.antlr.v4.parse.ANTLRParser; -import org.antlr.v4.parse.GrammarASTAdaptor; -import org.antlr.v4.parse.ToolANTLRParser; - -import java.util.*; - -public class Grammar implements AttributeResolver { - public static final Set doNotCopyOptionsToLexer = - new HashSet() { - { - add("output"); add("ASTLabelType"); add("superClass"); - add("k"); add("backtrack"); add("memoize"); add("rewrite"); - } - }; - - public static Map grammarAndLabelRefTypeToScope = - new HashMap() {{ - put("lexer:RULE_LABEL", Rule.predefinedLexerRulePropertiesDict); - put("lexer:LEXER_STRING_LABEL", Rule.predefinedLexerRulePropertiesDict); - put("lexer:TOKEN_LABEL", AttributeDict.predefinedTokenDict); - put("parser:RULE_LABEL", Rule.predefinedRulePropertiesDict); - put("parser:TOKEN_LABEL", AttributeDict.predefinedTokenDict); - put("tree:RULE_LABEL", Rule.predefinedTreeRulePropertiesDict); - put("tree:TOKEN_LABEL", AttributeDict.predefinedTokenDict); - put("tree:WILDCARD_TREE_LABEL", AttributeDict.predefinedTokenDict); - put("combined:RULE_LABEL", Rule.predefinedRulePropertiesDict); - put("combined:TOKEN_LABEL", AttributeDict.predefinedTokenDict); - }}; - - public String name; - public GrammarRootAST ast; - public String text; // testing only - public String fileName; - - /** Was this created from a COMBINED grammar? */ - public Grammar implicitLexer; - public Grammar implicitLexerOwner; - - /** If we're imported, who imported us? If null, implies grammar is root */ - public Grammar parent; - public List importedGrammars; - public Map rules = new LinkedHashMap(); - int ruleNumber = 1; - - /** The NFA that represents the grammar with edges labelled with tokens - * or epsilon. It is more suitable to analysis than an AST representation. - */ - public NFA nfa; - - public Map decisionDFAs = new HashMap(); - - public Tool tool; - - /** Token names and literal tokens like "void" are uniquely indexed. - * with -1 implying EOF. Characters are different; they go from - * -1 (EOF) to \uFFFE. For example, 0 could be a binary byte you - * want to lexer. Labels of DFA/NFA transitions can be both tokens - * and characters. I use negative numbers for bookkeeping labels - * like EPSILON. Char/String literals and token types overlap in the same - * space, however. - */ - int maxTokenType = Token.MIN_TOKEN_TYPE-1; - - /** Map token like ID (but not literals like "while") to its token type */ - public Map tokenNameToTypeMap = new LinkedHashMap(); - - /** Map token literals like "while" to its token type. 
It may be that - * WHILE="while"=35, in which case both tokenIDToTypeMap and this - * field will have entries both mapped to 35. - */ - public Map stringLiteralToTypeMap = new LinkedHashMap(); - /** Reverse index for stringLiteralToTypeMap */ - public Vector typeToStringLiteralList = new Vector(); - - /** Map a token type to its token name. - * Must subtract MIN_TOKEN_TYPE from index. - */ - public Vector typeToTokenList = new Vector(); - - /** Map a name to an action. - * The code generator will use this to fill holes in the output files. - * I track the AST node for the action in case I need the line number - * for errors. - */ - public Map namedActions = new HashMap(); - ////public DoubleKeyMap namedActions = new DoubleKeyMap(); - - public Map scopes = new LinkedHashMap(); - public static final String AUTO_GENERATED_TOKEN_NAME_PREFIX = "T__"; - - - public Grammar(Tool tool, GrammarRootAST ast) { - if ( ast==null ) throw new IllegalArgumentException("can't pass null tree"); - this.tool = tool; - this.ast = ast; - this.name = ((GrammarAST)ast.getChild(0)).getText(); - initTokenSymbolTables(); - } - - /** For testing */ - public Grammar(String grammarText) throws RecognitionException { - this("", grammarText, null); - } - - /** For testing */ - public Grammar(String grammarText, ANTLRToolListener listener) - throws RecognitionException - { - this("", grammarText, listener); - } - - /** For testing; only builds trees; no sem anal */ - public Grammar(String fileName, String grammarText, ANTLRToolListener listener) - throws RecognitionException - { - this.text = grammarText; - this.fileName = fileName; - this.tool = new Tool(); - this.tool.addListener(listener); - ANTLRStringStream in = new ANTLRStringStream(grammarText); - in.name = fileName; - ANTLRLexer lexer = new ANTLRLexer(in); - CommonTokenStream tokens = new CommonTokenStream(lexer); - ToolANTLRParser p = new ToolANTLRParser(tokens,tool); - p.setTreeAdaptor(new GrammarASTAdaptor(in)); - ParserRuleReturnScope r = p.grammarSpec(); - if ( r.getTree() instanceof GrammarRootAST ) { - this.ast = (GrammarRootAST)r.getTree(); - this.ast.hasErrors = p.getNumberOfSyntaxErrors()>0; - this.name = ((GrammarAST)ast.getChild(0)).getText(); - } - initTokenSymbolTables(); - } - - protected void initTokenSymbolTables() { - // the faux token types take first NUM_FAUX_LABELS positions - // then we must have room for the predefined runtime token types - // like DOWN/UP used for tree parsing. 
- typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Token.MIN_TOKEN_TYPE-1); - typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, ""); - typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, ""); -// typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, ""); -// typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, ""); -// typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR); - typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF"); - typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "EOR"); - if ( isTreeGrammar() ) { - typeToTokenList.set(Label.NUM_FAUX_LABELS+Token.DOWN-1, "DOWN"); - typeToTokenList.set(Label.NUM_FAUX_LABELS+Token.UP-1, "UP"); - } - tokenNameToTypeMap.put("", Label.INVALID); -// tokenNameToTypeMap.put("", Label.ACTION); -// tokenNameToTypeMap.put("", Label.EPSILON); -// tokenNameToTypeMap.put("", Label.SEMPRED); -// tokenNameToTypeMap.put("", Label.SET); - tokenNameToTypeMap.put("", Label.EOT); - tokenNameToTypeMap.put("EOF", Label.EOF); - tokenNameToTypeMap.put("EOR", Label.EOR_TOKEN_TYPE); - if ( isTreeGrammar() ) { - tokenNameToTypeMap.put("DOWN", Token.DOWN); - tokenNameToTypeMap.put("UP", Token.UP); - } - } - - public void loadImportedGrammars() { - if ( ast==null ) return; - GrammarAST i = (GrammarAST)ast.getFirstChildWithType(ANTLRParser.IMPORT); - if ( i==null ) return; - importedGrammars = new ArrayList(); - for (Object c : i.getChildren()) { - GrammarAST t = (GrammarAST)c; - String importedGrammarName = null; - if ( t.getType()==ANTLRParser.ASSIGN ) { - importedGrammarName = t.getChild(1).getText(); - System.out.println("import "+ importedGrammarName); - } - else if ( t.getType()==ANTLRParser.ID ) { - importedGrammarName = t.getText(); - System.out.println("import "+t.getText()); - } - try { - GrammarAST root = tool.load(importedGrammarName+".g"); - if ( root instanceof GrammarASTErrorNode ) return; // came back as error node - GrammarRootAST ast = (GrammarRootAST)root; - Grammar g = tool.createGrammar(ast); - g.fileName = importedGrammarName+".g"; - g.parent = this; - importedGrammars.add(g); - } - catch (Exception e) { - System.err.println("can't load grammar "+importedGrammarName); - } - } - } - - public void defineAction(GrammarAST atAST) { - if ( atAST.getChildCount()==2 ) { - String name = atAST.getChild(0).getText(); - namedActions.put(name, (ActionAST)atAST.getChild(1)); - } - else { - String scope = atAST.getChild(0).getText(); - if ( scope.equals(getTypeString()) ) { - String name = atAST.getChild(1).getText(); - namedActions.put(name, (ActionAST)atAST.getChild(2)); - } - } - } - - public void defineRule(Rule r) { - if ( rules.get(r.name)!=null ) return; - rules.put(r.name, r); - r.index = ruleNumber++; - } - -// public int getNumRules() { -// int n = rules.size(); -// List imports = getAllImportedGrammars(); -// if ( imports!=null ) { -// for (Grammar g : imports) n += g.getNumRules(); -// } -// return n; -// } - - public Rule getRule(String name) { - Rule r = rules.get(name); - if ( r!=null ) return r; - List imports = getAllImportedGrammars(); - if ( imports==null ) return null; - for (Grammar g : imports) { - r = g.rules.get(name); - if ( r!=null ) return r; - } - return null; - } - - public Rule getRule(String grammarName, String ruleName) { - if ( grammarName!=null ) { // scope override - Grammar g = getImportedGrammar(grammarName); - if ( g ==null ) { - return null; - } - return g.rules.get(ruleName); - } - return getRule(ruleName); - } - - public void defineScope(AttributeDict s) { 
scopes.put(s.getName(), s); } - - /** Get list of all imports from all grammars in the delegate subtree of g. - * The grammars are in import tree preorder. Don't include ourselves - * in list as we're not a delegate of ourselves. - */ - public List getAllImportedGrammars() { - if ( importedGrammars==null ) return null; - List delegates = new ArrayList(); - for (int i = 0; i < importedGrammars.size(); i++) { - Grammar d = importedGrammars.get(i); - delegates.add(d); - List ds = d.getAllImportedGrammars(); - if ( ds!=null ) delegates.addAll( ds ); - } - return delegates; - } - - public List getImportedGrammars() { return importedGrammars; } - - /** Get delegates below direct delegates of g - public List getIndirectDelegates(Grammar g) { - List direct = getDirectDelegates(g); - List delegates = getDelegates(g); - delegates.removeAll(direct); - return delegates; - } -*/ - - /** Return list of imported grammars from root down to our parent. - * Order is [root, ..., this.parent]. (us not included). - */ - public List getGrammarAncestors() { - Grammar root = getOutermostGrammar(); - if ( this==root ) return null; - List grammars = new ArrayList(); - // walk backwards to root, collecting grammars - Grammar p = this.parent; - while ( p!=null ) { - grammars.add(0, p); // add to head so in order later - p = p.parent; - } - return grammars; - } - - /** Return the grammar that imported us and our parents. Return this - * if we're root. - */ - public Grammar getOutermostGrammar() { - if ( parent==null ) return this; - return parent.getOutermostGrammar(); - } - - /** Get the name of the generated recognizer; may or may not be same - * as grammar name. - * Recognizer is TParser and TLexer from T if combined, else - * just use T regardless of grammar type. - */ - public String getRecognizerName() { - String suffix = ""; - List grammarsFromRootToMe = getOutermostGrammar().getGrammarAncestors(); - String qualifiedName = name; - if ( grammarsFromRootToMe!=null ) { - StringBuffer buf = new StringBuffer(); - for (Grammar g : grammarsFromRootToMe) { - buf.append(g.name); - buf.append('_'); - } - buf.append(name); - qualifiedName = buf.toString(); - } - if ( isCombined() || (isLexer() && implicitLexer!=null) ) - { - suffix = Grammar.getGrammarTypeToFileNameSuffix(getType()); - } - return qualifiedName+suffix; - } - - public String getStringLiteralLexerRuleName(String lit) { - int ttype = getTokenType(lit); - return AUTO_GENERATED_TOKEN_NAME_PREFIX +ttype; - } - - /** Return grammar directly imported by this grammar */ - public Grammar getImportedGrammar(String name) { - for (Grammar g : importedGrammars) { - if ( g.name.equals(name) ) return g; - } - return null; - } - - public int getTokenType(String token) { - Integer I = null; - if ( token.charAt(0)=='\'') { - I = stringLiteralToTypeMap.get(token); - } - else { // must be a label like ID - I = tokenNameToTypeMap.get(token); - } - int i = (I!=null)?I.intValue(): Label.INVALID; - //System.out.println("grammar type "+type+" "+tokenName+"->"+i); - return i; - } - - /** Given a token type, get a meaningful name for it such as the ID - * or string literal. If this is a lexer and the ttype is in the - * char vocabulary, compute an ANTLR-valid (possibly escaped) char literal. - */ - public String getTokenDisplayName(int ttype) { - String tokenName = null; - int index=0; - // inside any target's char range and is lexer grammar? 
- if ( isLexer() && - ttype >= Label.MIN_CHAR_VALUE && ttype <= Label.MAX_CHAR_VALUE ) - { - return CharSupport.getANTLRCharLiteralForChar(ttype); - } - // faux label? - else if ( ttype<0 ) { - tokenName = typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype); - } - else { - // compute index in typeToTokenList for ttype - index = ttype-1; // normalize to 0..n-1 - index += Label.NUM_FAUX_LABELS; // jump over faux tokens - - if ( index"; - } - } - - public String getOption(String key) { - if ( ast.options==null ) return null; - return ast.options.get(key); - } - - public String getOption(String key, String defaultValue) { - if ( ast.options==null ) return defaultValue; - String v = ast.options.get(key); - if ( v!=null ) return v; - return defaultValue; - } - - public static Map getStringLiteralAliasesFromLexerRules(GrammarRootAST ast) { - GrammarAST combinedRulesRoot = - (GrammarAST)ast.getFirstChildWithType(ANTLRParser.RULES); - if ( combinedRulesRoot==null ) return null; - - List ruleNodes = combinedRulesRoot.getChildren(); - if ( ruleNodes==null || ruleNodes.size()==0 ) return null; - GrammarASTAdaptor adaptor = new GrammarASTAdaptor(ruleNodes.get(0).token.getInputStream()); - TreeWizard wiz = new TreeWizard(adaptor,ANTLRParser.tokenNames); - Map lexerRuleToStringLiteral = new HashMap(); - - for (GrammarASTWithOptions r : ruleNodes) { - String ruleName = r.getChild(0).getText(); - if ( Character.isUpperCase(ruleName.charAt(0)) ) { - Map nodes = new HashMap(); - boolean isLitRule = - wiz.parse(r, "(RULE %name:ID (BLOCK (ALT %lit:STRING_LITERAL)))", nodes); - if ( isLitRule ) { - GrammarAST litNode = (GrammarAST)nodes.get("lit"); - GrammarAST nameNode = (GrammarAST)nodes.get("name"); - lexerRuleToStringLiteral.put(litNode.getText(), nameNode.getText()); - } - } - } - return lexerRuleToStringLiteral; - } - - public void setLookaheadDFA(int decision, DFA lookaheadDFA) { - decisionDFAs.put(Utils.integer(decision), lookaheadDFA); - } -} \ No newline at end of file diff --git a/tool/src/org/antlr/v4/tool/GrammarAST.java b/tool/src/org/antlr/v4/tool/GrammarAST.java deleted file mode 100644 index f160655d9..000000000 --- a/tool/src/org/antlr/v4/tool/GrammarAST.java +++ /dev/null @@ -1,75 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.CommonToken; -import org.antlr.runtime.Token; -import org.antlr.runtime.tree.Tree; -import org.antlr.v4.automata.NFAState; -import org.antlr.v4.misc.BitSet; -import org.antlr.v4.parse.ANTLRParser; -import org.antlr.v4.runtime.tree.CommonTree; - -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; - -public class GrammarAST extends CommonTree { - /** If we build an NFA, we make AST node point at left edge of NFA construct */ - public NFAState nfaState; - - public GrammarAST() {;} - public GrammarAST(Token t) { super(t); } - public GrammarAST(GrammarAST node) { super(node); } - public GrammarAST(int type) { super(new CommonToken(type, ANTLRParser.tokenNames[type])); } - public GrammarAST(int type, Token t) { - this(new CommonToken(type, t.getText())); - token.setInputStream(t.getInputStream()); - token.setLine(t.getLine()); - token.setCharPositionInLine(t.getCharPositionInLine()); - } - public GrammarAST(int type, Token t, String text) { - this(new CommonToken(type, text)); - token.setInputStream(t.getInputStream()); - token.setLine(t.getLine()); - token.setCharPositionInLine(t.getCharPositionInLine()); - } - - public List getNodesWithType(int ttype) { - return getNodesWithType(BitSet.of(ttype)); - } - - public List 
getNodesWithType(BitSet types) { - List nodes = new ArrayList(); - List work = new LinkedList(); - work.add(this); - GrammarAST t = null; - while ( work.size()>0 ) { - t = work.remove(0); - if ( types.member(t.getType()) ) nodes.add(t); - if ( t.children!=null ) work.addAll(t.children); - } - return nodes; - } - - public AltAST getOutermostAltNode() { - if ( this instanceof AltAST && parent.parent instanceof RuleAST ) { - return (AltAST)this; - } - if ( parent!=null ) return ((GrammarAST)parent).getOutermostAltNode(); - return null; - } - -// @Override -// public boolean equals(Object obj) { -// return super.equals(obj); -// } - - @Override - public Tree dupNode() { - return new GrammarAST(this); - } - - @Override - public String toString() { - return super.toString(); - } -} diff --git a/tool/src/org/antlr/v4/tool/GrammarASTErrorNode.java b/tool/src/org/antlr/v4/tool/GrammarASTErrorNode.java deleted file mode 100644 index 71ca504d7..000000000 --- a/tool/src/org/antlr/v4/tool/GrammarASTErrorNode.java +++ /dev/null @@ -1,23 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.RecognitionException; -import org.antlr.runtime.Token; -import org.antlr.runtime.TokenStream; -import org.antlr.runtime.tree.CommonErrorNode; - -/** A node representing erroneous token range in token stream */ -public class GrammarASTErrorNode extends GrammarAST { - CommonErrorNode delegate; - public GrammarASTErrorNode(TokenStream input, Token start, Token stop, - RecognitionException e) - { - delegate = new CommonErrorNode(input,start,stop,e); - } - - public boolean isNil() { return delegate.isNil(); } - - public int getType() { return delegate.getType(); } - - public String getText() { return delegate.getText(); } - public String toString() { return delegate.toString(); } -} diff --git a/tool/src/org/antlr/v4/tool/GrammarASTWithOptions.java b/tool/src/org/antlr/v4/tool/GrammarASTWithOptions.java deleted file mode 100644 index a8b70224f..000000000 --- a/tool/src/org/antlr/v4/tool/GrammarASTWithOptions.java +++ /dev/null @@ -1,32 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; - -import java.util.HashMap; -import java.util.Map; - -public class GrammarASTWithOptions extends GrammarAST { - protected Map options; - - public GrammarASTWithOptions(GrammarAST node) { - super(node); - this.options = ((GrammarASTWithOptions)node).options; - } - - public GrammarASTWithOptions(Token t) { super(t); } - public GrammarASTWithOptions(int type) { super(type); } - public GrammarASTWithOptions(int type, Token t) { super(type, t); } - public GrammarASTWithOptions(int type, Token t, String text) { super(type,t,text); } - - public void setOption(String key, String value) { - if ( options==null ) options = new HashMap(); - options.put(key, value); - } - - public String getOption(String key) { - if ( options==null ) return null; - return options.get(key); - } - - public Map getOptions() { return options; } -} diff --git a/tool/src/org/antlr/v4/tool/GrammarRootAST.java b/tool/src/org/antlr/v4/tool/GrammarRootAST.java deleted file mode 100644 index 5b83543a3..000000000 --- a/tool/src/org/antlr/v4/tool/GrammarRootAST.java +++ /dev/null @@ -1,34 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; -import org.antlr.runtime.tree.Tree; - -import java.util.HashMap; -import java.util.Map; - -public class GrammarRootAST extends GrammarASTWithOptions { - public static final Map defaultOptions = - new HashMap() { - { - put("language","Java"); - } - }; - public int grammarType; // LEXER, PARSER, 
TREE, GRAMMAR (combined) - public boolean hasErrors; - - public GrammarRootAST(GrammarAST node) { - super(node); - this.grammarType = ((GrammarRootAST)node).grammarType; - this.hasErrors = ((GrammarRootAST)node).hasErrors; - } - - @Override - public Tree dupNode() { return new GrammarRootAST(this); } - - public GrammarRootAST(int type) { super(type); } - public GrammarRootAST(Token t) { super(t); } - public GrammarRootAST(int type, Token t) { super(type, t); } - public GrammarRootAST(int type, Token t, String text) { - super(type,t,text); - } -} diff --git a/tool/src/org/antlr/v4/tool/GrammarSemanticsMessage.java b/tool/src/org/antlr/v4/tool/GrammarSemanticsMessage.java deleted file mode 100644 index 7f5405925..000000000 --- a/tool/src/org/antlr/v4/tool/GrammarSemanticsMessage.java +++ /dev/null @@ -1,45 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; - -/** A problem with the symbols and/or meaning of a grammar such as rule - * redefinition. - */ -public class GrammarSemanticsMessage extends Message { - public Grammar g; - /** Most of the time, we'll have a token such as an undefined rule ref - * and so this will be set. - */ - public Token offendingToken; - - /* - public GrammarSemanticsMessage(ErrorType etype, - Grammar g, - Token offendingToken, - Object... args) - { - super(etype,args); - this.g = g; - if ( g!=null ) fileName = g.fileName; - this.offendingToken = offendingToken; - if ( offendingToken!=null ) { - line = offendingToken.getLine(); - charPosition = offendingToken.getCharPositionInLine(); - } - } -*/ - public GrammarSemanticsMessage(ErrorType etype, - String fileName, - Token offendingToken, - Object... args) - { - super(etype,args); - this.fileName = fileName; - this.offendingToken = offendingToken; - if ( offendingToken!=null ) { - line = offendingToken.getLine(); - charPosition = offendingToken.getCharPositionInLine(); - } - } -} - diff --git a/tool/src/org/antlr/v4/tool/GrammarSyntaxMessage.java b/tool/src/org/antlr/v4/tool/GrammarSyntaxMessage.java deleted file mode 100644 index 3f4b1c80c..000000000 --- a/tool/src/org/antlr/v4/tool/GrammarSyntaxMessage.java +++ /dev/null @@ -1,30 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.RecognitionException; -import org.antlr.runtime.Token; - -/** A problem with the syntax of your antlr grammar such as - * "The '{' came as a complete surprise to me at this point in your program" - */ -public class GrammarSyntaxMessage extends Message { - public Grammar g; - /** Most of the time, we'll have a token and so this will be set. */ - public Token offendingToken; - public RecognitionException antlrException; - - public GrammarSyntaxMessage(ErrorType etype, - String fileName, - Token offendingToken, - RecognitionException antlrException, - Object... 
args) - { - super(etype,args); - this.fileName = fileName; - this.offendingToken = offendingToken; - this.antlrException = antlrException; - if ( offendingToken!=null ) { - line = offendingToken.getLine(); - charPosition = offendingToken.getCharPositionInLine(); - } - } -} diff --git a/tool/src/org/antlr/v4/tool/InsufficientPredicatesMessage.java b/tool/src/org/antlr/v4/tool/InsufficientPredicatesMessage.java deleted file mode 100644 index a89f39e1b..000000000 --- a/tool/src/org/antlr/v4/tool/InsufficientPredicatesMessage.java +++ /dev/null @@ -1,39 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; -import org.antlr.v4.automata.DFAState; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** */ -public class InsufficientPredicatesMessage extends Message { - public DFAState d; - public String input; - public Map> incompletelyCoveredAlts; - - public InsufficientPredicatesMessage(ErrorType etype, - String fileName, - DFAState d, - String input, - Map> incompletelyCoveredAlts, - boolean hasPredicateBlockedByAction) - { - super(etype); - this.fileName = fileName; - this.d = d; - this.input = input; - this.incompletelyCoveredAlts = incompletelyCoveredAlts; - - this.line = d.dfa.decisionNFAStartState.ast.getLine(); - this.charPosition = d.dfa.decisionNFAStartState.ast.getCharPositionInLine(); - - Map info = new HashMap(); - info.put("dfaState", d); - info.put("input", input); - info.put("altToLocations", incompletelyCoveredAlts); - info.put("hasPredicateBlockedByAction", hasPredicateBlockedByAction); - args = new Object[] {info}; // pass this whole object in to message - } -} diff --git a/tool/src/org/antlr/v4/tool/LabelElementPair.java b/tool/src/org/antlr/v4/tool/LabelElementPair.java deleted file mode 100644 index e76171e35..000000000 --- a/tool/src/org/antlr/v4/tool/LabelElementPair.java +++ /dev/null @@ -1,48 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.BitSet; -import org.antlr.v4.parse.ANTLRParser; - -public class LabelElementPair { - public static final BitSet tokenTypeForTokens = new BitSet(); - static { - tokenTypeForTokens.add(ANTLRParser.TOKEN_REF); - tokenTypeForTokens.add(ANTLRParser.STRING_LITERAL); - tokenTypeForTokens.add(ANTLRParser.WILDCARD); - } - - public GrammarAST label; - public GrammarAST element; - public LabelType type; - - public LabelElementPair(Grammar g, GrammarAST label, GrammarAST element, int labelOp) { - this.label = label; - this.element = element; - // compute general case for label type - if ( element.getFirstDescendantWithType(tokenTypeForTokens)!=null ) { - if ( labelOp==ANTLRParser.ASSIGN ) type = LabelType.TOKEN_LABEL; - else type = LabelType.TOKEN_LIST_LABEL; - } - else if ( element.getFirstDescendantWithType(ANTLRParser.RULE_REF)!=null ) { - if ( labelOp==ANTLRParser.ASSIGN ) type = LabelType.RULE_LABEL; - else type = LabelType.RULE_LIST_LABEL; - } - - // now reset if lexer and string - if ( g.isLexer() ) { - if ( element.getFirstDescendantWithType(ANTLRParser.STRING_LITERAL)!=null ) { - if ( labelOp==ANTLRParser.ASSIGN ) type = LabelType.LEXER_STRING_LABEL; - } - } - else if ( g.isTreeGrammar() ) { - if ( element.getFirstDescendantWithType(ANTLRParser.WILDCARD)!=null ) { - if ( labelOp==ANTLRParser.ASSIGN ) type = LabelType.WILDCARD_TREE_LABEL; - else type = LabelType.WILDCARD_TREE_LIST_LABEL; - } - } - } - - public String toString() { - return label.getText()+" "+type+" "+element.toString(); - } -} diff --git a/tool/src/org/antlr/v4/tool/LabelType.java 
b/tool/src/org/antlr/v4/tool/LabelType.java deleted file mode 100644 index e60367211..000000000 --- a/tool/src/org/antlr/v4/tool/LabelType.java +++ /dev/null @@ -1,15 +0,0 @@ -package org.antlr.v4.tool; - -/** the various kinds of labels. t=type, id=ID, types+=type ids+=ID */ -public enum LabelType { - RULE_LABEL, - TOKEN_LABEL, - RULE_LIST_LABEL, - TOKEN_LIST_LABEL, - LEXER_STRING_LABEL, // used in lexer for x='a' - SUBRULE_LABEL, // x=(...) - SUBRULE_LIST_LABEL, // x+=(...) - WILDCARD_TREE_LABEL, // Used in tree grammar x=. - WILDCARD_TREE_LIST_LABEL // Used in tree grammar x+=. - ; -} diff --git a/tool/src/org/antlr/v4/tool/LeftRecursionCyclesMessage.java b/tool/src/org/antlr/v4/tool/LeftRecursionCyclesMessage.java deleted file mode 100644 index f77d9649e..000000000 --- a/tool/src/org/antlr/v4/tool/LeftRecursionCyclesMessage.java +++ /dev/null @@ -1,13 +0,0 @@ -package org.antlr.v4.tool; - -import java.util.Collection; - -public class LeftRecursionCyclesMessage extends Message { - public Collection cycles; - - public LeftRecursionCyclesMessage(String fileName, Collection cycles) { - super(ErrorType.LEFT_RECURSION_CYCLES, cycles); - this.cycles = cycles; - this.fileName = fileName; - } -} diff --git a/tool/src/org/antlr/v4/tool/LexerGrammar.java b/tool/src/org/antlr/v4/tool/LexerGrammar.java deleted file mode 100644 index 92797bdc4..000000000 --- a/tool/src/org/antlr/v4/tool/LexerGrammar.java +++ /dev/null @@ -1,40 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.misc.MultiMap; -import org.antlr.runtime.RecognitionException; -import org.antlr.v4.Tool; -import org.antlr.v4.automata.DFA; - -import java.util.HashMap; -import java.util.Map; - -/** */ -public class LexerGrammar extends Grammar { - public static final String DEFAULT_MODE_NAME = "DEFAULT_MODE"; - - public MultiMap modes = new MultiMap(); - - public Map modeToDFA = new HashMap(); - - public LexerGrammar(Tool tool, GrammarRootAST ast) { - super(tool, ast); - } - - public LexerGrammar(String grammarText) throws RecognitionException { - super(grammarText); - } - - public LexerGrammar(String grammarText, ANTLRToolListener listener) throws RecognitionException { - super(grammarText, listener); - } - - public LexerGrammar(String fileName, String grammarText, ANTLRToolListener listener) throws RecognitionException { - super(fileName, grammarText, listener); - } - - @Override - public void defineRule(Rule r) { - super.defineRule(r); - modes.map(r.mode, r); - } -} diff --git a/tool/src/org/antlr/v4/tool/Message.java b/tool/src/org/antlr/v4/tool/Message.java deleted file mode 100644 index 77dc00ae5..000000000 --- a/tool/src/org/antlr/v4/tool/Message.java +++ /dev/null @@ -1,92 +0,0 @@ -package org.antlr.v4.tool; - -import java.util.Arrays; - -public class Message { - public ErrorType errorType; - public Object[] args; - public Throwable e; - - // used for location template - public String fileName; - public int line = -1; - public int charPosition = -1; - - public Message() { - } - - public Message(ErrorType errorType) { - this.errorType = errorType; - } - - public Message(ErrorType errorType, Object... args) { - this(errorType); - this.args = args; - } - - public Message(ErrorType errorType, Throwable e, Object... args) { - this(errorType, args); - this.e = e; - } - - /** Return a new template instance every time someone tries to print - * a Message. 
- */ - /* - public ST getMessageTemplate() { - ST messageST = ErrorManager.getMessageTemplate(errorType); - ST locationST = ErrorManager.getLocationFormat(); - ST reportST = ErrorManager.getReportFormat(errorType.getSeverity()); - ST messageFormatST = ErrorManager.getMessageFormat(); - - if ( args!=null ) { // fill in arg1, arg2, ... - for (int i=0; i0 ) attr += i + 1; - messageST.add(attr, args[i]); - } - } - if ( e!=null ) { - messageST.add("exception", e); - messageST.add("stackTrace", e.getStackTrace()); - } - - boolean locationValid = false; - if (line != -1) { - locationST.add("line", line); - locationValid = true; - } - if (charPosition != -1) { - locationST.add("column", charPosition); - locationValid = true; - } - if (fileName != null) { - locationST.add("file", fileName); - locationValid = true; - } - - messageFormatST.add("id", errorType.ordinal()); - messageFormatST.add("text", messageST); - - if (locationValid) reportST.add("location", locationST); - reportST.add("message", messageFormatST); - //((DebugST)reportST).inspect(); - //reportST.impl.dump(); - return reportST; - } - */ - @Override - public String toString() { - return "Message{" + - "errorType=" + errorType + - ", args=" + (args == null ? null : Arrays.asList(args)) + - ", e=" + e + - ", fileName='" + fileName + '\'' + - ", line=" + line + - ", charPosition=" + charPosition + - '}'; - } -} diff --git a/tool/src/org/antlr/v4/tool/Rule.java b/tool/src/org/antlr/v4/tool/Rule.java deleted file mode 100644 index b9d87a807..000000000 --- a/tool/src/org/antlr/v4/tool/Rule.java +++ /dev/null @@ -1,273 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; -import org.stringtemplate.v4.misc.MultiMap; - -import java.util.*; - -public class Rule implements AttributeResolver { - /** Rule refs have a predefined set of attributes as well as - * the return values and args. - */ - public static AttributeDict predefinedRulePropertiesDict = - new AttributeDict(AttributeDict.DictType.PREDEFINED_RULE) {{ - add(new Attribute("text")); - add(new Attribute("start")); - add(new Attribute("stop")); - add(new Attribute("tree")); - add(new Attribute("st")); - }}; - - public static AttributeDict predefinedTreeRulePropertiesDict = - new AttributeDict(AttributeDict.DictType.PREDEFINED_TREE_RULE) {{ - add(new Attribute("text")); - add(new Attribute("start")); // note: no stop; not meaningful - add(new Attribute("tree")); - add(new Attribute("st")); - }}; - - public static AttributeDict predefinedLexerRulePropertiesDict = - new AttributeDict(AttributeDict.DictType.PREDEFINED_LEXER_RULE) {{ - add(new Attribute("text")); - add(new Attribute("type")); - add(new Attribute("line")); - add(new Attribute("index")); - add(new Attribute("pos")); - add(new Attribute("channel")); - add(new Attribute("start")); - add(new Attribute("stop")); - add(new Attribute("int")); - }}; - - public String name; - public List modifiers; - - public RuleAST ast; - public AttributeDict args; - public AttributeDict retvals; - public AttributeDict scope; // scope { int i; } - - /** A list of scope names used by this rule */ - public List useScopes; - - public Grammar g; - - /** If we're in a lexer grammar, we might be in a mode */ - public String mode; - - /** Map a name to an action for this rule like @init {...}. - * The code generator will use this to fill holes in the rule template. - * I track the AST node for the action in case I need the line number - * for errors. 
- */ - public Map namedActions = - new HashMap(); - - /** Track exception handler actions (exception type is prev child); - * don't track finally action - */ - public List exceptionActions = new ArrayList(); - - public ActionAST finallyAction; - - public int numberOfAlts; - - public boolean isStartRule = true; // nobody calls us - - /** Labels are visible to all alts in a rule. Record all defs here. - * We need to ensure labels are used to track same kind of symbols. - * Tracks all label defs for a label. - public MultiMap labelDefs = - new MultiMap(); - */ - - public Alternative[] alt; - - /** All rules have unique index 1..n */ - public int index; - - public Rule(Grammar g, String name, RuleAST ast, int numberOfAlts) { - this.g = g; - this.name = name; - this.ast = ast; - this.numberOfAlts = numberOfAlts; - alt = new Alternative[numberOfAlts+1]; // 1..n - for (int i=1; i<=numberOfAlts; i++) alt[i] = new Alternative(this); - } - - /** Is isolated x an arg, retval, predefined prop? */ -// public boolean resolves(String x, ActionAST node) { -// if ( resolvesAsRetvalOrProperty(x) ) return true; -// if ( args.get(x)!=null ) return true; -// // resolve outside of an alt? -// if ( node.resolver instanceof Alternative ) return getParent().resolves(x, node); -// if ( getLabelNames().contains(x) ) return true; // can see all labels if not in alt -// return getParent().resolves(x, node); -// } -// -// /** For $x.y, is x an arg, retval, predefined prop, token/rule/label ref? -// * If so, make sure y resolves within that perspective. -// * For $x::y, is x this rule or another? If so, is y in that scope? -// */ -// public boolean resolves(String x, String y, ActionAST node) { -// Rule r = resolveRule(x, node); -// if ( r!=null ) return r.resolvesAsRetvalOrProperty(y); -// return getParent().resolves(x,y,node); -// } - - public Attribute resolveRetvalOrProperty(String y) { - if ( retvals!=null ) { - Attribute a = retvals.get(y); - if ( a!=null ) return a; - } - AttributeDict d = getPredefinedScope(LabelType.RULE_LABEL); - return d.get(y); - } - - // TODO: move to code gen InvokeRule function? is only place we ref? - public Set getRuleRefs() { - Set refs = new HashSet(); - for (int i=1; i<=numberOfAlts; i++) { - refs.addAll(alt[i].ruleRefs.keySet()); - } - return refs; - } - - public Set getTokenRefs() { - Set refs = new HashSet(); - for (int i=1; i<=numberOfAlts; i++) { - refs.addAll(alt[i].tokenRefs.keySet()); - } - return refs; - } - - public Set getLabelNames() { - Set refs = new HashSet(); - for (int i=1; i<=numberOfAlts; i++) { - refs.addAll(alt[i].labelDefs.keySet()); - } - return refs; - } - - // TODO: called frequently; make it more efficient - public MultiMap getLabelDefs() { - MultiMap defs = - new MultiMap(); - for (int i=1; i<=numberOfAlts; i++) { - for (List pairs : alt[i].labelDefs.values()) { - for (LabelElementPair p : pairs) { - defs.map(p.label.getText(), p); - } - } - } - return defs; - } - - public AttributeDict getUniqueDictFor(String x, ActionAST node) { - if ( name.equals(x) ) { // x is this rule? - return getPredefinedScope(LabelType.RULE_LABEL); - } - return null; - } - - /** $x Attribute: rule arguments, return values, predefined rule prop. 
- */ - public Attribute resolveToAttribute(String x, ActionAST node) { - if ( args!=null ) { - Attribute a = args.get(x); if ( a!=null ) return a; - } - if ( retvals!=null ) { - Attribute a = retvals.get(x); if ( a!=null ) return a; - } - AttributeDict properties = getPredefinedScope(LabelType.RULE_LABEL); - return properties.get(x); - } - - /** $x.y Attribute: x is surrounding rule, label ref (in any alts) */ - public Attribute resolveToAttribute(String x, String y, ActionAST node) { - if ( this.name.equals(x) ) { // x is this rule? - return resolveToAttribute(y, node); - } - LabelElementPair anyLabelDef = getAnyLabelDef(x); - if ( anyLabelDef!=null ) { - if ( anyLabelDef.type==LabelType.RULE_LABEL ) { - return g.getRule(anyLabelDef.element.getText()).resolveRetvalOrProperty(y); - } - else { - return getPredefinedScope(anyLabelDef.type).get(y); - } - } - return null; - - } - - public AttributeDict resolveToDynamicScope(String x, ActionAST node) { - Rule r = resolveToRule(x); - if ( r!=null && r.scope!=null ) return r.scope; - return g.scopes.get(x); - } - - public boolean resolvesToLabel(String x, ActionAST node) { - return false; - } - - public boolean resolvesToListLabel(String x, ActionAST node) { - LabelElementPair anyLabelDef = getAnyLabelDef(x); - return anyLabelDef!=null && - (anyLabelDef.type==LabelType.RULE_LIST_LABEL || - anyLabelDef.type==LabelType.TOKEN_LIST_LABEL); - } - - public boolean resolvesToToken(String x, ActionAST node) { - LabelElementPair anyLabelDef = getAnyLabelDef(x); - if ( anyLabelDef!=null && anyLabelDef.type==LabelType.TOKEN_LABEL ) return true; - return false; - } - - public boolean resolvesToAttributeDict(String x, ActionAST node) { - if ( resolvesToToken(x, node) ) return true; - if ( x.equals(name) ) return true; // $r for action in rule r, $r is a dict - if ( scope!=null ) return true; - if ( g.scopes.get(x)!=null ) return true; - return false; - } - - public Rule resolveToRule(String x) { - if ( x.equals(this.name) ) return this; - LabelElementPair anyLabelDef = getAnyLabelDef(x); - if ( anyLabelDef!=null && anyLabelDef.type==LabelType.RULE_LABEL ) { - return g.getRule(anyLabelDef.element.getText()); - } - return g.getRule(x); - } - - public LabelElementPair getAnyLabelDef(String x) { - List labels = getLabelDefs().get(x); - if ( labels!=null ) return labels.get(0); - return null; - } - - public AttributeDict getPredefinedScope(LabelType ltype) { - String grammarLabelKey = g.getTypeString() + ":" + ltype; - return Grammar.grammarAndLabelRefTypeToScope.get(grammarLabelKey); - } - - public boolean isFragment() { - if ( modifiers==null ) return false; - for (GrammarAST a : modifiers) { - if ( a.getText().equals("fragment") ) return true; - } - return false; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(); - buf.append("Rule{name="+name); - if ( args!=null ) buf.append(", args=" + args); - if ( retvals!=null ) buf.append(", retvals=" + retvals); - if ( scope!=null ) buf.append(", scope=" + scope); - buf.append("}"); - return buf.toString(); - } -} diff --git a/tool/src/org/antlr/v4/tool/RuleAST.java b/tool/src/org/antlr/v4/tool/RuleAST.java deleted file mode 100644 index 90789f4b1..000000000 --- a/tool/src/org/antlr/v4/tool/RuleAST.java +++ /dev/null @@ -1,29 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; -import org.antlr.runtime.tree.Tree; -import org.antlr.v4.parse.ANTLRParser; - -public class RuleAST extends GrammarASTWithOptions { - public RuleAST(GrammarAST node) { - super(node); - } - 
- public RuleAST(Token t) { super(t); } - public RuleAST(int type) { super(type); } - - @Override - public Tree dupNode() { return new RuleAST(this); } - - public ActionAST getLexerAction() { - Tree blk = getFirstChildWithType(ANTLRParser.BLOCK); - if ( blk.getChildCount()==1 ) { - Tree onlyAlt = blk.getChild(0); - Tree lastChild = onlyAlt.getChild(onlyAlt.getChildCount()-1); - if ( lastChild.getType()==ANTLRParser.ACTION ) { - return (ActionAST)lastChild; - } - } - return null; - } -} diff --git a/tool/src/org/antlr/v4/tool/TerminalAST.java b/tool/src/org/antlr/v4/tool/TerminalAST.java deleted file mode 100644 index b2ae785ff..000000000 --- a/tool/src/org/antlr/v4/tool/TerminalAST.java +++ /dev/null @@ -1,19 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.runtime.Token; -import org.antlr.runtime.tree.Tree; - -public class TerminalAST extends GrammarASTWithOptions { - public static final String defaultTokenOption = "node"; - - public TerminalAST(GrammarAST node) { - super(node); - } - - public TerminalAST(Token t) { super(t); } - public TerminalAST(int type) { super(type); } - public TerminalAST(int type, Token t) { super(type, t); } - - @Override - public Tree dupNode() { return new TerminalAST(this); } -} diff --git a/tool/src/org/antlr/v4/tool/ToolMessage.java b/tool/src/org/antlr/v4/tool/ToolMessage.java deleted file mode 100644 index bdfd60c0d..000000000 --- a/tool/src/org/antlr/v4/tool/ToolMessage.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.antlr.v4.tool; - -/** A generic message from the tool such as "file not found" type errors; there - * is no reason to create a special object for each error unlike the grammar - * errors, which may be rather complex. - * - * Sometimes you need to pass in a filename or something to say it is "bad". - * Allow a generic object to be passed in and the string template can deal - * with just printing it or pulling a property out of it. - */ -public class ToolMessage extends Message { - public ToolMessage(ErrorType errorType) { - super(errorType); - } - public ToolMessage(ErrorType errorType, Object... args) { - super(errorType, null, args); - } - public ToolMessage(ErrorType errorType, Throwable e, Object... args) { - super(errorType, e, args); - } -} diff --git a/tool/src/org/antlr/v4/tool/UnreachableAltsMessage.java b/tool/src/org/antlr/v4/tool/UnreachableAltsMessage.java deleted file mode 100644 index 6fe3f0467..000000000 --- a/tool/src/org/antlr/v4/tool/UnreachableAltsMessage.java +++ /dev/null @@ -1,31 +0,0 @@ -package org.antlr.v4.tool; - -import org.antlr.v4.automata.DFA; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -public class UnreachableAltsMessage extends Message { - public DFA dfa; - public Collection conflictingAlts; - - public UnreachableAltsMessage(ErrorType etype, - String fileName, - DFA dfa, - Collection conflictingAlts) - { - super(etype); - this.fileName = fileName; - this.dfa = dfa; - this.conflictingAlts = conflictingAlts; - this.line = dfa.decisionNFAStartState.ast.getLine(); - this.charPosition = dfa.decisionNFAStartState.ast.getCharPositionInLine(); - - Map info = new HashMap(); - info.put("dfa", dfa); - info.put("alts", conflictingAlts); - args = new Object[] {info}; // pass in a map so we can name the args instead of arg1, arg2, ... 
- } - -} diff --git a/tool/test/org/antlr/v4/test/BaseTest.java b/tool/test/org/antlr/v4/test/BaseTest.java deleted file mode 100644 index cb4943972..000000000 --- a/tool/test/org/antlr/v4/test/BaseTest.java +++ /dev/null @@ -1,1027 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.test; - - -import org.antlr.runtime.*; -import org.antlr.v4.Tool; -import org.antlr.v4.analysis.DFAMinimizer; -import org.antlr.v4.analysis.LexerNFAToDFAConverter; -import org.antlr.v4.analysis.PredictionDFAFactory; -import org.antlr.v4.automata.DFA; -import org.antlr.v4.automata.*; -import org.antlr.v4.codegen.CompiledPDA; -import org.antlr.v4.codegen.LexerCompiler; -import org.antlr.v4.misc.Utils; -import org.antlr.v4.runtime.pda.PDA; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.*; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.stringtemplate.v4.ST; - -import java.io.*; -import java.util.*; - - -public abstract class BaseTest { - public static final String jikes = null;//"/usr/bin/jikes"; - public static final String pathSep = System.getProperty("path.separator"); - - /** - * When runnning from Maven, the junit tests are run via the surefire plugin. It sets the - * classpath for the test environment into the following property. We need to pick this up - * for the junit tests that are going to generate and try to run code. - */ - public static final String SUREFIRE_CLASSPATH = System.getProperty("surefire.test.class.path", ""); - - /** - * Build up the full classpath we need, including the surefire path (if present) - */ - public static final String CLASSPATH = System.getProperty("java.class.path") + (SUREFIRE_CLASSPATH.equals("") ? "" : pathSep + SUREFIRE_CLASSPATH); - - public String tmpdir = null; - - /** reset during setUp and set to true if we find a problem */ - protected boolean lastTestFailed = false; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. 
- */ - protected String stderrDuringParse; - - @Before - public void setUp() throws Exception { - lastTestFailed = false; // hope for the best, but set to true in asserts that fail - // new output dir for each test - tmpdir = new File(System.getProperty("java.io.tmpdir"), - "antlr-"+getClass().getName()+"-"+System.currentTimeMillis()).getAbsolutePath(); - } - - @After - public void tearDown() throws Exception { - // remove tmpdir if no error. - if ( !lastTestFailed ) eraseTempDir(); - - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - tool.setOutputDirectory(tmpdir); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(); - tool.setOutputDirectory(tmpdir); - return tool; - } - - NFA createNFA(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.process(imp); - } - } - } - - ParserNFAFactory f = new ParserNFAFactory(g); - if ( g.isLexer() ) f = new LexerNFAFactory((LexerGrammar)g); - return f.createNFA(); - } - - public DFA createDFA(Grammar g, DecisionState s) { - PredictionDFAFactory conv = new PredictionDFAFactory(g, s); - DFA dfa = conv.createDFA(); - conv.issueAmbiguityWarnings(); - System.out.print("DFA="+dfa); - return dfa; - } - - public void minimizeDFA(DFA dfa) { - DFAMinimizer dmin = new DFAMinimizer(dfa); - dfa.minimized = dmin.minimize(); - } - - PDA getLexerPDA(LexerGrammar g) { - NFA nfa = createNFA(g); - - LexerCompiler comp = new LexerCompiler(g); - CompiledPDA obj = comp.compileMode(LexerGrammar.DEFAULT_MODE_NAME); - PDA PDA = new PDA(obj.code, obj.altToAddr, obj.nLabels); - return PDA; - } - - List getTypesFromString(Grammar g, String expecting) { - List expectingTokenTypes = new ArrayList(); - if ( expecting!=null && !expecting.trim().equals("") ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - List getTokenTypes(String input, PDA lexerPDA) { - ANTLRStringStream in = new ANTLRStringStream(input); - List tokenTypes = new ArrayList(); - int ttype = 0; - do { - ttype = lexerPDA.execThompson(in); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; - } - - List checkRuleDFA(String gtext, String ruleName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - NFA nfa = createNFA(g); - NFAState s = nfa.ruleToStartState.get(g.getRule(ruleName)); - if ( s==null ) { - System.err.println("no such rule: "+ruleName); - return null; - } - NFAState t = s.transition(0).target; - if ( !(t instanceof DecisionState) ) { - System.out.println(ruleName+" has no decision"); - return null; - } - DecisionState blk = (DecisionState)t; - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - List checkRuleDFA(String gtext, int decision, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - NFA nfa = createNFA(g); - DecisionState blk = nfa.decisionToNFAState.get(decision); - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - void checkRuleDFA(Grammar g, DecisionState blk, String expecting) - throws Exception - { - DFA dfa = createDFA(g, 
blk); - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - } - - List checkLexerDFA(String gtext, String expecting) - throws Exception - { - return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); - } - - List checkLexerDFA(String gtext, String modeName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - LexerGrammar g = new LexerGrammar(gtext, equeue); - g.nfa = createNFA(g); - LexerNFAToDFAConverter conv = new LexerNFAToDFAConverter(g); - DFA dfa = conv.createDFA(modeName); - g.setLookaheadDFA(0, dfa); // only one decision to worry about - - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - - return equeue.all; - } - - protected boolean compile(String fileName) { - String compiler = "javac"; - String classpathOption = "-classpath"; - - if (jikes!=null) { - compiler = jikes; - classpathOption = "-bootclasspath"; - } - - String[] args = new String[] { - compiler, "-d", tmpdir, - classpathOption, tmpdir+pathSep+CLASSPATH, - tmpdir+"/"+fileName - }; - String cmdLine = compiler+" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+fileName; - //System.out.println("compile: "+cmdLine); - File outputDir = new File(tmpdir); - try { - Process process = - Runtime.getRuntime().exec(args, null, outputDir); - StreamVacuum stdout = new StreamVacuum(process.getInputStream()); - StreamVacuum stderr = new StreamVacuum(process.getErrorStream()); - stdout.start(); - stderr.start(); - process.waitFor(); - stdout.join(); - stderr.join(); - if ( stdout.toString().length()>0 ) { - System.err.println("compile stdout from: "+cmdLine); - System.err.println(stdout); - } - if ( stderr.toString().length()>0 ) { - System.err.println("compile stderr from: "+cmdLine); - System.err.println(stderr); - } - int ret = process.exitValue(); - return ret==0; - } - catch (Exception e) { - System.err.println("can't exec compilation"); - e.printStackTrace(System.err); - return false; - } - } - - /** Return true if all is ok, no errors */ - protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) { - boolean allIsWell = true; - mkdir(tmpdir); - writeFile(tmpdir, fileName, grammarStr); - try { - final List options = new ArrayList(); - if ( debug ) { - options.add("-debug"); - } - options.add("-o"); - options.add(tmpdir); - options.add("-lib"); - options.add(tmpdir); - options.add(new File(tmpdir,grammarFileName).toString()); - final String[] optionsA = new String[options.size()]; - options.toArray(optionsA); - ErrorQueue equeue = new ErrorQueue(); - Tool antlr = newTool(optionsA); - antlr.addListener(equeue); - antlr.processGrammarsOnCommandLine(); - if ( equeue.errors.size()>0 ) { - allIsWell = false; - System.err.println("antlr reports errors from "+options); - for (int i = 0; i < equeue.errors.size(); i++) { - Message msg = (Message) equeue.errors.get(i); - System.err.println(msg); - } - System.out.println("!!!\ngrammar:"); - System.out.println(grammarStr); - System.out.println("###"); - } - } - catch (Exception e) { - allIsWell = false; - System.err.println("problems building grammar: "+e); - e.printStackTrace(System.err); - } - return allIsWell; - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean debug) - { - rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName, - debug); - writeFile(tmpdir, "input", input); - return 
rawExecRecognizer(null, - null, - lexerName, - null, - null, - false, - false, - false, - debug); - } - - protected String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String startRuleName, - String input, boolean debug) - { - rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - debug); - writeFile(tmpdir, "input", input); - boolean parserBuildsTrees = - grammarStr.indexOf("output=AST")>=0 || - grammarStr.indexOf("output = AST")>=0; - boolean parserBuildsTemplate = - grammarStr.indexOf("output=template")>=0 || - grammarStr.indexOf("output = template")>=0; - return rawExecRecognizer(parserName, - null, - lexerName, - startRuleName, - null, - parserBuildsTrees, - parserBuildsTemplate, - false, - debug); - } - - protected String execTreeParser(String parserGrammarFileName, - String parserGrammarStr, - String parserName, - String treeParserGrammarFileName, - String treeParserGrammarStr, - String treeParserName, - String lexerName, - String parserStartRuleName, - String treeParserStartRuleName, - String input) - { - return execTreeParser(parserGrammarFileName, - parserGrammarStr, - parserName, - treeParserGrammarFileName, - treeParserGrammarStr, - treeParserName, - lexerName, - parserStartRuleName, - treeParserStartRuleName, - input, - false); - } - - protected String execTreeParser(String parserGrammarFileName, - String parserGrammarStr, - String parserName, - String treeParserGrammarFileName, - String treeParserGrammarStr, - String treeParserName, - String lexerName, - String parserStartRuleName, - String treeParserStartRuleName, - String input, - boolean debug) - { - // build the parser - rawGenerateAndBuildRecognizer(parserGrammarFileName, - parserGrammarStr, - parserName, - lexerName, - debug); - - // build the tree parser - rawGenerateAndBuildRecognizer(treeParserGrammarFileName, - treeParserGrammarStr, - treeParserName, - lexerName, - debug); - - writeFile(tmpdir, "input", input); - - boolean parserBuildsTrees = - parserGrammarStr.indexOf("output=AST")>=0 || - parserGrammarStr.indexOf("output = AST")>=0; - boolean treeParserBuildsTrees = - treeParserGrammarStr.indexOf("output=AST")>=0 || - treeParserGrammarStr.indexOf("output = AST")>=0; - boolean parserBuildsTemplate = - parserGrammarStr.indexOf("output=template")>=0 || - parserGrammarStr.indexOf("output = template")>=0; - - return rawExecRecognizer(parserName, - treeParserName, - lexerName, - parserStartRuleName, - treeParserStartRuleName, - parserBuildsTrees, - parserBuildsTemplate, - treeParserBuildsTrees, - debug); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean debug) - { - boolean allIsWell = - antlr(grammarFileName, grammarFileName, grammarStr, debug); - if ( lexerName!=null ) { - boolean ok; - if ( parserName!=null ) { - ok = compile(parserName+".java"); - if ( !ok ) { allIsWell = false; } - } - ok = compile(lexerName+".java"); - if ( !ok ) { allIsWell = false; } - } - else { - boolean ok = compile(parserName+".java"); - if ( !ok ) { allIsWell = false; } - } - return allIsWell; - } - - protected String rawExecRecognizer(String parserName, - String treeParserName, - String lexerName, - String parserStartRuleName, - String treeParserStartRuleName, - boolean parserBuildsTrees, - boolean parserBuildsTemplate, - boolean treeParserBuildsTrees, - boolean debug) - { - this.stderrDuringParse = null; - if ( 
treeParserBuildsTrees && parserBuildsTrees ) { - writeTreeAndTreeTestFile(parserName, - treeParserName, - lexerName, - parserStartRuleName, - treeParserStartRuleName, - debug); - } - else if ( parserBuildsTrees ) { - writeTreeTestFile(parserName, - treeParserName, - lexerName, - parserStartRuleName, - treeParserStartRuleName, - debug); - } - else if ( parserBuildsTemplate ) { - writeTemplateTestFile(parserName, - lexerName, - parserStartRuleName, - debug); - } - else if ( parserName==null ) { - writeLexerTestFile(lexerName, debug); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug); - } - - compile("Test.java"); - try { - String[] args = new String[] { - "java", "-classpath", tmpdir+pathSep+CLASSPATH, - "Test", new File(tmpdir, "input").getAbsolutePath() - }; - //String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath(); - //System.out.println("execParser: "+cmdLine); - Process process = - Runtime.getRuntime().exec(args, null, new File(tmpdir)); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = null; - output = stdoutVacuum.toString(); - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - //System.err.println("exec stderrVacuum: "+ stderrVacuum); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - public void testErrors(String[] pairs, boolean printTree) { - for (int i = 0; i < pairs.length; i+=2) { - String input = pairs[i]; - String expect = pairs[i+1]; - ErrorQueue equeue = new ErrorQueue(); - Grammar g=null; - try { - String[] lines = input.split("\n"); - String fileName = getFilenameFromFirstLineOfGrammar(lines[0]); - g = new Grammar(fileName, input, equeue); - - if ( printTree ) { - if ( g.ast!=null ) System.out.println(g.ast.toStringTree()); - else System.out.println("null tree"); - } - - if ( g.ast!=null && !g.ast.hasErrors ) { - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.process(imp); - } - } - } - - //g.loadImportedGrammars(); - } - catch (RecognitionException re) { - re.printStackTrace(System.err); - } - String actual = equeue.toString(g.tool); - String msg = input; - msg = msg.replaceAll("\n","\\\\n"); - msg = msg.replaceAll("\r","\\\\r"); - msg = msg.replaceAll("\t","\\\\t"); - - // ignore error number - expect = stripErrorNum(expect); - actual = stripErrorNum(actual); - assertEquals("error in: "+msg,expect,actual); - } - } - - // can be multi-line - //error(29): A.g:2:11: unknown attribute reference a in $a - //error(29): A.g:2:11: unknown attribute reference a in $a - String stripErrorNum(String errs) { - String[] lines = errs.split("\n"); - for (int i=0; i=0 && semi>=0 ) { - int space = line.indexOf(' ', grIndex); - fileName = line.substring(space+1, semi)+".g"; - } - if ( fileName.length()==".g".length() ) fileName = ""; - return fileName; - } - - void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) - throws Exception - { - ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); - } - - void ambig(List msgs, 
int i, int[] expectedAmbigAlts, String expectedAmbigInput) - throws Exception - { - List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); - AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); - if ( a==null ) assertNull(expectedAmbigAlts); - else { - assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); - } - assertEquals(expectedAmbigInput, a.input); - } - - void unreachable(List msgs, int[] expectedUnreachableAlts) - throws Exception - { - unreachable(msgs, 0, expectedUnreachableAlts); - } - - void unreachable(List msgs, int i, int[] expectedUnreachableAlts) - throws Exception - { - List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); - UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); - if ( u==null ) assertNull(expectedUnreachableAlts); - else { - assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); - } - } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (Message m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - public static class StreamVacuum implements Runnable { - StringBuffer buf = new StringBuffer(); - BufferedReader in; - Thread sucker; - public StreamVacuum(InputStream in) { - this.in = new BufferedReader( new InputStreamReader(in) ); - } - public void start() { - sucker = new Thread(this); - sucker.start(); - } - public void run() { - try { - String line = in.readLine(); - while (line!=null) { - buf.append(line); - buf.append('\n'); - line = in.readLine(); - } - } - catch (IOException ioe) { - System.err.println("can't read output from process"); - } - } - /** wait for the thread to finish */ - public void join() throws InterruptedException { - sucker.join(); - } - public String toString() { - return buf.toString(); - } - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - protected void sync(int i) { - super.sync(i); - if ( hide.contains(get(i).getType()) ) get(i).setChannel(Token.HIDDEN_CHANNEL); - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void writeFile(String dir, String fileName, String content) { - try { - File f = new File(dir, fileName); - FileWriter w = new FileWriter(f); - BufferedWriter bw = new BufferedWriter(w); - bw.write(content); - bw.close(); - w.close(); - } - catch (IOException ioe) { - System.err.println("can't write file"); - ioe.printStackTrace(System.err); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug) - { - ST outputFileST = new ST( - "import org.antlr.runtime.*;\n" + - "import org.antlr.runtime.tree.*;\n" + - "import org.antlr.runtime.debug.*;\n" + - "\n" + - "class Profiler2 extends Profiler {\n" + - " public void terminate() { ; }\n" + - "}\n"+ - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " $lexerName$ lex = new $lexerName$(input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " $createParser$\n"+ - " parser.$parserStartRuleName$();\n" + - " }\n" + - "}" - ); - ST createParserST = - new ST( - " Profiler2 profiler = new Profiler2();\n"+ - " $parserName$ parser = new $parserName$(tokens,profiler);\n" + - " 
profiler.setParser(parser);\n"); - if ( !debug ) { - createParserST = - new ST( - " $parserName$ parser = new $parserName$(tokens);\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.java", outputFileST.toString()); - } - - protected void writeLexerTestFile(String lexerName, boolean debug) { - ST outputFileST = new ST( - "import org.antlr.runtime.*;\n" + - "import org.antlr.runtime.tree.*;\n" + - "import org.antlr.runtime.debug.*;\n" + - "\n" + - "class Profiler2 extends Profiler {\n" + - " public void terminate() { ; }\n" + - "}\n"+ - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " $lexerName$ lex = new $lexerName$(input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " System.out.println(tokens);\n" + - " }\n" + - "}" - ); - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.java", outputFileST.toString()); - } - - protected void writeTreeTestFile(String parserName, - String treeParserName, - String lexerName, - String parserStartRuleName, - String treeParserStartRuleName, - boolean debug) - { - ST outputFileST = new ST( - "import org.antlr.runtime.*;\n" + - "import org.antlr.runtime.tree.*;\n" + - "import org.antlr.runtime.debug.*;\n" + - "\n" + - "class Profiler2 extends Profiler {\n" + - " public void terminate() { ; }\n" + - "}\n"+ - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " $lexerName$ lex = new $lexerName$(input);\n" + - " TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" + - " $createParser$\n"+ - " $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" + - " $if(!treeParserStartRuleName)$\n" + - " if ( r.tree!=null ) {\n" + - " System.out.println(((Tree)r.tree).toStringTree());\n" + - " ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" + - " }\n" + - " $else$\n" + - " CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" + - " nodes.setTokenStream(tokens);\n" + - " $treeParserName$ walker = new $treeParserName$(nodes);\n" + - " walker.$treeParserStartRuleName$();\n" + - " $endif$\n" + - " }\n" + - "}" - ); - ST createParserST = - new ST( - " Profiler2 profiler = new Profiler2();\n"+ - " $parserName$ parser = new $parserName$(tokens,profiler);\n" + - " profiler.setParser(parser);\n"); - if ( !debug ) { - createParserST = - new ST( - " $parserName$ parser = new $parserName$(tokens);\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("treeParserName", treeParserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - outputFileST.add("treeParserStartRuleName", treeParserStartRuleName); - writeFile(tmpdir, "Test.java", outputFileST.toString()); - } - - /** Parser creates trees and so does the tree parser */ - protected void writeTreeAndTreeTestFile(String parserName, - String treeParserName, - String lexerName, - String parserStartRuleName, - String treeParserStartRuleName, - boolean debug) - { - ST outputFileST = new ST( - "import org.antlr.runtime.*;\n" + - "import org.antlr.runtime.tree.*;\n" + - "import org.antlr.runtime.debug.*;\n" + - "\n" 
+ - "class Profiler2 extends Profiler {\n" + - " public void terminate() { ; }\n" + - "}\n"+ - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " $lexerName$ lex = new $lexerName$(input);\n" + - " TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" + - " $createParser$\n"+ - " $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" + - " ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" + - " CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" + - " nodes.setTokenStream(tokens);\n" + - " $treeParserName$ walker = new $treeParserName$(nodes);\n" + - " $treeParserName$.$treeParserStartRuleName$_return r2 = walker.$treeParserStartRuleName$();\n" + - " CommonTree rt = ((CommonTree)r2.tree);\n" + - " if ( rt!=null ) System.out.println(((CommonTree)r2.tree).toStringTree());\n" + - " }\n" + - "}" - ); - ST createParserST = - new ST( - " Profiler2 profiler = new Profiler2();\n"+ - " $parserName$ parser = new $parserName$(tokens,profiler);\n" + - " profiler.setParser(parser);\n"); - if ( !debug ) { - createParserST = - new ST( - " $parserName$ parser = new $parserName$(tokens);\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("treeParserName", treeParserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - outputFileST.add("treeParserStartRuleName", treeParserStartRuleName); - writeFile(tmpdir, "Test.java", outputFileST.toString()); - } - - protected void writeTemplateTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug) - { - ST outputFileST = new ST( - "import org.antlr.runtime.*;\n" + - "import org.antlr.stringtemplate.*;\n" + - "import org.antlr.stringtemplate.language.*;\n" + - "import org.antlr.runtime.debug.*;\n" + - "import java.io.*;\n" + - "\n" + - "class Profiler2 extends Profiler {\n" + - " public void terminate() { ; }\n" + - "}\n"+ - "public class Test {\n" + - " static String templates =\n" + - " \"group test;\"+" + - " \"foo(x,y) ::= \\\" \\\"\";\n"+ - " static STGroup group ="+ - " new STGroup(new StringReader(templates)," + - " AngleBracketTemplateLexer.class);"+ - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " $lexerName$ lex = new $lexerName$(input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " $createParser$\n"+ - " parser.setTemplateLib(group);\n"+ - " $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" + - " if ( r.st!=null )\n" + - " System.out.print(r.st.toString());\n" + - " else\n" + - " System.out.print(\"\");\n" + - " }\n" + - "}" - ); - ST createParserST = - new ST( - " Profiler2 profiler = new Profiler2();\n"+ - " $parserName$ parser = new $parserName$(tokens,profiler);\n" + - " profiler.setParser(parser);\n"); - if ( !debug ) { - createParserST = - new ST( - " $parserName$ parser = new $parserName$(tokens);\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.java", outputFileST.toString()); - } - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(tmpdir); - String[] files 
= tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles() { - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - - protected void eraseTempDir() { - File tmpdirF = new File(tmpdir); - if ( tmpdirF.exists() ) { - eraseFiles(); - tmpdirF.delete(); - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. - * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - - System.out.println("Map toString looks like: " + m.toString()); - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - // override to track errors - - public void assertEquals(String msg, Object a, Object b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertEquals(Object a, Object b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertEquals(String msg, long a, long b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertEquals(long a, long b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} } - - public void assertTrue(String msg, boolean b) { try {Assert.assertTrue(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertTrue(boolean b) { try {Assert.assertTrue(b);} catch (Error e) {lastTestFailed=true; throw e;} } - - public void assertFalse(String msg, boolean b) { try {Assert.assertFalse(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertFalse(boolean b) { try {Assert.assertFalse(b);} catch (Error e) {lastTestFailed=true; throw e;} } - - public void assertNotNull(String msg, Object p) { try {Assert.assertNotNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertNotNull(Object p) { try {Assert.assertNotNull(p);} catch (Error e) {lastTestFailed=true; throw e;} } - - public void assertNull(String msg, Object p) { try {Assert.assertNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} } - public void assertNull(Object p) { try {Assert.assertNull(p);} catch (Error e) {lastTestFailed=true; throw e;} } -} diff --git a/tool/test/org/antlr/v4/test/ErrorQueue.java b/tool/test/org/antlr/v4/test/ErrorQueue.java deleted file mode 100644 index 78f3aab48..000000000 --- 
a/tool/test/org/antlr/v4/test/ErrorQueue.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - [The "BSD license"] - Copyright (c) 2005-2009 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.test; - -import org.antlr.v4.Tool; -import org.antlr.v4.misc.Utils; -import org.antlr.v4.tool.ANTLRToolListener; -import org.antlr.v4.tool.Message; -import org.antlr.v4.tool.ToolMessage; -import org.stringtemplate.v4.ST; - -import java.util.ArrayList; -import java.util.List; - -public class ErrorQueue implements ANTLRToolListener { - public List infos = new ArrayList(); - public List errors = new ArrayList(); - public List warnings = new ArrayList(); - public List all = new ArrayList(); - - public void info(String msg) { - infos.add(msg); - } - - public void error(Message msg) { - errors.add(msg); - all.add(msg); - } - - public void warning(Message msg) { - warnings.add(msg); - all.add(msg); - } - - public void error(ToolMessage msg) { - errors.add(msg); - all.add(msg); - } - - public int size() { - return all.size() + infos.size(); - } - - public String toString() { return Utils.join(all.iterator(), "\n"); } - - public String toString(Tool tool) { - StringBuilder buf = new StringBuilder(); - for (Message m : all) { - ST st = tool.errMgr.getMessageTemplate(m); - buf.append(st.render()); - buf.append("\n"); - } - return buf.toString(); - } - -} - diff --git a/tool/test/org/antlr/v4/test/TestASTStructure.gunit b/tool/test/org/antlr/v4/test/TestASTStructure.gunit deleted file mode 100644 index 1afda9871..000000000 --- a/tool/test/org/antlr/v4/test/TestASTStructure.gunit +++ /dev/null @@ -1,217 +0,0 @@ -/** Test ANTLRParser's AST construction. 
Translate to junit tests with: - * - * $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit - */ -gunit TestASTStructure; - -@header {package org.antlr.v4.test;} -options { - adaptor = org.antlr.v4.parse.GrammarASTAdaptor; - parser = org.antlr.v4.parse.ANTLRParser; - lexer = org.antlr.v4.parse.ANTLRLexer; -} - -grammarSpec: - "parser grammar P; a : A;" - -> (PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - options {k=2; output=AST;} - scope S {int x} - tokens { A; B='33'; } - @header {foo} - a : A; - >> - -> - (PARSER_GRAMMAR P - (OPTIONS (= k 2) (= output AST)) - (scope S {int x}) - (tokens { A (= B '33')) - (@ header {foo}) - (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - @header {foo} - tokens { A; B='33'; } - options {k=2; ASTLabel=a.b.c; output=AST;} - scope S {int x} - a : A; - >> - -> - (PARSER_GRAMMAR P - (@ header {foo}) - (tokens { A (= B '33')) - (OPTIONS (= k 2) (= ASTLabel a.b.c) (= output AST)) - (scope S {int x}) - (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - import A=B, C; - a : A; - >> - -> - (PARSER_GRAMMAR P - (import (= A B) C) - (RULES (RULE a (BLOCK (ALT A))))) - -delegateGrammars: - "import A;" -> (import A) - -rule: - "a : A;" -> - (RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c)))))) - "A : B+;" -> (RULE A (BLOCK (ALT (+ (BLOCK (ALT B)))))) - - << - public a[int i] returns [int y] - options {backtrack=true;} - scope {int ss;} - scope S,T; - @init {blort} - : ID ; - >> - -> - (RULE a - (RULEMODIFIERS public) - int i - (returns int y) - (OPTIONS (= backtrack true)) - (scope {int ss;}) - (scope S T) - (@ init {blort}) - (BLOCK (ALT ID))) - - << - a[int i] returns [int y] - @init {blort} - scope {int ss;} - options {backtrack=true;} - scope S,T; - : ID; - >> - -> - (RULE a int i - (returns int y) - (@ init {blort}) - (scope {int ss;}) - (OPTIONS (= backtrack true)) - (scope S T) - (BLOCK (ALT ID))) - - << - a : ID ; - catch[A b] {foo} - finally {bar} - >> - -> - (RULE a (BLOCK (ALT ID)) - (catch A b {foo}) (finally {bar})) - - << - a : ID ; - catch[A a] {foo} - catch[B b] {fu} - finally {bar} - >> - -> - (RULE a (BLOCK (ALT ID)) - (catch A a {foo}) (catch B b {fu}) (finally {bar})) - -block: - "( ^(A B) | ^(b C) )" -> (BLOCK (ALT ("^(" A B)) (ALT ("^(" b C))) - -alternative: - "x+=ID* -> $x*" -> - (ALT_REWRITE - (ALT (* (BLOCK (ALT (+= x ID))))) - (-> (ALT (* (REWRITE_BLOCK (ALT x)))))) - - "A -> ..." -> (ALT_REWRITE (ALT A) (-> ...)) - "A -> " -> (ALT_REWRITE (ALT A) (-> EPSILON)) - - "A -> foo(a={x}, b={y})" -> - (ALT_REWRITE - (ALT A) - (-> (TEMPLATE foo (ARGLIST (= a {x}) (= b {y}))))) - - "A -> template(a={x}, b={y}) <>" -> - (ALT_REWRITE - (ALT A) - (-> (TEMPLATE (ARGLIST (= a {x}) (= b {y})) <>))) - - "A -> ({name})()" -> (ALT_REWRITE (ALT A) (-> (TEMPLATE {name}))) - - "A -> {expr}" -> (ALT_REWRITE (ALT A) (-> {expr})) - - << - A -> {p1}? {e1} - -> {e2} - -> - >> - -> - (ALT_REWRITE - (ALT A) - (-> {p1}? {e1}) - (-> {e2})) - - "A -> A" -> (ALT_REWRITE (ALT A) (-> (ALT A))) - - "a -> a" -> (ALT_REWRITE (ALT a) (-> (ALT a))) - - "a A X? Y* -> A a ^(TOP X)? Y*" -> - (ALT_REWRITE - (ALT a A (? (BLOCK (ALT X))) (* (BLOCK (ALT Y)))) - (-> (ALT - A a - (? (REWRITE_BLOCK (ALT ("^(" TOP X)))) - (* (REWRITE_BLOCK (ALT Y)))))) - - "A -> A[33]" -> (ALT_REWRITE (ALT A) (-> (ALT (A 33)))) - - "A -> 'int' ^(A A)*" -> - (ALT_REWRITE - (ALT A) - (-> (ALT 'int' (* (REWRITE_BLOCK (ALT ("^(" A A))))))) - - << - A -> {p1}? A - -> {p2}? B - -> - >> - -> - (ALT_REWRITE (ALT A) - (-> {p1}? (ALT A)) - (-> {p2}? 
(ALT B)) - (-> EPSILON)) - -element: - "b+" -> (+ (BLOCK (ALT b))) - "(b)+" -> (+ (BLOCK (ALT b))) - "b?" -> (? (BLOCK (ALT b))) - "(b)?" -> (? (BLOCK (ALT b))) - "(b)*" -> (* (BLOCK (ALT b))) - "b*" -> (* (BLOCK (ALT b))) - "'while'*" -> (* (BLOCK (ALT 'while'))) - "'a'+" -> (+ (BLOCK (ALT 'a'))) - "a[3]" -> (a 3) - "'a'..'z'+" -> (+ (BLOCK (ALT (.. 'a' 'z')))) - "x=ID" -> (= x ID) - "x=ID?" -> (? (BLOCK (ALT (= x ID)))) - "x=ID*" -> (* (BLOCK (ALT (= x ID)))) - "x=b" -> (= x b) - "x=(A|B)" -> (= x (BLOCK (ALT A) (ALT B))) - "x=~(A|B)" -> (= x (~ (BLOCK (ALT A) (ALT B)))) - "x+=~(A|B)" -> (+= x (~ (BLOCK (ALT A) (ALT B)))) - "x+=~(A|B)+"-> (+ (BLOCK (ALT (+= x (~ (BLOCK (ALT A) (ALT B))))))) - "x=b+" -> (+ (BLOCK (ALT (= x b)))) - "x+=ID*" -> (* (BLOCK (ALT (+= x ID)))) - "x+='int'*" -> (* (BLOCK (ALT (+= x 'int')))) - "x+=b+" -> (+ (BLOCK (ALT (+= x b)))) - "('*'^)*" -> (* (BLOCK (ALT (^ '*')))) - "({blort} 'x')*" -> (* (BLOCK (ALT {blort} 'x'))) - "A!" -> (! A) - "A^" -> (^ A) - "x=A^" -> (= x (^ A)) diff --git a/tool/test/org/antlr/v4/test/TestASTStructure.java b/tool/test/org/antlr/v4/test/TestASTStructure.java deleted file mode 100644 index 430feb6a0..000000000 --- a/tool/test/org/antlr/v4/test/TestASTStructure.java +++ /dev/null @@ -1,428 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.runtime.*; -import org.antlr.runtime.tree.*; -import org.antlr.v4.gunit.gUnitBase; -import org.junit.Test; -import org.junit.Before; -import static org.junit.Assert.*; - -public class TestASTStructure extends gUnitBase { - @Before public void setup() { - lexerClassName = "org.antlr.v4.parse.ANTLRLexer"; - parserClassName = "org.antlr.v4.parse.ANTLRParser"; - adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor"; } - @Test public void test_grammarSpec1() throws Exception { - // gunit test on line 15 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } - - @Test public void test_grammarSpec2() throws Exception { - // gunit test on line 18 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n options {k=2; output=AST;}\n scope S {int x}\n tokens { A; B='33'; }\n @header {foo}\n a : A;\n ", 18); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (OPTIONS (= k 2) (= output AST)) (scope S {int x}) (tokens { A (= B '33')) (@ header {foo}) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } - - @Test public void test_grammarSpec3() throws Exception { - // gunit test on line 34 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n @header {foo}\n tokens { A; B='33'; }\n options {k=2; ASTLabel=a.b.c; output=AST;}\n scope S {int x}\n a : A;\n ", 34); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (@ header {foo}) (tokens { A (= B '33')) (OPTIONS (= k 2) (= ASTLabel a.b.c) (= output AST)) (scope S {int x}) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } - - @Test public void test_grammarSpec4() throws Exception { - // gunit test on line 50 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n import A=B, C;\n a : A;\n ", 50); - 
Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (import (= A B) C) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } @Test public void test_delegateGrammars1() throws Exception { - // gunit test on line 61 - RuleReturnScope rstruct = (RuleReturnScope)execParser("delegateGrammars", "import A;", 61); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(import A)"; - assertEquals("testing rule delegateGrammars", expecting, actual); - } @Test public void test_rule1() throws Exception { - // gunit test on line 64 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "a : A;", 64); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c))))))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule2() throws Exception { - // gunit test on line 66 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "A : B+;", 66); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE A (BLOCK (ALT (+ (BLOCK (ALT B))))))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule3() throws Exception { - // gunit test on line 68 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n public a[int i] returns [int y]\n options {backtrack=true;}\n scope {int ss;}\n scope S,T;\n @init {blort}\n : ID ;\n ", 68); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (RULEMODIFIERS public) int i (returns int y) (OPTIONS (= backtrack true)) (scope {int ss;}) (scope S T) (@ init {blort}) (BLOCK (ALT ID)))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule4() throws Exception { - // gunit test on line 87 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n scope {int ss;}\n options {backtrack=true;}\n scope S,T;\n : ID;\n ", 87); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (scope {int ss;}) (OPTIONS (= backtrack true)) (scope S T) (BLOCK (ALT ID)))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule5() throws Exception { - // gunit test on line 104 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A b] {foo}\n finally {bar}\n ", 104); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A b {foo}) (finally {bar}))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule6() throws Exception { - // gunit test on line 113 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A a] {foo}\n catch[B b] {fu}\n finally {bar}\n ", 113); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A a {foo}) (catch B b {fu}) (finally {bar}))"; - assertEquals("testing rule rule", expecting, actual); - } @Test public void test_block1() throws Exception { - // gunit test on line 124 - RuleReturnScope rstruct = (RuleReturnScope)execParser("block", "( ^(A B) | ^(b C) )", 124); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(BLOCK (ALT (^( A B)) (ALT (^( b C)))"; - assertEquals("testing rule 
block", expecting, actual); - } @Test public void test_alternative1() throws Exception { - // gunit test on line 127 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "x+=ID* -> $x*", 127); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT (* (BLOCK (ALT (+= x ID))))) (-> (ALT (* (REWRITE_BLOCK (ALT x))))))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative2() throws Exception { - // gunit test on line 132 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> ...", 132); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> ...))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative3() throws Exception { - // gunit test on line 133 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> ", 133); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> EPSILON))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative4() throws Exception { - // gunit test on line 135 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> foo(a={x}, b={y})", 135); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> (TEMPLATE foo (ARGLIST (= a {x}) (= b {y})))))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative5() throws Exception { - // gunit test on line 140 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> template(a={x}, b={y}) <>", 140); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> (TEMPLATE (ARGLIST (= a {x}) (= b {y})) <>)))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative6() throws Exception { - // gunit test on line 145 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> ({name})()", 145); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> (TEMPLATE {name})))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative7() throws Exception { - // gunit test on line 147 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> {expr}", 147); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> {expr}))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative8() throws Exception { - // gunit test on line 149 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "\n A -> {p1}? {e1}\n -> {e2}\n ->\n ", 149); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> {p1}? 
{e1}) (-> {e2}))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative9() throws Exception { - // gunit test on line 160 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> A", 160); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> (ALT A)))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative10() throws Exception { - // gunit test on line 162 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "a -> a", 162); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT a) (-> (ALT a)))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative11() throws Exception { - // gunit test on line 164 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "a A X? Y* -> A a ^(TOP X)? Y*", 164); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT a A (? (BLOCK (ALT X))) (* (BLOCK (ALT Y)))) (-> (ALT A a (? (REWRITE_BLOCK (ALT (^( TOP X)))) (* (REWRITE_BLOCK (ALT Y))))))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative12() throws Exception { - // gunit test on line 172 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> A[33]", 172); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> (ALT (A 33))))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative13() throws Exception { - // gunit test on line 174 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> 'int' ^(A A)*", 174); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> (ALT 'int' (* (REWRITE_BLOCK (ALT (^( A A)))))))"; - assertEquals("testing rule alternative", expecting, actual); - } - - @Test public void test_alternative14() throws Exception { - // gunit test on line 179 - RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "\n A -> {p1}? A\n -> {p2}? B\n ->\n ", 179); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(ALT_REWRITE (ALT A) (-> {p1}? (ALT A)) (-> {p2}? (ALT B)) (-> EPSILON))"; - assertEquals("testing rule alternative", expecting, actual); - } @Test public void test_element1() throws Exception { - // gunit test on line 191 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b+", 191); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element2() throws Exception { - // gunit test on line 192 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)+", 192); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element3() throws Exception { - // gunit test on line 193 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b?", 193); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? 
(BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element4() throws Exception { - // gunit test on line 194 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)?", 194); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element5() throws Exception { - // gunit test on line 195 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)*", 195); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element6() throws Exception { - // gunit test on line 196 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b*", 196); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element7() throws Exception { - // gunit test on line 197 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'while'*", 197); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT 'while')))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element8() throws Exception { - // gunit test on line 198 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'+", 198); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT 'a')))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element9() throws Exception { - // gunit test on line 199 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "a[3]", 199); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(a 3)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element10() throws Exception { - // gunit test on line 200 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'..'z'+", 200); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (.. 'a' 'z'))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element11() throws Exception { - // gunit test on line 201 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID", 201); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x ID)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element12() throws Exception { - // gunit test on line 202 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID?", 202); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? 
(BLOCK (ALT (= x ID))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element13() throws Exception { - // gunit test on line 203 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID*", 203); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (= x ID))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element14() throws Exception { - // gunit test on line 204 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b", 204); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x b)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element15() throws Exception { - // gunit test on line 205 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=(A|B)", 205); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element16() throws Exception { - // gunit test on line 206 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=~(A|B)", 206); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x (~ (BLOCK (ALT A) (ALT B))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element17() throws Exception { - // gunit test on line 207 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)", 207); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+= x (~ (BLOCK (ALT A) (ALT B))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element18() throws Exception { - // gunit test on line 208 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)+", 208); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (+= x (~ (BLOCK (ALT A) (ALT B)))))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element19() throws Exception { - // gunit test on line 209 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b+", 209); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (= x b))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element20() throws Exception { - // gunit test on line 210 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=ID*", 210); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (+= x ID))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element21() throws Exception { - // gunit test on line 211 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+='int'*", 211); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (+= x 'int'))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element22() throws Exception { - // gunit test on line 212 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=b+", 212); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (+= x b))))"; - assertEquals("testing rule element", expecting, actual); - } 
- - @Test public void test_element23() throws Exception { - // gunit test on line 213 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "('*'^)*", 213); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (^ '*'))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element24() throws Exception { - // gunit test on line 214 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "({blort} 'x')*", 214); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT {blort} 'x')))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element25() throws Exception { - // gunit test on line 215 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "A!", 215); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(! A)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element26() throws Exception { - // gunit test on line 216 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "A^", 216); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(^ A)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element27() throws Exception { - // gunit test on line 217 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=A^", 217); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x (^ A))"; - assertEquals("testing rule element", expecting, actual); - } -} diff --git a/tool/test/org/antlr/v4/test/TestActionSplitter.java b/tool/test/org/antlr/v4/test/TestActionSplitter.java deleted file mode 100644 index b6945483d..000000000 --- a/tool/test/org/antlr/v4/test/TestActionSplitter.java +++ /dev/null @@ -1,53 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.runtime.ANTLRStringStream; -import org.antlr.runtime.Token; -import org.antlr.v4.parse.ActionSplitter; -import org.antlr.v4.semantics.BlankActionSplitterListener; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -public class TestActionSplitter extends BaseTest { - static String[] exprs = { - "foo", "['foo'<29>]", - "$x", "['$x'<20>]", - "\\$x", "['\\$'<6>, 'x'<29>]", - "$x.y", "['$x.y'<11>]", - "$ID.text", "['$ID.text'<11>]", - "$ID", "['$ID'<20>]", - "$ID.getText()", "['$ID'<20>, '.getText()'<29>]", - "$ID.text = \"test\";", "['$ID.text = \"test\";'<10>]", - "$a.line == $b.line", "['$a.line'<11>, ' == '<29>, '$b.line'<11>]", - "$r.tree", "['$r.tree'<11>]", - "foo $a::n bar", "['foo '<29>, '$a::n'<13>, ' bar'<29>]", - "$Symbols[-1]::names.add($id.text);", "['$Symbols[-1]::names'<16>, '.add('<29>, '$id.text'<11>, ');'<29>]", - "$Symbols[0]::names.add($id.text);", "['$Symbols[0]::names'<18>, '.add('<29>, '$id.text'<11>, ');'<29>]", - "$Symbols::x;", "['$Symbols::x'<13>, ';'<29>]", - "$Symbols.size()>0", "['$Symbols'<20>, '.size()>0'<29>]", - "$field::x = $field.st;", "['$field::x = $field.st;'<12>]", - "$foo.get(\"ick\");", "['$foo'<20>, '.get(\"ick\");'<29>]", - }; - - @Test public void testExprs() { - for (int i = 0; i < exprs.length; i+=2) { - String input = exprs[i]; - String expect = exprs[i+1]; - List chunks = getActionChunks(input); - assertEquals("input: "+input, expect, chunks.toString()); - } - } - - public static List getActionChunks(String a) { - List chunks = new ArrayList(); - ActionSplitter splitter = new 
ActionSplitter(new ANTLRStringStream(a), - new BlankActionSplitterListener()); - Token t = splitter.nextToken(); - while ( t.getType()!=Token.EOF ) { - chunks.add("'"+t.getText()+"'<"+t.getType()+">"); - t = splitter.nextToken(); - } - return chunks; - } -} diff --git a/tool/test/org/antlr/v4/test/TestActionTranslation.java b/tool/test/org/antlr/v4/test/TestActionTranslation.java deleted file mode 100644 index f998c9328..000000000 --- a/tool/test/org/antlr/v4/test/TestActionTranslation.java +++ /dev/null @@ -1,513 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.runtime.RecognitionException; -import org.antlr.v4.automata.LexerNFAFactory; -import org.antlr.v4.automata.NFAFactory; -import org.antlr.v4.automata.ParserNFAFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; -import org.stringtemplate.v4.ST; - -/** */ -public class TestActionTranslation extends BaseTest { - String attributeTemplate = - "parser grammar A;\n"+ - "@members {#members##end-members#}\n" + - "a[int x, int x1] returns [int y]\n" + - "@init {#init##end-init#}\n" + - " : id=ID ids+=ID lab=b[34] c d {\n" + - " #inline##end-inline#\n" + - " }\n" + - " c\n" + - " ;\n" + - " finally {#finally##end-finally#}\n" + - "b[int d] returns [int e]\n" + - " : {#inline2##end-inline2#}\n" + - " ;\n" + - "c returns [int x, int y] : ;\n" + - "d : ;\n"; - - String scopeTemplate = - "parser grammar A;\n"+ - "@members {\n" + - "#members##end-members#\n" + - "}\n" + - "scope S { int i; }\n" + - "a\n" + - "scope { int z; }\n" + - "scope S;\n" + - "@init {#init##end-init#}\n" + - " : {\n" + - " #inline##end-inline#" + - " }\n" + - " ;\n" + - " finally {#finally##end-finally#}\n"; - - @Test public void testEscapedLessThanInAction() throws Exception { - String action = "i<3; ''"; - String expected = "i<3; ''"; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testEscaped$InAction() throws Exception { - String action = "int \\$n; \"\\$in string\\$\""; - String expected = "int \\$n; \"\\$in string\\$\""; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testComplicatedArgParsing() throws Exception { - String action = "x, (*a).foo(21,33), 3.2+1, '\\n', "+ - "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; - String expected = "x, (*a).foo(21,33), 3.2+1, '\\n', "+ - "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testComplicatedArgParsingWithTranslation() throws Exception { - String action = "x, $ID.text+\"3242\", (*$ID).foo(21,33), 3.2+1, '\\n', "+ - "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; - String expected = "x, 
(_rID!=null?_rID.getText():null)+\"3242\"," + - " (*_tID).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testArguments() throws Exception { - String action = "$x; $a.x"; - String expected = "_ctx.x; _ctx.x"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValue() throws Exception { - String action = "$x; $a.x"; - String expected = "_ctx.x; _ctx.x"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValueWithNumber() throws Exception { - String action = "$a.x1"; - String expected = "_ctx.x1"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValuesCurrentRule() throws Exception { - String action = "$y; $a.y;"; - String expected = "_ctx.y; _ctx.y;"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValues() throws Exception { - String action = "$lab.e; $b.e;"; - String expected = "lab.e; _rb.e;"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnWithMultipleRuleRefs() throws Exception { - String action = "$c.x; $c.y;"; - String expected = "_rc.x; _rc.y;"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testTokenRefs() throws Exception { - String action = "$id; $ID; $id.text; $id.getText(); $id.line;"; - String expected = "id; _tID; (id!=null?id.getText():null); id.getText(); (id!=null?id.getLine():0);"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testRuleRefs() throws Exception { - String action = "$lab.start; $c.tree;"; - String expected = "(lab!=null?(()lab.start):null); (_rc!=null?(()_rc.tree):null);"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testBasicGlobalScope() throws Exception { - String action = "$S::i"; - String expected = "S_stack.peek().i"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void test0IndexedGlobalScope() throws Exception { - String action = "$S[0]::i"; - String expected = "S_stack.get(0).i"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testAbsoluteIndexedGlobalScope() throws Exception { - String action = "$S[3]::i"; - String expected = "S_stack.get(3).i"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testNegIndexedGlobalScope() throws Exception { - String action = "$S[-1]::i"; - String expected = "S_stack.get(S_stack.size()-1-1).i"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testNegIndexedGlobalScope2() throws Exception { - String action = "$S[-$S::i]::i"; - String expected = "S_stack.get(S_stack.size()-S_stack.peek().i-1).i"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testBasicRuleScope() throws Exception { - String action = "$a::z"; - String expected = "a_scope_stack.peek().z"; - testActions(scopeTemplate, "inline", action, expected); - } - - @Test public void testBasicGlobalScopeInRule() throws Exception { - String action = "$S::i"; - String expected = "S_stack.peek().i"; - testActions(scopeTemplate, "inline", action, expected); - } - - @Test public void testSetBasicRuleScope() throws Exception { - String action = "$a::z = 3;"; - String expected = "a_scope_stack.peek().z = 3;"; - testActions(scopeTemplate, 
"inline", action, expected); - } - - @Test public void testSetBasicGlobalScopeInRule() throws Exception { - String action = "$S::i = 3;"; - String expected = "S_stack.peek().i = 3;"; - testActions(scopeTemplate, "inline", action, expected); - } - - @Test public void testSet0IndexedGlobalScope() throws Exception { - String action = "$S[0]::i = $S::i;"; - String expected = "S_stack.get(0).i = S_stack.peek().i;"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testSetAbsoluteIndexedGlobalScope() throws Exception { - String action = "$S[3]::i = $S::i;"; - String expected = "S_stack.get(3).i = S_stack.peek().i;"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testSetNegIndexedGlobalScope() throws Exception { - String action = "$S[-1]::i = $S::i;"; - String expected = "S_stack.get(S_stack.size()-1-1).i = S_stack.peek().i;"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testSetNegIndexedGlobalScope2() throws Exception { - String action = "$S[-$S::i]::i = $S::i;"; - String expected = "S_stack.get(S_stack.size()-S_stack.peek().i-1).i = S_stack.peek().i;"; - testActions(scopeTemplate, "members", action, expected); - } - - @Test public void testIsolatedDynamicRuleScopeRef() throws Exception { - String action = "$a;"; // refers to stack not top of stack - String expected = "a_scope_stack;"; - testActions(scopeTemplate, "inline", action, expected); - } - - @Test public void testIsolatedGlobalScopeRef() throws Exception { - String action = "$S;"; - String expected = "S_stack;"; - testActions(scopeTemplate, "inline", action, expected); - } - - - - @Test public void testDynamicRuleScopeRefInSubrule() throws Exception { - String action = "$a::n;"; - } - @Test public void testRuleScopeFromAnotherRule() throws Exception { - String action = "$a::n;"; // must be qualified - } - @Test public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception { - String action = "$a.i;"; - } - @Test public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception { - String action = "$a.i;"; - } - @Test public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception { - String action = "$a.i = 1;"; - } - @Test public void testIsolatedRefToCurrentRule() throws Exception { - String action = "$a;"; - } - @Test public void testIsolatedRefToRule() throws Exception { - String action = "$x;"; - } - @Test public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception { - String action = "$a.x;"; - } - @Test public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception { - String action = "$a.x;"; // must be qualified - } - @Test public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception { - String action = "$a.st;"; // can be qualified - } - @Test public void testRuleRefWhenRuleHasScope() throws Exception { - String action = "$b.start;"; - } - @Test public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception { - String action = "$b::n;"; - } - @Test public void testRefToTemplateAttributeForCurrentRule() throws Exception { - String action = "$st=null;"; - } - @Test public void testRefToTextAttributeForCurrentRule() throws Exception { - String action = "$text"; - } - @Test public void testRefToStartAttributeForCurrentRule() throws Exception { - String action = "$start;"; - } - - @Test public void testTokenLabelFromMultipleAlts() throws Exception { - String action = "$ID.text;"; // must be qualified - } - @Test public void 
testRuleLabelFromMultipleAlts() throws Exception { - String action = "$b.text;"; // must be qualified - } - @Test public void testUnqualifiedRuleScopeAttribute() throws Exception { - String action = "$n;"; // must be qualified - } - @Test public void testRuleAndTokenLabelTypeMismatch() throws Exception { - } - @Test public void testListAndTokenLabelTypeMismatch() throws Exception { - } - @Test public void testListAndRuleLabelTypeMismatch() throws Exception { - } - @Test public void testArgReturnValueMismatch() throws Exception { - } - @Test public void testSimplePlusEqualLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualStringLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualSetLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualWildcardLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testImplicitTokenLabel() throws Exception { - String action = "$ID; $ID.text; $ID.getText()"; - } - - @Test public void testImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testRuleLabelWithoutOutputOption() throws Exception { - } - @Test public void testRuleLabelOnTwoDifferentRulesAST() throws Exception { - } - @Test public void testRuleLabelOnTwoDifferentRulesTemplate() throws Exception { - } - @Test public void testMissingArgs() throws Exception { - } - @Test public void testArgsWhenNoneDefined() throws Exception { - } - @Test public void testReturnInitValue() throws Exception { - } - @Test public void testMultipleReturnInitValue() throws Exception { - } - @Test public void testCStyleReturnInitValue() throws Exception { - } - @Test public void testArgsWithInitValues() throws Exception { - } - @Test public void testArgsOnToken() throws Exception { - } - @Test public void testArgsOnTokenInLexer() throws Exception { - } - @Test public void testLabelOnRuleRefInLexer() throws Exception { - String action = "$i.text"; - } - - @Test public void testRefToRuleRefInLexer() throws Exception { - String action = "$ID.text"; - } - - @Test public void testRefToRuleRefInLexerNoAttribute() throws Exception { - String action = "$ID"; - } - - @Test public void testCharLabelInLexer() throws Exception { - } - @Test public void testCharListLabelInLexer() throws Exception { - } - @Test public void testWildcardCharLabelInLexer() throws Exception { - } - @Test public void testWildcardCharListLabelInLexer() throws Exception { - } - @Test public void testMissingArgsInLexer() throws Exception { - } - @Test public void testLexerRulePropertyRefs() throws Exception { - String action = "$text $type $line $pos $channel $index $start $stop"; - } - - @Test public void testLexerLabelRefs() throws Exception { - String action = "$a $b.text $c $d.text"; - } - - @Test public void testSettingLexerRulePropertyRefs() throws Exception { - String action = 
"$text $type=1 $line=1 $pos=1 $channel=1 $index"; - } - - @Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception { - } - @Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception { - } - @Test public void testTokenLabelTreeProperty() throws Exception { - String action = "$id.tree;"; - } - - @Test public void testTokenRefTreeProperty() throws Exception { - String action = "$ID.tree;"; - } - - @Test public void testAmbiguousTokenRef() throws Exception { - String action = "$ID;"; - } - - @Test public void testAmbiguousTokenRefWithProp() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testRuleRefWithDynamicScope() throws Exception { - String action = "$field::x = $field.st;"; - } - - @Test public void testAssignToOwnRulenameAttr() throws Exception { - String action = "$rule.tree = null;"; - } - - @Test public void testAssignToOwnParamAttr() throws Exception { - String action = "$rule.i = 42; $i = 23;"; - } - - @Test public void testIllegalAssignToOwnRulenameAttr() throws Exception { - String action = "$rule.stop = 0;"; - } - - @Test public void testIllegalAssignToLocalAttr() throws Exception { - String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;"; - } - - @Test public void testIllegalAssignRuleRefAttr() throws Exception { - String action = "$other.tree = null;"; - } - - @Test public void testIllegalAssignTokenRefAttr() throws Exception { - String action = "$ID.text = \"test\";"; - } - - @Test public void testAssignToTreeNodeAttribute() throws Exception { - String action = "$tree.scope = localScope;"; - } - - @Test public void testDoNotTranslateAttributeCompare() throws Exception { - String action = "$a.line == $b.line"; - } - - @Test public void testDoNotTranslateScopeAttributeCompare() throws Exception { - String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }"; - } - - @Test public void testTreeRuleStopAttributeIsInvalid() throws Exception { - String action = "$r.x; $r.start; $r.stop"; - } - - @Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception { - String action = "$text"; - } - - @Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception { - String action = "int x = $b::n;"; - } - - @Test public void testBracketArgParsing() throws Exception { - } - - @Test public void testStringArgParsing() throws Exception { - String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19"; - } - @Test public void testComplicatedSingleArgParsing() throws Exception { - String action = "(*a).foo(21,33,\",\")"; - } - @Test public void testArgWithLT() throws Exception { - String action = "34<50"; - } - @Test public void testGenericsAsArgumentDefinition() throws Exception { - String action = "$foo.get(\"ick\");"; - } - @Test public void testGenericsAsArgumentDefinition2() throws Exception { - String action = "$foo.get(\"ick\"); x=3;"; - } - @Test public void testGenericsAsReturnValue() throws Exception { - } - - - - public void testActions(String template, String actionName, String action, String expected) { - ST st = new ST(template); - st.add(actionName, action); - String grammar = st.render(); - try { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - NFAFactory factory = new ParserNFAFactory(g); - if ( g.isLexer() ) factory = new LexerNFAFactory((LexerGrammar)g); - g.nfa = factory.createNFA(); - - CodeGenerator 
gen = new CodeGenerator(g); - ST outputFileST = gen.generate(); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString(g.tool)); - } - } - catch (RecognitionException re) { - re.printStackTrace(System.err); - } - } -} diff --git a/tool/test/org/antlr/v4/test/TestAttributeChecks.java b/tool/test/org/antlr/v4/test/TestAttributeChecks.java deleted file mode 100644 index bc702b1e7..000000000 --- a/tool/test/org/antlr/v4/test/TestAttributeChecks.java +++ /dev/null @@ -1,280 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.runtime.RecognitionException; -import org.junit.Test; -import org.stringtemplate.v4.ST; - -/** */ -public class TestAttributeChecks extends BaseTest { - String attributeTemplate = - "parser grammar A;\n"+ - "@members {}\n" + - "a[int x] returns [int y]\n" + - "@init {}\n" + - " : id=ID ids+=ID lab=b[34] {\n" + - " \n" + - " }\n" + - " c\n" + - " ;\n" + - " finally {}\n" + - "b[int d] returns [int e]\n" + - " : {}\n" + - " ;\n" + - "c : ;\n" + - "d : ;\n"; - - String scopeTemplate = - "parser grammar A;\n"+ - "@members {\n" + - "\n" + - "}\n" + - "scope S { int i; }\n" + - "a[int x] returns [int y]\n" + - "scope { int z; }\n" + - "scope S;\n" + - "@init {}\n" + - " : lab=b[34] {\n" + - " " + - " }\n" + - " ;\n" + - " finally {}\n" + - "b[int d] returns [int e]\n" + - "scope { int f; }\n" + - " : {}\n" + - " ;\n" + - "c : ;"; - - String[] membersChecks = { - "$a", "error(29): A.g:2:11: unknown attribute reference a in $a\n", - "$a.y", "error(29): A.g:2:11: unknown attribute reference a in $a.y\n", - }; - - String[] initChecks = { - "$text", "", - "$start", "", - "$x = $y", "", - "$y = $x", "", - "$lab.e", "", - "$ids", "", - - "$a", "error(33): A.g:4:8: missing attribute access on rule reference a in $a\n", - "$c", "error(29): A.g:4:8: unknown attribute reference c in $c\n", - "$a.q", "error(31): A.g:4:10: unknown attribute q for rule a in $a.q\n", - }; - - String[] inlineChecks = { - "$text", "", - "$start", "", - "$x = $y", "", - "$y = $x", "", - "$a.x = $a.y", "", - "$lab.e", "", - "$lab.text", "", - "$b.e", "", - "$c.text", "", - "$ID", "", - "$ID.text", "", - "$id", "", - "$id.text", "", - "$ids", "", - }; - - String[] bad_inlineChecks = { - "$a", "error(33): A.g:6:4: missing attribute access on rule reference a in $a\n", - "$b", "error(33): A.g:6:4: missing attribute access on rule reference b in $b\n", - "$lab", "error(33): A.g:6:4: missing attribute access on rule reference lab in $lab\n", - "$c", "error(33): A.g:6:4: missing attribute access on rule reference c in $c\n", // no scope - "$q", "error(29): A.g:6:4: unknown attribute reference q in $q\n", - "$q.y", "error(29): A.g:6:4: unknown attribute reference q in $q.y\n", - "$q = 3", "error(29): A.g:6:4: unknown attribute reference q in $q\n", - "$q = 3;", "error(29): A.g:6:4: unknown attribute reference q in $q = 3;\n", - "$q.y = 3;", "error(29): A.g:6:4: unknown attribute reference q in $q.y = 3;\n", - "$q = $blort;", "error(29): A.g:6:4: unknown attribute reference q in $q = $blort;\n" + - "error(29): A.g:6:9: unknown attribute reference blort in $blort\n", - "$a.ick", "error(31): A.g:6:6: unknown attribute ick for rule a in $a.ick\n", - "$a.ick = 3;", "error(31): A.g:6:6: unknown attribute ick 
for rule a in $a.ick = 3;\n", - "$b.d", "error(30): A.g:6:6: cannot access rule d's parameter: $b.d\n", // can't see rule ref's arg - "$d.text", "error(29): A.g:6:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref - "$lab.d", "error(30): A.g:6:8: cannot access rule d's parameter: $lab.d\n", - }; - - String[] finallyChecks = { - "$text", "", - "$start", "", - "$x = $y", "", - "$y = $x", "", - "$lab.e", "", - "$lab.text", "", - "$id", "", - "$id.text", "", - "$ids", "", - - "$lab", "error(33): A.g:9:14: missing attribute access on rule reference lab in $lab\n", - "$a", "error(33): A.g:9:14: missing attribute access on rule reference a in $a\n", - "$q", "error(29): A.g:9:14: unknown attribute reference q in $q\n", - "$q.y", "error(29): A.g:9:14: unknown attribute reference q in $q.y\n", - "$q = 3", "error(29): A.g:9:14: unknown attribute reference q in $q\n", - "$q = 3;", "error(29): A.g:9:14: unknown attribute reference q in $q = 3;\n", - "$q.y = 3;", "error(29): A.g:9:14: unknown attribute reference q in $q.y = 3;\n", - "$q = $blort;", "error(29): A.g:9:14: unknown attribute reference q in $q = $blort;\n" + - "error(29): A.g:9:19: unknown attribute reference blort in $blort\n", - "$a.ick", "error(31): A.g:9:16: unknown attribute ick for rule a in $a.ick\n", - "$a.ick = 3;", "error(31): A.g:9:16: unknown attribute ick for rule a in $a.ick = 3;\n", - "$b", "error(29): A.g:9:14: unknown attribute reference b in $b\n", - "$b.e", "error(29): A.g:9:14: unknown attribute reference b in $b.e\n", // can't see rule refs outside alts - "$b.d", "error(29): A.g:9:14: unknown attribute reference b in $b.d\n", - "$c.text", "error(29): A.g:9:14: unknown attribute reference c in $c.text\n", - "$lab.d", "error(30): A.g:9:18: cannot access rule d's parameter: $lab.d\n", - }; - - String[] dynMembersChecks = { - "$S", "", - "$S::i", "", - "$S::i=$S::i", "", - - "$b::f", "error(54): A.g:3:1: unknown dynamic scope: b in $b::f\n", - "$S::j", "error(55): A.g:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j\n", - "$S::j = 3;", "error(55): A.g:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n", - "$S::j = $S::k;", "error(55): A.g:3:4: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" + - "error(55): A.g:3:12: unknown dynamically-scoped attribute for scope S: k in $S::k\n", - }; - - String[] dynInitChecks = { - "$a", "", - "$b", "", - "$lab", "", - "$b::f", "", - "$S::i", "", - "$S::i=$S::i", "", - "$a::z", "", - "$S", "", - - "$S::j", "error(55): A.g:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j\n", - "$S::j = 3;", "error(55): A.g:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n", - "$S::j = $S::k;", "error(55): A.g:8:11: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" + - "error(55): A.g:8:19: unknown dynamically-scoped attribute for scope S: k in $S::k\n", - }; - - String[] dynInlineChecks = { - "$a", "", - "$b", "", - "$lab", "", - "$b::f", "", - "$S", "", - "$S::i", "", - "$S::i=$S::i", "", - "$a::z", "", - - "$S::j", "error(55): A.g:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j\n", - "$S::j = 3;", "error(55): A.g:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n", - "$S::j = $S::k;", "error(55): A.g:10:7: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" + - "error(55): A.g:10:15: unknown dynamically-scoped attribute for scope S: k in $S::k\n", - "$Q[-1]::y", "error(54): A.g:10:4: unknown 
dynamic scope: Q in $Q[-1]::y\n", - "$Q[-i]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-i]::y\n", - "$Q[i]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[i]::y\n", - "$Q[0]::y", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[0]::y\n", - "$Q[-1]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-1]::y = 23;\n", - "$Q[-i]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[-i]::y = 23;\n", - "$Q[i]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[i]::y = 23;\n", - "$Q[0]::y = 23;", "error(54): A.g:10:4: unknown dynamic scope: Q in $Q[0]::y = 23;\n", - "$S[-1]::y", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-1]::y\n", - "$S[-i]::y", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-i]::y\n", - "$S[i]::y", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[i]::y\n", - "$S[0]::y", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[0]::y\n", - "$S[-1]::y = 23;", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-1]::y = 23;\n", - "$S[-i]::y = 23;", "error(55): A.g:10:11: unknown dynamically-scoped attribute for scope S: y in $S[-i]::y = 23;\n", - "$S[i]::y = 23;", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[i]::y = 23;\n", - "$S[0]::y = 23;", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S[0]::y = 23;\n", - "$S[$S::y]::i", "error(55): A.g:10:10: unknown dynamically-scoped attribute for scope S: y in $S::y\n" - }; - - String[] dynFinallyChecks = { - "$a", "", - "$b", "", - "$lab", "", - "$b::f", "", - "$S", "", - "$S::i", "", - "$S::i=$S::i", "", - "$a::z", "", - - "$S::j", "error(55): A.g:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j\n", - "$S::j = 3;", "error(55): A.g:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j = 3;\n", - "$S::j = $S::k;", "error(55): A.g:12:17: unknown dynamically-scoped attribute for scope S: j in $S::j = $S::k;\n" + - "error(55): A.g:12:25: unknown dynamically-scoped attribute for scope S: k in $S::k\n", - }; - - @Test public void testMembersActions() throws RecognitionException { - testActions("members", membersChecks, attributeTemplate); - } - - @Test public void testInitActions() throws RecognitionException { - testActions("init", initChecks, attributeTemplate); - } - - @Test public void testInlineActions() throws RecognitionException { - testActions("inline", inlineChecks, attributeTemplate); - } - - @Test public void testBadInlineActions() throws RecognitionException { - testActions("inline", bad_inlineChecks, attributeTemplate); - } - - @Test public void testFinallyActions() throws RecognitionException { - testActions("finally", finallyChecks, attributeTemplate); - } - - @Test public void testDynMembersActions() throws RecognitionException { - testActions("members", dynMembersChecks, scopeTemplate); - } - - @Test public void testDynInitActions() throws RecognitionException { - testActions("init", dynInitChecks, scopeTemplate); - } - - @Test public void testDynInlineActions() throws RecognitionException { - testActions("inline", dynInlineChecks, scopeTemplate); - } - - @Test public void testDynFinallyActions() throws RecognitionException { - testActions("finally", dynFinallyChecks, scopeTemplate); - } - - @Test public void testTokenRef() throws RecognitionException { - String grammar = - "parser grammar S;\n" + - "a : x=ID 
{Token t = $x; t = $ID;} ;\n"; - String expected = - ""; - testErrors(new String[] {grammar, expected}, false); - } - - @Test public void testNonDynamicAttributeOutsideRule() throws Exception { - String action = "public void foo() { $x; }"; - } - @Test public void testNonDynamicAttributeOutsideRule2() throws Exception { - String action = "public void foo() { $x.y; }"; - } - @Test public void testUnknownGlobalScope() throws Exception { - String action = "$Symbols::names.add($id.text);"; - } - @Test public void testUnknownDynamicAttribute() throws Exception { - String action = "$a::x"; - } - - @Test public void testUnknownGlobalDynamicAttribute() throws Exception { - String action = "$Symbols::x"; - } - - - public void testActions(String location, String[] pairs, String template) { - for (int i = 0; i < pairs.length; i+=2) { - String action = pairs[i]; - String expected = pairs[i+1]; - ST st = new ST(template); - st.add(location, action); - String grammar = st.render(); - testErrors(new String[] {grammar, expected}, false); - } - } -} diff --git a/tool/test/org/antlr/v4/test/TestBasicSemanticErrors.java b/tool/test/org/antlr/v4/test/TestBasicSemanticErrors.java deleted file mode 100644 index 5ac16efc6..000000000 --- a/tool/test/org/antlr/v4/test/TestBasicSemanticErrors.java +++ /dev/null @@ -1,103 +0,0 @@ -package org.antlr.v4.test; - -import org.junit.Test; - -public class TestBasicSemanticErrors extends BaseTest { - static String[] A = { - // INPUT - "grammar A;\n" + - "\n" + - "options {\n" + - " output=template;\n" + - "}\n" + - "\n" + - "a : ID -> ID ;\n" + - "\n" + - "b : A^ | ((B!|C)) -> C;", - // YIELDS - "error(68): A.g:7:7: alts with rewrites can't use heterogeneous types left of ->\n" + - "error(78): A.g:9:4: AST operator with non-AST output option: ^\n" + - "error(78): A.g:9:11: AST operator with non-AST output option: !\n" + - "error(79): A.g:9:11: rule b alt 2 uses rewrite syntax and also an AST operator\n", - - // INPUT - "tree grammar B;\n" + - "options {\n" + - "\tfilter=true;\n" + - "\tbacktrack=false;\n" + - "\toutput=template;\n" + - "}\n" + - "\n" + - "a : A;\n" + - "\n" + - "b : ^(. 
A) ;", - // YIELDS - "error(80): B.g:10:6: Wildcard invalid as root; wildcard can itself be a tree\n" + - "error(81): B.g:1:5: option backtrack=false conflicts with tree grammar filter mode\n" + - "error(81): B.g:1:5: option output=template conflicts with tree grammar filter mode\n" - }; - - static String[] U = { - // INPUT - "parser grammar U;\n" + - "options { foo=bar; k=*; backtrack=true;}\n" + - "tokens {\n" + - " f='fkj';\n" + - " S = 'a';\n" + - "}\n" + - "tokens { A; }\n" + - "options { x=y; }\n" + - "\n" + - "a\n" + - "options { blech=bar; greedy=true; }\n" + - " : ID\n" + - " ;\n" + - "b : ( options { ick=bar; greedy=true; } : ID )+ ;\n" + - "c : ID ID ;", - // YIELDS - "error(21): U.g:8:0: repeated grammar prequel spec (option, token, or import); please merge\n" + - "error(21): U.g:7:0: repeated grammar prequel spec (option, token, or import); please merge\n" + - "error(49): U.g:2:10: illegal option foo\n" + - "error(26): U.g:4:8: token names must start with an uppercase letter: f\n" + - "error(25): U.g:4:8: can't assign string value to token name f in non-combined grammar\n" + - "error(25): U.g:5:8: can't assign string value to token name S in non-combined grammar\n" + - "error(49): U.g:8:10: illegal option x\n" + - "error(49): U.g:11:10: illegal option blech\n" + - "error(49): U.g:14:16: illegal option ick\n" + - "error(49): U.g:15:16: illegal option x\n", - - // INPUT - "tree grammar V;\n" + - "options {\n" + - " rewrite=true;\n" + - " output=template;\n" + - "}\n" + - "a : A\n" + - " | A B -> template() \"kjsfdkdsj\" \n" + - " ;", - // YIELDS - "error(66): V.g:7:4: with rewrite=true, alt 2 not simple node or obvious tree element; text attribute for rule not guaranteed to be correct\n", - - // INPUT - "tree grammar V;\n" + - "options { rewrite=true; }\n" + - "a : A\n" + - " | A B -> template() \"kjsfdkdsj\" \n" + - " ;", - // YIELDS - "error(62): V.g:4:8: rule a uses rewrite syntax or operator with no output option\n", - }; - - static String[] C = { - "parser grammar C;\n" + - "options {output=AST;}\n" + - "tokens { A; B; C; }\n" + - "a : A -> B $a A ;", // no problem with or $a. 
- - "" - }; - - @Test public void testA() { super.testErrors(A, false); } - @Test public void testU() { super.testErrors(U, false); } - @Test public void testE() { super.testErrors(C, false); } -} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/TestDFAConstruction.java b/tool/test/org/antlr/v4/test/TestDFAConstruction.java deleted file mode 100644 index a316f3b02..000000000 --- a/tool/test/org/antlr/v4/test/TestDFAConstruction.java +++ /dev/null @@ -1,417 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.analysis.LeftRecursionDetector; -import org.antlr.v4.automata.NFA; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.Message; -import org.junit.Test; - -import java.util.List; - -/** NON-OPTIMIZED DFA */ -public class TestDFAConstruction extends BaseTest { - @Test public void testSimpleLinearApproxDecisionAsDFA() throws Exception { - String g = - "parser grammar P;\n"+ - "a : A | B ;"; - String expecting = - "s0-A->:s1=>1\n" + - "s0-B->:s2=>2\n"; - checkRuleDFA(g, "a", expecting); - } - - @Test public void testApproxRecur() throws Exception { - String g = - "parser grammar A;\n" + - "a : e X\n" + - " | e Y\n" + - " ;\n" + - "e : L e R\n" + - " | I\n" + - " ;"; -// String expecting = -// "s0-I->s2\n" + -// "s0-L->s1\n" + -// "s1-I->s2\n" + -// "s1-L->s1\n" + -// "s2-R->s3\n" + -// "s2-X->:s5=>1\n" + -// "s2-Y->:s4=>2\n" + -// "s3-R->s3\n" + -// "s3-X->:s5=>1\n" + -// "s3-Y->:s4=>2\n"; -// String expecting = -// "s0-I->s1\n" + -// "s0-L->s2\n" + -// "s1-Y->:s3=>2\n" + -// "s1-X->:s4=>1\n" + -// "s2-I->s5\n" + -// "s2-L->s2\n" + -// "s5-Y->:s3=>2\n" + -// "s5-X->:s4=>1\n" + -// "s5-R->s6\n" + -// "s6-Y->:s3=>2\n" + -// "s6-X->:s4=>1\n" + -// "s6-R->s6\n"; - String expecting = - "s0-L->s1\n" + - "s0-I->s2\n" + - "s1-L->s1\n" + - "s1-I->s2\n" + - "s2-X->:s3=>1\n" + - "s2-R->s4\n" + - "s2-Y->:s5=>2\n" + - "s4-X->:s3=>1\n" + - "s4-R->s4\n" + - "s4-Y->:s5=>2\n"; - checkRuleDFA(g, "a", expecting); - } - - @Test public void checkNullableRuleAndMultipleCalls() throws Exception { - String g = - "parser grammar B;\n" + - " \n" + - "a : b X\n"+ - " | b Y\n"+ - " ; \n" + - "b : c D\n"+ - " | c E\n"+ - " ;\n" + - "c : C | ;"; - String expecting = - "s0-C->s1\n" + - "s0-D->s2\n" + - "s0-E->s3\n" + - "s1-D->s2\n" + - "s1-E->s3\n" + - "s2-X->:s4=>1\n" + - "s2-Y->:s5=>2\n" + - "s3-X->:s4=>1\n" + - "s3-Y->:s5=>2\n"; - checkRuleDFA(g, "a", expecting); - } - - @Test public void avoidsGlobalFollowSequence() throws Exception { - String g = - "parser grammar C;\n" + - "a : b X\n" + - " | b Y\n" + - " ; \n" + - "b : F\n" + - " |\n" + - " ; \n" + - "q : b Q ;"; - String expecting = - "s0-F->s1\n" + - "s0-X->:s2=>1\n" + - "s0-Y->:s3=>2\n" + - "s1-X->:s2=>1\n" + - "s1-Y->:s3=>2\n"; - checkRuleDFA(g, "a", expecting); - } - - @Test public void strongLL() throws Exception { - String g = - "parser grammar D;\n" + - "\n" + - "s : X a A B\n" + - " | Y a B\n" + - " ;\n" + - "a : A | B | ;"; - // AB predicts 1 and 3 but AB only happens when called from 1st alt for 3rd alt - // In that case, 1st alt would be AA not AB. 
LL(2) but not strong LL(2) - // dup rules to reduce to strong LL(2) - String expecting = - "s0-A->s1\n" + - "s0-B->s2\n" + - "s1-A->:s3=>1\n" + - "s1-B->:s4=>1\n" + - "s2-A->:s5=>2\n" + - "s2-B->:s6=>2\n" + - "s2-EOF->:s7=>3\n"; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - } - - @Test public void testMultiAltsWithRecursion() throws Exception { - String g = - "parser grammar T;\n" + - "s : b X\n" + - " | b Y\n" + - " ;\n" + - "b : L b R\n" + - " | A b B\n" + - " | I\n" + - " ;"; - String expecting = - "s0-L->s1\n" + - "s0-A->s2\n" + - "s0-I->s3\n" + - "s1-L->s1\n" + - "s1-A->s2\n" + - "s1-I->s3\n" + - "s2-L->s1\n" + - "s2-A->s2\n" + - "s2-I->s3\n" + - "s3-X->:s4=>1\n" + - "s3-R->s5\n" + - "s3-B->s6\n" + - "s3-Y->:s7=>2\n" + - "s5-X->:s4=>1\n" + - "s5-R->s5\n" + - "s5-B->s6\n" + - "s5-Y->:s7=>2\n" + - "s6-X->:s4=>1\n" + - "s6-R->s5\n" + - "s6-B->s6\n" + - "s6-Y->:s7=>2\n"; - checkRuleDFA(g, "s", expecting); - } - - @Test public void recursionInMultipleWithoutNonRecursiveAlt() throws Exception { - String g = - "parser grammar t;\n"+ - "a : A a X | A a Y;"; - String expecting = - "s0-A->:s1=>1\n"; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "A"); - unreachable(msgs, new int[] {2}); - assertEquals(msgs.size(), 2); - } - - @Test public void emptyAndNonEmptyContextStack() throws Exception { - String g = - "parser grammar S4;\n" + - "a : A+ ;\n" + - "x : a a ;"; - String expecting = - "s0-A->:s1=>1\n" + - "s0-EOF->:s2=>1\n"; - List msgs = checkRuleDFA(g, 0, expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "A"); - unreachable(msgs, new int[] {2}); - assertEquals(msgs.size(), 2); - } - - - @Test public void simpleNullableRule() throws Exception { - String g = - "parser grammar S2;\n" + - "a : b X \n" + - " | b Y\n" + - " ; \n" + - "b : F \n" + - " | \n" + - " ;"; - String expecting = - "s0-F->s1\n" + - "s0-X->:s2=>1\n" + - "s0-Y->:s3=>2\n" + - "s1-X->:s2=>1\n" + - "s1-Y->:s3=>2\n"; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - assertEquals(msgs.size(), 0); - } - - @Test public void testRecursion() throws Exception { - String g = - "parser grammar t;\n"+ - "s : a Y | A+ X ;\n" + - "a : A a | Q;"; - String expecting = - "s0-A->s1\n" + - "s0-Q->:s2=>1\n" + - "s1-A->s1\n" + - "s1-Q->:s2=>1\n" + - "s1-X->:s3=>2\n"; - List msgs = checkRuleDFA(g, "s", expecting); - System.out.println(msgs); - assertEquals(msgs.size(), 0); - } - - @Test public void testimmediateLeftRecursion() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar( - "parser grammar t;\n"+ - "s : a ;\n" + - "a : a A | B;", equeue); - NFA nfa = createNFA(g); - LeftRecursionDetector lr = new LeftRecursionDetector(nfa); - lr.check(); - String expecting = "[[Rule{name=a}]]"; - assertEquals(expecting, lr.listOfRecursiveCycles.toString()); - } - - @Test public void testLeftRecursionInMultipleCycles() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar( - "parser grammar t;\n"+ - "s : a x ;\n" + - "a : b | A ;\n" + - "b : c ;\n" + - "c : a | C ;\n" + - "x : y | X ;\n" + - "y : x ;\n", equeue); - NFA nfa = createNFA(g); - LeftRecursionDetector lr = new LeftRecursionDetector(nfa); - lr.check(); - String expecting = "[[Rule{name=a}, Rule{name=c}, Rule{name=b}], [Rule{name=x}, Rule{name=y}]]"; - assertEquals(expecting, lr.listOfRecursiveCycles.toString()); - } - - @Test public void selfRecurseNonDet() throws Exception { - String g = - "parser 
grammar t;\n"+ - "s : a ;\n" + - "a : P a P | P;"; - // nondeterministic from left edge - String expecting = - "s0-P->s1\n" + - "s1-P->:s2=>1\n" + - "s1-EOF->:s3=>2\n"; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "P P"); - assertEquals(msgs.size(), 1); - } - - @Test public void testIndirectRecursionLoop() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar( - "parser grammar t;\n"+ - "s : a ;\n" + - "a : b X ;\n"+ - "b : a B ;\n", equeue); - NFA nfa = createNFA(g); - LeftRecursionDetector lr = new LeftRecursionDetector(nfa); - lr.check(); - String expecting = "[[Rule{name=a}, Rule{name=b}]]"; - assertEquals(lr.listOfRecursiveCycles.toString(), expecting); - } - - @Test public void testIndirectRecursionLoop2() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar( - "parser grammar t;\n"+ - "s : a ;\n" + - "a : i b X ;\n"+ // should see through i - "b : a B ;\n" + - "i : ;\n", equeue); - NFA nfa = createNFA(g); - LeftRecursionDetector lr = new LeftRecursionDetector(nfa); - lr.check(); - String expecting = "[[Rule{name=a}, Rule{name=b}]]"; - assertEquals(expecting, lr.listOfRecursiveCycles.toString()); - } - - @Test public void testifThenElse() throws Exception { - String g = - "parser grammar t;\n"+ - "s : IF s (E s)? | B;\n" + - "slist: s SEMI ;"; - String expecting = - "s0-E->:s1=>1\n" + - "s0-SEMI->:s2=>2\n"; - List msgs = checkRuleDFA(g, 0, expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "E"); - //unreachable(msgs, new int[] {2}); - assertEquals(msgs.size(), 1); - } - - @Test public void testifThenElseChecksStackSuffixConflict() throws Exception { - // if you don't check stack soon enough, this finds E B not just E - // as ambig input - String g = - "parser grammar t;\n"+ - "slist: s SEMI ;\n"+ - "s : IF s el | B;\n" + - "el: (E s)? ;\n"; - String expecting = - "s0-E->:s1=>1\n" + - "s0-SEMI->:s2=>2\n"; - List msgs = checkRuleDFA(g, 1, expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "E"); - assertEquals(msgs.size(), 1); - } - - @Test - public void testDoubleInvokeRuleLeftEdge() throws Exception { - String g = - "parser grammar t;\n"+ - "a : b X\n" + - " | b Y\n" + - " ;\n" + - "b : c B\n" + - " | c\n" + - " ;\n" + - "c : C ;\n"; - String expecting = - "s0-C->s1\n" + - "s1-B->s2\n" + - "s1-X->:s3=>1\n" + - "s1-Y->:s4=>2\n" + - "s2-X->:s3=>1\n" + - "s2-Y->:s4=>2\n"; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - assertEquals(msgs.size(), 0); - } - - @Test public void testimmediateTailRecursion() throws Exception { - String g = - "parser grammar t;\n"+ - "s : a ;\n" + - "a : A a | A B;"; - String expecting = - "s0-A->s1\n" + - "s1-A->:s2=>1\n" + - "s1-B->:s3=>2\n"; - List msgs = checkRuleDFA(g, "a", expecting); - assertEquals(msgs.size(), 0); - } - - @Test public void testCycleInsideRuleDoesNotForceInfiniteRecursion() throws Exception { - // shouldn't be possible to loop - // forever inside of a rule if there is an epsilon loop. 
- String g = - "parser grammar t;\n"+ - "s : a ;\n" + - "a : (A|)+ B;\n"; - String expecting = - "s0-A->:s1=>1\n" + - "s0-B->:s2=>2\n"; - List msgs = checkRuleDFA(g, 0, expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "A"); - assertEquals(msgs.size(), 1); - - expecting = - "s0-B->:s1=>1\n" + - "s0-A->:s2=>2\n"; - msgs = checkRuleDFA(g, 1, expecting); - System.out.println(msgs); - ambig(msgs, new int[] {1,2}, "B"); - assertEquals(msgs.size(), 1); - } - - /* - @Test public void _template() throws Exception { - String g = - ""; - String expecting = - ""; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - //ambig(msgs, new int[] {1,2}, "A"); - //unreachable(msgs, new int[] {2}); - assertEquals(msgs.size(), 0); - } - */ - -} diff --git a/tool/test/org/antlr/v4/test/TestDFAInterp.java b/tool/test/org/antlr/v4/test/TestDFAInterp.java deleted file mode 100644 index f60c65d05..000000000 --- a/tool/test/org/antlr/v4/test/TestDFAInterp.java +++ /dev/null @@ -1,98 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.runtime.ANTLRStringStream; -import org.antlr.runtime.CharStream; -import org.antlr.runtime.CommonTokenStream; -import org.antlr.runtime.Token; -import org.antlr.v4.automata.DFA; -import org.antlr.v4.automata.DecisionState; -import org.antlr.v4.automata.NFA; -import org.antlr.v4.codegen.CompiledPDA; -import org.antlr.v4.codegen.DFACompiler; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.pda.PDA; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import java.util.List; - -/** */ -public class TestDFAInterp extends BaseTest { - - public static class InterpLexer extends Lexer { - public InterpLexer(CharStream input, PDA pda) { - super(input); - modeToPDA = new PDA[] { pda }; - } - } - - @Test public void testSimpleLL1Decision() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n"); - - Grammar g = new Grammar( - "parser grammar P;\n" + - "a : ID | INT ;\n" - ); - int expecting = 1; - checkDFAMatches(g, lg, 0, "ab", expecting); - - expecting = 2; - checkDFAMatches(g, lg, 0, "32", expecting); - } - - @Test public void testArbCommonPrefix() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "SEMI : ';' ;\n" + - "DOT : '.' 
;\n" + - "WS : ' ' ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n"); - - Grammar g = new Grammar( - "parser grammar P;\n" + - "tokens { WS; }\n" + - "a : ID+ SEMI\n" + - " | ID+ DOT\n" + - " ;\n" - ); - int expecting = 1; - checkDFAMatches(g, lg, 2, "a b c ;", expecting); - - expecting = 2; - checkDFAMatches(g, lg, 2, "a b c .", expecting); - } - - int interp(Grammar g, LexerGrammar lg, int decision, String input) { - NFA nfa = createNFA(g); - DecisionState blk = nfa.decisionToNFAState.get(decision); - DFA dfa = createDFA(g, blk); - DFACompiler comp = new DFACompiler(dfa); - CompiledPDA obj = comp.compile(); - PDA pda = new PDA(obj.code, obj.altToAddr, obj.nLabels); - - lg.importVocab(g); - PDA lexerPDA = getLexerPDA(lg); - Lexer lexer = new InterpLexer(new ANTLRStringStream(input), lexerPDA); - - CommonTokenStream tokens = new CommonTokenStream(lexer); - tokens.fill(); - List list = tokens.getTokens(); - for (Token t : list) {// hide WS - if ( t.getType()==g.getTokenType("WS") ) t.setChannel(Token.HIDDEN_CHANNEL); - } - System.out.println("tokens="+ list); - int alt = pda.execNoRecursion(tokens, 0); - return alt; - } - - void checkDFAMatches(Grammar g, LexerGrammar lg, int decision, - String input, int expecting) { - int result = interp(g, lg, decision, input); - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/test/TestDFAtoPDABytecodeGeneration.java b/tool/test/org/antlr/v4/test/TestDFAtoPDABytecodeGeneration.java deleted file mode 100644 index 77ea5e3e7..000000000 --- a/tool/test/org/antlr/v4/test/TestDFAtoPDABytecodeGeneration.java +++ /dev/null @@ -1,119 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.automata.DFA; -import org.antlr.v4.automata.DecisionState; -import org.antlr.v4.automata.Edge; -import org.antlr.v4.automata.NFA; -import org.antlr.v4.codegen.CompiledPDA; -import org.antlr.v4.codegen.DFACompiler; -import org.antlr.v4.runtime.pda.Bytecode; -import org.antlr.v4.runtime.pda.PDA; -import org.antlr.v4.tool.Grammar; -import org.junit.Test; - -/** */ -public class TestDFAtoPDABytecodeGeneration extends BaseTest { - @Test public void testNotAisSet() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : ~A B C | A ;"); - String expecting = - "0000:\tsplit 7, 16\n" + - "0007:\tset 0\n" + - "0010:\tjmp 13\n" + - "0013:\taccept 1\n" + - "0016:\tmatch8 4\n" + - "0018:\tjmp 21\n" + - "0021:\taccept 2\n"; - checkBytecode(g, 0, expecting); - } - - @Test public void testAorBToSameState() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | B ;"); - String expecting = - "0000:\tsplit 7, 15\n" + - "0007:\tmatch8 4\n" + - "0009:\tjmp 12\n" + - "0012:\taccept 2\n" + - "0015:\tmatch8 5\n" + - "0017:\tjmp 12\n"; - - NFA nfa = createNFA(g); - DecisionState blk = nfa.decisionToNFAState.get(0); - DFA dfa = createDFA(g, blk); - - // make S0 go to S1 on both A and B (pinch alts back to single state) - Edge e0 = dfa.states.get(0).edge(0); - Edge e1 = dfa.states.get(0).edge(1); - e0.target = e1.target; - System.out.print("altered DFA="+dfa); - - DFACompiler comp = new DFACompiler(dfa); - CompiledPDA obj = comp.compile(); - PDA pda = new PDA(obj.code, obj.altToAddr, obj.nLabels); - assertEquals(expecting, Bytecode.disassemble(pda.code, false)); - } - - @Test public void testAorB() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | B ;"); - String expecting = - "0000:\tsplit 7, 15\n" + - "0007:\tmatch8 4\n" + - "0009:\tjmp 12\n" + - "0012:\taccept 1\n" + - "0015:\tmatch8 
5\n" + - "0017:\tjmp 20\n" + - "0020:\taccept 2\n"; - checkBytecode(g, 0, expecting); - } - - @Test public void testABorAC() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B | A C ;"); - String expecting = - "0000:\tmatch8 4\n" + - "0002:\tjmp 5\n" + - "0005:\tsplit 12, 20\n" + - "0012:\tmatch8 6\n" + - "0014:\tjmp 17\n" + - "0017:\taccept 2\n" + - "0020:\tmatch8 5\n" + - "0022:\tjmp 25\n" + - "0025:\taccept 1\n"; - checkBytecode(g, 0, expecting); - } - - @Test public void testAPlus() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A+ B | A+ C ;"); - String expecting = - "0000:\tmatch8 4\n" + - "0002:\tjmp 5\n" + - "0005:\tsplit 14, 22, 30\n" + - "0014:\tmatch8 6\n" + - "0016:\tjmp 19\n" + - "0019:\taccept 2\n" + - "0022:\tmatch8 5\n" + - "0024:\tjmp 27\n" + - "0027:\taccept 1\n" + - "0030:\tmatch8 4\n" + - "0032:\tjmp 5\n"; - checkBytecode(g, 2, expecting); - } - - void checkBytecode(Grammar g, int decision, String expecting) { - NFA nfa = createNFA(g); - DecisionState blk = nfa.decisionToNFAState.get(decision); - DFA dfa = createDFA(g, blk); - DFACompiler comp = new DFACompiler(dfa); - CompiledPDA obj = comp.compile(); - PDA pda = new PDA(obj.code, obj.altToAddr, obj.nLabels); - assertEquals(expecting, Bytecode.disassemble(pda.code, false)); - } -} diff --git a/tool/test/org/antlr/v4/test/TestLexerDFAConstruction.java b/tool/test/org/antlr/v4/test/TestLexerDFAConstruction.java deleted file mode 100644 index eaa8b758d..000000000 --- a/tool/test/org/antlr/v4/test/TestLexerDFAConstruction.java +++ /dev/null @@ -1,133 +0,0 @@ -package org.antlr.v4.test; - -import org.junit.Test; - -public class TestLexerDFAConstruction extends BaseTest { - - @Test public void unicode() throws Exception { - String g = - "lexer grammar L;\n" + - "A : '\\u0030'..'\\u8000'+ 'a' ;\n" + - "B : '\\u0020' ;"; - String expecting = - "s0-{'0'..'\\u8000'}->s1\n" + - "s0-' '->:s2=>B\n" + - "s1-'a'->:s3=>A\n" + - "s1-{'0'..'`', 'b'..'\\u8000'}->s1\n" + - ":s3=>A-'a'->:s3=>A\n" + - ":s3=>A-{'0'..'`', 'b'..'\\u8000'}->s1\n"; - checkLexerDFA(g, expecting); - } - - @Test public void keywordvsID() throws Exception { - String g = - "lexer grammar L2;\n" + - "IF : 'if' ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : DIGIT+ ;\n" + - "public fragment\n" + - "DIGIT : '0'..'9' ;"; - String expecting = - "s0-'i'->:s1=>ID\n" + - "s0-{'a'..'h', 'j'..'z'}->:s2=>ID\n" + - "s0-{'0'..'9'}->:s3=>INT\n" + - ":s1=>ID-'f'->:s4=>IF\n" + - ":s1=>ID-{'a'..'e', 'g'..'z'}->:s2=>ID\n" + - ":s2=>ID-{'a'..'z'}->:s2=>ID\n" + - ":s3=>INT-{'0'..'9'}->:s3=>INT\n" + - ":s4=>IF-{'a'..'z'}->:s2=>ID\n"; - checkLexerDFA(g, expecting); - } - - @Test public void recursiveMatchingTwoAlts() throws Exception { - // TODO: recursion requires NFA - String g = - "lexer grammar L3;\n" + - "SPECIAL : '{{}}' ;\n" + - "ACTION : '{' (FOO | 'x')* '}' ;\n" + - "fragment\n" + - "FOO : ACTION ;\n" + - "LCURLY : '{' ;"; - String expecting = - ""; - checkLexerDFA(g, expecting); - } - - @Test public void testMode() throws Exception { - String g = - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "X : 'x' ;\n" + - "mode FOO;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"; - String expecting = - "s0-'b'->:s1=>B\n" + - "s0-'c'->:s2=>C\n"; - checkLexerDFA(g, "FOO", expecting); - } - - @Test public void pred() throws Exception { - String g = - "lexer grammar L;\n" + - "A : {p1}? 
'a' 'b' ;\n" + - "B : 'a' 'b' ;"; - String expecting = - "s0-'a'->s1\n" + - "s1-'b'->s2\n" + - "s2-{p1}?->:s3=>A\n" + - "s2-true->:s4=>B\n"; - checkLexerDFA(g, expecting); - } - - @Test public void gated_pred() throws Exception { - String g = - "lexer grammar pred;\n" + - "A : {p1}?=> 'a' 'b'\n" + - " | 'a' 'c' \n" + - " | 'b'\n" + - " ;"; - String expecting = - "s0-'a'->s1\n" + - "s0-'b'->:s2=>A\n" + - "s1-'b'&&{p1}?->:s3=>A\n" + - "s1-'c'->:s4=>A\n"; - checkLexerDFA(g, expecting); - } - - @Test public void gated_pred2() throws Exception { - String g = - "lexer grammar T;\n" + - "A : {p1}?=> 'a' 'b'\n" + - " | 'b'\n" + - " ;\n" + - "B : 'a' 'c' ;"; - String expecting = - "s0-'a'->s1\n" + - "s0-'b'->:s2=>A\n" + - "s1-'b'&&{p1}?->:s3=>A\n" + - "s1-'c'->:s4=>B\n"; - checkLexerDFA(g, expecting); - } - - @Test public void ambigButPredicatedTokens() throws Exception { - String g = - "lexer grammar L4;\n" + - "A : {p1}? 'a' ; \n" + - "B : {p2}? 'a' ;"; - String expecting = - "s0-'a'->s1\n" + - "s1-{p1}?->:s2=>A\n" + - "s1-{p2}?->:s3=>B\n"; - checkLexerDFA(g, expecting); - } - - public void _template() throws Exception { - String g = - ""; - String expecting = - ""; - checkLexerDFA(g, expecting); - } - -} diff --git a/tool/test/org/antlr/v4/test/TestLinearApproximateLookahead.java b/tool/test/org/antlr/v4/test/TestLinearApproximateLookahead.java deleted file mode 100644 index 2db5f5dbb..000000000 --- a/tool/test/org/antlr/v4/test/TestLinearApproximateLookahead.java +++ /dev/null @@ -1,111 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.analysis.LinearApproximator; -import org.antlr.v4.automata.DFA; -import org.antlr.v4.automata.DecisionState; -import org.antlr.v4.automata.NFA; -import org.antlr.v4.automata.NFAState; -import org.antlr.v4.tool.Grammar; -import org.junit.Test; - -public class TestLinearApproximateLookahead extends BaseTest { - @Test - public void testLL1Block() throws Exception { - String g = - "parser grammar P;\n"+ - "a : A | B ;"; - String expecting = - "s0-A->:s1=>1\n" + - "s0-B->:s2=>2\n"; - checkRule(g, "a", expecting); - } - - @Test - public void testLL1Chain() throws Exception { - String g = - "parser grammar P;\n"+ - "a : b | B ;\n" + - "b : c | C ;\n" + - "c : D ;"; - String expecting = - "s0-{D, C}->:s1=>1\n" + - "s0-B->:s2=>2\n"; - checkRule(g, "a", expecting); - } - - @Test - public void testLL2Block() throws Exception { - String g = - "parser grammar P;\n"+ - "a : A B | A C ;"; - String expecting = - "s0-A->s1\n" + - "s0-A->s3\n" + - "s1-B->:s2=>1\n" + - "s3-C->:s4=>2\n"; - checkRule(g, "a", expecting); - } - - @Test public void testLL1NullableRuleRef() throws Exception { - String g = - "parser grammar P;\n"+ - "a : b B | X b C ;\n" + - "b : A | ;"; - String expecting = - "s0-{A, B}->:s1=>1\n" + - "s0-X->:s2=>2\n"; - checkRule(g, "a", expecting); - } - - @Test public void testLL2FOLLOW() throws Exception { - String g = - "parser grammar P;\n"+ - "a : X b Q | Y b Z ;\n" + - "b : A B | A | ;\n" + - "c : b C ;"; - String expecting = - "s0-A->s1\n" + - "s0-A->s3\n" + - "s0-{Q, C, Z}->s5\n" + - "s1-B->:s2=>1\n" + - "s3-{Q, C, Z}->:s4=>2\n" + - "s5-EOF->:s6=>3\n"; - checkRule(g, "b", expecting); - } - - @Test - public void testNonDetLL1Block() throws Exception { - String g = - "parser grammar P;\n"+ - "a : A | B | A ;"; - String expecting = null; - checkRule(g, "a", expecting); - } - - @Test - public void testNonDetLL2Block() throws Exception { - String g = - "parser grammar P;\n"+ - "a : A B | A B | C ;"; - String expecting = null; - checkRule(g, "a", expecting); 
- } - - void checkRule(String gtext, String ruleName, String expecting) - throws Exception - { - Grammar g = new Grammar(gtext); - NFA nfa = createNFA(g); - NFAState s = nfa.ruleToStartState.get(g.getRule(ruleName)); - if ( s==null ) { - System.err.println("no start state for rule "+ruleName); - return; - } - DecisionState blk = (DecisionState)s.transition(0).target; - LinearApproximator lin = new LinearApproximator(g,blk.decision); - DFA dfa = lin.createDFA(blk); - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/test/TestNFAConstruction.java b/tool/test/org/antlr/v4/test/TestNFAConstruction.java deleted file mode 100644 index d206cb38a..000000000 --- a/tool/test/org/antlr/v4/test/TestNFAConstruction.java +++ /dev/null @@ -1,951 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.*; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.junit.Test; - -public class TestNFAConstruction extends BaseTest { - @Test - public void testA() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A;"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-A->s3\n" + - "s3->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s4\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A B ;"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-A->s3\n" + - "s3->s4\n" + - "s4-B->s5\n" + - "s5->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s6\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAorB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A | B {;} ;"); - String expecting = - "RuleStart_a_0->BlockStart_8\n" + - "BlockStart_8->s2\n" + - "BlockStart_8->s4\n" + - "s2-A->s3\n" + - "s4-B->s5\n" + - "s3->BlockEnd_9\n" + - "s5->s6\n" + - "BlockEnd_9->RuleStop_a_1\n" + - "s6-{;}->s7\n" + - "RuleStop_a_1-EOF->s10\n" + - "s7->BlockEnd_9\n"; - checkRule(g, "a", expecting); - } - - @Test public void testRange() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar P;\n"+ - "A : 'a'..'c' ;" - ); - String expecting = - "RuleStart_A_1->s3\n" + - "s3-'a'..'c'->s4\n" + - "s4->RuleStop_A_2\n"; - checkTokensRule(g, "A", expecting); - } - - @Test public void testRangeOrRange() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar P;\n"+ - "A : ('a'..'c' 'h' | 'q' 'j'..'l') ;" - ); - String expecting = - "RuleStart_A_1->BlockStart_11\n" + - "BlockStart_11->s3\n" + - "BlockStart_11->s7\n" + - "s3-'a'..'c'->s4\n" + - "s7-'q'->s8\n" + - "s4->s5\n" + - "s8->s9\n" + - "s5-'h'->s6\n" + - "s9-'j'..'l'->s10\n" + - "s6->BlockEnd_12\n" + - "s10->BlockEnd_12\n" + - "BlockEnd_12->RuleStop_A_2\n"; - checkTokensRule(g, "A", expecting); - } - - @Test public void testStringLiteralInParser() throws Exception { - Grammar g = new Grammar( - "grammar P;\n"+ - "a : A|'b' ;" - ); - String expecting = - "RuleStart_a_0->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "s2-A->s3\n" + - "s4-'b'->s5\n" + - "s3->BlockEnd_7\n" + - "s5->BlockEnd_7\n" + - "BlockEnd_7->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s8\n"; - checkRule(g, "a", expecting); - } - - @Test public void testABorCD() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A B | C D;"); - String expecting = - 
"RuleStart_a_0->BlockStart_10\n" + - "BlockStart_10->s2\n" + - "BlockStart_10->s6\n" + - "s2-A->s3\n" + - "s6-C->s7\n" + - "s3->s4\n" + - "s7->s8\n" + - "s4-B->s5\n" + - "s8-D->s9\n" + - "s5->BlockEnd_11\n" + - "s9->BlockEnd_11\n" + - "BlockEnd_11->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s12\n"; - checkRule(g, "a", expecting); - } - - @Test public void testbA() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : b A ;\n"+ - "b : B ;"); - String expecting = - "RuleStart_a_0->s4\n" + - "s4->RuleStart_b_2\n" + - "s5->s6\n" + - "s6-A->s7\n" + - "s7->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s10\n"; - checkRule(g, "a", expecting); - expecting = - "RuleStart_b_2->s8\n" + - "s8-B->s9\n" + - "s9->RuleStop_b_3\n" + - "RuleStop_b_3->s5\n"; - checkRule(g, "b", expecting); - } - - @Test public void testFollow() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : b A ;\n"+ - "b : B ;\n"+ - "c : b C;"); - String expecting = - "RuleStart_b_2->s10\n" + - "s10-B->s11\n" + - "s11->RuleStop_b_3\n" + - "RuleStop_b_3->s7\n" + - "RuleStop_b_3->s13\n"; - checkRule(g, "b", expecting); - } - - @Test public void testAorEpsilon() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A | ;"); - String expecting = - "RuleStart_a_0->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "s2-A->s3\n" + - "s4->s5\n" + - "s3->BlockEnd_7\n" + - "s5->BlockEnd_7\n" + - "BlockEnd_7->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s8\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAOptional() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A?;"); - String expecting = - "RuleStart_a_0->BlockStart_4\n" + - "BlockStart_4->s2\n" + - "BlockStart_4->BlockEnd_5\n" + - "s2-A->s3\n" + - "BlockEnd_5->RuleStop_a_1\n" + - "s3->BlockEnd_5\n" + - "RuleStop_a_1-EOF->s6\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAorBoptional() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A|B)?;"); - String expecting = - "RuleStart_a_0->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "BlockStart_6->BlockEnd_7\n" + - "s2-A->s3\n" + - "s4-B->s5\n" + - "BlockEnd_7->RuleStop_a_1\n" + - "s3->BlockEnd_7\n" + - "s5->BlockEnd_7\n" + - "RuleStop_a_1-EOF->s8\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAorBthenC() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A | B) C;"); - String expecting = - "RuleStart_a_0->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "s2-A->s3\n" + - "s4-B->s5\n" + - "s3->BlockEnd_7\n" + - "s5->BlockEnd_7\n" + - "BlockEnd_7->s8\n" + - "s8-C->s9\n" + - "s9->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s10\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAplus() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A+;"); - String expecting = - "RuleStart_a_0->PlusBlockStart_4\n" + - "PlusBlockStart_4->s2\n" + - "s2-A->s3\n" + - "s3->LoopBack_5\n" + - "LoopBack_5->BlockEnd_6\n" + - "LoopBack_5->s2\n" + - "BlockEnd_6->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s7\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAorBplus() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A|B)+;"); - String expecting = - "RuleStart_a_0->PlusBlockStart_8\n" + - "PlusBlockStart_8->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "s2-A->s3\n" + - "s4-B->s5\n" + - "s3->BlockEnd_7\n" + - 
"s5->BlockEnd_7\n" + - "BlockEnd_7->LoopBack_9\n" + - "LoopBack_9->BlockEnd_10\n" + - "LoopBack_9->BlockStart_6\n" + - "BlockEnd_10->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s11\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAorBorEmptyPlus() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A | B | )+ ;"); - String expecting = - "RuleStart_a_0->PlusBlockStart_10\n" + - "PlusBlockStart_10->BlockStart_8\n" + - "BlockStart_8->s2\n" + - "BlockStart_8->s4\n" + - "BlockStart_8->s6\n" + - "s2-A->s3\n" + - "s4-B->s5\n" + - "s6->s7\n" + - "s3->BlockEnd_9\n" + - "s5->BlockEnd_9\n" + - "s7->BlockEnd_9\n" + - "BlockEnd_9->LoopBack_11\n" + - "LoopBack_11->BlockEnd_12\n" + - "LoopBack_11->BlockStart_8\n" + - "BlockEnd_12->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s13\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAStar() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A*;"); - String expecting = - "RuleStart_a_0->StarBlockStart_4\n" + - "StarBlockStart_4->s2\n" + - "StarBlockStart_4->BlockEnd_6\n" + - "s2-A->s3\n" + - "BlockEnd_6->RuleStop_a_1\n" + - "s3->LoopBack_5\n" + - "RuleStop_a_1-EOF->s7\n" + - "LoopBack_5->BlockEnd_6\n" + - "LoopBack_5->s2\n"; - checkRule(g, "a", expecting); - } - - @Test public void testNestedAstar() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (',' ID*)*;"); - String expecting = - "RuleStart_a_0->StarBlockStart_9\n" + - "StarBlockStart_9->s2\n" + - "StarBlockStart_9->BlockEnd_11\n" + - "s2-','->s3\n" + - "BlockEnd_11->RuleStop_a_1\n" + - "s3->StarBlockStart_6\n" + - "RuleStop_a_1-EOF->s12\n" + - "StarBlockStart_6->s4\n" + - "StarBlockStart_6->BlockEnd_8\n" + - "s4-ID->s5\n" + - "BlockEnd_8->LoopBack_10\n" + - "s5->LoopBack_7\n" + - "LoopBack_10->BlockEnd_11\n" + - "LoopBack_10->s2\n" + - "LoopBack_7->BlockEnd_8\n" + - "LoopBack_7->s4\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAorBstar() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A | B)* ;"); - String expecting = - "RuleStart_a_0->StarBlockStart_8\n" + - "StarBlockStart_8->BlockStart_6\n" + - "StarBlockStart_8->BlockEnd_10\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "BlockEnd_10->RuleStop_a_1\n" + - "s2-A->s3\n" + - "s4-B->s5\n" + - "RuleStop_a_1-EOF->s11\n" + - "s3->BlockEnd_7\n" + - "s5->BlockEnd_7\n" + - "BlockEnd_7->LoopBack_9\n" + - "LoopBack_9->BlockEnd_10\n" + - "LoopBack_9->BlockStart_6\n"; - checkRule(g, "a", expecting); - } - - @Test public void testPredicatedAorB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : {p1}? A | {p2}? B ;"); - String expecting = - "RuleStart_a_0->BlockStart_10\n" + - "BlockStart_10->s2\n" + - "BlockStart_10->s6\n" + - "s2-{p1}?->s3\n" + - "s6-{p2}?->s7\n" + - "s3->s4\n" + - "s7->s8\n" + - "s4-A->s5\n" + - "s8-B->s9\n" + - "s5->BlockEnd_11\n" + - "s9->BlockEnd_11\n" + - "BlockEnd_11->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s12\n"; - checkRule(g, "a", expecting); - } - -/* - @Test public void testMultiplePredicates() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" + - "b : {p4}? B ;"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - } - - @Test public void testSets() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : ( A | B )+ ;\n" + - "b : ( A | B{;} )+ ;\n" + - "c : (A|B) (A|B) ;\n" + - "d : ( A | B )* ;\n" + - "e : ( A | B )? 
;"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - expecting = - "\n"; - checkRule(g, "b", expecting); - expecting = - "\n"; - checkRule(g, "c", expecting); - expecting = - "\n"; - checkRule(g, "d", expecting); - expecting = - "\n"; - checkRule(g, "e", expecting); - } - - @Test public void testNotSet() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "tokens { A; B; C; }\n"+ - "a : ~A ;\n"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - - } - - @Test public void testNotSingletonBlockSet() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "tokens { A; B; C; }\n"+ - "a : ~(A) ;\n"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - } - - @Test public void testNotCharSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~'3' ;\n"); - String expecting = - "RuleStart_A_1->s5\n" + - "s5-{'\\u0000'..'2', '4'..'\\uFFFE'}->s6\n" + - "s6->RuleStop_A_2\n"; - checkRule(g, "A", expecting); - } - - @Test public void testNotBlockSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~('3'|'b') ;\n"); - String expecting = - "\n"; - checkRule(g, "A", expecting); - } - - @Test public void testNotSetLoop() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~('3')* ;\n"); - String expecting = - "\n"; - checkRule(g, "A", expecting); - } - - @Test public void testNotBlockSetLoop() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~('3'|'b')* ;\n"); - String expecting = - "\n"; - checkRule(g, "A", expecting); - } - - @Test public void testLabeledNotSet() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "tokens { A; B; C; }\n"+ - "a : t=~A ;\n"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-B..C->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - - @Test public void testLabeledNotCharSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : t=~'3' ;\n"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" + - ".s3->:s4\n" + - ":s4-->.s5\n"; - checkRule(g, "A", expecting); - } - - @Test public void testLabeledNotBlockSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : t=~('3'|'b') ;\n"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" + - ".s3->:s4\n" + - ":s4-->.s5\n"; - checkRule(g, "A", expecting); - } - - @Test public void testEscapedCharLiteral() throws Exception { - Grammar g = new Grammar( - "grammar P;\n"+ - "a : '\\n';"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-'\\n'->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - - @Test public void testEscapedStringLiteral() throws Exception { - Grammar g = new Grammar( - "grammar P;\n"+ - "a : 'a\\nb\\u0030c\\'';"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-'a\\nb\\u0030c\\''->s3\n" + - "s3->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s4\n"; - checkRule(g, "a", expecting); - } - - // AUTO BACKTRACKING STUFF - - @Test public void testAutoBacktracking_RuleBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : 'a'{;}|'b';" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s1->.s9\n" + - ".s10-'b'->.s11\n" + - ".s11->.s6\n" + - ".s2-{synpred1_t}?->.s3\n" + - ".s3-'a'->.s4\n" + - ".s4-{}->.s5\n" + 
- ".s5->.s6\n" + - ".s6->:s7\n" + - ".s9->.s10\n" + - ":s7-EOF->.s8\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_RuleSetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : 'a'|'b';" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-'a'..'b'->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_SimpleBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'{;}|'b') ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s10->.s11\n" + - ".s11-'b'->.s12\n" + - ".s12->.s7\n" + - ".s2->.s10\n" + - ".s2->.s3\n" + - ".s3-{synpred1_t}?->.s4\n" + - ".s4-'a'->.s5\n" + - ".s5-{}->.s6\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_SetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b') ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-'a'..'b'->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_StarBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'{;}|'b')* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s12->.s13\n" + - ".s13-{synpred2_t}?->.s14\n" + - ".s14-'b'->.s15\n" + - ".s15->.s8\n" + - ".s16->.s9\n" + - ".s2->.s16\n" + - ".s2->.s3\n" + - ".s3->.s12\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6-{}->.s7\n" + - ".s7->.s8\n" + - ".s8->.s3\n" + - ".s8->.s9\n" + - ".s9->:s10\n" + - ":s10-EOF->.s11\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b')* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2->.s3\n" + - ".s2->.s9\n" + - ".s3->.s4\n" + - ".s4-'a'..'b'->.s5\n" + - ".s5->.s3\n" + - ".s5->.s6\n" + - ".s6->:s7\n" + - ".s9->.s6\n" + - ":s7-EOF->.s8\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_StarSetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b'{;})* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s11->.s12\n" + - ".s12-{synpred2_t}?->.s13\n" + - ".s13-'b'->.s14\n" + - ".s14-{}->.s15\n" + - ".s15->.s7\n" + - ".s16->.s8\n" + - ".s2->.s16\n" + - ".s2->.s3\n" + - ".s3->.s11\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s7\n" + - ".s7->.s3\n" + - ".s7->.s8\n" + - ".s8->:s9\n" + - ":s9-EOF->.s10\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_StarBlock1Alt() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s10->.s7\n" + - ".s2->.s10\n" + - ".s2->.s3\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s3\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_PlusBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options 
{backtrack=true;}\n"+ - "a : ('a'{;}|'b')+ ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s12->.s13\n" + - ".s13-{synpred2_t}?->.s14\n" + - ".s14-'b'->.s15\n" + - ".s15->.s8\n" + - ".s2->.s3\n" + - ".s3->.s12\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6-{}->.s7\n" + - ".s7->.s8\n" + - ".s8->.s3\n" + - ".s8->.s9\n" + - ".s9->:s10\n" + - ":s10-EOF->.s11\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_PlusSetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b'{;})+ ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s11->.s12\n" + - ".s12-{synpred2_t}?->.s13\n" + - ".s13-'b'->.s14\n" + - ".s14-{}->.s15\n" + - ".s15->.s7\n" + - ".s2->.s3\n" + - ".s3->.s11\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s7\n" + - ".s7->.s3\n" + - ".s7->.s8\n" + - ".s8->:s9\n" + - ":s9-EOF->.s10\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_PlusBlock1Alt() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')+ ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2->.s3\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s3\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_OptionalBlock2Alts() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'{;}|'b')?;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s10->.s11\n" + - ".s10->.s14\n" + - ".s11-{synpred2_t}?->.s12\n" + - ".s12-'b'->.s13\n" + - ".s13->.s7\n" + - ".s14->.s7\n" + - ".s2->.s10\n" + - ".s2->.s3\n" + - ".s3-{synpred1_t}?->.s4\n" + - ".s4-'a'->.s5\n" + - ".s5-{}->.s6\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_OptionalBlock1Alt() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')?;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2->.s3\n" + - ".s2->.s9\n" + - ".s3-{synpred1_t}?->.s4\n" + - ".s4-'a'->.s5\n" + - ".s5->.s6\n" + - ".s6->:s7\n" + - ".s9->.s6\n" + - ":s7-EOF->.s8\n"; - checkRule(g, "a", expecting); - } - - @Test public void testAutoBacktracking_ExistingPred() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')=> 'a' | 'b';" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s1->.s8\n" + - ".s10->.s5\n" + - ".s2-{synpred1_t}?->.s3\n" + - ".s3-'a'->.s4\n" + - ".s4->.s5\n" + - ".s5->:s6\n" + - ".s8->.s9\n" + - ".s9-'b'->.s10\n" + - ":s6-EOF->.s7\n"; - checkRule(g, "a", expecting); - } -*/ - - @Test public void testDefaultMode() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "X : 'x' ;\n" + - "mode FOO;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - String expecting = - "BlockStart_0->RuleStart_A_2\n" + - "BlockStart_0->RuleStart_X_4\n" + - "RuleStart_A_2->s10\n" + - "RuleStart_X_4->s12\n" + - "s10-'a'->s11\n" + - "s12-'x'->s13\n" + - "s11->RuleStop_A_3\n" + - "s13->RuleStop_X_5\n"; - checkTokensRule(g, "DEFAULT_MODE", expecting); - } - - @Test public void testMode() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' 
;\n" + - "X : 'x' ;\n" + - "mode FOO;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - String expecting = - "BlockStart_1->RuleStart_B_6\n" + - "BlockStart_1->RuleStart_C_8\n" + - "RuleStart_B_6->s14\n" + - "RuleStart_C_8->s16\n" + - "s14-'b'->s15\n" + - "s16-'c'->s17\n" + - "s15->RuleStop_B_7\n" + - "s17->RuleStop_C_9\n"; - checkTokensRule(g, "FOO", expecting); - } - - void checkTokensRule(LexerGrammar g, String modeName, String expecting) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.process(imp); - } - } - } - - if ( g.modes.get(modeName)==null ) { - System.err.println("no such mode "+modeName); - return; - } - - ParserNFAFactory f = new LexerNFAFactory((LexerGrammar)g); - NFA nfa = f.createNFA(); - NFAState startState = nfa.modeToStartState.get(modeName); - NFASerializer serializer = new NFASerializer(g, startState); - String result = serializer.toString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - void checkRule(Grammar g, String ruleName, String expecting) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.process(imp); - } - } - } - - ParserNFAFactory f = new ParserNFAFactory(g); - NFA nfa = f.createNFA(); - Rule r = g.getRule(ruleName); - NFAState startState = nfa.ruleToStartState.get(r); - NFASerializer serializer = new NFASerializer(g, startState); - String result = serializer.toString(); - - //System.out.print(result); - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/test/TestPDABytecodeGeneration.java b/tool/test/org/antlr/v4/test/TestPDABytecodeGeneration.java deleted file mode 100644 index edbcdd363..000000000 --- a/tool/test/org/antlr/v4/test/TestPDABytecodeGeneration.java +++ /dev/null @@ -1,287 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.Tool; -import org.antlr.v4.codegen.CompiledPDA; -import org.antlr.v4.codegen.LexerCompiler; -import org.antlr.v4.runtime.pda.Bytecode; -import org.antlr.v4.runtime.pda.PDA; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -public class TestPDABytecodeGeneration extends BaseTest { - @Test public void unicode() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : '\\u0030'..'\\u8000'+ 'a' ;\n" + - "B : '\\u0020' | '\\n';"); - String expecting = - "0000:\tsplit 7, 24\n" + - "0007:\trange16 '0', '\\u8000'\n" + - "0012:\tsplit 7, 19\n" + - "0019:\tmatch8 'a'\n" + - "0021:\taccept 4\n" + - "0024:\tsplit 31, 36\n" + - "0031:\tmatch8 ' '\n" + - "0033:\tjmp 38\n" + - "0036:\tmatch8 '\\n'\n" + - "0038:\taccept 5\n"; - checkBytecode(g, expecting); - } - - @Test public void testString() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'ab' ;"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tmatch8 'a'\n" + - "0007:\tmatch8 'b'\n" + - "0009:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testNotChar() throws Exception { - LexerGrammar g = new 
LexerGrammar( - "lexer grammar L;\n"+ - "A : ~'a' ;"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tnot \n" + - "0006:\tmatch8 'a'\n" + - "0008:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testNotBlock() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : ~('a'|'b') ;"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tnot \n" + // not's next match/range - "0006:\tsplit 13, 18\n" + - "0013:\tmatch8 'a'\n" + - "0015:\tjmp 20\n" + - "0018:\tmatch8 'b'\n" + - "0020:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testNotStarBlock() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : ~('a'|'b')* ;"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tsplit 12, 30\n" + - "0012:\tnot \n" + - "0013:\tsplit 20, 25\n" + - "0020:\tmatch8 'a'\n" + - "0022:\tjmp 27\n" + - "0025:\tmatch8 'b'\n" + - "0027:\tjmp 5\n" + - "0030:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testIDandIntandKeyword() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'ab';\n" + - "B : 'a'..'z'+ ;\n" + - "I : '0'..'9'+ ;\n"); - String expecting = - "0000:\tsplit 9, 16, 29\n" + - "0009:\tmatch8 'a'\n" + - "0011:\tmatch8 'b'\n" + - "0013:\taccept 4\n" + - "0016:\trange8 'a', 'z'\n" + - "0019:\tsplit 16, 26\n" + - "0026:\taccept 5\n" + - "0029:\trange8 '0', '9'\n" + - "0032:\tsplit 29, 39\n" + - "0039:\taccept 6\n"; - checkBytecode(g, expecting); - } - - @Test public void testNonGreedy() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "\n" + - "CMT : '/*' (options {greedy=false;}:.)* '*/' ;\n" + - "ID : 'ab' ;\n"); - String expecting = - "0000:\tsplit 7, 29\n" + - "0007:\tmatch8 '/'\n" + - "0009:\tmatch8 '*'\n" + - "0011:\tsplit 22, 18\n" + - "0018:\twildcard \n" + - "0019:\tjmp 11\n" + - "0022:\tmatch8 '*'\n" + - "0024:\tmatch8 '/'\n" + - "0026:\taccept 4\n" + - "0029:\tmatch8 'a'\n" + - "0031:\tmatch8 'b'\n" + - "0033:\taccept 5\n"; - checkBytecode(g, expecting); - } - - @Test public void testCallFragment() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "I : D+ ;\n" + - "fragment D : '0'..'9'+ ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tcall 18\n" + - "0008:\tsplit 5, 15\n" + - "0015:\taccept 4\n" + - "0018:\trange8 '0', '9'\n" + - "0021:\tsplit 18, 28\n" + - "0028:\tret \n"; - checkBytecode(g, expecting); - } - - @Test public void testLabeledChar() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='a' ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tlabel 0\n" + - "0008:\tmatch8 'a'\n" + - "0010:\tsave 0\n" + - "0013:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testLabeledString() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='aa' ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tlabel 0\n" + - "0008:\tmatch8 'a'\n" + - "0010:\tmatch8 'a'\n" + - "0012:\tsave 0\n" + - "0015:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testLabeledToken() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "I : d=D ;\n" + - "fragment D : '0'..'9'+ ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tlabel 0\n" + - "0008:\tcall 17\n" + - "0011:\tsave 0\n" + - "0014:\taccept 4\n" + - "0017:\trange8 '0', '9'\n" + - "0020:\tsplit 17, 27\n" + - "0027:\tret \n"; - 
checkBytecode(g, expecting); - } - - @Test public void testLabelIndexes() throws Exception { - // labels indexed from 0 in each rule - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='a' ;\n" + - "B : a='b' b='c' ;\n"); - String expecting = - "0000:\tsplit 7, 18\n" + - "0007:\tlabel 0\n" + - "0010:\tmatch8 'a'\n" + - "0012:\tsave 0\n" + - "0015:\taccept 4\n" + - "0018:\tlabel 1\n" + - "0021:\tmatch8 'b'\n" + - "0023:\tsave 1\n" + - "0026:\tlabel 2\n" + - "0029:\tmatch8 'c'\n" + - "0031:\tsave 2\n" + - "0034:\taccept 5\n"; - checkBytecode(g, expecting); - } - - @Test public void testLabelReuseWithinRule() throws Exception { - // labels indexed from 0 in each rule - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='b' a='c' ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tlabel 0\n" + - "0008:\tmatch8 'b'\n" + - "0010:\tsave 0\n" + - "0013:\tlabel 0\n" + - "0016:\tmatch8 'c'\n" + - "0018:\tsave 0\n" + - "0021:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testAction() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : {foo} 'a' | 'b' {bar} ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tsplit 12, 22\n" + - "0012:\taction 1, 0\n" + - "0017:\tmatch8 'a'\n" + - "0019:\tjmp 29\n" + - "0022:\tmatch8 'b'\n" + - "0024:\taction 1, 1\n" + - "0029:\taccept 4\n"; - checkBytecode(g, expecting); - } - - @Test public void testSempred() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : {foo}? 'a' | 'b' {bar}? ;\n"); - String expecting = - "0000:\tsplit 5\n" + - "0005:\tsplit 12, 22\n" + - "0012:\tsempred 1, 0\n" + - "0017:\tmatch8 'a'\n" + - "0019:\tjmp 29\n" + - "0022:\tmatch8 'b'\n" + - "0024:\tsempred 1, 1\n" + - "0029:\taccept 4\n"; - checkBytecode(g, expecting); - } - - public void _template() throws Exception { - LexerGrammar g = new LexerGrammar( - "\n"); - String expecting = - "\n"; - checkBytecode(g, expecting); - } - - void checkBytecode(LexerGrammar g, String expecting) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.process(imp); - } - } - } - LexerCompiler comp = new LexerCompiler(g); - CompiledPDA obj = comp.compileMode(LexerGrammar.DEFAULT_MODE_NAME); - PDA PDA = new PDA(obj.code, obj.altToAddr, obj.nLabels); - assertEquals(expecting, Bytecode.disassemble(PDA.code)); - } -} diff --git a/tool/test/org/antlr/v4/test/TestPDABytecodeInterp.java b/tool/test/org/antlr/v4/test/TestPDABytecodeInterp.java deleted file mode 100644 index 45334909d..000000000 --- a/tool/test/org/antlr/v4/test/TestPDABytecodeInterp.java +++ /dev/null @@ -1,251 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.runtime.ANTLRStringStream; -import org.antlr.v4.runtime.pda.PDA; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** */ -public class TestPDABytecodeInterp extends BaseTest { - @Test public void testString() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'ab' ;"); - String expecting = "A, A, EOF"; - checkMatches(g, "abab", expecting); - } - - @Test public void testUnicode() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar 
L;\n"+ - "A : '\\u0020' ;"); // space - String expecting = "A, A, EOF"; - checkMatches(g, " ", expecting); - } - - @Test public void testEscapes() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "WS : '\t'|'\n'|' ' ;"); - String expecting = "WS, WS, WS, WS, WS, EOF"; - checkMatches(g, " \t\n\n ", expecting); - } - - @Test public void testNotChar() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : ~'a' ;"); - String expecting = "A, EOF"; - checkMatches(g, "b", expecting); - } - - @Test public void testIDandIntandKeyword() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'ab';\n" + - "B : 'a'..'z'+ ;\n" + - "I : '0'..'9'+ ;\n"); - String expecting = "A, I, B, EOF"; - checkMatches(g, "ab32abc", expecting); - } - - @Test public void testSLComment() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "\n" + - "CMT : '//' ~('\r'|'\n')* '\r'? '\n' ;\n" + - "ID : 'ab' ;\n"); - String expecting = "ID, CMT, ID, EOF"; - checkMatches(g, "ab// foo\nab", expecting); - } - - @Test public void testNonGreedy() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "\n" + - "CMT : '/*' (options {greedy=false;}:.)* '*/' ;\n" + - "ID : 'ab' ;\n"); - String expecting = "ID, CMT, EOF"; - checkMatches(g, "ab/* x */", expecting); - } - - @Test public void testNonGreedyAndCommonLeftPrefix() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "\n" + - "CMT : '/*' (options {greedy=false;}:.)* '*/' ;\n" + - "CMT2: '/*' (options {greedy=false;}:.)* '*/' '!' ;\n" + - "ID : 'ab' ;\n"); - String expecting = "ID, CMT2, CMT, EOF"; - checkMatches(g, "ab/* x */!/* foo */", expecting); - } - - @Test public void testCallFragment() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "I : D+ ;\n" + - "fragment D : '0'..'9'+ ;\n"); - String expecting = "I, EOF"; - checkMatches(g, "32", expecting); - } - - @Test public void testCallNonFragment() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "QID : ID ('.' ID)+ ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : ' ' ;\n"); - String expecting = "ID, EOF"; - checkMatches(g, "z", expecting); - expecting = "ID, WS, QID, WS, ID, WS, QID, WS, ID, EOF"; - checkMatches(g, "z a.b x c.d.e y", expecting); - } - - @Test public void testRecursiveCall() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "ACTION : '{' (ACTION|.)* '}' ;\n"); - String expecting = "ACTION, EOF"; - checkMatches(g, "{hi}", expecting); - checkMatches(g, "{{hi}}", expecting); - checkMatches(g, "{{x}{y}}", expecting); - checkMatches(g, "{{{{{{x}}}}}}", expecting); - } - - @Test public void testAltOrWildcard() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "ELSE : . ;\n"); - String expecting = "A, A, ELSE, A, EOF"; - checkMatches(g, "aaxa", expecting); - } - - @Test public void testRewindBackToLastGoodMatch() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' 'b'? ;\n"+ - "B : 'b' ;\n"+ - "WS : ' ' ;\n"); - String expecting = "A, WS, A, WS, B, EOF"; - checkMatches(g, "a ab b", expecting); - } - - // fixes http://www.antlr.org/jira/browse/ANTLR-189 from v3 - @Test public void testRewindBackToLastGoodMatch_DOT_vs_NUM() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "NUM: '0'..'9'+ ('.' '0'..'9'+)? 
;\n"+ - "DOT : '.' ;\n"+ - "WS : ' ' ;\n"); - checkMatches(g, "3.14 .", "NUM, WS, DOT, EOF"); - checkMatches(g, "9", "NUM, EOF"); - checkMatches(g, ".1", "DOT, NUM, EOF"); - checkMatches(g, "1.", "NUM, DOT, EOF"); - } - - @Test public void testLabeledChar() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='a' ;\n"); - checkLabels(g, "a", "A", "[[@-1,0:0='a',<0>,1:0]]"); - } - - @Test public void testLabeledString() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='abc' ;\n"); - checkLabels(g, "abc", "A", "[[@-1,0:2='abc',<0>,1:0]]"); - } - - @Test public void testLabeledToken() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "I : d=D ;\n" + - "fragment D : '0'..'9'+ ;\n"); - checkLabels(g, "901", "I", "[[@-1,0:2='901',<0>,1:0]]"); - } - - @Test public void testLabelInLoopIsLastElement() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "I : d=D+ ;\n" + - "fragment D : '0'..'9' ;\n"); - checkLabels(g, "901", "I", "[[@-1,2:2='1',<0>,1:2]]"); - } - - @Test public void testLabelIndexes() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : a='a' ;\n" + - "B : a='b' b='c' ;\n"); - checkLabels(g, "bc", "B", "[[@-1,0:-1='',<0>,1:0], [@-1,0:0='b',<0>,1:0], [@-1,1:1='c',<0>,1:1]]"); - } - - @Test public void testAction() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "I : {a1} d=D {a2} ;\n" + - "fragment D : ('0'..'9' {a3})+ ;\n"); - checkLabels(g, "901", "I", "[[@-1,0:2='901',<0>,1:0]]"); - } - - @Test public void testSempred() throws Exception { - // not actually evaluating preds since we're interpreting; assumes true. - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n" + - "A : {true}? 'a' | 'b' {true}? 
;\n"); - checkMatches(g, "ab", "A, A, EOF"); - } - - - public void _template() throws Exception { - LexerGrammar g = new LexerGrammar( - "\n"); - String expecting = ""; - checkMatches(g, "input", expecting); - } - - void checkMatches(LexerGrammar g, String input, String expecting) { - PDA pda = getLexerPDA(g); - - List expectingTokenTypes = getTypesFromString(g, expecting); - - List tokenTypes = getTokenTypes(input, pda); - assertEquals(expectingTokenTypes, tokenTypes); - } - - void checkLabels(LexerGrammar g, String input, String expecting, - String expectingTokens) - { - PDA pda = getLexerPDA(g); - List expectingTokenTypes = getTypesFromString(g, expecting); - ANTLRStringStream in = new ANTLRStringStream(input); - List tokenTypes = new ArrayList(); - int ttype = pda.execThompson(in); - tokenTypes.add(ttype); - assertEquals(expectingTokenTypes, tokenTypes); - - if ( expectingTokens!=null ) { - assertEquals(expectingTokens, Arrays.toString(pda.labelValues)); - } - } - - - -// List getTokens(String input, PDA lexerPDA) { -// ANTLRStringStream in = new ANTLRStringStream(input); -// List tokens = new ArrayList(); -// int ttype = 0; -// do { -// ttype = lexerPDA.execThompson(in); -// tokens.add(new CommonToken(ttype,"")); -// } while ( ttype!= Token.EOF ); -// return tokens; -// } - -} diff --git a/tool/test/org/antlr/v4/test/TestPredicatedDFAConstruction.java b/tool/test/org/antlr/v4/test/TestPredicatedDFAConstruction.java deleted file mode 100644 index 7b939354b..000000000 --- a/tool/test/org/antlr/v4/test/TestPredicatedDFAConstruction.java +++ /dev/null @@ -1,86 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.tool.Message; -import org.junit.Test; - -import java.util.List; - -/** */ -public class TestPredicatedDFAConstruction extends BaseTest { - @Test - public void TwoAltsOnePred() throws Exception { - String g = - "parser grammar E;\n" + - "a : {p1}? ID\n" + - " | ID\n" + - " ;"; - String expecting = - "s0-ID->s1\n" + - "s1-{p1}?->:s2=>1\n" + - "s1-true->:s3=>2\n"; - checkRuleDFA(g, "a", expecting); - } - - @Test public void hoistPredIntoCallingRule() throws Exception { - String g = - "grammar Q;\n" + - "\n" + - "prog: stat+ ;\n" + - "/** ANTLR pulls predicates from keyIF and keyCALL into\n" + - "* decision for this rule.\n" + - "*/\n" + - "stat: keyIF expr stat\n" + - "\t| keyCALL ID ';'\n" + - "\t| ';'\n" + - "\t;\n" + - "/** An ID whose text is \"if\" */\n" + - "keyIF : {IF}? ID ;\n" + - "/** An ID whose text is \"call\" */\n" + - "keyCALL : {CALL}? ID ;\n" + - "\n" + - "expr : ID;"; - String expecting = - "s0-';'->:s1=>3\n" + - "s0-ID->s2\n" + - "s2-ID->s3\n" + - "s3-ID->:s4=>1\n" + - "s3-';'->s5\n" + - "s5-{IF}?->:s6=>1\n" + - "s5-{CALL}?->:s7=>2\n"; - List msgs = checkRuleDFA(g, "stat", expecting); - System.err.println(msgs); - } - - @Test - public void testSemanticContextPreventsEarlyTerminationOfClosure() throws Exception { - String g = - "parser grammar T;\n" + - "a : loop SEMI | ID SEMI\n" + - " ;\n" + - "loop\n" + - " : {while}? ID\n" + - " | {do}? ID\n" + - " | {for}? 
ID\n" + - " ;"; - String expecting = - "s0-ID->s1\n" + - "s1-SEMI->s2\n" + - "s2-({while}?||{do}?||{for}?)->:s3=>1\n" + - "s2-true->:s4=>2\n"; - checkRuleDFA(g, "a", expecting); - } - - /* - @Test public void _template() throws Exception { - String g = - ""; - String expecting = - ""; - List msgs = checkRuleDFA(g, "a", expecting); - System.out.println(msgs); - //ambig(msgs, new int[] {1,2}, "A"); - //unreachable(msgs, new int[] {2}); - assertEquals(msgs.size(), 2); - } - */ -} diff --git a/tool/test/org/antlr/v4/test/TestScopeParsing.java b/tool/test/org/antlr/v4/test/TestScopeParsing.java deleted file mode 100644 index d654d8a33..000000000 --- a/tool/test/org/antlr/v4/test/TestScopeParsing.java +++ /dev/null @@ -1,56 +0,0 @@ -package org.antlr.v4.test; - -import org.antlr.v4.parse.ScopeParser; -import org.junit.Test; - -public class TestScopeParsing extends BaseTest { - String[] argPairs = { - "", "{}", - " ", "{}", - "int i", "{i=int i}", - "int[] i, int j[]", "{i=int[] i, j=int [] j}", - "Map[] i, int j[]", "{i=Map[] i, j=int [] j}", - "int i = 34+a[3], int j[] = new int[34]", - "{i=int i= 34+a[3], j=int [] j= new int[34]}", - "char *foo32[3] = {1\\,2\\,3}", "{3=char *foo32[] 3= {1,2,3}}", - - // python/ruby style - "i", "{i=null i}", - "i,j", "{i=null i, j=null j}", - "i,j, k", "{i=null i, j=null j, k=null k}", - }; - - String[] scopePairs = { - "int i;", "{i=int i}", - "int[] i; int j[];", "{i=int[] i, j=int [] j}", - "Map[] i; int j[];", "{i=Map[] i, j=int [] j}", - "int i = 34+a[3]; int j[] = new int[34];", - "{i=int i= 34+a[3], j=int [] j= new int[34]}", - "char *foo32[] = {1,2,3};", "{foo32=char *[] foo32= {1,2,3}}", - " int i; int c; int k; ", "{i=int i, c=int c, k=int k}", - " { int i; int c; int k; }", "{i=int i, c=int c, k=int k}", - - // python/ruby style - "i", "{i=null i}", - " i ; j ;", "{i=null i, j=null j}", - "i; j; k;", "{i=null i, j=null j, k=null k}", - }; - - @Test public void testArgs() { - for (int i = 0; i < argPairs.length; i+=2) { - String input = argPairs[i]; - String expected = argPairs[i+1]; - String actual = ScopeParser.parseTypeList(input).attributes.toString(); - assertEquals(expected, actual); - } - } - - @Test public void testScopes() { - for (int i = 0; i < scopePairs.length; i+=2) { - String input = scopePairs[i]; - String expected = scopePairs[i+1]; - String actual = ScopeParser.parseDynamicScope(input).attributes.toString(); - assertEquals(expected, actual); - } - } -} diff --git a/tool/test/org/antlr/v4/test/TestSymbolIssues.java b/tool/test/org/antlr/v4/test/TestSymbolIssues.java deleted file mode 100644 index 1b86366f9..000000000 --- a/tool/test/org/antlr/v4/test/TestSymbolIssues.java +++ /dev/null @@ -1,102 +0,0 @@ -package org.antlr.v4.test; - -import org.junit.Test; - -/** */ -public class TestSymbolIssues extends BaseTest { - static String[] A = { - // INPUT - "grammar A;\n" + - "options { opt='sss'; k=3; }\n" + - "tokens { X; Y='y'; X='x'; Y='q'; Z; Z; }\n" + - "scope Blort { int x; }\n" + - "\n" + - "@members {foo}\n" + - "@members {bar}\n" + - "@lexer::header {package jj;}\n" + - "@lexer::header {package kk;}\n" + - "\n" + - "scope Blort { int x; }\n" + - "\n" + - "a[int i] returns [foo f] : X ID a[3] b[34] q ;\n" + - "b returns [int g] : Y 'y' 'if' a ;\n" + - "a : FJKD ;\n" + - "\n" + - "ID : 'a'..'z'+ ID ;", - // YIELDS - "error(49): A.g:2:10: illegal option opt\n" + - "error(59): A.g:11:6: scope Blort redefinition\n" + - "error(18): A.g:15:0: rule a redefinition\n" + - "error(58): A.g:7:1: redefinition of members action\n" + - 
"error(58): A.g:9:1: redefinition of header action\n" + - "error(72): A.g:3:19: cannot alias X; token name already defined\n" + - "error(72): A.g:3:26: cannot alias Y; token name already assigned to 'y'\n" + - "error(72): A.g:3:36: cannot alias Z; token name already defined\n" + - "error(46): A.g:13:37: rule b has no defined parameters\n" + - "error(23): A.g:13:43: reference to undefined rule: q\n" + - "error(45): A.g:14:31: missing parameter(s) on rule reference: a\n" - }; - - static String[] B = { - // INPUT - "parser grammar B;\n" + - "tokens { X='x'; Y; }\n" + - "scope s { int i; }\n" + - "\n" + - "a : s=ID b+=ID X=ID '.' ;\n" + - "\n" + - "b : x=ID x+=ID ;\n" + - "\n" + - "s : FOO ;", - // YIELDS - "error(25): B.g:2:9: can't assign string value to token name X in non-combined grammar\n" + - "error(34): B.g:9:0: symbol s conflicts with global dynamic scope with same name\n" + - "error(35): B.g:5:9: label b conflicts with rule with same name\n" + - "error(34): B.g:5:4: symbol s conflicts with global dynamic scope with same name\n" + - "error(36): B.g:5:15: label X conflicts with token with same name\n" + - "error(41): B.g:7:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n" - }; - - static String[] C = { - // INPUT - "grammar C;\n"+ - "options {output=AST;}\n"+ - "a : A x=b y+=Z 'hi' -> ID A r $foo $x b $y+ 'hi' 'eh?'\n"+ - " | ID -> $x A ID // shouldn't see these refs from other alt ('cept ID)\n"+ - " ;\n"+ - "b : B ;\n"+ - "A : 'a';", - // YIELDS - "error(51): C.g:3:28: reference to rewrite element r not found to left of ->\n" + - "error(51): C.g:3:30: reference to rewrite element foo not found to left of ->\n" + - "error(51): C.g:3:49: reference to rewrite element 'eh?' not found to left of ->\n" + - "error(51): C.g:4:10: reference to rewrite element x not found to left of ->\n" - }; - - static String[] D = { - // INPUT - "parser grammar D;\n" + - "a[int j] \n" + - "scope { int i; }\n" + - " : i=ID j=ID ;\n" + - "\n" + - "b[int i] returns [int i] : ID ;\n" + - "\n" + - "c[int i] returns [String k]\n" + - "scope { int i; int c; int k; }\n" + - " : ID ;", - - // YIELDS - "error(38): D.g:4:21: label j conflicts with rule a's return value or parameter with same name\n" + - "error(37): D.g:4:16: label i conflicts with rule a's dynamically-scoped attribute with same name\n" + - "error(42): D.g:6:0: rule b's argument i conflicts a return value with same name\n" + - "error(40): D.g:9:6: rule c's dynamically-scoped attribute i conflicts with c's return value or parameter with same name\n" + - "error(39): D.g:9:6: rule c's dynamically-scoped attribute c conflicts with the rule name\n" + - "error(40): D.g:9:6: rule c's dynamically-scoped attribute k conflicts with c's return value or parameter with same name\n" - }; - - @Test public void testA() { super.testErrors(A, false); } - @Test public void testB() { super.testErrors(B, false); } - @Test public void testC() { super.testErrors(C, false); } - @Test public void testD() { super.testErrors(D, false); } -} diff --git a/tool/test/org/antlr/v4/test/TestSyntaxErrors.java b/tool/test/org/antlr/v4/test/TestSyntaxErrors.java deleted file mode 100644 index b1845e699..000000000 --- a/tool/test/org/antlr/v4/test/TestSyntaxErrors.java +++ /dev/null @@ -1,143 +0,0 @@ -package org.antlr.v4.test; - -import org.junit.Test; - -public class TestSyntaxErrors extends BaseTest { - static String[] A = { - // INPUT - "grammar A;\n" + - "", - // YIELDS - "error(63): A.g::: grammar A has no rules\n", - - "A;", - "error(17): :1:0: 'A' 
came as a complete surprise to me\n", - - "grammar ;", - "error(17): :1:8: ';' came as a complete surprise to me while looking for an identifier\n", - - "grammar A\n" + - "a : ID ;\n", - "error(17): :2:0: missing SEMI at 'a'\n", - - "grammar A;\n" + - "a : ID ;;\n"+ - "b : B ;", - "error(17): A.g:2:8: ';' came as a complete surprise to me\n", - - "grammar A;;\n" + - "a : ID ;\n", - "error(17): A;.g:1:10: ';' came as a complete surprise to me\n", - - "grammar A;\n" + - "a @init : ID ;\n", - "error(17): A.g:2:8: mismatched input ':' expecting ACTION while matching rule preamble\n", - - "grammar A;\n" + - "a ( A | B ) D ;\n" + - "b : B ;", - "error(17): A.g:2:3: '(' came as a complete surprise to me while matching rule preamble\n" + - "error(17): A.g:2:11: mismatched input ')' expecting SEMI while matching a rule\n" + - "error(17): A.g:2:15: ';' came as a complete surprise to me while matching rule preamble\n", - }; - - @Test public void testA() { super.testErrors(A, true); } - - @Test public void testExtraColon() { - String[] pair = new String[] { - "grammar A;\n" + - "a : : A ;\n" + - "b : B ;", - "error(17): A.g:2:4: ':' came as a complete surprise to me while matching alternative\n", - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A \n" + - "b : B ;", - "error(17): A.g:3:0: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n", - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi2() { - String[] pair = new String[] { - "lexer grammar A;\n" + - "A : 'a' \n" + - "B : 'b' ;", - "error(17): A.g:3:0: unterminated rule (missing ';') detected at 'B :' while looking for rule element\n", - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi3() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A \n" + - "b[int i] returns [int y] : B ;", - "error(17): A.g:3:9: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n" - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi4() { - String[] pair = new String[] { - "grammar A;\n" + - "a : b \n" + - " catch [Exception e] {...}\n" + - "b : B ;\n", - - "error(17): A.g:2:4: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n" - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi5() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A \n" + - " catch [Exception e] {...}\n", - - "error(17): A.g:2:4: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n" - }; - super.testErrors(pair, true); - } - - @Test public void testBadRulePrequelStart() { - String[] pair = new String[] { - "grammar A;\n" + - "a @ options {k=1;} : A ;\n" + - "b : B ;", - - "error(17): A.g:2:4: 'options {' came as a complete surprise to me while looking for an identifier\n" - }; - super.testErrors(pair, true); - } - - @Test public void testBadRulePrequelStart2() { - String[] pair = new String[] { - "grammar A;\n" + - "a } : A ;\n" + - "b : B ;", - - "error(17): A.g:2:2: '}' came as a complete surprise to me while matching rule preamble\n" - }; - super.testErrors(pair, true); - } - - @Test public void testModeInParser() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A ;\n" + - "mode foo;\n" + - "b : B ;", - - "error(87): A.g:3:5: lexical modes are only allowed in lexer grammars\n" - }; - super.testErrors(pair, true); 
- } - -}