adding initial antlr v4 files

[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 6621]
This commit is contained in:
parrt 2010-01-27 17:20:51 -08:00
parent c9c01f2e3d
commit 7f63527ce1
29 changed files with 26069 additions and 0 deletions

5
build.xml Normal file
View File

@ -0,0 +1,5 @@
<project name="ANTLRv4">
<property file="build.properties"/>
</project>

0
tool/build.properties Normal file
View File

10
tool/build.xml Normal file
View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<project name="tool" default="compile">
<property file="build.properties"/>
<target name="compile">
<javac srcdir="src" destdir="../build/classes"/>
</target>
</project>

View File

@ -0,0 +1,12 @@
/** templates used to generate make-compatible dependencies */
group depend;
/** Generate "f : x, y, z" dependencies for input
* dependencies and generated files. in and out
* are File objects. For example, you can say
* <f.canonicalPath>
*/
dependencies(grammarFileName,in,out) ::= <<
<if(in)><grammarFileName>: <in; separator=", "><endif>
<out:{f | <f> : <grammarFileName>}; separator="\n">
>>

View File

@ -0,0 +1 @@
<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];

View File

@ -0,0 +1 @@
{rank=same; rankdir=TB; <states; separator="; ">}

View File

@ -0,0 +1,7 @@
digraph NFA {
<if(rankdir)>rankdir=<rankdir>;<endif>
<decisionRanks; separator="\n">
<states; separator="\n">
<edges; separator="\n">
}

View File

@ -0,0 +1 @@
<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];

View File

@ -0,0 +1 @@
<src> -> <target> [fontname="Times-Italic", label = "e"];

View File

@ -0,0 +1,6 @@
digraph NFA {
rankdir=LR;
<decisionRanks; separator="\n">
<states; separator="\n">
<edges; separator="\n">
}

View File

@ -0,0 +1 @@
node [fontsize=11, shape = <if(useBox)>box<else>circle, fixedsize=true, width=.4<endif>]; <name>

View File

@ -0,0 +1 @@
node [fontsize=11, shape = <if(useBox)>polygon,sides=4,peripheries=2<else>doublecircle, fixedsize=true, width=.6<endif>]; <name>

View File

@ -0,0 +1,42 @@
/*
[The "BSD licence"]
Copyright (c) 2006 Kay Roepke
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
New style messages. This file contains the actual layout of the messages emitted by ANTLR.
The text itself is coming out of the languages/*stg files, according to the chosen locale.
This file contains the default format ANTLR uses.
*/
group antlr;
location(file, line, column) ::= "<file>:<line>:<column>:"
message(id, text) ::= "(<id>) <text>"
report(location, message, type) ::= "<type>(<message.id>): <location> <message.text>"
wantsSingleLineMessage() ::= "false"

View File

@ -0,0 +1,42 @@
/*
[The "BSD licence"]
Copyright (c) 2006 Kay Roepke
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
New style messages. This file contains the actual layout of the messages emitted by ANTLR.
The text itself is coming out of the languages/*stg files, according to the chosen locale.
This file contains the format that mimics GCC output.
*/
group gnu;
location(file, line, column) ::= "<file>:<line>:"
message(id, text) ::= "<text> (<id>)"
report(location, message, type) ::= "<location> <type>: <message>"
wantsSingleLineMessage() ::= "true"

View File

@ -0,0 +1,42 @@
/*
[The "BSD licence"]
Copyright (c) 2006 Kay Roepke
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
New style messages. This file contains the actual layout of the messages emitted by ANTLR.
The text itself is coming out of the languages/*stg files, according to the chosen locale.
This file contains the format that mimics Microsoft Visual Studio (VS2005) compiler output.
*/
// was "group antlr;" — a copy/paste of the default format's header; this group is the
// VS2005-style format and must not share the default group's name
group vs2005;
location(file, line, column) ::= "<file>(<line>,<column>)"
message(id, text) ::= "error <id> : <text>"
report(location, message, type) ::= "<location> : <type> <message.id> : <message.text>"
wantsSingleLineMessage() ::= "true"

View File

@ -0,0 +1,299 @@
/*
[The "BSD licence"]
Copyright (c) 2005-2006 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
New style messages. This file only contains the messages in English, but no
information about which file, line, or column it occurred in.
The location and message ids are taken out of the formats directory.
Kay Roepke
*/
group en_US;
// TOOL ERRORS
// file errors
CANNOT_WRITE_FILE(arg,exception,stackTrace) ::= <<
cannot write file <arg>: <exception>
<stackTrace; separator="\n">
>>
CANNOT_CLOSE_FILE(arg,exception,stackTrace) ::= "cannot close file <arg>: <exception>"
CANNOT_FIND_TOKENS_FILE(arg) ::= "cannot find tokens file <arg>"
ERROR_READING_TOKENS_FILE(arg,exception,stackTrace) ::= <<
problem reading token vocabulary file <arg>: <exception>
<stackTrace; separator="\n">
>>
DIR_NOT_FOUND(arg,exception,stackTrace) ::= "directory not found: <arg>"
OUTPUT_DIR_IS_FILE(arg,exception,stackTrace) ::= "output directory is a file: <arg>"
CANNOT_OPEN_FILE(arg,exception,stackTrace) ::= "cannot find or open file: <arg><if(exception)>; reason: <exception><endif>"
CIRCULAR_DEPENDENCY() ::= "your grammars contain a circular dependency and cannot be sorted into a valid build order."
INTERNAL_ERROR(arg,arg2,exception,stackTrace) ::= <<
internal error: <arg> <arg2><if(exception)>: <exception><endif>
<stackTrace; separator="\n">
>>
INTERNAL_WARNING(arg) ::= "internal warning: <arg>"
ERROR_CREATING_ARTIFICIAL_RULE(arg,exception,stackTrace) ::= <<
problems creating lexer rule listing all tokens: <exception>
<stackTrace; separator="\n">
>>
TOKENS_FILE_SYNTAX_ERROR(arg,arg2) ::=
"problems parsing token vocabulary file <arg> on line <arg2>"
CANNOT_GEN_DOT_FILE(arg,exception,stackTrace) ::=
"cannot write DFA DOT file <arg>: <exception>"
// 'arg' (the action text) was missing from the formal arguments, so '<arg>'
// in the body never rendered; siblings like BAD_AST_STRUCTURE declare it
BAD_ACTION_AST_STRUCTURE(arg,exception,stackTrace) ::=
"bad internal tree structure for action '<arg>': <exception>"
BAD_AST_STRUCTURE(arg,exception,stackTrace) ::= <<
bad internal tree structure '<arg>': <exception>
<stackTrace; separator="\n">
>>
FILE_AND_GRAMMAR_NAME_DIFFER(arg,arg2) ::=
"file <arg2> contains grammar <arg>; names must be identical"
FILENAME_EXTENSION_ERROR(arg) ::=
"file <arg> must end in a file extension, normally .g"
// code gen errors
MISSING_CODE_GEN_TEMPLATES(arg) ::=
"cannot find code generation templates <arg>.stg"
// body references '<arg>' (the language name) but the parameter list was empty,
// so the message always printed with a blank language
MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES(arg) ::=
"cannot find code generation cyclic DFA templates for language <arg>"
CODE_GEN_TEMPLATES_INCOMPLETE(arg) ::=
"at least one code generation template missing for language <arg>"
CANNOT_CREATE_TARGET_GENERATOR(arg,exception,stackTrace) ::=
"cannot create target <arg> code generator: <exception>"
CANNOT_COMPUTE_SAMPLE_INPUT_SEQ() ::=
"cannot generate a sample input sequence from lookahead DFA"
// grammar interpretation errors
/*
NO_VIABLE_DFA_ALT(arg,arg2) ::=
"no viable transition from state <arg> on <arg2> while interpreting DFA"
*/
// GRAMMAR ERRORS
SYNTAX_ERROR(arg) ::= "syntax error: <arg>"
RULE_REDEFINITION(arg) ::=
"rule <arg> redefinition"
LEXER_RULES_NOT_ALLOWED(arg) ::=
"lexer rule <arg> not allowed in parser"
PARSER_RULES_NOT_ALLOWED(arg) ::=
"parser rule <arg> not allowed in lexer"
CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL(arg) ::=
"cannot find an attribute name in attribute declaration"
NO_TOKEN_DEFINITION(arg) ::=
"no lexer rule corresponding to token: <arg>"
UNDEFINED_RULE_REF(arg) ::=
"reference to undefined rule: <arg>"
LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(arg) ::=
"literal has no associated lexer rule: <arg>"
CANNOT_ALIAS_TOKENS_IN_LEXER(arg) ::=
"literals are illegal in lexer tokens{} section: <arg>"
ATTRIBUTE_REF_NOT_IN_RULE(arg,arg2) ::=
"reference to attribute outside of a rule: <arg><if(arg2)>.<arg2><endif>"
UNKNOWN_ATTRIBUTE_IN_SCOPE(arg,arg2) ::=
"unknown attribute for <arg>: <arg2>"
UNKNOWN_RULE_ATTRIBUTE(arg,arg2) ::=
"unknown attribute for rule <arg>: <arg2>"
// second parameter was misspelled 'args2'; every sibling template names it 'arg2',
// so an 'arg2' attribute supplied by ErrorManager would never bind here
UNKNOWN_SIMPLE_ATTRIBUTE(arg,arg2) ::=
"attribute is not a token, parameter, or return value: <arg>"
ISOLATED_RULE_SCOPE(arg) ::=
"missing attribute access on rule scope: <arg>"
INVALID_RULE_PARAMETER_REF(arg,arg2) ::=
"cannot access rule <arg>'s parameter: <arg2>"
INVALID_RULE_SCOPE_ATTRIBUTE_REF(arg,arg2) ::=
"cannot access rule <arg>'s dynamically-scoped attribute: <arg2>"
SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE(arg) ::=
"symbol <arg> conflicts with global dynamic scope with same name"
WRITE_TO_READONLY_ATTR(arg,arg2,arg3) ::=
"cannot write to read only attribute: $<arg><if(arg2)>.<arg2><endif>"
LABEL_CONFLICTS_WITH_RULE(arg) ::=
"label <arg> conflicts with rule with same name"
LABEL_CONFLICTS_WITH_TOKEN(arg) ::=
"label <arg> conflicts with token with same name"
LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE(arg,arg2) ::=
"label <arg> conflicts with rule <arg2>'s dynamically-scoped attribute with same name"
LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
"label <arg> conflicts with rule <arg2>'s return value or parameter with same name"
ATTRIBUTE_CONFLICTS_WITH_RULE(arg,arg2) ::=
"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with the rule name"
// fixed missing space in the emitted message: "with<arg2>" -> "with <arg2>"
ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with <arg2>'s return value or parameter with same name"
LABEL_TYPE_CONFLICT(arg,arg2) ::=
"label <arg> type mismatch with previous definition: <arg2>"
// fixed ungrammatical user-facing message: "conflicts a return value" -> "conflicts with a return value"
ARG_RETVAL_CONFLICT(arg,arg2) ::=
"rule <arg2>'s argument <arg> conflicts with a return value with same name"
NONUNIQUE_REF(arg) ::=
"<arg> is a non-unique reference"
FORWARD_ELEMENT_REF(arg) ::=
"illegal forward reference: <arg>"
MISSING_RULE_ARGS(arg) ::=
"missing parameter(s) on rule reference: <arg>"
RULE_HAS_NO_ARGS(arg) ::=
"rule <arg> has no defined parameters"
ARGS_ON_TOKEN_REF(arg) ::=
"token reference <arg> may not have parameters"
/*
NONCHAR_RANGE() ::=
"range operator can only be used in the lexer"
*/
ILLEGAL_OPTION(arg) ::=
"illegal option <arg>"
LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT(arg) ::=
"rule '+=' list labels are not allowed w/o output option: <arg>"
UNDEFINED_TOKEN_REF_IN_REWRITE(arg) ::=
"reference to undefined token in rewrite rule: <arg>"
REWRITE_ELEMENT_NOT_PRESENT_ON_LHS(arg) ::=
"reference to rewrite element <arg> without reference on left of ->"
UNDEFINED_LABEL_REF_IN_REWRITE(arg) ::=
"reference to undefined label in rewrite rule: $<arg>"
NO_GRAMMAR_START_RULE (arg) ::=
"grammar <arg>: no start rule (no rule can obviously be followed by EOF)"
EMPTY_COMPLEMENT(arg) ::= <<
<if(arg)>
set complement ~<arg> is empty
<else>
set complement is empty
<endif>
>>
UNKNOWN_DYNAMIC_SCOPE(arg) ::=
"unknown dynamic scope: <arg>"
UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE(arg,arg2) ::=
"unknown dynamically-scoped attribute for scope <arg>: <arg2>"
RULE_REF_AMBIG_WITH_RULE_IN_ALT(arg) ::=
"reference $<arg> is ambiguous; rule <arg> is enclosing rule and referenced in the production (assuming enclosing rule)"
ISOLATED_RULE_ATTRIBUTE(arg) ::=
"reference to locally-defined rule scope attribute without rule name: <arg>"
INVALID_ACTION_SCOPE(arg,arg2) ::=
"unknown or invalid action scope for <arg2> grammar: <arg>"
ACTION_REDEFINITION(arg) ::=
"redefinition of <arg> action"
DOUBLE_QUOTES_ILLEGAL(arg) ::=
"string literals must use single quotes (such as \'begin\'): <arg>"
INVALID_TEMPLATE_ACTION(arg) ::=
"invalid StringTemplate % shorthand syntax: '<arg>'"
MISSING_ATTRIBUTE_NAME() ::=
"missing attribute name on $ reference"
ARG_INIT_VALUES_ILLEGAL(arg) ::=
"rule parameters may not have init values: <arg>"
REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION(arg) ::=
"<if(arg)>rule <arg> uses <endif>rewrite syntax or operator with no output option; setting output=AST"
AST_OP_WITH_NON_AST_OUTPUT_OPTION(arg) ::=
"AST operator with non-AST output option: <arg>"
NO_RULES(arg) ::= "grammar file <arg> has no rules"
MISSING_AST_TYPE_IN_TREE_GRAMMAR(arg) ::=
"tree grammar <arg> has no ASTLabelType option"
REWRITE_FOR_MULTI_ELEMENT_ALT(arg) ::=
"with rewrite=true, alt <arg> not simple node or obvious tree element; text attribute for rule not guaranteed to be correct"
RULE_INVALID_SET(arg) ::=
"Cannot complement rule <arg>; not a simple set or element"
HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
"alts with rewrites can't use heterogeneous types left of ->"
NO_SUCH_GRAMMAR_SCOPE(arg,arg2) ::=
"reference to undefined grammar in rule reference: <arg>.<arg2>"
NO_SUCH_RULE_IN_SCOPE(arg,arg2) ::=
"rule <arg2> is not defined in grammar <arg>"
TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
"cannot alias <arg>; string already assigned to <arg2>"
TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
"cannot alias <arg>; token name already assigned to <arg2>"
TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
"tokenVocab option ignored in imported grammar <arg>"
INVALID_IMPORT(arg,arg2) ::=
"<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
"no lexer rules contributed to <arg> from imported grammar <arg2>"
IMPORT_NAME_CLASH(arg,arg2) ::=
"combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
AST_OP_IN_ALT_WITH_REWRITE(arg,arg2) ::=
"rule <arg> alt <arg2> uses rewrite syntax and also an AST operator"
WILDCARD_AS_ROOT(arg) ::= "Wildcard invalid as root; wildcard can itself be a tree"
CONFLICTING_OPTION_IN_TREE_FILTER(arg,arg2) ::= "option <arg>=<arg2> conflicts with tree grammar filter mode"
// GRAMMAR WARNINGS
GRAMMAR_NONDETERMINISM(input,conflictingAlts,paths,disabled,hasPredicateBlockedByAction) ::=
<<
<if(paths)>
Decision can match input such as "<input>" using multiple alternatives:
<paths:{ alt <it.alt> via NFA path <it.states; separator=","><\n>}>
<else>
Decision can match input such as "<input>" using multiple alternatives: <conflictingAlts; separator=", ">
<endif>
<if(disabled)><\n>As a result, alternative(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
>>
DANGLING_STATE(danglingAlts,input) ::= <<
the decision cannot distinguish between alternative(s) <danglingAlts; separator=","> for input such as "<input>"
>>
UNREACHABLE_ALTS(alts) ::= <<
The following alternatives can never be matched: <alts; separator=","><\n>
>>
INSUFFICIENT_PREDICATES(upon,altToLocations,hasPredicateBlockedByAction) ::= <<
Input such as "<upon>" is insufficiently covered with predicates at locations: <altToLocations.keys:{alt|alt <alt>: <altToLocations.(alt):{loc| line <loc.line>:<loc.column> at <loc.text>}; separator=", ">}; separator=", "><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
>>
DUPLICATE_SET_ENTRY(arg) ::=
"duplicate token type <arg> when collapsing subrule into set"
ANALYSIS_ABORTED(enclosingRule) ::= <<
ANTLR could not analyze this decision in rule <enclosingRule>; often this is because of recursive rule references visible from the left edge of alternatives. ANTLR will re-analyze the decision with a fixed lookahead of k=1. Consider using "options {k=1;}" for that decision and possibly adding a syntactic predicate.
>>
RECURSION_OVERLOW(alt,input,targetRules,callSiteStates) ::= <<
Alternative <alt>: after matching input such as <input> decision cannot predict what comes next due to recursion overflow <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule.name>};separator=", ">}; separator=" and ">
>>
LEFT_RECURSION(targetRules,alt,callSiteStates) ::= <<
Alternative <alt> discovers infinite left-recursion <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule>};separator=", ">}; separator=" and ">
>>
UNREACHABLE_TOKENS(tokens) ::= <<
The following token definitions can never be matched because prior tokens match the same input: <tokens; separator=",">
>>
TOKEN_NONDETERMINISM(input,conflictingTokens,paths,disabled,hasPredicateBlockedByAction) ::=
<<
<if(paths)>
Decision can match input such as "<input>" using multiple alternatives:
<paths:{ alt <it.alt> via NFA path <it.states; separator=","><\n>}>
<else>
Multiple token rules can match input such as "<input>": <conflictingTokens; separator=", "><\n>
<endif>
<if(disabled)><\n>As a result, token(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
>>
LEFT_RECURSION_CYCLES(listOfCycles) ::= <<
The following sets of rules are mutually left-recursive <listOfCycles:{c| [<c:{r|<r.name>}; separator=", ">]}; separator=" and ">
>>
NONREGULAR_DECISION(ruleName,alts) ::= <<
[fatal] rule <ruleName> has non-LL(*) decision due to recursive rule invocations reachable from alts <alts; separator=",">. Resolve by left-factoring or using syntactic predicates or using backtrack=true option.
>>
/* l10n for message levels */
warning() ::= "warning"
error() ::= "error"

View File

@ -0,0 +1,427 @@
package org.antlr.v4;
import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;
import org.antlr.v4.gui.ASTViewer;
import org.antlr.v4.parse.ANTLRLexer;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.ASTVerifier;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.tool.*;
import java.io.File;
import java.util.*;
public class Tool {
public final Properties antlrSettings = new Properties();
public String VERSION = "!Unknown version!";
//public static final String VERSION = "${project.version}";
public static final String UNINITIALIZED_DIR = "<unset-dir>";
private List<String> grammarFileNames = new ArrayList<String>();
private boolean generate_NFA_dot = false;
private boolean generate_DFA_dot = false;
private String outputDirectory = ".";
private boolean haveOutputDir = false;
private String inputDirectory = null;
private String parentGrammarDirectory;
private String grammarOutputDirectory;
private boolean haveInputDir = false;
private String libDirectory = ".";
private boolean debug = false;
private boolean trace = false;
private boolean profile = false;
private boolean report = false;
private boolean printGrammar = false;
private boolean depend = false;
private boolean forceAllFilesToOutputDir = false;
private boolean forceRelativeOutput = false;
protected boolean deleteTempLexer = true;
private boolean verbose = false;
/** Don't process grammar file if generated files are newer than grammar */
/**
* Indicate whether the tool should analyze the dependencies of the provided grammar
* file list and ensure that the grammars with dependencies are built
after any of the other grammars in the list that they are dependent on. Setting
* this option also has the side effect that any grammars that are includes for other
* grammars in the list are excluded from individual analysis, which allows the caller
* to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
* of grammars that are just includes for other grammars or what order the grammars
* appear on the command line.
*
* This option was coded to make life easier for tool integration (such as Maven) but
* may also be useful at the command line.
*
* @param make
*/
private boolean make = false;
private boolean showBanner = true;
private static boolean exitNow = false;
// The internal options are for my use on the command line during dev
//
public static boolean internalOption_PrintGrammarTree = false;
public static boolean internalOption_PrintDFA = false;
public static boolean internalOption_ShowNFAConfigsInDFA = false;
public static boolean internalOption_watchNFAConversion = false;
protected Map<String, Grammar> grammars = new HashMap<String, Grammar>();
/** An adaptor that tells ANTLR to build GrammarAST nodes */
public static TreeAdaptor astAdaptor = new GrammarASTAdaptor();
/** Command-line entry point. Parses the arguments via the Tool constructor,
 *  then runs processing unless an option (such as -version) requested an
 *  immediate exit. Exits with status 1 when any errors were reported. */
public static void main(String[] args) throws Exception {
    Tool antlr = new Tool(args);
    if (exitNow) {
        return;
    }
    antlr.process();
    if (ErrorManager.getNumErrors() > 0) {
        System.exit(1);
    }
    // System.exit(0);
}
/** Default constructor; no command-line arguments are processed. */
public Tool() {
}
/** Construct a Tool and immediately process the given command-line arguments. */
public Tool(String[] args) {
processArgs(args);
}
/**
 * Parse the command-line arguments, setting the corresponding option fields
 * on this Tool instance. Anything that does not start with '-' is treated as
 * a grammar file name and queued via addGrammarFile(). Options that take a
 * value consume the following argument; a missing value prints a warning to
 * stderr and the option is ignored. Unknown '-' options are silently dropped.
 */
public void processArgs(String[] args) {
// NOTE(review): 'verbose' is still false on entry (it is only set by the
// -verbose option parsed below), so this banner block can never run during
// a normal first call — confirm whether the check belongs after the loop.
if (verbose) {
ErrorManager.info("ANTLR Parser Generator Version " + VERSION);
showBanner = false;
}
// No arguments at all: show usage and bail out.
if (args == null || args.length == 0) {
help();
return;
}
for (int i = 0; i < args.length; i++) {
// -o/-fo: set the output directory; -fo additionally forces all
// generated files into it regardless of relative input paths.
if (args[i].equals("-o") || args[i].equals("-fo")) {
if (i + 1 >= args.length) {
System.err.println("missing output directory with -fo/-o option; ignoring");
}
else {
if (args[i].equals("-fo")) { // force output into dir
forceAllFilesToOutputDir = true;
}
i++;
outputDirectory = args[i];
// strip a single trailing path separator, if any
if (outputDirectory.endsWith("/") ||
outputDirectory.endsWith("\\")) {
outputDirectory =
outputDirectory.substring(0, outputDirectory.length() - 1);
}
File outDir = new File(outputDirectory);
haveOutputDir = true;
if (outDir.exists() && !outDir.isDirectory()) {
ErrorManager.msg(Msg.OUTPUT_DIR_IS_FILE, outputDirectory);
// NOTE(review): resetting libDirectory in the *output*-dir error
// branch looks like a copy/paste from the -lib branch below — confirm.
libDirectory = ".";
}
}
}
// -lib: directory in which to look for token vocabulary files.
else if (args[i].equals("-lib")) {
if (i + 1 >= args.length) {
System.err.println("missing library directory with -lib option; ignoring");
}
else {
i++;
libDirectory = args[i];
// strip a single trailing path separator, if any
if (libDirectory.endsWith("/") ||
libDirectory.endsWith("\\")) {
libDirectory = libDirectory.substring(0, libDirectory.length() - 1);
}
File outDir = new File(libDirectory);
if (!outDir.exists()) {
ErrorManager.msg(Msg.DIR_NOT_FOUND, libDirectory);
libDirectory = ".";
}
}
}
// Simple boolean flags.
else if (args[i].equals("-nfa")) {
generate_NFA_dot = true;
}
else if (args[i].equals("-dfa")) {
generate_DFA_dot = true;
}
else if (args[i].equals("-debug")) {
debug = true;
}
else if (args[i].equals("-trace")) {
trace = true;
}
else if (args[i].equals("-report")) {
report = true;
}
else if (args[i].equals("-profile")) {
profile = true;
}
else if (args[i].equals("-print")) {
printGrammar = true;
}
else if (args[i].equals("-depend")) {
depend = true;
}
else if (args[i].equals("-verbose")) {
verbose = true;
}
else if (args[i].equals("-version")) {
version();
exitNow = true;
}
else if (args[i].equals("-make")) {
make = true;
}
else if (args[i].equals("-message-format")) {
if (i + 1 >= args.length) {
System.err.println("missing output format with -message-format option; using default");
}
else {
i++;
// value is consumed but not yet wired up in this version
//ErrorManager.setFormat(args[i]);
}
}
// -X...: internal/extended options; most targets are still commented
// out pending the port of the corresponding v3 machinery.
else if (args[i].equals("-Xgrtree")) {
internalOption_PrintGrammarTree = true; // print grammar tree
}
else if (args[i].equals("-Xdfa")) {
internalOption_PrintDFA = true;
}
else if (args[i].equals("-Xnoprune")) {
//DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
}
else if (args[i].equals("-Xnocollapse")) {
//DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
}
else if (args[i].equals("-Xdbgconversion")) {
//NFAToDFAConverter.debug = true;
}
else if (args[i].equals("-Xmultithreaded")) {
//NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
}
else if (args[i].equals("-Xnomergestopstates")) {
//DFAOptimizer.MERGE_STOP_STATES = false;
}
else if (args[i].equals("-Xdfaverbose")) {
internalOption_ShowNFAConfigsInDFA = true;
}
else if (args[i].equals("-Xwatchconversion")) {
internalOption_watchNFAConversion = true;
}
else if (args[i].equals("-XdbgST")) {
//CodeGenerator.EMIT_TEMPLATE_DELIMITERS = true;
}
else if (args[i].equals("-Xmaxinlinedfastates")) {
if (i + 1 >= args.length) {
System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
}
else {
i++;
// CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
}
}
else if (args[i].equals("-Xmaxswitchcaselabels")) {
if (i + 1 >= args.length) {
System.err.println("missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
}
else {
i++;
// CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
}
}
else if (args[i].equals("-Xminswitchalts")) {
if (i + 1 >= args.length) {
System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
}
else {
i++;
// CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
}
}
else if (args[i].equals("-Xm")) {
if (i + 1 >= args.length) {
System.err.println("missing max recursion with -Xm option; ignoring");
}
else {
i++;
//NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
}
}
else if (args[i].equals("-Xmaxdfaedges")) {
if (i + 1 >= args.length) {
System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
}
else {
i++;
// DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
}
}
else if (args[i].equals("-Xconversiontimeout")) {
if (i + 1 >= args.length) {
System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
}
else {
i++;
//DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
}
}
else if (args[i].equals("-Xnfastates")) {
//DecisionProbe.verbose = true;
}
else if (args[i].equals("-X")) {
Xhelp();
}
else {
// not an option: anything not starting with '-' is a grammar file
if (args[i].charAt(0) != '-') {
// Must be the grammar file
addGrammarFile(args[i]);
}
}
}
}
/**
 * Parse the named grammar file into a GrammarAST, wrap it in a Grammar
 * object, and register it in the 'grammars' map keyed by the grammar's name
 * (taken from the first child of the parse tree root).
 *
 * @param fileName path of the .g grammar file to read
 * @return the newly created Grammar
 * @throws Exception if the file cannot be read or parsing fails
 */
public Grammar load(String fileName) throws Exception {
ANTLRFileStream in = new ANTLRFileStream(fileName);
ANTLRLexer lexer = new ANTLRLexer(in);
CommonTokenStream tokens = new CommonTokenStream(lexer);
ANTLRParser p = new ANTLRParser(tokens);
// have the parser build GrammarAST nodes rather than plain CommonTrees
p.setTreeAdaptor(astAdaptor);
ParserRuleReturnScope r = p.grammarSpec();
GrammarAST t = (GrammarAST) r.getTree();
// NOTE(review): debug dump of the parse tree to stdout — presumably
// leftover dev output; confirm before shipping
System.out.println(t.toStringTree());
String name = t.getChild(0).getText();
Grammar g = new Grammar(this, name, t);
grammars.put(name, g);
return g;
}
/**
 * Process the queued grammar files. In this early version only the FIRST
 * grammar file is loaded; its imported grammars are pulled in, then the
 * resulting AST is checked for structural validity with ASTVerifier.
 *
 * @throws Exception on parse or verification failure
 */
public void process() throws Exception {
// testing parser
Grammar g = load(grammarFileNames.get(0));
g.loadImportedGrammars();
//g.ast.inspect();
// use buffered node stream as we will look around in stream
// to give good error messages.
BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(astAdaptor,g.ast);
ASTVerifier walker = new ASTVerifier(nodes);
walker.grammarSpec();
}
/** Print the tool's version banner via ErrorManager. */
private static void version() {
ErrorManager.info("ANTLR Parser Generator Version " + new Tool().VERSION);
}
/** Print the version banner followed by the usage summary listing all
 *  public command-line options, one per line, to stderr. */
private static void help() {
    ErrorManager.info("ANTLR Parser Generator Version " + new Tool().VERSION);
    String[] usageLines = {
        "usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]",
        " -o outputDir specify output directory where all output is generated",
        " -fo outputDir same as -o but force even files with relative paths to dir",
        " -lib dir specify location of token files",
        " -depend generate file dependencies",
        " -report print out a report about the grammar(s) processed",
        " -print print out the grammar without actions",
        " -debug generate a parser that emits debugging events",
        " -profile generate a parser that computes profiling information",
        " -nfa generate an NFA for each rule",
        " -dfa generate a DFA for each decision point",
        " -message-format name specify output style for messages",
        " -verbose generate ANTLR version and other information",
        " -make only build if generated files older than grammar",
        " -version print the version of ANTLR and exit.",
        " -X display extended argument list"
    };
    for (String usageLine : usageLines) {
        System.err.println(usageLine);
    }
}
/**
 * Print the tool banner followed by the extended (-X) option summary
 * on stderr. Options not yet carried over from v3 remain commented out
 * at the bottom.
 */
private static void Xhelp() {
    ErrorManager.info("ANTLR Parser Generator Version " + new Tool().VERSION);
    final String[] extendedUsage = {
        " -Xgrtree print the grammar AST",
        " -Xdfa print DFA as text ",
        " -Xnoprune test lookahead against EBNF block exit branches",
        " -Xnocollapse collapse incident edges into DFA states",
        " -Xdbgconversion dump lots of info during NFA conversion",
        " -Xmultithreaded run the analysis in 2 threads",
        " -Xnomergestopstates do not merge stop states",
        " -Xdfaverbose generate DFA states in DOT with NFA configs",
        " -Xwatchconversion print a message for each NFA before converting",
        " -XdbgST put tags at start/stop of all templates in output",
        " -Xnfastates for nondeterminisms, list NFA states for each path"
    };
    for (String line : extendedUsage) {
        System.err.println(line);
    }
    /*
    System.err.println(" -Xm m max number of rule invocations during conversion [" + NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK + "]");
    System.err.println(" -Xmaxdfaedges m max \"comfortable\" number of edges for single DFA state [" + DFA.MAX_STATE_TRANSITIONS_FOR_TABLE + "]");
    System.err.println(" -Xconversiontimeout t set NFA conversion timeout (ms) for each decision [" + DFA.MAX_TIME_PER_DFA_CREATION + "]");
    System.err.println(" -Xmaxinlinedfastates m max DFA states before table used rather than inlining [" + CodeGenerator.MADSI_DEFAULT +"]");
    System.err.println(" -Xmaxswitchcaselabels m don't generate switch() statements for dfas bigger than m [" + CodeGenerator.MSCL_DEFAULT +"]");
    System.err.println(" -Xminswitchalts m don't generate switch() statements for dfas smaller than m [" + CodeGenerator.MSA_DEFAULT + "]");
    */
}
/**
 * Queue a grammar file for processing; duplicate names are ignored so
 * each file appears in the work list at most once.
 *
 * @param grammarFileName path of the grammar file to add
 */
public void addGrammarFile(String grammarFileName) {
    if (grammarFileNames.contains(grammarFileName)) {
        return; // already queued; keep the list duplicate-free
    }
    grammarFileNames.add(grammarFileName);
}
/**
 * Provide the current setting of the conversion timeout on DFA creation.
 *
 * @return DFA creation timeout value in milliseconds
 */
public int getConversionTimeout() {
    // Stubbed during the v4 port: the v3 implementation delegated to
    // DFA.MAX_TIME_PER_DFA_CREATION (see the commented-out line).
    // Until that is wired back up, this always reports 0.
    //return DFA.MAX_TIME_PER_DFA_CREATION;
    return 0;
}
/**
 * Returns the current setting of the message format descriptor
 * @return Current message format
 */
public String getMessageFormat() {
    // Stubbed during the v4 port: the v3 implementation delegated to
    // ErrorManager.getMessageFormat() (see the commented-out line).
    // Callers currently always receive null.
    //return ErrorManager.getMessageFormat().toString();
    return null;
}
/**
 * Returns the number of errors that the analysis/processing threw up.
 * @return Error count
 */
public int getNumErrors() {
    // Simple delegation to the static error collector.
    return ErrorManager.getNumErrors();
}
/**
 * Set the message format to one of ANTLR, gnu, vs2005
 *
 * @param format
 */
public void setMessageFormat(String format) {
    // Stubbed during the v4 port: the v3 implementation forwarded to
    // ErrorManager.setFormat(format). Currently a no-op; the requested
    // format is silently discarded.
    //ErrorManager.setFormat(format);
}
/**
 * Set the location (base directory) where output files should be produced
 * by the ANTLR tool.
 * @param outputDirectory
 */
public void setOutputDirectory(String outputDirectory) {
    haveOutputDir = true;
    // Must qualify with 'this': the previous bare assignment
    // (outputDirectory = outputDirectory) was a parameter self-assignment
    // and never stored the value in the field.
    this.outputDirectory = outputDirectory;
}
/**
 * Set the base location of input files. Normally (when the tool is
 * invoked from the command line), the inputDirectory is not set, but
 * for build tools such as Maven, we need to be able to locate the input
 * files relative to the base, as the working directory could be anywhere and
 * changing working directories is not a valid concept for JVMs because of threading and
 * so on. Setting the directory just means that the getFileDirectory() method will
 * try to open files relative to this input directory.
 *
 * @param inputDirectory Input source base directory
 */
public void setInputDirectory(String inputDirectory) {
    // Must qualify with 'this': the previous bare assignment
    // (inputDirectory = inputDirectory) was a parameter self-assignment
    // and never stored the value in the field.
    this.inputDirectory = inputDirectory;
    haveInputDir = true;
}
}

View File

@ -0,0 +1,726 @@
// File : A3Lexer.g
// Author : Jim Idle (jimi@temporal-wave.com)
// Copyright : Free BSD - See @header clause below
// Version : First implemented as part of ANTLR 3.2 this is the self
// hosting ANTLR 3 Lexer.
//
// Description
// -----------
// This is the definitive lexer grammar for parsing ANTLR V3.x.x grammars. All other
// grammars are derived from this grammar via source code control integration (perforce)
// or by the gdiff tool.
//
// This grammar and its associated grammars A3Parser.g and A3Walker.g exhibit the following
// traits, which are recommended for all production quality grammars:
//
// 1) They are separate grammars, not composite grammars;
// 2) They implement all supporting methods in a superclass (at least this is recommended
// for language targets that support inheritance);
// 3) All errors are pushed as far down the parsing chain as possible, which means
// that the lexer tries to defer error reporting to the parser, and the parser
// tries to defer error reporting to a semantic phase consisting of a single
// walk of the AST. The reason for this is that the error messages produced
// from later phases of the parse will generally have better context and so
// be more useful to the end user. Consider the message: "Syntax error at 'options'"
// vs: "You cannot specify two options{} sections in a single grammar file".
// 4) The lexer is 'programmed' to catch common mistakes such as unterminated literals
// and report them specifically and not just issue confusing lexer mismatch errors.
//
/** Read in an ANTLR grammar and build an AST. Try not to do
* any actions, just build the tree.
*
* The phases are:
*
* A3Lexer.g (this file)
* A3Parser.g
* A3Verify.g (derived from A3Walker.g)
* assign.types.g
* define.g
* buildnfa.g
* antlr.print.g (optional)
* codegen.g
*
* Terence Parr
* University of San Francisco
* 2005
* Jim Idle (this v3 grammar)
* Temporal Wave LLC
* 2009
*/
lexer grammar ANTLRLexer;
// ==============================================================================
// Note that while this grammar does not care about order of constructs
// that don't really matter, such as options before @header etc, it must first
// be parsed by the original v2 parser, before it replaces it. That parser does
// care about order of structures. Hence we are constrained by the v2 parser
// for at least the first bootstrap release that causes this parser to replace
// the v2 version.
// ==============================================================================
// -------
// Options
//
// V3 option directives to tell the tool what we are asking of it for this
// grammar.
//
options {
// Target language is Java, which is the default but being specific
// here as this grammar is also meant as a good example grammar
// for users.
//
language = Java;
// The super class that this lexer should expect to inherit from, and
// which contains any and all support routines for the lexer. This is
// commented out in this baseline (definitive or normative grammar)
// - see the ANTLR tool implementation for hints on how to use the super
// class
//
//superclass = AbstractA3Lexer;
}
// Imaginary token types: the ACTION rule below re-types a matched action
// to SEMPRED when it is followed by '?', and to FORCED_ACTION when the
// action is of the {{...}} form.
tokens { SEMPRED; FORCED_ACTION; }
// Include the copyright in this source and also the generated source
//
@lexer::header {
/*
[The "BSD licence"]
Copyright (c) 2005-2009 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.parse;
}
// +=====================+
// | Lexer specification |
// +=====================+
// --------
// Comments
//
// ANTLR comments can be multi or single line and we don't care
// which particularly. However we also accept Javadoc style comments
// of the form: /** ... */ and we do take care to distinguish those
// from ordinary multi-line comments
// Note how we guide the lexical PATH because we want to issue a descriptive
// error message in case of a standalone '/' character, which makes no
// sense in ANTLR source code. We also trap unterminated multi-line comments
//
fragment DOC_COMMENT : ;
COMMENT
@init {
// Record the start line and offsets as if we need to report an
// unterminated comment, then we want to show the start of the comment
// we think is broken, not the end, where people will have to try and work
// it out themselves.
//
int startLine = $line;
int offset = getCharPositionInLine();
}
: // Eat the first character only, then see if we have a comment
// or something silly.
//
'/' // Comment introducer
(
// Single line comment, possibly with embedded src/line directives
// in a similar style to the C pre-processor, allowing generated
// code to refer the programmer back to the original source code
// in case of error.
//
'/'
(
(' $ANTLR')=> ' $ANTLR' SRC
| ~(NLCHARS)*
)
| // Multi-line comment, which may be a documentation comment
// if it starts /** (note that we protect against accidentally
// recognizing a comment /**/ as a documentation comment
//
'*' (
{ input.LA(2) != '/'}?=> '*' { $type = DOC_COMMENT; }
| { true }?=> // Required to cover all alts with predicates
)
// Should we support embedded multiline comments here?
//
(
// Pick out end of multiline comment and exit the loop
// if we find it.
//
{ !(input.LA(1) == '*' && input.LA(2) == '/') }?
// Anything else other than the non-greedy match of
// the comment close sequence
//
.
)*
(
// Look for the comment terminator, but if it is accidentally
// unterminated, then we will hit EOF, which will trigger the
// epsilon alt and hence we can issue an error message relative
// to the start of the unterminated multi-line comment
//
'*/'
| // Unterminated comment!
//
{
// ErrorManager.msg(Msg.UNTERMINATED_DOC_COMMENT, startLine, offset, $pos, startLine, offset, $pos, (Object)null);
}
)
| // There was nothing that made sense following the opening '/' and so
// we issue an error regarding the malformed comment
//
{
// TODO: Insert error message relative to comment start
//
}
)
{
// Unless we had a documentation comment, then we do not wish to
// pass the comments in to the parser. If you are writing a formatter
// then you will want to preserve the comments off channel, but could
// just skip and save token space if not.
//
if ($type != DOC_COMMENT) {
$channel=2; // Comments are on channel 2
}
}
;
// Double-quoted string literal; backslash escapes are consumed as a
// pair so an escaped '"' does not terminate the literal.
DOUBLE_QUOTE_STRING_LITERAL
: '"' (('\\')=>'\\' . | ~'"' )* '"'
;
// Template-style literal delimited by << and >>, matched non-greedily.
DOUBLE_ANGLE_STRING_LITERAL
: '<<' (options {greedy=false;} : . )* '>>'
;
// --------------
// Argument specs
//
// Certain argument lists, such as those specifying call parameters
// to a rule invocation, or input parameters to a rule specification
// are contained within square brackets. In the lexer we consume them
// all at once and sort them out later in the grammar analysis.
//
ARG_ACTION
@init
{
StringBuffer theText = new StringBuffer();
}
: '['
(
('\\')=>'\\'
(
(']')=>']'
{
// We do not include the \ character itself when picking up an escaped ]
//
theText.append(']');
}
| c=.
{
// We DO include the \ character when finding any other escape
//
theText.append('\\');
theText.append((char)$c);
}
)
| ('"')=>as=ACTION_STRING_LITERAL
{
// Append the embedded string literal text
//
theText.append($as.text);
}
| ('\'')=>ac=ACTION_CHAR_LITERAL
{
// Append the embedded character literal text
//
theText.append($ac.text);
}
| c=~']'
{
// Whatever else we found in the scan
//
theText.append((char)$c);
}
)*
']'
{
// Set the token text to our gathered string
//
setText(theText.toString());
}
;
// -------
// Actions
//
// Other than making sure to distinguish between { and } embedded
// within what we have assumed to be literals in the action code, the
// job of the lexer is merely to gather the code within the action
// (delimited by {}) and pass it to the parser as a single token.
// Note the special case of the {{ }} action, which is a forced
// action, that the generated code will execute regardless of
// backtracking (predicate) level.
// We know that this token will be asked for its text somewhere
// in the upcoming parse, so setting the text here to exclude
// the delimiting {} is no additional overhead.
//
ACTION
: NESTED_ACTION ('?' {$type = SEMPRED;} )?
{
// Note that because of the sempred detection above, we
// will not see {{ action }}? as a forced action, but as a semantic
// predicate.
if ( $text.startsWith("{{") && $text.endsWith("}}") ) {
// Switch types to a forced action
$type = FORCED_ACTION;
}
}
;
// ----------------
// Action structure
//
// Many language targets use {} as block delimiters and so we
// must recursively match {} delimited blocks to balance the
// braces. Additionally, we must make some assumptions about
// literal string representation in the target language. We assume
// that they are delimited by ' or " and so consume these
// in their own alts so as not to inadvertently match {}.
// This rule calls itself on matching a {
//
fragment
NESTED_ACTION
@init {
// Record the start line and offsets as if we need to report an
// unterminated block, then we want to show the start of the comment
// we think is broken, not the end, where people will have to try and work
// it out themselves.
//
int startLine = getLine();
int offset = getCharPositionInLine();
}
: // Action and other blocks start with opening {
//
'{'
(
// And now we can match one of a number of embedded
// elements within the action until we find a
// } that balances the opening {. If we do not find
// the balanced } then we will hit EOF and can issue
// an error message about the brace that we believe to
// be mismatched. This won't be foolproof but we will
// be able to at least report an error against the
// opening brace that we feel is in error and this will
// guide the user to the correction as best we can.
//
// An embedded {} block
//
NESTED_ACTION
| // What appears to be a literal
//
ACTION_CHAR_LITERAL
| // We have assumed that the target language has C/Java
// type comments.
//
COMMENT
| // What appears to be a literal
//
ACTION_STRING_LITERAL
| // What appears to be an escape sequence
//
ACTION_ESC
| // Some other single character that is not
// handled above
//
~('\\'|'"'|'\''|'/'|'{'|'}')
)*
(
// Correctly balanced closing brace
//
'}'
| // Looks like we have an imbalanced {} block, report
// with respect to the opening brace.
//
{
// TODO: Report imbalanced {}
System.out.println("Block starting at line " + startLine + " offset " + (offset+1) + " contains imbalanced {} or is missing a }");
}
)
;
// Keywords
// --------
// keywords used to specify ANTLR v3 grammars. Keywords may not be used as
// labels for rules or in any other context where they would be ambiguous
// with the keyword vs some other identifier
// OPTIONS and TOKENS must also consume the opening brace that captures
// their option block, as this is the easiest way to parse it separate
// to an ACTION block, despite it using the same {} delimiters.
//
OPTIONS : 'options' WSNLCHARS* '{' ;
TOKENS : 'tokens' WSNLCHARS* '{' ;
SCOPE : 'scope' ;
IMPORT : 'import' ;
FRAGMENT : 'fragment' ;
LEXER : 'lexer' ;
PARSER : 'parser' ;
TREE : 'tree' ;
GRAMMAR : 'grammar' ;
PROTECTED : 'protected' ;
PUBLIC : 'public' ;
PRIVATE : 'private' ;
RETURNS : 'returns' ;
THROWS : 'throws' ;
CATCH : 'catch' ;
FINALLY : 'finally' ;
TEMPLATE : 'template' ;
// -----------
// Punctuation
//
// Character sequences used as separators, delimiters, operators, etc
//
COLON : ':' ;
COLONCOLON : '::' ;
COMMA : ',' ;
SEMI : ';' ;
LPAREN : '(' ;
RPAREN : ')' ;
IMPLIES : '=>' ;
LT : '<' ;
GT : '>' ;
ASSIGN : '=' ;
QUESTION : '?' ;
BANG : '!' ;
STAR : '*' ;
PLUS : '+' ;
PLUS_ASSIGN : '+=' ;
OR : '|' ;
ROOT : '^' ;
DOLLAR : '$' ;
WILDCARD : '.' ;
RANGE : '..' ;
ETC : '...' ;
RARROW : '->' ;
TREE_BEGIN : '^(' ;
AT : '@' ;
NOT : '~' ;
RBRACE : '}' ;
// ---------------
// Token reference
//
// The names of all tokens must start with an upper case letter and so
// the lexer can distinguish them directly.
//
TOKEN_REF
: ('A'..'Z') ('A'..'Z' | 'a'..'z' | '0'..'9' | '_')*
;
// --------------
// Rule reference
//
// The names of all rules must start with a lower case letter
// so the lexer can distinguish them directly. The parser takes
// care of the case such as id=rulename
//
RULE_REF
: ('a'..'z') ('A'..'Z' | 'a'..'z' | '0'..'9' | '_')*
;
// ----------------------------
// Literals embedded in actions
//
// Note that we have made the assumption that the language used within
// actions uses the fairly standard " and ' delimiters for literals and
// that within these literals, characters are escaped using the \ character.
// There are some languages which do not conform to this in all cases, such
// as by using /string/ and so on. We will have to deal with such cases
// if they come up in targets.
//
// Within actions, or other structures that are not part of the ANTLR
// syntax, we may encounter literal characters. Within these, we do
// not want to inadvertently match things like '}' and so we eat them
// specifically. While this rule is called CHAR it allows for the fact that
// some languages may use/allow ' as the string delimiter.
//
fragment
ACTION_CHAR_LITERAL
: '\'' (('\\')=>ACTION_ESC | ~'\'' )* '\''
;
// Within actions, or other structures that are not part of the ANTLR
// syntax, we may encounter literal strings. Within these, we do
// not want to inadvertently match things like '}' and so we eat them
// specifically.
//
fragment
ACTION_STRING_LITERAL
: '"' (('\\')=>ACTION_ESC | ~'"')* '"'
;
// Within literal strings and characters that are not part of the ANTLR
// syntax, we must allow for escaped character sequences so that we do not
// inadvertently recognize the end of a string or character when the terminating
// delimiter has been escaped.
//
fragment
ACTION_ESC
: '\\' .
;
// -------
// Integer
//
// Obviously (I hope) match an arbitrarily long sequence of digits.
//
INT : ('0'..'9')+
;
// -----------
// Source spec
//
// A fragment rule for picking up information about an originating
// file from which the grammar we are parsing has been generated. This allows
// ANTLR to report errors against the originating file and not the generated
// file.
//
fragment
SRC : 'src' WSCHARS+ file=ACTION_STRING_LITERAL WSCHARS+ line=INT
{
// TODO: Add target specific code to change the source file name and current line number
//
}
;
// --------------
// Literal string
//
// ANTLR makes no distinction between a single character literal and a
// multi-character string. All literals are single quote delimited and
// may contain unicode escape sequences of the form \uxxxx, where x
// is a valid hexadecimal number (as per Java basically).
// If we have just the one character, then this is a CHAR_LITERAL and
// is of type STRING_LITERAL at > 1
//
fragment STRING_LITERAL : ;
CHAR_LITERAL
@init {
int len = 0;
}
: '\'' ( ( ESC_SEQ | ~('\\'|'\'') ) {len++;} )* '\''
{
// Change the token type if we have more than one character
//
if (len > 1) {
$type = STRING_LITERAL;
}
}
;
// A valid hex digit specification
//
fragment
HEX_DIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ;
// Any kind of escaped character that we can embed within ANTLR
// literal strings.
//
fragment
ESC_SEQ
: '\\'
(
// The standard escaped character set such as tab, newline,
// etc.
//
'b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\'
| // A Java style Unicode escape sequence
//
UNICODE_ESC
| // An illegal escape sequence
//
{
// TODO: Issue error message
//
}
)
;
fragment
UNICODE_ESC
@init {
// Counter telling us whether we have a valid number of
// hex digits in the escape sequence
//
int hCount = 0;
}
: 'u' // Lead-in for unicode escape sequence
// We now require 4 hex digits. Note though
// that we accept any number of characters
// and issue an error if we do not get 4. We cannot
// use an infinite count such as + because this
// might consume too many, so we lay out the lexical
// options and issue an error at the invalid paths.
//
(
(
HEX_DIGIT { hCount++; }
(
HEX_DIGIT { hCount++; }
(
HEX_DIGIT { hCount++; }
(
// Four valid hex digits, we are good
//
HEX_DIGIT { hCount++; }
| // Three valid digits
)
| // Two valid digits
)
| // One valid digit
)
)
| // No valid hex digits at all
)
// Now check the digit count and issue an error if we need to
//
{
if (hCount != 4) {
// TODO: Issue error message
}
}
;
// ----------
// Whitespace
//
// Characters and character constructs that are of no import
// to the parser and are used to make the grammar easier to read
// for humans.
//
WS
: (
' '
| '\t'
| '\r'
| '\n'
| '\f'
)+
{
$channel=2;
}
;
// A fragment rule for use in recognizing end of line in
// rules like COMMENT.
//
fragment
NLCHARS
: '\n' | '\r'
;
// A fragment rule for recognizing traditional whitespace
// characters within lexer rules.
//
fragment
WSCHARS
: ' ' | '\t' | '\f'
;
// A fragment rule for recognizing both traditional whitespace and
// end of line markers, when we don't care to distinguish but don't
// want any action code going on.
//
fragment
WSNLCHARS
: ' ' | '\t' | '\f' | '\n' | '\r'
;
// -----------------
// Illegal Character
//
// This is an illegal character trap which is always the last rule in the
// lexer specification. It matches a single character of any value and being
// the last rule in the file will match when no other rule knows what to do
// about the character. It is reported as an error but is not passed on to the
// parser. This means that the parser can still deal with the grammar file anyway
// but we will not try to analyse or code generate from a file with lexical
// errors.
//
ERRCHAR
: .
{
// TODO: Issue error message
//
skip();
}
;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,922 @@
/*
[The "BSD license"]
Copyright (c) 2010 Jim Idle, Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** The definitive ANTLR v3 grammar to parse ANTLR v4 grammars.
* The grammar builds ASTs that are sniffed by subsequent stages.
*/
parser grammar ANTLRParser;
options {
// Target language is Java, which is the default but being specific
// here as this grammar is also meant as a good example grammar
// for users.
language = Java;
// The output of this grammar is going to be an AST upon which
// we run a semantic checking phase, then the rest of the analysis
// including final code generation.
output = AST;
// The vocabulary (tokens and their int token types) we are using
// for the parser. This is generated by the lexer. The vocab will be extended
// to include the imaginary tokens below.
tokenVocab = ANTLRLexer;
ASTLabelType = GrammarAST;
}
// Imaginary Tokens
//
// Imaginary tokens do not exist as far as the lexer is concerned, and it cannot
// generate them. However we sometimes need additional 'tokens' to use as root
// nodes for the AST we are generating. The tokens section is where we
// specify any such tokens
tokens {
LEXER;
RULE;
RULES;
RULEMODIFIERS;
RULEACTIONS;
BLOCK;
OPTIONAL;
CLOSURE;
POSITIVE_CLOSURE;
SYNPRED;
RANGE;
CHAR_RANGE;
EPSILON;
ALT;
ALTLIST;
RESULT;
ID;
ARG;
ARGLIST;
RET;
LEXER_GRAMMAR;
PARSER_GRAMMAR;
TREE_GRAMMAR;
COMBINED_GRAMMAR;
INITACTION;
LABEL; // $x used in rewrite rules
TEMPLATE;
GATED_SEMPRED; // {p}? =>
SYN_SEMPRED; // (...) => it's a manually-specified synpred converted to sempred
BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
DOT;
// A generic node indicating a list of something when we don't
// really need to distinguish what we have a list of as the AST
// will 'know' by context.
//
LIST;
ELEMENT_OPTIONS; // TOKEN<options>
ST_RESULT; // distinguish between ST and tree rewrites
// NOTE: RESULT is declared once above; a duplicate RESULT declaration
// that previously appeared here was removed, as duplicate token names
// in a tokens{} section are flagged by ANTLR.
ALT_REWRITE; // indicate ALT is rewritten
}
// Include the copyright in this source and also the generated source;
// this @header block is emitted verbatim into the generated parser.
//
@header {
/*
[The "BSD licence"]
Copyright (c) 2005-2009 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.parse;
import org.antlr.v4.tool.*;
}
// The main entry point for parsing a V3 grammar from top to toe. This is
// the method call from whence to obtain the AST for the parse.
//
grammarSpec
:
// The grammar itself can have a documentation comment, which is the
// first terminal in the file.
//
DOC_COMMENT?
// Next we should see the type and name of the grammar file that
// we are about to parse.
//
grammarType id SEMI
// There now follows zero or more declaration sections that should
// be given to us before the rules are declared
//
// A number of things can be declared/stated before the grammar rules
// 'proper' are parsed. These include grammar imports (delegate), grammar
// options, imaginary token declarations, global scope declarations,
// and actions such as @header. In this rule we allow any number of
// these constructs in any order so that the grammar author is not
// constrained by some arbitrary order of declarations that nobody
// can remember. In the next phase of the parse, we verify that these
// constructs are valid, not repeated and so on.
prequelConstruct*
// We should now see at least one ANTLR EBNF style rule
// declaration. If the rules are missing we will let the
// semantic verification phase tell the user about it.
//
rules
// And we force ANTLR to process everything it finds in the input
// stream by specifying the need to match End Of File before the
// parse is complete.
//
EOF
// Having parsed everything in the file and accumulated the relevant
// subtrees, we can now rewrite everything into the main AST form
// that our tree walkers are expecting.
//
-> ^(grammarType // The grammar type is our root AST node
id // We need to identify the grammar of course
DOC_COMMENT? // We may or may not have a global documentation comment for the file
prequelConstruct* // The set of declarations we accumulated
rules // And of course, we need the set of rules we discovered
)
;
// ------------
// Grammar Type
//
// ANTLR will process a combined lexer/grammar, a stand alone parser,
// a stand alone lexer, and tree grammars. This rule determines which of
// these the grammar author is asking us to deal with. This choice will
// later allow us to throw out valid syntactical constructs that are
// not valid for certain grammar types, such as rule parameters and
// returns specified for lexer rules.
//
grammarType
: ( // A standalone lexer specification
LEXER -> LEXER_GRAMMAR
| // A standalone parser specification
PARSER -> PARSER_GRAMMAR
| // A standalone tree parser specification
TREE -> TREE_GRAMMAR
| // A combined lexer and parser specification
-> COMBINED_GRAMMAR
)
GRAMMAR
;
// This is the list of all constructs that can be declared before
// the set of rules that compose the grammar, and is invoked 0..n
// times by the grammarPrequel rule.
prequelConstruct
: // A list of options that affect analysis and/or code generation
optionsSpec
| // A list of grammars to which this grammar will delegate certain
// parts of the parsing sequence - a set of imported grammars
delegateGrammars
| // The declaration of any token types we need that are not already
// specified by a preceding grammar, such as when a parser declares
// imaginary tokens with which to construct the AST, or a rewriting
// tree parser adds further imaginary tokens to ones defined in a prior
// {tree} parser.
tokensSpec
| // A declaration of a scope that may be used in multiple rules within
// the grammar spec, rather than being declared and therefore associated
// with, a specific rule.
attrScope
| // A declaration of language target implemented constructs. All such
// action sections start with '@' and are given to the language target's
// StringTemplate group. For instance @parser::header and @lexer::header
// are gathered here.
action
;
// A list of options that affect analysis and/or code generation;
// the OPTIONS token itself consumed the opening '{', so only the
// closing RBRACE is matched here.
optionsSpec
: OPTIONS (option SEMI)* RBRACE -> ^(OPTIONS["OPTIONS"] option+)
;
// A single name=value option pair, rooted at the '=' in the AST.
option
: id ASSIGN^ optionValue
;
// ------------
// Option Value
//
// The actual value of an option - Doh!
//
optionValue
: // If the option value is a single word that conforms to the
// lexical rules of token or rule names, then the user may skip quotes
// and so on. Many option values meet this description
//
qid
| // The value is a long string
//
STRING_LITERAL
| // The value was a single character
//
CHAR_LITERAL
| // The value was an integer number
//
INT
| // Asterisk, used for things like k=*
//
STAR
;
// A list of grammars to which this grammar will delegate certain
// parts of the parsing sequence - a set of imported grammars
delegateGrammars
: IMPORT delegateGrammar (COMMA delegateGrammar)* SEMI -> ^(IMPORT delegateGrammar+)
;
// A possibly named grammar file that should be imported to this grammar
// and delegated to for the rules it specifies
delegateGrammar
: id ASSIGN^ id
| id
;
/** The declaration of any token types we need that are not already
* specified by a preceeding grammar, such as when a parser declares
* imaginary tokens with which to construct the AST, or a rewriting
* tree parser adds further imaginary tokens to ones defined in a prior
* {tree} parser.
*/
tokensSpec
: TOKENS tokenSpec+ RBRACE -> ^(TOKENS tokenSpec+)
;
tokenSpec
: TOKEN_REF
( ASSIGN (lit=STRING_LITERAL|lit=CHAR_LITERAL) -> ^(ASSIGN TOKEN_REF $lit)
| -> TOKEN_REF
)
SEMI
| RULE_REF // INVALID! (an error alt)
;
// A declaration of a scope that may be used in multiple rules within
// the grammar spec, rather than being declared within and therefore associated
// with, a specific rule.
attrScope
: SCOPE id ACTION -> ^(SCOPE id ACTION)
;
// A declaration of a language target specifc section,
// such as @header, @includes and so on. We do not verify these
// sections, they are just passed on to the language target.
/** Match stuff like @parser::members {int i;} */
action
: AT (actionScopeName COLONCOLON)? id ACTION -> ^(AT actionScopeName? id ACTION)
;
/** Sometimes the scope names will collide with keywords; allow them as
* ids for action scopes.
*/
actionScopeName
: id
| LEXER -> ID[$LEXER]
| PARSER -> ID[$PARSER]
;
rules
    : rule*
      // Rewrite with an enclosing node as this is good for counting
      // the number of rules and an easy marker for the walker to detect
      // that there are no rules.
      ->^(RULES rule*)
    ;

// The specification of an EBNF rule in ANTLR style, with all the
// rule level parameters, declarations, actions, rewrite specs and so
// on.
//
// Note that here we allow any number of rule declaration sections (such
// as scope, returns, etc) in any order and we let the upcoming semantic
// verification of the AST determine if things are repeated or if a
// particular functional element is not valid in the context of the
// grammar type, such as using returns in lexer rules and so on.
rule
    : // A rule may start with an optional documentation comment
      DOC_COMMENT?

      // Following the documentation, we can declare a rule to be
      // public, private and so on. This is only valid for some
      // language targets of course but the target will ignore these
      // modifiers if they make no sense in that language.
      ruleModifiers?

      // Next comes the rule name. Here we do not distinguish between
      // parser or lexer rules, the semantic verification phase will
      // reject any rules that make no sense, such as lexer rules in
      // a pure parser or tree parser.
      id

      // Immediately following the rulename, there may be a specification
      // of input parameters for the rule. We do not do anything with the
      // parameters here except gather them for future phases such as
      // semantic verification, type assignment etc. We require that
      // the input parameters are the next syntactically significant element
      // following the rule id.
      ARG_ACTION?

      ruleReturns?

      // Now, before the rule specification itself, which is introduced
      // with a COLON, we may have zero or more configuration sections.
      // As usual we just accept anything that is syntactically valid for
      // one form of the rule or another and let the semantic verification
      // phase throw out anything that is invalid.

      // At the rule level, a programmer may specify a number of sections, such
      // as scope declarations, rule return elements, @ sections (which may be
      // language target specific) and so on. We allow any number of these in any
      // order here and as usual rely on the semantic verification phase to reject
      // anything invalid using its additional context information. Here we are
      // context free and just accept anything that is a syntactically correct
      // construct.
      //
      rulePrequel*

      COLON

      // The rule is, at the top level, just a list of alts, with
      // finer grained structure defined within the alts.
      altListAsBlock

      SEMI

      exceptionGroup

      -> ^( RULE id DOC_COMMENT? ruleModifiers? ARG_ACTION?
            ruleReturns? rulePrequel* altListAsBlock exceptionGroup*
          )
    ;

// Many language targets support exceptions and the rule will
// generally be able to throw the language target equivalent
// of a recognition exception. The grammar programmer can
// specify a list of exceptions to catch or a generic catch all
// and the target language code generation template is
// responsible for generating code that makes sense.
exceptionGroup
    : exceptionHandler* finallyClause?
    ;

// Specifies a handler for a particular type of exception
// thrown by a rule
exceptionHandler
    : CATCH ARG_ACTION ACTION -> ^(CATCH ARG_ACTION ACTION)
    ;

// Specifies a block of code to run after the rule and any
// exception blocks have executed.
finallyClause
    : FINALLY ACTION -> ^(FINALLY ACTION)
    ;

// An individual rule level configuration as referenced by the ruleActions
// rule above.
//
rulePrequel
    : throwsSpec
    | ruleScopeSpec
    | optionsSpec
    | ruleAction
    ;

// A rule can return elements that it constructs as it executes.
// The return values are specified in a 'returns' prequel element,
// which contains COMMA separated declarations, where the declaration
// is target language specific. Here we see the returns declaration
// as a single lexical action element, to be processed later.
//
ruleReturns
    : RETURNS^ ARG_ACTION
    ;

// --------------
// Exception spec
//
// Some target languages, such as Java and C# support exceptions
// and they are specified as a prequel element for each rule that
// wishes to throw its own exception type. Note that the name of the
// exception is just a single word, so the header section of the grammar
// must specify the correct import statements (or language equivalent).
// Target languages that do not support exceptions just safely ignore
// them.
//
throwsSpec
    : THROWS qid (COMMA qid)* -> ^(THROWS qid+)
    ;

// As well as supporting globally specified scopes, ANTLR supports rule
// level scopes, which are tracked in a rule specific stack. Rule specific
// scopes are specified at this level, and globally specified scopes
// are merely referenced here.
ruleScopeSpec
    : SCOPE ACTION -> ^(SCOPE ACTION)
    | SCOPE id (COMMA id)* SEMI -> ^(SCOPE id+)
    ;

// @ Sections are generally target language specific things
// such as local variable declarations, code to run before the
// rule starts and so on. For instance most targets support the
// @init {} section where declarations and code can be placed
// to run before the rule is entered. The C target also has
// an @declarations {} section, where local variables are declared
// in order that the generated code is C89 compliant.
//
/** Match stuff like @init {int i;} */
ruleAction
    : AT id ACTION -> ^(AT id ACTION)
    ;

// A set of access modifiers that may be applied to rule declarations
// and which may or may not mean something to the target language.
// Note that the parser allows any number of these in any order and the
// semantic pass will throw out invalid combinations.
//
ruleModifiers
    : ruleModifier+ -> ^(RULEMODIFIERS ruleModifier+)
    ;

// An individual access modifier for a rule. The 'fragment' modifier
// is an internal indication for lexer rules that they do not match
// from the input but are like subroutines for other lexer rules to
// reuse for certain lexical patterns. The other modifiers are passed
// to the code generation templates and may be ignored by the template
// if they are of no use in that language.
ruleModifier
    : PUBLIC
    | PRIVATE
    | PROTECTED
    | FRAGMENT
    ;
altList
    : alternative (OR alternative)* -> alternative+
    ;

// A set of alts, rewritten as a BLOCK for generic processing
// in tree walkers. Used by the rule 'rule' so that the list of
// alts for a rule appears as a BLOCK containing the alts and
// can be processed by the generic BLOCK rule. Note that we
// use a separate rule so that the BLOCK node has start and stop
// boundaries set correctly by rule post processing of rewrites.
altListAsBlock
    : altList -> ^(BLOCK altList)
    ;

// An individual alt with an optional rewrite clause for the
// elements of the alt.
alternative
    : elements
      ( rewrite -> ^(ALT_REWRITE elements rewrite)
      |         -> elements
      )
    | rewrite -> ^(ALT_REWRITE ^(ALT EPSILON) rewrite) // empty alt with rewrite
    |         -> ^(ALT EPSILON) // empty alt
    ;

// One or more elements, collected and wrapped in an ALT node.
elements
    : e+=element+ -> ^(ALT $e+)
    ;

element
    : labeledElement
      ( ebnfSuffix -> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] labeledElement ) ))
      |            -> labeledElement
      )
    | atom
      ( ebnfSuffix -> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] atom ) ) )
      |            -> atom
      )
    | ebnf
    | ACTION
    | SEMPRED
      ( IMPLIES -> GATED_SEMPRED[$IMPLIES] // {p}? => turns into a gated predicate
      |         -> SEMPRED
      )
    | treeSpec
      ( ebnfSuffix -> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] treeSpec ) ) )
      |            -> treeSpec
      )
    ;

// An element with a label attached via = (single) or += (list accumulation).
labeledElement : id (ASSIGN^|PLUS_ASSIGN^) (atom|block) ;

// Tree specifying alt
// Tree grammars need to have alts that describe a tree structure they
// will walk of course. Alts for trees therefore start with ^( XXX, which
// says we will see a root node of XXX then DOWN etc
treeSpec
    : TREE_BEGIN
      // Only a subset of elements are allowed to be a root node. However
      // we allow any element to appear here and reject silly ones later
      // when we walk the AST.
      element
      // After the tree root we get the usual suspects,
      // all members of the element set
      element+
      RPAREN
      -> ^(TREE_BEGIN element+)
    ;
// A block of grammar structure optionally followed by standard EBNF
// notation, or ANTLR specific notation. I.E. ? + ^ and so on
ebnf
    : block
      // And now we see if we have any of the optional suffixes and rewrite
      // the AST for this rule accordingly
      //
      ( blockSuffix -> ^(blockSuffix block)
      |             -> block
      )
    ;

// The standard EBNF suffixes with additional components that make
// sense only to ANTLR, in the context of a grammar block.
// BUGFIX: renamed from the misspelled 'blockSuffixe' so the rule name
// matches the companion tree grammar (ASTVerifier.g uses blockSuffix);
// the only reference is in 'ebnf' above, updated in the same change.
blockSuffix
    : ebnfSuffix // Standard EBNF
      // ANTLR Specific Suffixes
    | ROOT
    | IMPLIES    // We will change this to syn/sem pred in the next phase
    | BANG
    ;
ebnfSuffix
@init {
    Token op = input.LT(1); // remember the suffix token so rewrites keep its position
}
    : QUESTION -> OPTIONAL[op]
    | STAR     -> CLOSURE[op]
    | PLUS     -> POSITIVE_CLOSURE[op]
    ;

atom: range (ROOT^ | BANG^)? // Range x..y - only valid in lexers
    | // Qualified reference delegate.rule. This must be
      // lexically contiguous (no spaces either side of the DOT)
      // otherwise it is two references with a wildcard in between
      // and not a qualified reference.
      // NOTE(review): the adjacency predicate compares column positions only,
      // not line numbers; tokens on different lines could in theory satisfy
      // it -- confirm the intent.
      {
          input.LT(1).getCharPositionInLine()+input.LT(1).getText().length()==
          input.LT(2).getCharPositionInLine() &&
          input.LT(2).getCharPositionInLine()+1==input.LT(3).getCharPositionInLine()
      }?
      id WILDCARD ruleref
      -> ^(DOT[$WILDCARD] id ruleref)
    | // Qualified reference delegate.token.
      {
          input.LT(1).getCharPositionInLine()+input.LT(1).getText().length()==
          input.LT(2).getCharPositionInLine() &&
          input.LT(2).getCharPositionInLine()+1==input.LT(3).getCharPositionInLine()
      }?
      id WILDCARD terminal
      -> ^(DOT[$WILDCARD] id terminal)
    | terminal
    | ruleref
    | notSet (ROOT^|BANG^)?
    ;

// --------------------
// Inverted element set
//
// A set of characters (in a lexer) or terminal tokens, if a parser,
// that are then used to create the inverse set of them.
//
notSet
    : NOT notTerminal -> ^(NOT notTerminal)
    | NOT block       -> ^(NOT block)
    ;

// -------------------
// Valid set terminals
//
// The terminal tokens that can be members of an inverse set (for
// matching anything BUT these)
//
notTerminal
    : CHAR_LITERAL
    | TOKEN_REF
    | STRING_LITERAL
    ;

// -------------
// Grammar Block
//
// Anywhere where an element is valid, the grammar may start a new block
// of alts by surrounding that block with ( ). A new block may also have a set
// of options, which apply only to that block.
//
block
    : LPAREN
      // A new blocked altlist may have a set of options set specifically
      // for it.
      //
      optionsSpec?
      (
          // Optional @ sections OR an action, however we allow both
          // to be present and will let the semantic checking phase determine
          // what is allowable.
          //
          ra+=ruleAction*
          ACTION?
          // COLON is optional with a block
          //
          COLON
      )?
      // List of alts for this Paren block
      //
      altList
      RPAREN
      // Rewrite as a block
      //
      -> ^(BLOCK optionsSpec? $ra* ACTION? altList )
    ;

// ----------------
// Parser rule ref
//
// Reference to a parser rule with optional arguments and optional
// directive to become the root node or ignore the tree produced
//
ruleref
    : RULE_REF ARG_ACTION?
      ( (op=ROOT|op=BANG) -> ^($op RULE_REF ARG_ACTION?)
      |                   -> ^(RULE_REF ARG_ACTION?)
      )
    ;

// ---------------
// Character Range
//
// Specifies a range of characters. Valid for lexer rules only, but
// we do not check that here, the tree walkers should do that.
// Note also that the parser also allows through more than just
// character literals so that we can produce a much nicer semantic
// error about any abuse of the .. operator.
//
range
    : rangeElement RANGE^ rangeElement
    ;

// -----------------
// Atoms for a range
//
// All the things that we are going to allow syntactically as the subject of a range
// operator. We do not want to restrict this just to CHAR_LITERAL as then
// we will issue a syntax error that is perhaps none too obvious, even though we
// say 'expecting CHAR_LITERAL'. Instead we will check these semantically
//
rangeElement
    : CHAR_LITERAL   // Valid
    | STRING_LITERAL // Invalid
    | RULE_REF       // Invalid
    | TOKEN_REF      // Invalid
    ;

terminal
    : ( CHAR_LITERAL elementOptions?             -> ^(CHAR_LITERAL elementOptions?)
        // Args are only valid for lexer rules
      | TOKEN_REF ARG_ACTION? elementOptions?    -> ^(TOKEN_REF ARG_ACTION? elementOptions?)
      | STRING_LITERAL elementOptions?           -> ^(STRING_LITERAL elementOptions?)
      | // Wildcard '.' means any character in a lexer, any
        // token in parser and any token or node in a tree parser
        // Because the terminal rule is allowed to be the node
        // specification for the start of a tree rule, we must
        // later check that wildcard was not used for that.
        DOT elementOptions?                      -> ^(WILDCARD[$DOT] elementOptions?)
      )
      ( ROOT -> ^(ROOT $terminal)
      | BANG -> ^(BANG $terminal)
      )?
    ;
// ---------------
// Generic options
//
// Terminals may be adorned with certain options when
// referenced in the grammar: TOK<,,,>
//
elementOptions
    : // Options begin with < and end with >
      //
      LT elementOption (COMMA elementOption)* GT -> ^(ELEMENT_OPTIONS elementOption+)
    ;

// When used with elements we can specify what the tree node type can
// be and also assign settings of various options (which we do not check here)
elementOption
    : // This format indicates the default node option
      qid
    | // This format indicates option assignment
      id ASSIGN^ (qid | STRING_LITERAL)
    ;

// Zero or more predicated rewrites followed by the unconditional one.
rewrite
    : predicatedRewrite* nakedRewrite -> predicatedRewrite* nakedRewrite
    ;

// A rewrite guarded by a semantic predicate: -> {p}? tree-or-template
predicatedRewrite
    : RARROW SEMPRED rewriteAlt
      -> {$rewriteAlt.isTemplate}? ^(ST_RESULT[$RARROW] SEMPRED rewriteAlt)
      ->                           ^(RESULT[$RARROW] SEMPRED rewriteAlt)
    ;

// The final, unguarded rewrite alternative.
nakedRewrite
    : RARROW rewriteAlt -> {$rewriteAlt.isTemplate}? ^(ST_RESULT[$RARROW] rewriteAlt)
                        ->                           ^(RESULT[$RARROW] rewriteAlt)
    ;

// distinguish between ST and tree rewrites; for ETC/EPSILON and trees,
// rule altAndRewrite makes REWRITE root. for ST, we use ST_REWRITE
rewriteAlt returns [boolean isTemplate]
options {backtrack=true;}
    : // try to parse a template rewrite
      rewriteTemplate {$isTemplate=true;}
    | // If we are not building templates, then we must be
      // building ASTs or have rewrites in a grammar that does not
      // have output=AST; options. If that is the case, we will issue
      // errors/warnings in the next phase, so we just eat them here
      rewriteTreeAlt
    | ETC
    | /* empty rewrite */ -> EPSILON
    ;

rewriteTreeAlt
    : rewriteTreeElement+ -> ^(ALT["ALT"] rewriteTreeElement+)
    ;

rewriteTreeElement
    : rewriteTreeAtom
    | rewriteTreeAtom ebnfSuffix
      -> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewriteTreeAtom)) )
    | rewriteTree
      ( ebnfSuffix
        -> ^(ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewriteTree)) )
      | -> rewriteTree
      )
    | rewriteTreeEbnf
    ;

rewriteTreeAtom
    : CHAR_LITERAL
    | TOKEN_REF ARG_ACTION? -> ^(TOKEN_REF ARG_ACTION?) // for imaginary nodes
    | RULE_REF
    | STRING_LITERAL
    | DOLLAR id -> LABEL[$DOLLAR,$id.text] // reference to a label in a rewrite rule
    | ACTION
    ;

// The @after action repositions the synthesized BLOCK node onto the LPAREN's
// line/column so error messages point at the start of the subrule.
rewriteTreeEbnf
@init {
    Token firstToken = input.LT(1);
}
@after {
    $rewriteTreeEbnf.tree.getToken().setLine(firstToken.getLine());
    $rewriteTreeEbnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
}
    : lp=LPAREN rewriteTreeAlt RPAREN ebnfSuffix -> ^(ebnfSuffix ^(BLOCK[$lp,"BLOCK"] rewriteTreeAlt))
    ;

rewriteTree
    : TREE_BEGIN rewriteTreeAtom rewriteTreeElement* RPAREN
      -> ^(TREE_BEGIN rewriteTreeAtom rewriteTreeElement* )
    ;

/** Build a tree for a template rewrite:
      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
    ID can be "template" keyword.  If first child is ACTION then it's
    an indirect template ref

    -> foo(a={...}, b={...})
    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
    -> {st-expr} // st-expr evaluates to ST
 */
rewriteTemplate
    : // -> template(a={...},...) "..."    inline template
      TEMPLATE LPAREN rewriteTemplateArgs RPAREN
      ( str=DOUBLE_QUOTE_STRING_LITERAL | str=DOUBLE_ANGLE_STRING_LITERAL )
      -> ^(TEMPLATE[$TEMPLATE,"TEMPLATE"] rewriteTemplateArgs? $str)
    | // -> foo(a={...}, ...)
      rewriteTemplateRef
    | // -> ({expr})(a={...}, ...)
      rewriteIndirectTemplateHead
    | // -> {...}
      ACTION
    ;

/** -> foo(a={...}, ...) */
rewriteTemplateRef
    : id LPAREN rewriteTemplateArgs RPAREN
      -> ^(TEMPLATE[$LPAREN,"TEMPLATE"] id rewriteTemplateArgs?)
    ;

/** -> ({expr})(a={...}, ...) */
rewriteIndirectTemplateHead
    : lp=LPAREN ACTION RPAREN LPAREN rewriteTemplateArgs RPAREN
      -> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewriteTemplateArgs?)
    ;

rewriteTemplateArgs
    : rewriteTemplateArg (COMMA rewriteTemplateArg)*
      -> ^(ARGLIST rewriteTemplateArg+)
    | // an empty argument list is also legal
    ;

rewriteTemplateArg
    : id ASSIGN ACTION -> ^(ARG[$ASSIGN] id ACTION)
    ;

// The name of the grammar, and indeed some other grammar elements may
// come through to the parser looking like a rule reference or a token
// reference, hence this rule is used to pick up whichever it is and rewrite
// it as a generic ID token.
id
    : RULE_REF  ->ID[$RULE_REF]
    | TOKEN_REF ->ID[$TOKEN_REF]
    ;

// A qualified name such as delegate.rule; collapsed to one ID token.
qid : id (WILDCARD id)* -> ID[$text] ;

alternativeEntry : alternative EOF ; // allow gunit to call alternative and see EOF afterwards
elementEntry : element EOF ;
ruleEntry : rule EOF ;
blockEntry : block EOF ;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,414 @@
/*
[The "BSD license"]
Copyright (c) 2010 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** The definitive ANTLR v3 tree grammar to parse ANTLR v4 grammars.
 *  Parses trees created in ANTLRParser.g.
 */
tree grammar ASTVerifier;

options {
    language     = Java;
    tokenVocab   = ANTLRParser;
    // BUGFIX: the nodes built for these trees are GrammarAST instances
    // (see GrammarASTAdaptor.create and the (GrammarAST) casts in @members);
    // "ASTGrammar" named a nonexistent type.
    ASTLabelType = GrammarAST;
}
// Include the copyright in this source and also the generated source
@header {
/*
[The "BSD license"]
Copyright (c) 2005-2009 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.parse;
import org.antlr.v4.tool.*;
}
@members {
    /** Build a rich error message containing the rule invocation stack and
     *  a six-node window of tree context around the error point.
     */
    public String getErrorMessage(RecognitionException e,
                                  String[] tokenNames)
    {
        List stack = getRuleInvocationStack(e, this.getClass().getName());
        String msg = null;
        // three nodes before and after the offending node, which is marked >>> <<<
        String inputContext =
            ((CommonTree)input.LT(-3)).token+" "+
            ((CommonTree)input.LT(-2)).token+" "+
            ((CommonTree)input.LT(-1)).token+" >>>"+
            ((CommonTree)input.LT(1)).token+"<<< "+
            ((CommonTree)input.LT(2)).token+" "+
            ((CommonTree)input.LT(3)).token;
        if ( e instanceof NoViableAltException ) {
            NoViableAltException nvae = (NoViableAltException)e;
            msg = " no viable alt; token="+e.token+
                  " (decision="+nvae.decisionNumber+
                  " state "+nvae.stateNumber+")"+
                  " decision=<<"+nvae.grammarDecisionDescription+">>";
        }
        else {
            msg = super.getErrorMessage(e, tokenNames);
        }
        return stack+" "+msg+"\ncontext=..."+inputContext+"...";
    }

    /** Show the offending token itself rather than just its type name. */
    public String getTokenErrorDisplay(Token t) {
        return t.toString();
    }

    /** Trace rule entry with a four-node lookahead window; invoked by the
     *  generated walker when tracing is enabled.
     */
    public void traceIn(String ruleName, int ruleIndex)  {
        System.out.print("enter "+ruleName+" "+
                         ((GrammarAST)input.LT(1)).token+" "+
                         ((GrammarAST)input.LT(2)).token+" "+
                         ((GrammarAST)input.LT(3)).token+" "+
                         ((GrammarAST)input.LT(4)).token);
        if ( state.backtracking>0 ) {
            System.out.print(" backtracking="+state.backtracking);
        }
        System.out.println();
    }
}
// Each rule below accepts exactly the AST shape produced by the
// corresponding rewrite in ANTLRParser.g; this grammar verifies structure
// and performs no transformation.
grammarSpec
    : ^(grammarType ID DOC_COMMENT? prequelConstruct* rules)
    ;

grammarType
    : LEXER_GRAMMAR | PARSER_GRAMMAR | TREE_GRAMMAR | COMBINED_GRAMMAR
    ;

prequelConstruct
    : optionsSpec
    | delegateGrammars
    | tokensSpec
    | attrScope
    | action
    ;

optionsSpec
    : ^(OPTIONS option+)
    ;

option
    : ^(ASSIGN ID optionValue)
    ;

optionValue
    : ID
    | STRING_LITERAL
    | CHAR_LITERAL
    | INT
    | STAR
    ;

delegateGrammars
    : ^(IMPORT delegateGrammar+)
    ;

delegateGrammar
    : ^(ASSIGN ID ID)
    | ID
    ;

tokensSpec
    : ^(TOKENS tokenSpec+)
    ;

tokenSpec
    : ^(ASSIGN TOKEN_REF STRING_LITERAL)
    | ^(ASSIGN TOKEN_REF CHAR_LITERAL)
    | TOKEN_REF
    | RULE_REF
    ;

attrScope
    : ^(SCOPE ID ACTION)
    ;

// @scope::name {action}; the first, optional ID is the action scope.
action
    : ^(AT ID? ID ACTION)
    ;

rules
    : ^(RULES rule*)
    ;

rule: ^( RULE ID DOC_COMMENT? ruleModifiers? ARG_ACTION?
         ruleReturns? rulePrequel* altListAsBlock exceptionGroup
       )
    ;

exceptionGroup
    : exceptionHandler* finallyClause?
    ;

exceptionHandler
    : ^(CATCH ARG_ACTION ACTION)
    ;

finallyClause
    : ^(FINALLY ACTION)
    ;

rulePrequel
    : throwsSpec
    | ruleScopeSpec
    | optionsSpec
    | ruleAction
    ;

ruleReturns
    : ^(RETURNS ARG_ACTION)
    ;

throwsSpec
    : ^(THROWS ID+)
    ;

ruleScopeSpec
    : ^(SCOPE ACTION)
    | ^(SCOPE ID+)
    ;

ruleAction
    : ^(AT ID ACTION)
    ;

ruleModifiers
    : ^(RULEMODIFIERS ruleModifier+)
    ;

ruleModifier
    : PUBLIC
    | PRIVATE
    | PROTECTED
    | FRAGMENT
    ;

altList
    : alternative+
    ;

altListAsBlock
    : ^(BLOCK altList)
    ;

alternative
    : ^(ALT_REWRITE alternative rewrite)
    | ^(ALT EPSILON)
    | elements
    ;

elements
    : ^(ALT element+)
    ;

element
    : labeledElement
    | atom
    | ebnf
    | ACTION
    | SEMPRED
    | GATED_SEMPRED
    | treeSpec
    ;

labeledElement
    : ^(ASSIGN ID (atom|block))
    | ^(PLUS_ASSIGN ID (atom|block))
    ;

treeSpec
    : ^(TREE_BEGIN element+)
    ;

ebnf: ^(blockSuffix block)
    | block
    ;

blockSuffix
    : ebnfSuffix
    | ROOT
    | IMPLIES
    | BANG
    ;

ebnfSuffix
    : OPTIONAL
    | CLOSURE
    | POSITIVE_CLOSURE
    ;
atom: ^(ROOT range)
    | ^(BANG range)
    | ^(ROOT notSet)
    | ^(BANG notSet)
    | range
    | ^(DOT ID terminal) // qualified reference delegate.token
    | ^(DOT ID ruleref)  // qualified reference delegate.rule
    | terminal
    | ruleref
    ;

notSet
    : ^(NOT notTerminal)
    | ^(NOT block)
    ;

notTerminal
    : CHAR_LITERAL
    | TOKEN_REF
    | STRING_LITERAL
    ;

block
    : ^(BLOCK optionsSpec? ruleAction* ACTION? altList)
    ;

ruleref
    : ^(ROOT RULE_REF ARG_ACTION?)
    | ^(BANG RULE_REF ARG_ACTION?)
    | ^(RULE_REF ARG_ACTION?)
    ;

range
    : ^(RANGE rangeElement rangeElement)
    ;

// Non-CHAR_LITERAL alternatives are syntactically accepted by the parser
// so that later phases can issue a useful semantic error.
rangeElement
    : CHAR_LITERAL
    | STRING_LITERAL
    | RULE_REF
    | TOKEN_REF
    ;

terminal
    : ^(CHAR_LITERAL elementOptions)
    | CHAR_LITERAL
    | ^(STRING_LITERAL elementOptions)
    | STRING_LITERAL
    | ^(TOKEN_REF elementOptions)
    | TOKEN_REF
    | ^(WILDCARD elementOptions)
    | WILDCARD
    | ^(ROOT terminal)
    | ^(BANG terminal)
    ;

elementOptions
    : ^(ELEMENT_OPTIONS elementOption+)
    ;

elementOption
    : ID
    | ^(ASSIGN ID ID)
    | ^(ASSIGN ID STRING_LITERAL)
    ;

rewrite
    : predicatedRewrite* nakedRewrite
    ;

predicatedRewrite
    : ^(ST_RESULT SEMPRED rewriteAlt)
    | ^(RESULT SEMPRED rewriteAlt)
    ;

nakedRewrite
    : ^(ST_RESULT rewriteAlt)
    | ^(RESULT rewriteAlt)
    ;

rewriteAlt
    : rewriteTemplate
    | rewriteTreeAlt
    | ETC
    | EPSILON
    ;

rewriteTreeAlt
    : ^(ALT rewriteTreeElement+)
    ;

rewriteTreeElement
    : rewriteTreeAtom
    | rewriteTree
    | rewriteTreeEbnf
    ;

rewriteTreeAtom
    : CHAR_LITERAL
    | ^(TOKEN_REF ARG_ACTION) // imaginary node with constructor arguments
    | TOKEN_REF
    | RULE_REF
    | STRING_LITERAL
    | LABEL
    | ACTION
    ;

rewriteTreeEbnf
    : ^(ebnfSuffix ^(BLOCK rewriteTreeAlt))
    ;

rewriteTree
    : ^(TREE_BEGIN rewriteTreeAtom rewriteTreeElement* )
    ;

rewriteTemplate
    : ^(TEMPLATE rewriteTemplateArgs? DOUBLE_QUOTE_STRING_LITERAL)
    | ^(TEMPLATE rewriteTemplateArgs? DOUBLE_ANGLE_STRING_LITERAL)
    | rewriteTemplateRef
    | rewriteIndirectTemplateHead
    | ACTION
    ;

rewriteTemplateRef
    : ^(TEMPLATE ID rewriteTemplateArgs?)
    ;

rewriteIndirectTemplateHead
    : ^(TEMPLATE ACTION rewriteTemplateArgs?)
    ;

rewriteTemplateArgs
    : ^(ARGLIST rewriteTemplateArg+)
    ;

rewriteTemplateArg
    : ^(ARG ID ACTION)
    ;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,22 @@
package org.antlr.v4.parse;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.CommonTreeAdaptor;
import org.antlr.v4.tool.GrammarAST;
import org.antlr.v4.tool.GrammarASTErrorNode;
/** TreeAdaptor used by the v4 tool's parsers: every node it builds is a
 *  GrammarAST, and syntax-error regions become GrammarASTErrorNode
 *  placeholders instead of generic error nodes.
 */
public class GrammarASTAdaptor extends CommonTreeAdaptor {
    /** Wrap a token in a GrammarAST node. */
    public Object create(Token token) {
        return new GrammarAST(token);
    }

    /** Duplicate by re-wrapping the node's underlying token; null maps to null. */
    public Object dupNode(Object t) {
        return (t == null) ? null : create(((GrammarAST) t).token);
    }

    /** Represent a bad span of input as a special error node in the tree. */
    public Object errorNode(TokenStream input, Token start, Token stop,
                            RecognitionException e)
    {
        return new GrammarASTErrorNode(input, start, stop, e);
    }
}

View File

@ -0,0 +1,801 @@
/*
[The "BSD license"]
Copyright (c) 2005-2009 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.v4.Tool;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.TokenSource;
import org.antlr.runtime.Token;
import org.antlr.v4.tool.ANTLRErrorListener;
import org.antlr.v4.tool.ErrorManager;
import org.antlr.v4.tool.Message;
import org.junit.Before;
import org.junit.After;
import org.junit.Assert;
import org.stringtemplate.ST;
import java.io.*;
import java.util.*;
public abstract class BaseTest {
public static final String jikes = null;//"/usr/bin/jikes";
public static final String pathSep = System.getProperty("path.separator");
/**
* When runnning from Maven, the junit tests are run via the surefire plugin. It sets the
* classpath for the test environment into the following property. We need to pick this up
* for the junit tests that are going to generate and try to run code.
*/
public static final String SUREFIRE_CLASSPATH = System.getProperty("surefire.test.class.path", "");
/**
* Build up the full classpath we need, including the surefire path (if present)
*/
public static final String CLASSPATH = System.getProperty("java.class.path") + (SUREFIRE_CLASSPATH.equals("") ? "" : pathSep + SUREFIRE_CLASSPATH);
public String tmpdir = null;
/** reset during setUp and set to true if we find a problem */
protected boolean lastTestFailed = false;
/** If error during parser execution, store stderr here; can't return
* stdout and stderr. This doesn't trap errors from running antlr.
*/
protected String stderrDuringParse;
@Before
public void setUp() throws Exception {
    // Assume success; asserts that fail flip this so tearDown keeps tmpdir.
    lastTestFailed = false;
    // Isolate every test run in its own timestamped scratch directory.
    File scratch = new File(System.getProperty("java.io.tmpdir"),
                            "antlr-" + getClass().getName() + "-" + System.currentTimeMillis());
    tmpdir = scratch.getAbsolutePath();
    ErrorManager.resetErrorState();
}
@After
public void tearDown() throws Exception {
    // Keep the scratch dir for post-mortem inspection when the test failed;
    // otherwise clean it up.
    if ( lastTestFailed ) return;
    eraseTempDir();
}
/** Build a Tool over the given command-line args, generating into tmpdir. */
protected org.antlr.v4.Tool newTool(String[] args) {
    Tool t = new Tool(args);
    t.setOutputDirectory(tmpdir);
    return t;
}
/** Argument-free variant; output still goes to this test's tmpdir. */
protected Tool newTool() {
    Tool t = new Tool();
    t.setOutputDirectory(tmpdir);
    return t;
}
/** Compile tmpdir/fileName with javac (or jikes when configured), writing
 *  classes into tmpdir.  Compiler stdout/stderr, if any, is echoed to
 *  System.err prefixed with the command line that produced it.
 *  @param fileName source file name relative to tmpdir
 *  @return true if the compiler exited with status 0
 */
protected boolean compile(String fileName) {
    String compiler = "javac";
    String classpathOption = "-classpath";

    if (jikes!=null) {
        compiler = jikes;
        classpathOption = "-bootclasspath"; // jikes wants the boot classpath
    }

    String sourcePath = tmpdir+"/"+fileName;
    String[] args = new String[] {
        compiler, "-d", tmpdir,
        classpathOption, tmpdir+pathSep+CLASSPATH,
        sourcePath
    };
    // BUGFIX: previously logged the bare fileName although the compiler was
    // actually given the tmpdir-qualified path; keep the logged command line
    // identical to the args we exec so diagnostics are reproducible.
    String cmdLine = compiler+" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+sourcePath;
    //System.out.println("compile: "+cmdLine);

    File outputDir = new File(tmpdir);
    try {
        Process process =
            Runtime.getRuntime().exec(args, null, outputDir);
        // Drain both streams concurrently so the child cannot block on a full pipe.
        StreamVacuum stdout = new StreamVacuum(process.getInputStream());
        StreamVacuum stderr = new StreamVacuum(process.getErrorStream());
        stdout.start();
        stderr.start();
        process.waitFor();
        stdout.join();
        stderr.join();
        if ( stdout.toString().length()>0 ) {
            System.err.println("compile stdout from: "+cmdLine);
            System.err.println(stdout);
        }
        if ( stderr.toString().length()>0 ) {
            System.err.println("compile stderr from: "+cmdLine);
            System.err.println(stderr);
        }
        int ret = process.exitValue();
        return ret==0;
    }
    catch (Exception e) {
        System.err.println("can't exec compilation");
        e.printStackTrace(System.err);
        return false;
    }
}
/** Write grammarStr to tmpdir/fileName, then run the ANTLR tool over
 *  grammarFileName with -o and -lib pointing at tmpdir.  If the installed
 *  error listener is an ErrorQueue, any collected errors are dumped to
 *  System.err together with the offending grammar text.
 *  Return true if all is ok, no errors.
 */
protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) {
    boolean allIsWell = true;
    mkdir(tmpdir);
    writeFile(tmpdir, fileName, grammarStr);
    try {
        // Typed list; the file already relies on Java 5 (JUnit annotations).
        final List<String> options = new ArrayList<String>();
        if ( debug ) {
            options.add("-debug");
        }
        options.add("-o");
        options.add(tmpdir);
        options.add("-lib");
        options.add(tmpdir);
        options.add(new File(tmpdir,grammarFileName).toString());
        final String[] optionsA = new String[options.size()];
        options.toArray(optionsA);
        /*
        final ErrorQueue equeue = new ErrorQueue();
        ErrorManager.setErrorListener(equeue);
        */
        Tool antlr = newTool(optionsA);
        antlr.process();
        ANTLRErrorListener listener = ErrorManager.getErrorListener();
        if ( listener instanceof ErrorQueue ) {
            ErrorQueue equeue = (ErrorQueue)listener;
            if ( equeue.errors.size()>0 ) {
                allIsWell = false;
                System.err.println("antlr reports errors from "+options);
                for (int i = 0; i < equeue.errors.size(); i++) {
                    Message msg = (Message) equeue.errors.get(i);
                    System.err.println(msg);
                }
                // Show the grammar itself to ease post-mortem debugging.
                System.out.println("!!!\ngrammar:");
                System.out.println(grammarStr);
                System.out.println("###");
            }
        }
    }
    catch (Exception e) {
        allIsWell = false;
        System.err.println("problems building grammar: "+e);
        e.printStackTrace(System.err);
    }
    return allIsWell;
}
/** Generate, compile, and run a lexer-only grammar over the given input.
 *  Returns the recognizer's stdout; stderr (if any) lands in
 *  stderrDuringParse via rawExecRecognizer.
 */
protected String execLexer(String grammarFileName,
String grammarStr,
String lexerName,
String input,
boolean debug)
{
// null parserName: lexer grammar, nothing else to build.
rawGenerateAndBuildRecognizer(grammarFileName,
grammarStr,
null,
lexerName,
debug);
writeFile(tmpdir, "input", input);
// No parser, no start rule, no tree building -- lexer test harness only.
return rawExecRecognizer(null,
null,
lexerName,
null,
null,
false,
false,
false,
debug);
}
/** Generate, compile, and run a combined lexer/parser grammar over the
 *  given input, invoking startRuleName. Returns the recognizer's stdout.
 */
protected String execParser(String grammarFileName,
String grammarStr,
String parserName,
String lexerName,
String startRuleName,
String input, boolean debug)
{
    rawGenerateAndBuildRecognizer(grammarFileName, grammarStr,
                                  parserName, lexerName, debug);
    writeFile(tmpdir, "input", input);
    // Sniff the grammar text for its output option to pick the right
    // generated test harness.
    boolean buildsTrees =
        grammarStr.contains("output=AST") || grammarStr.contains("output = AST");
    boolean buildsTemplates =
        grammarStr.contains("output=template") || grammarStr.contains("output = template");
    return rawExecRecognizer(parserName, null, lexerName, startRuleName, null,
                             buildsTrees, buildsTemplates, false, debug);
}
/** Convenience overload: run a parser grammar plus tree-parser grammar
 *  with debug off. Delegates to the full execTreeParser below.
 */
protected String execTreeParser(String parserGrammarFileName,
String parserGrammarStr,
String parserName,
String treeParserGrammarFileName,
String treeParserGrammarStr,
String treeParserName,
String lexerName,
String parserStartRuleName,
String treeParserStartRuleName,
String input)
{
return execTreeParser(parserGrammarFileName,
parserGrammarStr,
parserName,
treeParserGrammarFileName,
treeParserGrammarStr,
treeParserName,
lexerName,
parserStartRuleName,
treeParserStartRuleName,
input,
false);
}
/** Generate and compile both a parser grammar and a tree-parser grammar,
 *  then run them over the input: parser first, its tree fed to the walker.
 *  Returns the recognizer harness's stdout.
 */
protected String execTreeParser(String parserGrammarFileName,
String parserGrammarStr,
String parserName,
String treeParserGrammarFileName,
String treeParserGrammarStr,
String treeParserName,
String lexerName,
String parserStartRuleName,
String treeParserStartRuleName,
String input,
boolean debug)
{
    // build the parser
    rawGenerateAndBuildRecognizer(parserGrammarFileName, parserGrammarStr,
                                  parserName, lexerName, debug);
    // build the tree parser
    rawGenerateAndBuildRecognizer(treeParserGrammarFileName, treeParserGrammarStr,
                                  treeParserName, lexerName, debug);
    writeFile(tmpdir, "input", input);
    // Sniff each grammar's output option to select the harness flavor.
    boolean parserMakesTrees =
        parserGrammarStr.contains("output=AST") ||
        parserGrammarStr.contains("output = AST");
    boolean walkerMakesTrees =
        treeParserGrammarStr.contains("output=AST") ||
        treeParserGrammarStr.contains("output = AST");
    boolean parserMakesTemplates =
        parserGrammarStr.contains("output=template") ||
        parserGrammarStr.contains("output = template");
    return rawExecRecognizer(parserName,
                             treeParserName,
                             lexerName,
                             parserStartRuleName,
                             treeParserStartRuleName,
                             parserMakesTrees,
                             parserMakesTemplates,
                             walkerMakesTrees,
                             debug);
}
/** Run ANTLR on the grammar, then javac on whatever it generated.
 *  Return true if all is well (tool reported no errors and every
 *  compile succeeded).
 */
protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
String grammarStr,
String parserName,
String lexerName,
boolean debug)
{
    boolean allIsWell = antlr(grammarFileName, grammarFileName, grammarStr, debug);
    if ( lexerName==null ) {
        // No separate lexer: only the parser to compile.
        if ( !compile(parserName+".java") ) allIsWell = false;
        return allIsWell;
    }
    // Parser (when present) first, then the lexer -- same order as before.
    if ( parserName!=null && !compile(parserName+".java") ) allIsWell = false;
    if ( !compile(lexerName+".java") ) allIsWell = false;
    return allIsWell;
}
/** Emit the appropriate Test.java harness for the recognizer combination,
 *  compile it, run it over tmpdir/input, and return its stdout. Any stderr
 *  produced by the run is stashed in stderrDuringParse for later inspection.
 */
protected String rawExecRecognizer(String parserName,
String treeParserName,
String lexerName,
String parserStartRuleName,
String treeParserStartRuleName,
boolean parserBuildsTrees,
boolean parserBuildsTemplate,
boolean treeParserBuildsTrees,
boolean debug)
{
this.stderrDuringParse = null;
// Dispatch on recognizer flavor; order matters: tree+tree is the most
// specific case and must be checked before plain tree building.
if ( treeParserBuildsTrees && parserBuildsTrees ) {
writeTreeAndTreeTestFile(parserName,
treeParserName,
lexerName,
parserStartRuleName,
treeParserStartRuleName,
debug);
}
else if ( parserBuildsTrees ) {
writeTreeTestFile(parserName,
treeParserName,
lexerName,
parserStartRuleName,
treeParserStartRuleName,
debug);
}
else if ( parserBuildsTemplate ) {
writeTemplateTestFile(parserName,
lexerName,
parserStartRuleName,
debug);
}
else if ( parserName==null ) {
writeLexerTestFile(lexerName, debug);
}
else {
writeTestFile(parserName,
lexerName,
parserStartRuleName,
debug);
}
compile("Test.java");
try {
String[] args = new String[] {
"java", "-classpath", tmpdir+pathSep+CLASSPATH,
"Test", new File(tmpdir, "input").getAbsolutePath()
};
//String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath();
//System.out.println("execParser: "+cmdLine);
Process process =
Runtime.getRuntime().exec(args, null, new File(tmpdir));
// Drain both streams concurrently so the child never blocks on a full pipe.
StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream());
StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream());
stdoutVacuum.start();
stderrVacuum.start();
process.waitFor();
stdoutVacuum.join();
stderrVacuum.join();
String output = null;
output = stdoutVacuum.toString();
if ( stderrVacuum.toString().length()>0 ) {
// Keep stderr around; tests use getFirstLineOfException() on it.
this.stderrDuringParse = stderrVacuum.toString();
//System.err.println("exec stderrVacuum: "+ stderrVacuum);
}
return output;
}
catch (Exception e) {
System.err.println("can't exec recognizer");
e.printStackTrace(System.err);
}
return null;
}
/** Drains an InputStream on a background thread, accumulating its lines
 *  (newline-normalized to '\n') for retrieval via toString(). Used to keep
 *  child-process pipes from filling while the parent waits.
 */
public static class StreamVacuum implements Runnable {
    StringBuffer buf = new StringBuffer();
    BufferedReader in;
    Thread sucker;
    public StreamVacuum(InputStream in) {
        this.in = new BufferedReader(new InputStreamReader(in));
    }
    /** Spawn the drain thread; pair with join() after the child exits. */
    public void start() {
        sucker = new Thread(this);
        sucker.start();
    }
    public void run() {
        try {
            String line;
            // Read until EOF, appending each line plus a normalized newline.
            while ( (line = in.readLine()) != null ) {
                buf.append(line);
                buf.append('\n');
            }
        }
        catch (IOException ioe) {
            System.err.println("can't read output from process");
        }
    }
    /** wait for the thread to finish */
    public void join() throws InterruptedException {
        sucker.join();
    }
    /** Everything read so far (complete once join() returns). */
    public String toString() {
        return buf.toString();
    }
}
/** Token stream that moves selected token types onto the hidden channel
 *  as they are buffered, so the parser never sees them.
 */
public static class FilteringTokenStream extends CommonTokenStream {
public FilteringTokenStream(TokenSource src) { super(src); }
// Token types to push onto the hidden channel.
Set<Integer> hide = new HashSet<Integer>();
protected void sync(int i) {
super.sync(i);
// After the superclass buffers token i, hide it if its type is registered.
if ( hide.contains(get(i).getType()) ) get(i).setChannel(Token.HIDDEN_CHANNEL);
}
// NOTE(review): the channel parameter is ignored -- registered types always
// go to Token.HIDDEN_CHANNEL regardless of the value passed. Confirm intent.
public void setTokenTypeChannel(int ttype, int channel) {
hide.add(ttype);
}
}
/** Write content to dir/fileName, logging (not throwing) on I/O failure.
 *  The writer is now closed in a finally block; previously a failure in
 *  write() leaked the open FileWriter.
 */
protected void writeFile(String dir, String fileName, String content) {
    try {
        File f = new File(dir, fileName);
        BufferedWriter bw = new BufferedWriter(new FileWriter(f));
        try {
            bw.write(content);
        }
        finally {
            bw.close(); // also closes the wrapped FileWriter
        }
    }
    catch (IOException ioe) {
        System.err.println("can't write file");
        ioe.printStackTrace(System.err);
    }
}
/** Create dir and any missing parents; result deliberately ignored,
 *  matching the original best-effort behavior.
 */
protected void mkdir(String dir) {
    new File(dir).mkdirs();
}
/** Emit tmpdir/Test.java: a plain parser harness that lexes the input file,
 *  runs the start rule, and (in debug mode) attaches a Profiler. The ST
 *  template text below is runtime data and must not be altered.
 */
protected void writeTestFile(String parserName,
String lexerName,
String parserStartRuleName,
boolean debug)
{
ST outputFileST = new ST(
"import org.antlr.runtime.*;\n" +
"import org.antlr.runtime.tree.*;\n" +
"import org.antlr.runtime.debug.*;\n" +
"\n" +
"class Profiler2 extends Profiler {\n" +
"    public void terminate() { ; }\n" +
"}\n"+
"public class Test {\n" +
"    public static void main(String[] args) throws Exception {\n" +
"        CharStream input = new ANTLRFileStream(args[0]);\n" +
"        $lexerName$ lex = new $lexerName$(input);\n" +
"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
"        $createParser$\n"+
"        parser.$parserStartRuleName$();\n" +
"    }\n" +
"}"
);
// Debug builds wire a Profiler into the parser; plain builds do not.
ST createParserST =
new ST(
"        Profiler2 profiler = new Profiler2();\n"+
"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
"        profiler.setParser(parser);\n");
if ( !debug ) {
createParserST =
new ST(
"        $parserName$ parser = new $parserName$(tokens);\n");
}
outputFileST.add("createParser", createParserST);
outputFileST.add("parserName", parserName);
outputFileST.add("lexerName", lexerName);
outputFileST.add("parserStartRuleName", parserStartRuleName);
writeFile(tmpdir, "Test.java", outputFileST.toString());
}
/** Emit tmpdir/Test.java: a lexer-only harness that tokenizes the input
 *  file and prints the token stream. The ST template text is runtime data
 *  and must not be altered.
 */
protected void writeLexerTestFile(String lexerName, boolean debug) {
ST outputFileST = new ST(
"import org.antlr.runtime.*;\n" +
"import org.antlr.runtime.tree.*;\n" +
"import org.antlr.runtime.debug.*;\n" +
"\n" +
"class Profiler2 extends Profiler {\n" +
"    public void terminate() { ; }\n" +
"}\n"+
"public class Test {\n" +
"    public static void main(String[] args) throws Exception {\n" +
"        CharStream input = new ANTLRFileStream(args[0]);\n" +
"        $lexerName$ lex = new $lexerName$(input);\n" +
"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
"        System.out.println(tokens);\n" +
"    }\n" +
"}"
);
outputFileST.add("lexerName", lexerName);
writeFile(tmpdir, "Test.java", outputFileST.toString());
}
/** Emit tmpdir/Test.java: an AST-building parser harness. When a tree
 *  parser start rule is supplied, the template's $if$ branch feeds the
 *  parser's tree into the walker; otherwise it prints the tree. The ST
 *  template text is runtime data and must not be altered.
 */
protected void writeTreeTestFile(String parserName,
String treeParserName,
String lexerName,
String parserStartRuleName,
String treeParserStartRuleName,
boolean debug)
{
ST outputFileST = new ST(
"import org.antlr.runtime.*;\n" +
"import org.antlr.runtime.tree.*;\n" +
"import org.antlr.runtime.debug.*;\n" +
"\n" +
"class Profiler2 extends Profiler {\n" +
"    public void terminate() { ; }\n" +
"}\n"+
"public class Test {\n" +
"    public static void main(String[] args) throws Exception {\n" +
"        CharStream input = new ANTLRFileStream(args[0]);\n" +
"        $lexerName$ lex = new $lexerName$(input);\n" +
"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
"        $createParser$\n"+
"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
"        $if(!treeParserStartRuleName)$\n" +
"        if ( r.tree!=null ) {\n" +
"            System.out.println(((Tree)r.tree).toStringTree());\n" +
"            ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
"        }\n" +
"        $else$\n" +
"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
"        nodes.setTokenStream(tokens);\n" +
"        $treeParserName$ walker = new $treeParserName$(nodes);\n" +
"        walker.$treeParserStartRuleName$();\n" +
"        $endif$\n" +
"    }\n" +
"}"
);
// Debug builds wire a Profiler into the parser; plain builds do not.
ST createParserST =
new ST(
"        Profiler2 profiler = new Profiler2();\n"+
"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
"        profiler.setParser(parser);\n");
if ( !debug ) {
createParserST =
new ST(
"        $parserName$ parser = new $parserName$(tokens);\n");
}
outputFileST.add("createParser", createParserST);
outputFileST.add("parserName", parserName);
outputFileST.add("treeParserName", treeParserName);
outputFileST.add("lexerName", lexerName);
outputFileST.add("parserStartRuleName", parserStartRuleName);
outputFileST.add("treeParserStartRuleName", treeParserStartRuleName);
writeFile(tmpdir, "Test.java", outputFileST.toString());
}
/** Parser creates trees and so does the tree parser: emit tmpdir/Test.java
 *  that runs the parser, walks its tree with the tree parser, and prints
 *  the walker's resulting tree. The ST template text is runtime data and
 *  must not be altered.
 */
protected void writeTreeAndTreeTestFile(String parserName,
String treeParserName,
String lexerName,
String parserStartRuleName,
String treeParserStartRuleName,
boolean debug)
{
ST outputFileST = new ST(
"import org.antlr.runtime.*;\n" +
"import org.antlr.runtime.tree.*;\n" +
"import org.antlr.runtime.debug.*;\n" +
"\n" +
"class Profiler2 extends Profiler {\n" +
"    public void terminate() { ; }\n" +
"}\n"+
"public class Test {\n" +
"    public static void main(String[] args) throws Exception {\n" +
"        CharStream input = new ANTLRFileStream(args[0]);\n" +
"        $lexerName$ lex = new $lexerName$(input);\n" +
"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
"        $createParser$\n"+
"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
"        ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
"        nodes.setTokenStream(tokens);\n" +
"        $treeParserName$ walker = new $treeParserName$(nodes);\n" +
"        $treeParserName$.$treeParserStartRuleName$_return r2 = walker.$treeParserStartRuleName$();\n" +
"        CommonTree rt = ((CommonTree)r2.tree);\n" +
"        if ( rt!=null ) System.out.println(((CommonTree)r2.tree).toStringTree());\n" +
"    }\n" +
"}"
);
// Debug builds wire a Profiler into the parser; plain builds do not.
ST createParserST =
new ST(
"        Profiler2 profiler = new Profiler2();\n"+
"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
"        profiler.setParser(parser);\n");
if ( !debug ) {
createParserST =
new ST(
"        $parserName$ parser = new $parserName$(tokens);\n");
}
outputFileST.add("createParser", createParserST);
outputFileST.add("parserName", parserName);
outputFileST.add("treeParserName", treeParserName);
outputFileST.add("lexerName", lexerName);
outputFileST.add("parserStartRuleName", parserStartRuleName);
outputFileST.add("treeParserStartRuleName", treeParserStartRuleName);
writeFile(tmpdir, "Test.java", outputFileST.toString());
}
/** Emit tmpdir/Test.java: a template-output parser harness that installs a
 *  small inline STGroup and prints the start rule's resulting template (or
 *  the empty string). The ST template text is runtime data and must not be
 *  altered.
 */
protected void writeTemplateTestFile(String parserName,
String lexerName,
String parserStartRuleName,
boolean debug)
{
ST outputFileST = new ST(
"import org.antlr.runtime.*;\n" +
"import org.antlr.stringtemplate.*;\n" +
"import org.antlr.stringtemplate.language.*;\n" +
"import org.antlr.runtime.debug.*;\n" +
"import java.io.*;\n" +
"\n" +
"class Profiler2 extends Profiler {\n" +
"    public void terminate() { ; }\n" +
"}\n"+
"public class Test {\n" +
"    static String templates =\n" +
"    \"group test;\"+" +
"    \"foo(x,y) ::= \\\"<x> <y>\\\"\";\n"+
"    static STGroup group ="+
"    new STGroup(new StringReader(templates)," +
"    AngleBracketTemplateLexer.class);"+
"    public static void main(String[] args) throws Exception {\n" +
"        CharStream input = new ANTLRFileStream(args[0]);\n" +
"        $lexerName$ lex = new $lexerName$(input);\n" +
"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
"        $createParser$\n"+
"        parser.setTemplateLib(group);\n"+
"        $parserName$.$parserStartRuleName$_return r = parser.$parserStartRuleName$();\n" +
"        if ( r.st!=null )\n" +
"            System.out.print(r.st.toString());\n" +
"        else\n" +
"            System.out.print(\"\");\n" +
"    }\n" +
"}"
);
// Debug builds wire a Profiler into the parser; plain builds do not.
ST createParserST =
new ST(
"        Profiler2 profiler = new Profiler2();\n"+
"        $parserName$ parser = new $parserName$(tokens,profiler);\n" +
"        profiler.setParser(parser);\n");
if ( !debug ) {
createParserST =
new ST(
"        $parserName$ parser = new $parserName$(tokens);\n");
}
outputFileST.add("createParser", createParserST);
outputFileST.add("parserName", parserName);
outputFileST.add("lexerName", lexerName);
outputFileST.add("parserStartRuleName", parserStartRuleName);
writeFile(tmpdir, "Test.java", outputFileST.toString());
}
/** Delete every file in tmpdir whose name ends with the given suffix. */
protected void eraseFiles(final String filesEndingWith) {
    File tmpdirF = new File(tmpdir);
    String[] files = tmpdirF.list();
    if ( files==null ) return; // tmpdir missing or unreadable
    for (String name : files) {
        if ( name.endsWith(filesEndingWith) ) {
            new File(tmpdir+"/"+name).delete();
        }
    }
}
/** Delete every file directly inside tmpdir (not recursive). */
protected void eraseFiles() {
    File tmpdirF = new File(tmpdir);
    String[] files = tmpdirF.list();
    if ( files==null ) return; // tmpdir missing or unreadable
    for (String name : files) {
        new File(tmpdir+"/"+name).delete();
    }
}
/** Remove tmpdir and its contents if it exists; no-op otherwise. */
protected void eraseTempDir() {
    File tmpdirF = new File(tmpdir);
    if ( !tmpdirF.exists() ) return;
    eraseFiles();
    tmpdirF.delete();
}
/** Return the first line of the recognizer's stderr with the JVM's
 *  uncaught-exception prefix stripped, or null if nothing was captured.
 *  Previously the prefix length was sliced off unconditionally, which
 *  corrupted the line -- or threw StringIndexOutOfBoundsException -- when
 *  the first stderr line did not carry the prefix.
 */
public String getFirstLineOfException() {
    if ( this.stderrDuringParse ==null ) {
        return null;
    }
    String[] lines = this.stderrDuringParse.split("\n");
    String prefix="Exception in thread \"main\" ";
    if ( lines[0].startsWith(prefix) ) {
        return lines[0].substring(prefix.length());
    }
    return lines[0];
}
/** Split s on '\n', sort the lines lexicographically, and re-join them
 *  with a trailing newline after each line (same output as the original,
 *  without the intermediate List wrapper).
 */
public String sortLinesInString(String s) {
    String[] sorted = s.split("\n");
    Arrays.sort(sorted);
    StringBuffer out = new StringBuffer();
    for (int i = 0; i < sorted.length; i++) {
        out.append(sorted[i]);
        out.append('\n');
    }
    return out.toString();
}
/**
 * When looking at a result set that consists of a Map/HashTable
 * we cannot rely on the output order, as the hashing algorithm or other aspects
 * of the implementation may be different on different JDKs or platforms. Hence
 * we take the Map, rebuild it as a TreeMap (sorted by key) and Stringify that,
 * which is a bit of a hack, but guarantees the same order on all systems.
 * We assume that the keys are strings (or at least mutually comparable).
 *
 * @param m The Map that contains keys we wish to return in sorted order
 * @return A string that represents all the keys in sorted order, or null for null input.
 */
public String sortMapToString(Map m) {
    // Pass in crap, and get nothing back
    if (m == null) {
        return null;
    }
    // TreeMap orders entries by key, giving a platform-independent rendering.
    // (Removed the two leftover debug System.out.println calls that polluted
    // every test run's stdout.)
    TreeMap nset = new TreeMap(m);
    return nset.toString();
}
// Override every JUnit assertion so a failure marks lastTestFailed before
// rethrowing; tearDown() keeps tmpdir for post-mortem inspection when set.
// One-line bodies kept deliberately uniform across all thirteen overloads.
public void assertEquals(String msg, Object a, Object b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertEquals(Object a, Object b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertEquals(String msg, long a, long b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertEquals(long a, long b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertTrue(String msg, boolean b) { try {Assert.assertTrue(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertTrue(boolean b) { try {Assert.assertTrue(b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertFalse(String msg, boolean b) { try {Assert.assertFalse(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertFalse(boolean b) { try {Assert.assertFalse(b);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertNotNull(String msg, Object p) { try {Assert.assertNotNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertNotNull(Object p) { try {Assert.assertNotNull(p);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertNull(String msg, Object p) { try {Assert.assertNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} }
public void assertNull(Object p) { try {Assert.assertNull(p);} catch (Error e) {lastTestFailed=true; throw e;} }
}

View File

@ -0,0 +1,68 @@
/*
[The "BSD license"]
Copyright (c) 2005-2009 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.tool.ANTLRErrorListener;
import org.antlr.tool.Message;
import org.antlr.tool.ToolMessage;
import java.util.List;
import java.util.LinkedList;
/** Error listener that simply queues everything the ANTLR tool reports,
 *  letting tests inspect infos/errors/warnings after a run. Fields are
 *  read directly by the test base class, so their names and (raw) types
 *  are part of the interface.
 */
public class ErrorQueue implements ANTLRErrorListener {
    List infos = new LinkedList();
    List errors = new LinkedList();
    List warnings = new LinkedList();

    public void info(String msg) {
        infos.add(msg);
    }

    public void error(Message msg) {
        errors.add(msg);
    }

    public void warning(Message msg) {
        warnings.add(msg);
    }

    public void error(ToolMessage msg) {
        errors.add(msg);
    }

    /** Total number of messages of all three kinds. */
    public int size() {
        return infos.size() + errors.size() + warnings.size();
    }

    public String toString() {
        // Separate the sections; previously they ran together with no
        // delimiter ("infos: [..]errors: [..]warnings: [..]").
        return "infos: "+infos+
               "; errors: "+errors+
               "; warnings: "+warnings;
    }
}

View File

@ -0,0 +1,15 @@
package org.antlr.v4.test;
import org.antlr.gunit.gUnitBaseTest;
/** gUnit harness binding: points the base test at the v4 ANTLR grammar's
 *  generated lexer/parser classes and the package path of the .gunit files.
 */
public class TestANTLR extends gUnitBaseTest {
public void setUp() {
this.packagePath = "./org/antlr/v4/test";
this.lexerPath = "org.antlr.v4.test.ANTLRLexer";
this.parserPath = "org.antlr.v4.test.ANTLRParser";
}
}

View File

@ -0,0 +1,217 @@
/** Test ANTLRParser's AST construction. Translate to junit tests with:
*
* $ java org.antlr.gunit.v4.Gen TestASTStructure.gunit
*/
gunit TestASTStructure;
@header {package org.antlr.v4.test;}
options {
adaptor = org.antlr.v4.parse.GrammarASTAdaptor;
parser = org.antlr.v4.parse.ANTLRParser;
lexer = org.antlr.v4.parse.ANTLRLexer;
}
grammarSpec:
"parser grammar P; a : A;"
-> (PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))
<<
parser grammar P;
options {k=2; output=AST;}
scope S {int x}
tokens { A; B='33'; }
@header {foo}
a : A;
>>
->
(PARSER_GRAMMAR P
(OPTIONS (= k 2) (= output AST))
(scope S {int x})
(tokens { A (= B '33'))
(@ header {foo})
(RULES (RULE a (BLOCK (ALT A)))))
<<
parser grammar P;
@header {foo}
tokens { A; B='33'; }
options {k=2; ASTLabel=a.b.c; output=AST;}
scope S {int x}
a : A;
>>
->
(PARSER_GRAMMAR P
(@ header {foo})
(tokens { A (= B '33'))
(OPTIONS (= k 2) (= ASTLabel a.b.c) (= output AST))
(scope S {int x})
(RULES (RULE a (BLOCK (ALT A)))))
<<
parser grammar P;
import A=B, C;
a : A;
>>
->
(PARSER_GRAMMAR P
(import (= A B) C)
(RULES (RULE a (BLOCK (ALT A)))))
delegateGrammars:
"import A;" -> (import A)
rule:
"a : A<X,Y=a.b.c>;" ->
(RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c))))))
"A : B+;" -> (RULE A (BLOCK (ALT (+ (BLOCK (ALT B))))))
<<
public a[int i] returns [int y]
options {backtrack=true;}
scope {int ss;}
scope S,T;
@init {blort}
: ID ;
>>
->
(RULE a
(RULEMODIFIERS public)
int i
(returns int y)
(OPTIONS (= backtrack true))
(scope {int ss;})
(scope S T)
(@ init {blort})
(BLOCK (ALT ID)))
<<
a[int i] returns [int y]
@init {blort}
scope {int ss;}
options {backtrack=true;}
scope S,T;
: ID;
>>
->
(RULE a int i
(returns int y)
(@ init {blort})
(scope {int ss;})
(OPTIONS (= backtrack true))
(scope S T)
(BLOCK (ALT ID)))
<<
a : ID ;
catch[A b] {foo}
finally {bar}
>>
->
(RULE a (BLOCK (ALT ID))
(catch A b {foo}) (finally {bar}))
<<
a : ID ;
catch[A a] {foo}
catch[B b] {fu}
finally {bar}
>>
->
(RULE a (BLOCK (ALT ID))
(catch A a {foo}) (catch B b {fu}) (finally {bar}))
block:
"( ^(A B) | ^(b C) )" -> (BLOCK (ALT ("^(" A B)) (ALT ("^(" b C)))
alternative:
"x+=ID* -> $x*" ->
(ALT_REWRITE
(ALT (* (BLOCK (ALT (+= x ID)))))
(-> (ALT (* (BLOCK (ALT x))))))
"A -> ..." -> (ALT_REWRITE (ALT A) (-> ...))
"A -> " -> (ALT_REWRITE (ALT A) (-> EPSILON))
"A -> foo(a={x}, b={y})" ->
(ALT_REWRITE
(ALT A)
(-> (TEMPLATE foo (ARGLIST (= a {x}) (= b {y})))))
"A -> template(a={x}, b={y}) <<ick>>" ->
(ALT_REWRITE
(ALT A)
(-> (TEMPLATE (ARGLIST (= a {x}) (= b {y})) <<ick>>)))
"A -> ({name})()" -> (ALT_REWRITE (ALT A) (-> (TEMPLATE {name})))
"A -> {expr}" -> (ALT_REWRITE (ALT A) (-> {expr}))
<<
A -> {p1}? {e1}
-> {e2}
->
>>
->
(ALT_REWRITE
(ALT A)
(-> {p1}? {e1})
(-> {e2}))
"A -> A" -> (ALT_REWRITE (ALT A) (-> (ALT A)))
"a -> a" -> (ALT_REWRITE (ALT a) (-> (ALT a)))
"a A X? Y* -> A a ^(TOP X)? Y*" ->
(ALT_REWRITE
(ALT a A (? (BLOCK (ALT X))) (* (BLOCK (ALT Y))))
(-> (ALT
A a
(? (BLOCK (ALT ("^(" TOP X))))
(* (BLOCK (ALT Y))))))
"A -> A[33]" -> (ALT_REWRITE (ALT A) (-> (ALT (A 33))))
"A -> 'int' ^(A A)*" ->
(ALT_REWRITE
(ALT A)
(-> (ALT 'int' (* (BLOCK (ALT ("^(" A A)))))))
<<
A -> {p1}? A
-> {p2}? B
->
>>
->
(ALT_REWRITE (ALT A)
(-> {p1}? (ALT A))
(-> {p2}? (ALT B))
(-> EPSILON))
element:
"b+" -> (+ (BLOCK (ALT b)))
"(b)+" -> (+ (BLOCK (ALT b)))
"b?" -> (? (BLOCK (ALT b)))
"(b)?" -> (? (BLOCK (ALT b)))
"(b)*" -> (* (BLOCK (ALT b)))
"b*" -> (* (BLOCK (ALT b)))
"'while'*" -> (* (BLOCK (ALT 'while')))
"'a'+" -> (+ (BLOCK (ALT 'a')))
"a[3]" -> (a 3)
"'a'..'z'+" -> (+ (BLOCK (ALT (.. 'a' 'z'))))
"x=ID" -> (= x ID)
"x=ID?" -> (? (BLOCK (ALT (= x ID))))
"x=ID*" -> (* (BLOCK (ALT (= x ID))))
"x=b" -> (= x b)
"x=(A|B)" -> (= x (BLOCK (ALT A) (ALT B)))
"x=~(A|B)" -> (= x (~ (BLOCK (ALT A) (ALT B))))
"x+=~(A|B)" -> (+= x (~ (BLOCK (ALT A) (ALT B))))
"x+=~(A|B)+"-> (+ (BLOCK (ALT (+= x (~ (BLOCK (ALT A) (ALT B)))))))
"x=b+" -> (+ (BLOCK (ALT (= x b))))
"x+=ID*" -> (* (BLOCK (ALT (+= x ID))))
"x+='int'*" -> (* (BLOCK (ALT (+= x 'int'))))
"x+=b+" -> (+ (BLOCK (ALT (+= x b))))
"('*'^)*" -> (* (BLOCK (ALT (^ '*'))))
"({blort} 'x')*" -> (* (BLOCK (ALT {blort} 'x')))
"A!" -> (! A)
"A^" -> (^ A)
"x=A^" -> (= x (^ A))

View File

@ -0,0 +1,427 @@
package org.antlr.v4.test;
import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;
import org.junit.Test;
import org.junit.Before;
import static org.junit.Assert.*;
public class TestASTStructure extends org.antlr.v4.gunit.jUnitBaseTest {
@Before public void setup() {
lexerClassName = "org.antlr.v4.parse.ANTLRLexer";
parserClassName = "org.antlr.v4.parse.ANTLRParser";
adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor"; }
@Test public void test_grammarSpec1() throws Exception {
// gunit test on line 21
RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 21);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))";
assertEquals("testing rule grammarSpec", expecting, actual);
}
@Test public void test_grammarSpec2() throws Exception {
// gunit test on line 24
RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n options {k=2; output=AST;}\n scope S {int x}\n tokens { A; B='33'; }\n @header {foo}\n a : A;\n ", 24);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(PARSER_GRAMMAR P (OPTIONS (= k 2) (= output AST)) (scope S {int x}) (tokens { A (= B '33')) (@ header {foo}) (RULES (RULE a (BLOCK (ALT A)))))";
assertEquals("testing rule grammarSpec", expecting, actual);
}
@Test public void test_grammarSpec3() throws Exception {
// gunit test on line 40
RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n @header {foo}\n tokens { A; B='33'; }\n options {k=2; ASTLabel=a.b.c; output=AST;}\n scope S {int x}\n a : A;\n ", 40);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(PARSER_GRAMMAR P (@ header {foo}) (tokens { A (= B '33')) (OPTIONS (= k 2) (= ASTLabel a.b.c) (= output AST)) (scope S {int x}) (RULES (RULE a (BLOCK (ALT A)))))";
assertEquals("testing rule grammarSpec", expecting, actual);
}
@Test public void test_grammarSpec4() throws Exception {
// gunit test on line 56
RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n import A=B, C;\n a : A;\n ", 56);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(PARSER_GRAMMAR P (import (= A B) C) (RULES (RULE a (BLOCK (ALT A)))))";
assertEquals("testing rule grammarSpec", expecting, actual);
} @Test public void test_delegateGrammars1() throws Exception {
// gunit test on line 67
RuleReturnScope rstruct = (RuleReturnScope)execParser("delegateGrammars", "import A;", 67);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(import A)";
assertEquals("testing rule delegateGrammars", expecting, actual);
} @Test public void test_rule1() throws Exception {
// gunit test on line 70
RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "a : A<X,Y=a.b.c>;", 70);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c))))))";
assertEquals("testing rule rule", expecting, actual);
}
@Test public void test_rule2() throws Exception {
// gunit test on line 72
RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "A : B+;", 72);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(RULE A (BLOCK (ALT (+ (BLOCK (ALT B))))))";
assertEquals("testing rule rule", expecting, actual);
}
@Test public void test_rule3() throws Exception {
// gunit test on line 74
RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n public a[int i] returns [int y]\n options {backtrack=true;}\n scope {int ss;}\n scope S,T;\n @init {blort}\n : ID ;\n ", 74);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(RULE a (RULEMODIFIERS public) int i (returns int y) (OPTIONS (= backtrack true)) (scope {int ss;}) (scope S T) (@ init {blort}) (BLOCK (ALT ID)))";
assertEquals("testing rule rule", expecting, actual);
}
@Test public void test_rule4() throws Exception {
// gunit test on line 93
RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n scope {int ss;}\n options {backtrack=true;}\n scope S,T;\n : ID;\n ", 93);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (scope {int ss;}) (OPTIONS (= backtrack true)) (scope S T) (BLOCK (ALT ID)))";
assertEquals("testing rule rule", expecting, actual);
}
@Test public void test_rule5() throws Exception {
// gunit test on line 110
RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A b] {foo}\n finally {bar}\n ", 110);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A b {foo}) (finally {bar}))";
assertEquals("testing rule rule", expecting, actual);
}
@Test public void test_rule6() throws Exception {
// gunit test on line 119
RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A a] {foo}\n catch[B b] {fu}\n finally {bar}\n ", 119);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A a {foo}) (catch B b {fu}) (finally {bar}))";
assertEquals("testing rule rule", expecting, actual);
} @Test public void test_block1() throws Exception {
// gunit test on line 130
RuleReturnScope rstruct = (RuleReturnScope)execParser("block", "( ^(A B) | ^(b C) )", 130);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(BLOCK (ALT (^( A B)) (ALT (^( b C)))";
assertEquals("testing rule block", expecting, actual);
} @Test public void test_alternative1() throws Exception {
// gunit test on line 133
RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "x+=ID* -> $x*", 133);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(ALT_REWRITE (ALT (* (BLOCK (ALT (+= x ID))))) (-> (ALT (* (BLOCK (ALT x))))))";
assertEquals("testing rule alternative", expecting, actual);
}
@Test public void test_alternative2() throws Exception {
// gunit test on line 138
RuleReturnScope rstruct = (RuleReturnScope)execParser("alternative", "A -> ...", 138);
Object actual = ((Tree)rstruct.getTree()).toStringTree();
Object expecting = "(ALT_REWRITE (ALT A) (-> ...))";
assertEquals("testing rule alternative", expecting, actual);
}
@Test public void test_alternative3() throws Exception {
    // gunit test on line 139: empty rewrite becomes an EPSILON node.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> ", 139);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> EPSILON))", actualTree);
}
@Test public void test_alternative4() throws Exception {
    // gunit test on line 141: template rewrite with named arguments.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> foo(a={x}, b={y})", 141);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> (TEMPLATE foo (ARGLIST (= a {x}) (= b {y})))))", actualTree);
}
@Test public void test_alternative5() throws Exception {
    // gunit test on line 146: inline template body (<<...>>) with arguments.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> template(a={x}, b={y}) <<ick>>", 146);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> (TEMPLATE (ARGLIST (= a {x}) (= b {y})) <<ick>>)))", actualTree);
}
@Test public void test_alternative6() throws Exception {
    // gunit test on line 151: indirect template reference ({name}) with empty args.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> ({name})()", 151);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> (TEMPLATE {name})))", actualTree);
}
@Test public void test_alternative7() throws Exception {
    // gunit test on line 153: action rewrite ({expr}).
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> {expr}", 153);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> {expr}))", actualTree);
}
@Test public void test_alternative8() throws Exception {
    // gunit test on line 155: chained predicated action rewrites.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "\n A -> {p1}? {e1}\n -> {e2}\n ->\n ", 155);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> {p1}? {e1}) (-> {e2}))", actualTree);
}
@Test public void test_alternative9() throws Exception {
    // gunit test on line 166: single-token rewrite to the same token.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> A", 166);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> (ALT A)))", actualTree);
}
@Test public void test_alternative10() throws Exception {
    // gunit test on line 168: single rule-ref rewrite to the same rule ref.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "a -> a", 168);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT a) (-> (ALT a)))", actualTree);
}
@Test public void test_alternative11() throws Exception {
    // gunit test on line 170: mixed optional/star elements reordered in the rewrite.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "a A X? Y* -> A a ^(TOP X)? Y*", 170);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT a A (? (BLOCK (ALT X))) (* (BLOCK (ALT Y)))) (-> (ALT A a (? (BLOCK (ALT (^( TOP X)))) (* (BLOCK (ALT Y))))))", actualTree);
}
@Test public void test_alternative12() throws Exception {
    // gunit test on line 178: token rewrite with an argument (A[33]).
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> A[33]", 178);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> (ALT (A 33))))", actualTree);
}
@Test public void test_alternative13() throws Exception {
    // gunit test on line 180: string literal plus starred tree pattern in the rewrite.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "A -> 'int' ^(A A)*", 180);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> (ALT 'int' (* (BLOCK (ALT (^( A A)))))))", actualTree);
}
@Test public void test_alternative14() throws Exception {
    // gunit test on line 185: predicated rewrites ending with an empty (EPSILON) default.
    RuleReturnScope ret = (RuleReturnScope)execParser("alternative", "\n A -> {p1}? A\n -> {p2}? B\n ->\n ", 185);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule alternative", "(ALT_REWRITE (ALT A) (-> {p1}? (ALT A)) (-> {p2}? (ALT B)) (-> EPSILON))", actualTree);
}

@Test public void test_element1() throws Exception {
    // gunit test on line 197: bare plus closure on a rule reference.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "b+", 197);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT b)))", actualTree);
}
@Test public void test_element2() throws Exception {
    // gunit test on line 198: parenthesized plus closure collapses to the same AST as "b+".
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "(b)+", 198);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT b)))", actualTree);
}
@Test public void test_element3() throws Exception {
    // gunit test on line 199: optional suffix on a rule reference.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "b?", 199);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(? (BLOCK (ALT b)))", actualTree);
}
@Test public void test_element4() throws Exception {
    // gunit test on line 200: parenthesized optional collapses to the same AST as "b?".
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "(b)?", 200);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(? (BLOCK (ALT b)))", actualTree);
}
@Test public void test_element5() throws Exception {
    // gunit test on line 201: parenthesized star closure.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "(b)*", 201);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT b)))", actualTree);
}
@Test public void test_element6() throws Exception {
    // gunit test on line 202: bare star closure collapses to the same AST as "(b)*".
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "b*", 202);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT b)))", actualTree);
}
@Test public void test_element7() throws Exception {
    // gunit test on line 203: star closure on a string literal.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "'while'*", 203);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT 'while')))", actualTree);
}
@Test public void test_element8() throws Exception {
    // gunit test on line 204: plus closure on a character literal.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "'a'+", 204);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT 'a')))", actualTree);
}
@Test public void test_element9() throws Exception {
    // gunit test on line 205: rule reference with an argument.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "a[3]", 205);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(a 3)", actualTree);
}
@Test public void test_element10() throws Exception {
    // gunit test on line 206: plus closure applied to a character range.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "'a'..'z'+", 206);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT (.. 'a' 'z'))))", actualTree);
}
@Test public void test_element11() throws Exception {
    // gunit test on line 207: simple label assignment on a token.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=ID", 207);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(= x ID)", actualTree);
}
@Test public void test_element12() throws Exception {
    // gunit test on line 208: optional suffix binds outside the label assignment.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=ID?", 208);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(? (BLOCK (ALT (= x ID))))", actualTree);
}
@Test public void test_element13() throws Exception {
    // gunit test on line 209: star closure binds outside the label assignment.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=ID*", 209);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT (= x ID))))", actualTree);
}
@Test public void test_element14() throws Exception {
    // gunit test on line 210: label assignment on a rule reference.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=b", 210);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(= x b)", actualTree);
}
@Test public void test_element15() throws Exception {
    // gunit test on line 211: label assignment on a token set.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=(A|B)", 211);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(= x (BLOCK (ALT A) (ALT B)))", actualTree);
}
@Test public void test_element16() throws Exception {
    // gunit test on line 212: label assignment on a negated token set.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=~(A|B)", 212);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(= x (~ (BLOCK (ALT A) (ALT B))))", actualTree);
}
@Test public void test_element17() throws Exception {
    // gunit test on line 213: list-label (+=) assignment on a negated token set.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x+=~(A|B)", 213);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+= x (~ (BLOCK (ALT A) (ALT B))))", actualTree);
}
@Test public void test_element18() throws Exception {
    // gunit test on line 214: plus closure binds outside the list-label negated set.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x+=~(A|B)+", 214);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT (+= x (~ (BLOCK (ALT A) (ALT B)))))))", actualTree);
}
@Test public void test_element19() throws Exception {
    // gunit test on line 215: plus closure binds outside a labeled rule reference.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=b+", 215);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT (= x b))))", actualTree);
}
@Test public void test_element20() throws Exception {
    // gunit test on line 216: star closure binds outside a list-labeled token.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x+=ID*", 216);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT (+= x ID))))", actualTree);
}
@Test public void test_element21() throws Exception {
    // gunit test on line 217: star closure binds outside a list-labeled string literal.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x+='int'*", 217);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT (+= x 'int'))))", actualTree);
}
@Test public void test_element22() throws Exception {
    // gunit test on line 218: plus closure binds outside a list-labeled rule reference.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x+=b+", 218);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(+ (BLOCK (ALT (+= x b))))", actualTree);
}
@Test public void test_element23() throws Exception {
    // gunit test on line 219: root operator (^) inside a starred subrule.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "('*'^)*", 219);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT (^ '*'))))", actualTree);
}
@Test public void test_element24() throws Exception {
    // gunit test on line 220: action plus literal inside a starred subrule.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "({blort} 'x')*", 220);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(* (BLOCK (ALT {blort} 'x')))", actualTree);
}
@Test public void test_element25() throws Exception {
    // gunit test on line 221: exclude-from-tree operator (!) on a token.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "A!", 221);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(! A)", actualTree);
}
@Test public void test_element26() throws Exception {
    // gunit test on line 222: root operator (^) on a token.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "A^", 222);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(^ A)", actualTree);
}
@Test public void test_element27() throws Exception {
    // gunit test on line 223: label assignment wraps a rooted token.
    RuleReturnScope ret = (RuleReturnScope)execParser("element", "x=A^", 223);
    String actualTree = ((Tree)ret.getTree()).toStringTree();
    assertEquals("testing rule element", "(= x (^ A))", actualTree);
}
}