Merge pull request #1130 from gagern/javadoc

Fix Javadoc errors
Terence Parr 2016-03-28 22:30:55 +00:00
commit 4bc7083523
21 changed files with 66 additions and 85 deletions

View File

@ -89,3 +89,4 @@ YYYY/MM/DD, github id, Full name, email
2015/12/23, pboyer, Peter Boyer, peter.b.boyer@gmail.com
2015/12/24, dtymon, David Tymon, david.tymon@gmail.com
2016/03/27, beardlybread, Bradley Steinbacher, bradley.j.steinbacher@gmail.com
2016/03/28, gagern, Martin von Gagern, gagern@ma.tum.de

View File

@ -100,7 +100,7 @@ public class AnalysisPipeline {
}
}
/** Return whether lookahead sets are disjoint; no lookahead => not disjoint */
/** Return whether lookahead sets are disjoint; no lookahead =&gt; not disjoint */
public static boolean disjoint(IntervalSet[] altLook) {
boolean collision = false;
IntervalSet combined = new IntervalSet();
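
To illustrate the contract documented above, here is a minimal, self-contained sketch of such a disjointness check. It assumes the runtime's IntervalSet with and(), isNil() and addAll(); the actual body of disjoint() in this file may differ in detail.

import org.antlr.v4.runtime.misc.IntervalSet;

public class DisjointSketch {
    // Alternatives are disjoint when no alt's lookahead set intersects the
    // union of the sets seen so far; a null set ("no lookahead") means not disjoint.
    public static boolean disjoint(IntervalSet[] altLook) {
        if ( altLook==null ) return false;
        IntervalSet combined = new IntervalSet();
        for (IntervalSet look : altLook) {
            if ( look==null ) return false;                  // no lookahead => not disjoint
            if ( !look.and(combined).isNil() ) return false; // overlaps an earlier alt
            combined.addAll(look);
        }
        return true;
    }
}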

View File

@ -330,7 +330,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
return lrlabel;
}
/** Strip last 2 tokens if -> label; alter indexes in altAST */
/** Strip last 2 tokens if -&gt; label; alter indexes in altAST */
public void stripAltLabel(GrammarAST altAST) {
int start = altAST.getTokenStartIndex();
int stop = altAST.getTokenStopIndex();

View File

@ -244,9 +244,9 @@ public class LeftRecursiveRuleTransformer {
* (ALT ID))
* (* (BLOCK
* (OPTIONS ...)
* (ALT {7 >= $_p}? '*' (= b e) {$v = $a.v * $b.v;})
* (ALT {6 >= $_p}? '+' (= b e) {$v = $a.v + $b.v;})
* (ALT {3 >= $_p}? '++') (ALT {2 >= $_p}? '--'))))))
* (ALT {7 >= $_p}? '*' (= b e) {$v = $a.v * $b.v;})
* (ALT {6 >= $_p}? '+' (= b e) {$v = $a.v + $b.v;})
* (ALT {3 >= $_p}? '++') (ALT {2 >= $_p}? '--'))))))
* </pre>
*/
public void setAltASTPointers(LeftRecursiveRule r, RuleAST t) {

View File

@ -93,7 +93,7 @@ public interface ATNFactory {
/** For a non-lexer, just build a simple token reference atom.
* For a lexer, a string is a sequence of char to match. That is,
* "fog" is treated as 'f' 'o' 'g' not as a single transition in
* the DFA. Machine== o-'f'->o-'o'->o-'g'->o and has n+1 states
* the DFA. Machine== o-'f'-&gt;o-'o'-&gt;o-'g'-&gt;o and has n+1 states
* for n characters.
*/
@ -101,16 +101,16 @@ public interface ATNFactory {
/** For reference to rule r, build
*
* o-e->(r) o
* o-e-&gt;(r) o
*
* where (r) is the start of rule r and the trailing o is not linked
* to from rule ref state directly (it's done thru the transition(0)
* RuleClosureTransition.
*
* If the rule r is just a list of tokens, it's block will be just
* a set on an edge o->o->o-set->o->o->o, could inline it rather than doing
* a set on an edge o-&gt;o-&gt;o-set-&gt;o-&gt;o-&gt;o, could inline it rather than doing
* the rule reference, but i'm not doing this yet as I'm not sure
* it would help much in the ATN->DFA construction.
* it would help much in the ATN-&gt;DFA construction.
*
* TODO add to codegen: collapse alt blks that are sets into single matchSet
* @param node
@ -118,7 +118,7 @@ public interface ATNFactory {
Handle ruleRef(GrammarAST node);
/** From an empty alternative build Grip o-e->o */
/** From an empty alternative build Grip o-e-&gt;o */
Handle epsilon(GrammarAST node);
@ -143,13 +143,13 @@ public interface ATNFactory {
/** From A|B|..|Z alternative block build
*
* o->o-A->o->o (last ATNState is blockEndATNState pointed to by all alts)
* o-&gt;o-A-&gt;o-&gt;o (last ATNState is blockEndATNState pointed to by all alts)
* | ^
* o->o-B->o--|
* o-&gt;o-B-&gt;o--|
* | |
* ... |
* | |
* o->o-Z->o--|
* o-&gt;o-Z-&gt;o--|
*
* So every alternative gets begin ATNState connected by epsilon
* and every alt right side points at a block end ATNState. There is a
@ -160,7 +160,7 @@ public interface ATNFactory {
* begin/end.
*
* Special case: if just a list of tokens/chars/sets, then collapse
* to a single edge'd o-set->o graph.
* to a single edge'd o-set-&gt;o graph.
*
* Set alt number (1..n) in the left-Transition ATNState.
*/
@ -171,9 +171,9 @@ public interface ATNFactory {
/** From (A)? build either:
*
* o--A->o
* o--A-&gt;o
* | ^
* o---->|
* o----&gt;|
*
* or, if A is a block, just add an empty alt to the end of the block
*/
@ -184,7 +184,7 @@ public interface ATNFactory {
*
* |---| (Transition 2 from A.right points at alt 1)
* v | (follow of loop is Transition 1)
* o->o-A-o->o
* o-&gt;o-A-o-&gt;o
*
* Meaning that the last ATNState in A points back to A's left Transition ATNState
* and we add a new begin/end ATNState. A can be single alternative or
@ -200,7 +200,7 @@ public interface ATNFactory {
*
* |---|
* v |
* o->o-A-o--o (Transition 2 from block end points at alt 1; follow is Transition 1)
* o-&gt;o-A-o--o (Transition 2 from block end points at alt 1; follow is Transition 1)
* | ^
* o---------| (optional branch is 2nd alt of optional block containing A+)
*

View File

@ -324,7 +324,7 @@ public class LexerATNFactory extends ParserATNFactory {
/** For a lexer, a string is a sequence of char to match. That is,
* "fog" is treated as 'f' 'o' 'g' not as a single transition in
* the DFA. Machine== o-'f'->o-'o'->o-'g'->o and has n+1 states
* the DFA. Machine== o-'f'-&gt;o-'o'-&gt;o-'g'-&gt;o and has n+1 states
* for n characters.
*/
@Override

View File

@ -286,7 +286,7 @@ public class ParserATNFactory implements ATNFactory {
* For reference to rule {@code r}, build
*
* <pre>
* o->(r) o
* o-&gt;(r) o
* </pre>
*
* where {@code (r)} is the start of rule {@code r} and the trailing
@ -391,24 +391,24 @@ public class ParserATNFactory implements ATNFactory {
* From {@code A|B|..|Z} alternative block build
*
* <pre>
* o->o-A->o->o (last ATNState is BlockEndState pointed to by all alts)
* o-&gt;o-A-&gt;o-&gt;o (last ATNState is BlockEndState pointed to by all alts)
* | ^
* |->o-B->o--|
* |-&gt;o-B-&gt;o--|
* | |
* ... |
* | |
* |->o-Z->o--|
* |-&gt;o-Z-&gt;o--|
* </pre>
*
* So start node points at every alternative with epsilon transition and
* every alt right side points at a block end ATNState.
* <p/>
* <p>
* Special case: only one alternative: don't make a block with alt
* begin/end.
* <p/>
* <p>
* Special case: if just a list of tokens/chars/sets, then collapse to a
* single edged o-set->o graph.
* <p/>
* single edged o-set-&gt;o graph.
* <p>
* TODO: Set alt number (1..n) in the states?
*/
@ -506,9 +506,9 @@ public class ParserATNFactory implements ATNFactory {
* From {@code (A)?} build either:
*
* <pre>
* o--A->o
* o--A-&gt;o
* | ^
* o---->|
* o----&gt;|
* </pre>
*
* or, if {@code A} is a block, just add an empty alt to the end of the
@ -535,7 +535,7 @@ public class ParserATNFactory implements ATNFactory {
* <pre>
* |---------|
* v |
* [o-blk-o]->o->o
* [o-blk-o]-&gt;o-&gt;o
* </pre>
*
* We add a decision for loop back node to the existing one at {@code blk}
@ -583,7 +583,7 @@ public class ParserATNFactory implements ATNFactory {
* <pre>
* |-------------|
* v |
* o--[o-blk-o]->o o
* o--[o-blk-o]-&gt;o o
* | ^
* -----------------|
* </pre>

View File

@ -53,7 +53,7 @@ import java.util.Set;
* We identify those nested objects by the list of arguments in the template
* definition. For example, here is the definition of the parser template:
*
* Parser(parser, scopes, funcs) ::= <<...>>
* Parser(parser, scopes, funcs) ::= &lt;&lt;...&gt;&gt;
*
* The first template argument is always the output model object from which
* this walker will create the template. Any other arguments identify

View File

@ -56,7 +56,7 @@ public abstract class Target {
* predicates and such that may refer to chars that need to be escaped
* when represented as strings. Also, templates need to be escaped so
* that the target language can hold them as a string.
* <p/>
* <p>
* I have defined (via the constructor) the set of typical escapes,
* but your {@link Target} subclass is free to alter the translated chars
* or add more definitions. This is non-static so each target can have
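
The "typical escapes" mentioned above can be pictured as a small per-character lookup table built in the constructor. The sketch below is only an illustration of that idea; the field name and exact entries are assumptions rather than a copy of Target's real code.

public class EscapeTableSketch {
    // Perfect hash on the char value: index by character, store its escaped form.
    protected String[] targetCharValueEscape = new String[255];

    public EscapeTableSketch() {
        targetCharValueEscape['\n'] = "\\n";
        targetCharValueEscape['\r'] = "\\r";
        targetCharValueEscape['\t'] = "\\t";
        targetCharValueEscape['\b'] = "\\b";
        targetCharValueEscape['\f'] = "\\f";
        targetCharValueEscape['\\'] = "\\\\";
        targetCharValueEscape['\''] = "\\'";
        targetCharValueEscape['"']  = "\\\"";
    }
}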

View File

@ -38,7 +38,7 @@ import org.antlr.v4.tool.Rule;
import java.util.ArrayList;
/** A StructDecl to handle a -> label on alt */
/** A StructDecl to handle a -&gt; label on alt */
public class AltLabelStructDecl extends StructDecl {
public int altNum;
public AltLabelStructDecl(OutputModelFactory factory, Rule r,

View File

@ -93,11 +93,11 @@ public class JavaScriptTarget extends Target {
/**
* {@inheritDoc}
* <p/>
* <p>
* For Java, this is the translation {@code 'a\n"'} &rarr; {@code "a\n\""}.
* Expect single quotes around the incoming literal. Just flip the quotes
* and replace double quotes with {@code \"}.
* <p/>
* <p>
* Note that we have decided to allow people to use '\"' without penalty, so
* we must build the target string in a loop as {@link String#replace}
* cannot handle both {@code \"} and {@code "} without a lot of messing
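
The translation described above (flip the surrounding quotes, escape bare double quotes, copy existing backslash escapes through) is easy to picture with a small standalone loop. This is an illustration of the idea, not the actual Target/JavaScriptTarget implementation.

public class FlipQuotesSketch {
    // 'a\n"'  becomes  "a\n\"" : surrounding quotes flipped, bare " escaped,
    // existing backslash escapes copied through unchanged.
    public static String flipQuotes(String singleQuoted) {
        StringBuilder sb = new StringBuilder("\"");
        for (int i = 1; i < singleQuoted.length()-1; i++) { // skip the surrounding ' '
            char c = singleQuoted.charAt(i);
            if ( c=='\\' ) sb.append(c).append(singleQuoted.charAt(++i));
            else if ( c=='"' ) sb.append("\\\"");
            else sb.append(c);
        }
        return sb.append('"').toString();
    }

    public static void main(String[] args) {
        System.out.println(flipQuotes("'a\\n\"'")); // prints "a\n\""
    }
}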

View File

@ -81,7 +81,7 @@ public class Graph<T> {
* For sorting, I'm not following convention here since ANTLR
* needs the opposite. Here's what I assume for sorting:
*
* If there exists an edge u -> v then u depends on v and v
* If there exists an edge u -&gt; v then u depends on v and v
* must happen before u.
*
* So if this gives nonreversed postorder traversal, I get the order
@ -114,4 +114,4 @@ public class Graph<T> {
}
sorted.add(n.payload);
}
}
}
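
A small usage sketch of the edge convention documented above: an edge u -> v means u depends on v, so v must come out of the sort first. It assumes this class exposes addEdge(a, b) and sort(); treat the exact API and output as an assumption.

import org.antlr.v4.misc.Graph;
import java.util.List;

public class SortSketch {
    public static void main(String[] args) {
        Graph<String> g = new Graph<String>();
        g.addEdge("A.g4", "B.g4"); // A depends on (imports) B
        g.addEdge("B.g4", "C.g4"); // B depends on C
        List<String> order = g.sort();
        System.out.println(order); // expected: [C.g4, B.g4, A.g4]
    }
}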

View File

@ -54,21 +54,6 @@ public class Utils {
static Integer[] ints = new Integer[INTEGER_POOL_MAX_VALUE+1];
/** Integer objects are immutable so share all Integers with the
* same value up to some max size. Use an array as a perfect hash.
* Return shared object for 0..INTEGER_POOL_MAX_VALUE or a new
* Integer object with x in it. Java's autoboxing only caches up to 127.
public static Integer integer(int x) {
if ( x<0 || x>INTEGER_POOL_MAX_VALUE ) {
return new Integer(x);
}
if ( ints[x]==null ) {
ints[x] = new Integer(x);
}
return ints[x];
}
*/
public static String stripFileExtension(String name) {
if ( name==null ) return null;
int lastDot = name.lastIndexOf('.');
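
The diff cuts this helper off after its first lines; as a rough sketch, the rest presumably just drops everything from the last dot onward (this completion is an assumption, not part of the diff).

public class StripExtensionSketch {
    // stripFileExtension("T.g4") -> "T"; names without a dot are returned unchanged.
    public static String stripFileExtension(String name) {
        if ( name==null ) return null;
        int lastDot = name.lastIndexOf('.');
        if ( lastDot<0 ) return name;
        return name.substring(0, lastDot);
    }
}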

View File

@ -35,7 +35,7 @@ import org.antlr.runtime.Token;
import org.antlr.v4.tool.Grammar;
/** A CommonToken that can also track it's original location,
* derived from options on the element ref like BEGIN<line=34,...>.
* derived from options on the element ref like BEGIN&lt;line=34,...&gt;.
*/
public class GrammarToken extends CommonToken {
public Grammar g;

View File

@ -52,11 +52,15 @@ import java.util.List;
public class ScopeParser {
/** Given an arg or retval scope definition list like
*
* Map<String, String>, int[] j3, char *foo32[3]
* <code>
* Map&lt;String, String&gt;, int[] j3, char *foo32[3]
* </code>
*
* or
*
* <code>
* int i=3, j=a[34]+20
* </code>
*
* convert to an attribute scope.
*/
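
The awkward part of the conversion documented above is that commas inside a generic type such as Map<String, String> must not split the list. Below is a minimal, self-contained sketch of that top-level split; it illustrates the problem and is not ScopeParser's actual code.

import java.util.ArrayList;
import java.util.List;

public class SplitArgsSketch {
    // Split an arg/retval definition list on commas, ignoring commas nested
    // inside <...>, [...] or (...), so "Map<String, String>, int[] j3"
    // yields two attribute definitions, not three.
    public static List<String> splitTopLevel(String defs) {
        List<String> attrs = new ArrayList<String>();
        int depth = 0, start = 0;
        for (int i = 0; i < defs.length(); i++) {
            char c = defs.charAt(i);
            if ( c=='<' || c=='[' || c=='(' ) depth++;
            else if ( c=='>' || c==']' || c==')' ) depth--;
            else if ( c==',' && depth==0 ) {
                attrs.add(defs.substring(start, i).trim());
                start = i+1;
            }
        }
        attrs.add(defs.substring(start).trim());
        return attrs;
    }

    public static void main(String[] args) {
        // [Map<String, String>, int[] j3, char *foo32[3]]
        System.out.println(splitTopLevel("Map<String, String>, int[] j3, char *foo32[3]"));
    }
}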

View File

@ -39,7 +39,7 @@ import java.util.LinkedHashMap;
import java.util.Set;
/** Track the attributes within retval, arg lists etc...
* <p/>
* <p>
* Each rule has potentially 3 scopes: return values,
* parameters, and an implicitly-named scope (i.e., a scope defined in a rule).
* Implicitly-defined scopes are named after the rule; rules and scopes then

View File

@ -34,12 +34,12 @@ import org.antlr.v4.runtime.Lexer;
/**
* A complex enumeration of all the error messages that the tool can issue.
* <p/>
* <p>
* When adding error messages, also add a description of the message to the
* Wiki with a location under the Wiki page
* <a href="http://www.antlr.org/wiki/display/ANTLR4/Errors+Reported+by+the+ANTLR+Tool">Errors Reported by the ANTLR Tool</a>.
*
* @author Jim Idle <jimi@temporal-wave.com>, Terence Parr
* @author Jim Idle &lt;jimi@temporal-wave.com&gt;, Terence Parr
* @since 4.0
*/
public enum ErrorType {
@ -702,8 +702,8 @@ public enum ErrorType {
* <p>The following rule produces this error.</p>
*
* <pre>
* X : 'foo' -> type(Foo); // ok
* Y : 'foo' -> token(Foo); // error 149 (token is not a supported lexer command)
* X : 'foo' -&gt; type(Foo); // ok
* Y : 'foo' -&gt; token(Foo); // error 149 (token is not a supported lexer command)
* </pre>
*
* @since 4.1
@ -719,8 +719,8 @@ public enum ErrorType {
* <p>The following rule produces this error.</p>
*
* <pre>
* X : 'foo' -> type(Foo); // ok
* Y : 'foo' -> type; // error 150 (the type command requires an argument)
* X : 'foo' -&gt; type(Foo); // ok
* Y : 'foo' -&gt; type; // error 150 (the type command requires an argument)
* </pre>
*
* @since 4.1
@ -737,8 +737,8 @@ public enum ErrorType {
* <p>The following rule produces this error.</p>
*
* <pre>
* X : 'foo' -> popMode; // ok
* Y : 'foo' -> popMode(A); // error 151 (the popMode command does not take an argument)
* X : 'foo' -&gt; popMode; // ok
* Y : 'foo' -&gt; popMode(A); // error 151 (the popMode command does not take an argument)
* </pre>
*
* @since 4.1
@ -825,8 +825,8 @@ public enum ErrorType {
* public static final int CUSTOM = HIDDEN + 1;
* }
*
* X : 'foo' -> channel(HIDDEN); // ok
* Y : 'bar' -> channel(CUSTOM); // warning 155
* X : 'foo' -&gt; channel(HIDDEN); // ok
* Y : 'bar' -&gt; channel(CUSTOM); // warning 155
* </pre>
*
* @since 4.2
@ -891,12 +891,12 @@ public enum ErrorType {
* <p>The following rule produces this warning.</p>
*
* <pre>
* X1 : 'x' -> more // ok
* X1 : 'x' -&gt; more // ok
* ;
* Y1 : 'x' {more();} // ok
* ;
* fragment
* X2 : 'x' -> more // warning 158
* X2 : 'x' -&gt; more // warning 158
* ;
* fragment
* Y2 : 'x' {more();} // warning 158

View File

@ -114,14 +114,14 @@ public class Grammar implements AttributeResolver {
public static final Set<String> LexerBlockOptions = new HashSet<String>();
/** Legal options for rule refs like id<key=value> */
/** Legal options for rule refs like id&lt;key=value&gt; */
public static final Set<String> ruleRefOptions = new HashSet<String>();
static {
ruleRefOptions.add(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME);
ruleRefOptions.add(LeftRecursiveRuleTransformer.TOKENINDEX_OPTION_NAME);
}
/** Legal options for terminal refs like ID<assoc=right> */
/** Legal options for terminal refs like ID&lt;assoc=right&gt; */
public static final Set<String> tokenOptions = new HashSet<String>();
static {
tokenOptions.add("assoc");
@ -553,15 +553,6 @@ public class Grammar implements AttributeResolver {
public List<Grammar> getImportedGrammars() { return importedGrammars; }
/** Get delegates below direct delegates of g
public List<Grammar> getIndirectDelegates(Grammar g) {
List<Grammar> direct = getDirectDelegates(g);
List<Grammar> delegates = getDelegates(g);
delegates.removeAll(direct);
return delegates;
}
*/
public LexerGrammar getImplicitLexer() {
return implicitLexer;
}

View File

@ -151,7 +151,7 @@ public class GrammarParserInterpreter extends ParserInterpreter {
* it's simple. Set decisionStatesThatSetOuterAltNumInContext
* indicates which decision states should set the outer alternative number.
*
* Left recursive rules are much more complicated to deal with:
* <p>Left recursive rules are much more complicated to deal with:
* there is typically a decision for the primary alternatives and a
* decision to choose between the recursive operator alternatives.
* For example, the following left recursive rule has two primary and 2

View File

@ -166,11 +166,11 @@ public class GrammarTransformPipeline {
/** Merge all the rules, token definitions, and named actions from
imported grammars into the root grammar tree. Perform:
(tokens { X (= Y 'y')) + (tokens { Z ) -> (tokens { X (= Y 'y') Z)
(tokens { X (= Y 'y')) + (tokens { Z ) -&gt; (tokens { X (= Y 'y') Z)
(@ members {foo}) + (@ members {bar}) -> (@ members {foobar})
(@ members {foo}) + (@ members {bar}) -&gt; (@ members {foobar})
(RULES (RULE x y)) + (RULES (RULE z)) -> (RULES (RULE x y z))
(RULES (RULE x y)) + (RULES (RULE z)) -&gt; (RULES (RULE x y z))
Rules in root prevent same rule from being appended to RULES node.
@ -322,7 +322,7 @@ public class GrammarTransformPipeline {
* We'll have this Grammar share token symbols later; don't generate
* tokenVocab or tokens{} section. Copy over named actions.
*
* Side-effects: it removes children from GRAMMAR & RULES nodes
* Side-effects: it removes children from GRAMMAR &amp; RULES nodes
* in combined AST. Anything cut out is dup'd before
* adding to lexer to avoid "who's ur daddy" issues
*/

View File

@ -136,7 +136,7 @@ public class LeftRecursiveRule extends Rule {
return alts;
}
/** Get -> labels from those alts we deleted for left-recursive rules. */
/** Get -&gt; labels from those alts we deleted for left-recursive rules. */
@Override
public Map<String, List<Pair<Integer, AltAST>>> getAltLabels() {
Map<String, List<Pair<Integer, AltAST>>> labels = new HashMap<String, List<Pair<Integer, AltAST>>>();