Merge pull request #430 from sharwell/doclint

Fix many errors reported by -Xdoclint
This commit is contained in:
Sam Harwell 2014-01-19 19:04:22 -08:00
commit 23e447fe34
59 changed files with 615 additions and 727 deletions

View File

@ -212,9 +212,9 @@ public class Antlr4Mojo extends AbstractMojo {
* The main entry point for this Mojo, it is responsible for converting
* ANTLR 4.x grammars into the target language specified by the grammar.
*
* @throws MojoExecutionException if a configuration or grammar error causes
* @exception MojoExecutionException if a configuration or grammar error causes
* the code generation process to fail
* @throws MojoFailureException if an instance of the ANTLR 4 {@link Tool}
* @exception MojoFailureException if an instance of the ANTLR 4 {@link Tool}
* cannot be created
*/
@Override
@ -353,7 +353,7 @@ public class Antlr4Mojo extends AbstractMojo {
/**
*
* @param sourceDirectory
* @throws InclusionScanException
* @exception InclusionScanException
*/
@NotNull
private List<List<String>> processGrammarFiles(List<String> args, File sourceDirectory) throws InclusionScanException {

View File

@ -47,11 +47,11 @@ public interface ANTLRErrorListener {
* specifies how to recover from syntax errors and how to compute error
* messages. This listener's job is simply to emit a computed message,
* though it has enough information to create its own message in many cases.
* <p/>
* The {@link RecognitionException} is non-null for all syntax errors except
*
* <p>The {@link RecognitionException} is non-null for all syntax errors except
* when we discover mismatched token errors that we can recover from
* in-line, without returning from the surrounding rule (via the single
* token insertion and deletion mechanism).
* token insertion and deletion mechanism).</p>
*
* @param recognizer
* What parser got the error. From this
@ -84,19 +84,19 @@ public interface ANTLRErrorListener {
/**
* This method is called by the parser when a full-context prediction
* results in an ambiguity.
* <p/>
* When {@code exact} is {@code true}, <em>all</em> of the alternatives in
*
* <p>When {@code exact} is {@code true}, <em>all</em> of the alternatives in
* {@code ambigAlts} are viable, i.e. this is reporting an exact ambiguity.
* When {@code exact} is {@code false}, <em>at least two</em> of the
* alternatives in {@code ambigAlts} are viable for the current input, but
* the prediction algorithm terminated as soon as it determined that at
* least the <em>minimum</em> alternative in {@code ambigAlts} is viable.
* <p/>
* When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction mode
* least the <em>minimum</em> alternative in {@code ambigAlts} is viable.</p>
*
* <p>When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction mode
* is used, the parser is required to identify exact ambiguities so
* {@code exact} will always be {@code true}.
* <p/>
* This method is not used by lexers.
* {@code exact} will always be {@code true}.</p>
*
* <p>This method is not used by lexers.</p>
*
* @param recognizer the parser instance
* @param dfa the DFA for the current decision
@ -120,13 +120,13 @@ public interface ANTLRErrorListener {
/**
* This method is called when an SLL conflict occurs and the parser is about
* to use the full context information to make an LL decision.
* <p/>
* If one or more configurations in {@code configs} contains a semantic
*
* <p>If one or more configurations in {@code configs} contains a semantic
* predicate, the predicates are evaluated before this method is called. The
* subset of alternatives which are still viable after predicates are
* evaluated is reported in {@code conflictingAlts}.
* <p/>
* This method is not used by lexers.
* evaluated is reported in {@code conflictingAlts}.</p>
*
* <p>This method is not used by lexers.</p>
*
* @param recognizer the parser instance
* @param dfa the DFA for the current decision
@ -148,21 +148,21 @@ public interface ANTLRErrorListener {
/**
* This method is called by the parser when a full-context prediction has a
* unique result.
* <p/>
* For prediction implementations that only evaluate full-context
*
* <p>For prediction implementations that only evaluate full-context
* predictions when an SLL conflict is found (including the default
* {@link ParserATNSimulator} implementation), this method reports cases
* where SLL conflicts were resolved to unique full-context predictions,
* i.e. the decision was context-sensitive. This report does not necessarily
* indicate a problem, and it may appear even in completely unambiguous
* grammars.
* <p/>
* {@code configs} may have more than one represented alternative if the
* grammars.</p>
*
* <p>{@code configs} may have more than one represented alternative if the
* full-context prediction algorithm does not evaluate predicates before
* beginning the full-context prediction. In all cases, the final prediction
* is passed as the {@code prediction} argument.
* <p/>
* This method is not used by lexers.
* is passed as the {@code prediction} argument.</p>
*
* <p>This method is not used by lexers.</p>
*
* @param recognizer the parser instance
* @param dfa the DFA for the current decision

View File

@ -46,8 +46,8 @@ import org.antlr.v4.runtime.misc.NotNull;
*
* Implementations of this interface report syntax errors by calling
* {@link Parser#notifyErrorListeners}.
* <p/>
* TODO: what to do about lexers
*
* <p>TODO: what to do about lexers</p>
*/
public interface ANTLRErrorStrategy {
/**
@ -62,10 +62,10 @@ public interface ANTLRErrorStrategy {
* strategy successfully recovers from the match failure, this method
* returns the {@link Token} instance which should be treated as the
* successful result of the match.
* <p/>
* Note that the calling code will not report an error if this method
*
* <p>Note that the calling code will not report an error if this method
* returns successfully. The error strategy implementation is responsible
* for calling {@link Parser#notifyErrorListeners} as appropriate.
* for calling {@link Parser#notifyErrorListeners} as appropriate.</p>
*
* @param recognizer the parser instance
* @throws RecognitionException if the error strategy was not able to
@ -95,13 +95,13 @@ public interface ANTLRErrorStrategy {
* This method provides the error handler with an opportunity to handle
* syntactic or semantic errors in the input stream before they result in a
* {@link RecognitionException}.
* <p/>
* The generated code currently contains calls to {@link #sync} after
*
* <p>The generated code currently contains calls to {@link #sync} after
* entering the decision state of a closure block ({@code (...)*} or
* {@code (...)+}).
* <p/>
* For an implementation based on Jim Idle's "magic sync" mechanism, see
* {@link DefaultErrorStrategy#sync}.
* {@code (...)+}).</p>
*
* <p>For an implementation based on Jim Idle's "magic sync" mechanism, see
* {@link DefaultErrorStrategy#sync}.</p>
*
* @see DefaultErrorStrategy#sync
*

View File

@ -41,8 +41,8 @@ import java.util.Arrays;
* Vacuum all input from a {@link Reader}/{@link InputStream} and then treat it
* like a {@code char[]} buffer. Can also pass in a {@link String} or
* {@code char[]} to use.
* <p/>
* If you need encoding, pass in stream/reader with correct encoding.
*
* <p>If you need encoding, pass in stream/reader with correct encoding.</p>
*/
public class ANTLRInputStream implements CharStream {
public static final int READ_BUFFER_SIZE = 1024;

View File

@ -33,8 +33,8 @@ package org.antlr.v4.runtime;
import org.antlr.v4.runtime.misc.ParseCancellationException;
/** Bail out of parser at first syntax error. Do this to use it:
* <p/>
* {@code myparser.setErrorHandler(new BailErrorStrategy());}
*
* <p>{@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
*/
public class BailErrorStrategy extends DefaultErrorStrategy {
/** Instead of recovering from exception {@code e}, re-throw it wrapped

View File

@ -45,11 +45,11 @@ import java.util.Set;
* because it has to constantly flip back and forth between inside/output
* templates. E.g., {@code <names:{hi, <it>}>} has to parse names as part of an
* expression but {@code "hi, <it>"} as a nested template.
* <p/>
* You can't use this stream if you pass whitespace or other off-channel tokens
*
* <p>You can't use this stream if you pass whitespace or other off-channel tokens
* to the parser. The stream can't ignore off-channel tokens.
* ({@link UnbufferedTokenStream} is the same way.) Use
* {@link CommonTokenStream}.
* {@link CommonTokenStream}.</p>
*/
public class BufferedTokenStream implements TokenStream {
@NotNull
@ -222,9 +222,9 @@ public class BufferedTokenStream implements TokenStream {
* operation. The default implementation simply returns {@code i}. If an
* exception is thrown in this method, the current stream index should not be
* changed.
* <p/>
* For example, {@link CommonTokenStream} overrides this method to ensure that
* the seek target is always an on-channel token.
*
* <p>For example, {@link CommonTokenStream} overrides this method to ensure that
* the seek target is always an on-channel token.</p>
*
* @param i The target token index.
* @return The adjusted target token index.

View File

@ -52,7 +52,7 @@ public class CommonToken implements WritableToken, Serializable {
// TODO: can store these in map in token stream rather than as field here
protected String text;
/** What token number is this from 0..n-1 tokens; < 0 implies invalid index */
/** What token number is this from 0..n-1 tokens; &lt; 0 implies invalid index */
protected int index = -1;
/** The char position into the input buffer where this token starts */

View File

@ -62,9 +62,9 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
/**
* {@inheritDoc}
* <p/>
* The default implementation simply calls {@link #endErrorCondition} to
* ensure that the handler is not in error recovery mode.
*
* <p>The default implementation simply calls {@link #endErrorCondition} to
* ensure that the handler is not in error recovery mode.</p>
*/
@Override
public void reset(Parser recognizer) {
@ -103,8 +103,8 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
/**
* {@inheritDoc}
* <p/>
* The default implementation simply calls {@link #endErrorCondition}.
*
* <p>The default implementation simply calls {@link #endErrorCondition}.</p>
*/
@Override
public void reportMatch(Parser recognizer) {
@ -113,11 +113,11 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
/**
* {@inheritDoc}
* <p/>
* The default implementation returns immediately if the handler is already
*
* <p>The default implementation returns immediately if the handler is already
* in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
* and dispatches the reporting task based on the runtime type of {@code e}
* according to the following table.
* according to the following table.</p>
*
* <ul>
* <li>{@link NoViableAltException}: Dispatches the call to
@ -158,10 +158,10 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
/**
* {@inheritDoc}
* <p/>
* The default implementation resynchronizes the parser by consuming tokens
*
* <p>The default implementation resynchronizes the parser by consuming tokens
* until we find one in the resynchronization set--loosely the set of tokens
* that can follow the current rule.
* that can follow the current rule.</p>
*/
@Override
public void recover(Parser recognizer, RecognitionException e) {
@ -194,9 +194,9 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* that the current lookahead symbol is consistent with what were expecting
* at this point in the ATN. You can call this anytime but ANTLR only
* generates code to check before subrules/loops and each iteration.
* <p/>
* Implements Jim Idle's magic sync mechanism in closures and optional
* subrules. E.g.,
*
* <p>Implements Jim Idle's magic sync mechanism in closures and optional
* subrules. E.g.,</p>
*
* <pre>
* a : sync ( stuff sync )* ;
@ -207,20 +207,20 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* token deletion, if possible. If it can't do that, it bails on the current
* rule and uses the default error recovery, which consumes until the
* resynchronization set of the current rule.
* <p/>
* If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
*
* <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
* with an empty alternative), then the expected set includes what follows
* the subrule.
* <p/>
* During loop iteration, it consumes until it sees a token that can start a
* the subrule.</p>
*
* <p>During loop iteration, it consumes until it sees a token that can start a
* sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
* stay in the loop as long as possible.
* <p/>
* <strong>ORIGINS</strong>
* <p/>
* Previous versions of ANTLR did a poor job of their recovery within loops.
* stay in the loop as long as possible.</p>
*
* <p><strong>ORIGINS</strong></p>
*
* <p>Previous versions of ANTLR did a poor job of their recovery within loops.
* A single mismatch token or missing token would force the parser to bail
* out of the entire rules surrounding the loop. So, for rule
* out of the entire rules surrounding the loop. So, for rule</p>
*
* <pre>
* classDef : 'class' ID '{' member* '}'
@ -229,11 +229,11 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* input with an extra token between members would force the parser to
* consume until it found the next class definition rather than the next
* member definition of the current class.
* <p/>
* This functionality cost a little bit of effort because the parser has to
*
* <p>This functionality cost a little bit of effort because the parser has to
* compare token set at the start of the loop and at each iteration. If for
* some reason speed is suffering for you, you can turn off this
* functionality by simply overriding this method as a blank { }.
* functionality by simply overriding this method as a blank { }.</p>
*/
@Override
public void sync(Parser recognizer) throws RecognitionException {
@ -348,15 +348,15 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* erroneous symbol is current {@code LT(1)} symbol and has not yet been
* removed from the input stream. When this method returns,
* {@code recognizer} is in error recovery mode.
* <p/>
* This method is called when {@link #singleTokenDeletion} identifies
*
* <p>This method is called when {@link #singleTokenDeletion} identifies
* single-token deletion as a viable recovery strategy for a mismatched
* input error.
* <p/>
* The default implementation simply returns if the handler is already in
* input error.</p>
*
* <p>The default implementation simply returns if the handler is already in
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
* enter error recovery mode, followed by calling
* {@link Parser#notifyErrorListeners}.
* {@link Parser#notifyErrorListeners}.</p>
*
* @param recognizer the parser instance
*/
@ -380,15 +380,15 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* insertion of a missing token into the input stream. At the time this
* method is called, the missing token has not yet been inserted. When this
* method returns, {@code recognizer} is in error recovery mode.
* <p/>
* This method is called when {@link #singleTokenInsertion} identifies
*
* <p>This method is called when {@link #singleTokenInsertion} identifies
* single-token insertion as a viable recovery strategy for a mismatched
* input error.
* <p/>
* The default implementation simply returns if the handler is already in
* input error.</p>
*
* <p>The default implementation simply returns if the handler is already in
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
* enter error recovery mode, followed by calling
* {@link Parser#notifyErrorListeners}.
* {@link Parser#notifyErrorListeners}.</p>
*
* @param recognizer the parser instance
*/
@ -409,46 +409,46 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
/**
* {@inheritDoc}
* <p/>
* The default implementation attempts to recover from the mismatched input
*
* <p>The default implementation attempts to recover from the mismatched input
* by using single token insertion and deletion as described below. If the
* recovery attempt fails, this method throws an
* {@link InputMismatchException}.
* <p/>
* <strong>EXTRA TOKEN</strong> (single token deletion)
* <p/>
* {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
* {@link InputMismatchException}.</p>
*
* <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
*
* <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
* right token, however, then assume {@code LA(1)} is some extra spurious
* token and delete it. Then consume and return the next token (which was
* the {@code LA(2)} token) as the successful result of the match operation.
* <p/>
* This recovery strategy is implemented by {@link #singleTokenDeletion}.
* <p/>
* <strong>MISSING TOKEN</strong> (single token insertion)
* <p/>
* If current token (at {@code LA(1)}) is consistent with what could come
* the {@code LA(2)} token) as the successful result of the match operation.</p>
*
* <p>This recovery strategy is implemented by {@link #singleTokenDeletion}.</p>
*
* <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
*
* <p>If current token (at {@code LA(1)}) is consistent with what could come
* after the expected {@code LA(1)} token, then assume the token is missing
* and use the parser's {@link TokenFactory} to create it on the fly. The
* "insertion" is performed by returning the created token as the successful
* result of the match operation.
* <p/>
* This recovery strategy is implemented by {@link #singleTokenInsertion}.
* <p/>
* <strong>EXAMPLE</strong>
* <p/>
* For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
* result of the match operation.</p>
*
* <p>This recovery strategy is implemented by {@link #singleTokenInsertion}.</p>
*
* <p><strong>EXAMPLE</strong></p>
*
* <p>For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
* the parser returns from the nested call to {@code expr}, it will have
* call chain:
* call chain:</p>
*
* <pre>
* stat -> expr -> atom
* stat &rarr; expr &rarr; atom
* </pre>
*
* and it will be trying to match the {@code ')'} at this point in the
* derivation:
*
* <pre>
* => ID '=' '(' INT ')' ('+' atom)* ';'
* =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
* ^
* </pre>
*
@ -485,12 +485,12 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* deletion strategy fails to recover from the mismatched input. If this
* method returns {@code true}, {@code recognizer} will be in error recovery
* mode.
* <p/>
* This method determines whether or not single-token insertion is viable by
*
* <p>This method determines whether or not single-token insertion is viable by
* checking if the {@code LA(1)} input symbol could be successfully matched
* if it were instead the {@code LA(2)} symbol. If this method returns
* {@code true}, the caller is responsible for creating and inserting a
* token with the correct type to produce this behavior.
* token with the correct type to produce this behavior.</p>
*
* @param recognizer the parser instance
* @return {@code true} if single-token insertion is a viable recovery
@ -520,12 +520,12 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
* handler state will not have changed. If this method returns non-null,
* {@code recognizer} will <em>not</em> be in error recovery mode since the
* returned token was a successful match.
* <p/>
* If the single-token deletion is successful, this method calls
*
* <p>If the single-token deletion is successful, this method calls
* {@link #reportUnwantedToken} to report the error, followed by
* {@link Parser#consume} to actually "delete" the extraneous token. Then,
* before returning {@link #reportMatch} is called to signal a successful
* match.
* match.</p>
*
* @param recognizer the parser instance
* @return the successfully matched {@link Token} instance if single-token

View File

@ -37,10 +37,10 @@ import org.antlr.v4.runtime.misc.NotNull;
* interface provides <em>marked ranges</em> with support for a minimum level
* of buffering necessary to implement arbitrary lookahead during prediction.
* For more information on marked ranges, see {@link #mark}.
* <p/>
* <strong>Initializing Methods:</strong> Some methods in this interface have
*
* <p><strong>Initializing Methods:</strong> Some methods in this interface have
* unspecified behavior if no call to an initializing method has occurred after
* the stream was constructed. The following is a list of initializing methods:
* the stream was constructed. The following is a list of initializing methods:</p>
*
* <ul>
* <li>{@link #LA}</li>
@ -93,8 +93,8 @@ public interface IntStream {
* symbol in the stream. It is not valid to call this method with
* {@code i==0}, but the specific behavior is unspecified because this
* method is frequently called from performance-critical code.
* <p/>
* This method is guaranteed to succeed if any of the following are true:
*
* <p>This method is guaranteed to succeed if any of the following are true:</p>
*
* <ul>
* <li>{@code i>0}</li>
@ -109,12 +109,12 @@ public interface IntStream {
* that has not yet been released.</li>
* </ul>
*
* If {@code i} represents a position at or beyond the end of the stream,
* this method returns {@link #EOF}.
* <p/>
* The return value is unspecified if {@code i<0} and fewer than {@code -i}
* <p>If {@code i} represents a position at or beyond the end of the stream,
* this method returns {@link #EOF}.</p>
*
* <p>The return value is unspecified if {@code i<0} and fewer than {@code -i}
* calls to {@link #consume consume()} have occurred from the beginning of
* the stream before calling this method.
* the stream before calling this method.</p>
*
* @throws UnsupportedOperationException if the stream does not support
* retrieving the value of the specified symbol
@ -127,8 +127,8 @@ public interface IntStream {
* was called to the current {@link #index index()}. This allows the use of
* streaming input sources by specifying the minimum buffering requirements
* to support arbitrary lookahead during prediction.
* <p/>
* The returned mark is an opaque handle (type {@code int}) which is passed
*
* <p>The returned mark is an opaque handle (type {@code int}) which is passed
* to {@link #release release()} when the guarantees provided by the marked
* range are no longer necessary. When calls to
* {@code mark()}/{@code release()} are nested, the marks must be released
@ -136,19 +136,19 @@ public interface IntStream {
* used during performance-critical sections of prediction, the specific
* behavior of invalid usage is unspecified (i.e. a mark is not released, or
* a mark is released twice, or marks are not released in reverse order from
* which they were created).
* <p/>
* The behavior of this method is unspecified if no call to an
* which they were created).</p>
*
* <p>The behavior of this method is unspecified if no call to an
* {@link IntStream initializing method} has occurred after this stream was
* constructed.
* <p/>
* This method does not change the current position in the input stream.
* <p/>
* The following example shows the use of {@link #mark mark()},
* constructed.</p>
*
* <p>This method does not change the current position in the input stream.</p>
*
* <p>The following example shows the use of {@link #mark mark()},
* {@link #release release(mark)}, {@link #index index()}, and
* {@link #seek seek(index)} as part of an operation to safely work within a
* marked region, then restore the stream position to its original value and
* release the mark.
* release the mark.</p>
* <pre>
* IntStream stream = ...;
* int index = -1;
@ -175,8 +175,8 @@ public interface IntStream {
* reverse order of the corresponding calls to {@code mark()}. If a mark is
* released twice, or if marks are not released in reverse order of the
* corresponding calls to {@code mark()}, the behavior is unspecified.
* <p/>
* For more information and an example, see {@link #mark}.
*
* <p>For more information and an example, see {@link #mark}.</p>
*
* @param marker A marker returned by a call to {@code mark()}.
* @see #mark
@ -186,10 +186,10 @@ public interface IntStream {
/**
* Return the index into the stream of the input symbol referred to by
* {@code LA(1)}.
* <p/>
* The behavior of this method is unspecified if no call to an
*
* <p>The behavior of this method is unspecified if no call to an
* {@link IntStream initializing method} has occurred after this stream was
* constructed.
* constructed.</p>
*/
int index();

View File

@ -8,10 +8,10 @@ import java.util.List;
/**
* Provides an implementation of {@link TokenSource} as a wrapper around a list
* of {@link Token} objects.
* <p/>
* If the final token in the list is an {@link Token#EOF} token, it will be used
*
* <p>If the final token in the list is an {@link Token#EOF} token, it will be used
* as the EOF token for every call to {@link #nextToken} after the end of the
* list is reached. Otherwise, an EOF token will be created.
* list is reached. Otherwise, an EOF token will be created.</p>
*/
public class ListTokenSource implements TokenSource {
/**
@ -80,7 +80,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
public int getCharPositionInLine() {
@ -111,7 +111,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
public Token nextToken() {
@ -142,7 +142,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
public int getLine() {
@ -177,7 +177,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
public CharStream getInputStream() {
@ -196,7 +196,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
public String getSourceName() {
@ -213,7 +213,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
public void setTokenFactory(@NotNull TokenFactory<?> factory) {
@ -221,7 +221,7 @@ public class ListTokenSource implements TokenSource {
}
/**
* @inheritDoc
* {@inheritDoc}
*/
@Override
@NotNull

View File

@ -196,13 +196,13 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
* Match current input symbol against {@code ttype}. If the symbol type
* matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are
* called to complete the match process.
* <p/>
* If the symbol type does not match,
*
* <p>If the symbol type does not match,
* {@link ANTLRErrorStrategy#recoverInline} is called on the current error
* strategy to attempt recovery. If {@link #getBuildParseTree} is
* {@code true} and the token index of the symbol returned by
* {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
* the parse tree by calling {@link ParserRuleContext#addErrorNode}.
* the parse tree by calling {@link ParserRuleContext#addErrorNode}.</p>
*
* @param ttype the token type to match
* @return the matched symbol
@ -232,13 +232,13 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
* Match current input symbol as a wildcard. If the symbol type matches
* (i.e. has a value greater than 0), {@link ANTLRErrorStrategy#reportMatch}
* and {@link #consume} are called to complete the match process.
* <p/>
* If the symbol type does not match,
*
* <p>If the symbol type does not match,
* {@link ANTLRErrorStrategy#recoverInline} is called on the current error
* strategy to attempt recovery. If {@link #getBuildParseTree} is
* {@code true} and the token index of the symbol returned by
* {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
* the parse tree by calling {@link ParserRuleContext#addErrorNode}.
* the parse tree by calling {@link ParserRuleContext#addErrorNode}.</p>
*
* @return the matched symbol
* @throws RecognitionException if the current input symbol did not match
@ -269,15 +269,15 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
* them up using the {@link ParserRuleContext#children} list so that it
* forms a parse tree. The {@link ParserRuleContext} returned from the start
* rule represents the root of the parse tree.
* <p/>
* Note that if we are not building parse trees, rule contexts only point
*
* <p>Note that if we are not building parse trees, rule contexts only point
* upwards. When a rule exits, it returns the context but that gets garbage
* collected if nobody holds a reference. It points upwards but nobody
* points at it.
* <p/>
* When we build parse trees, we are adding all of these contexts to
* points at it.</p>
*
* <p>When we build parse trees, we are adding all of these contexts to
* {@link ParserRuleContext#children} list. Contexts are then not candidates
* for garbage collection.
* for garbage collection.</p>
*/
public void setBuildParseTree(boolean buildParseTrees) {
this._buildParseTrees = buildParseTrees;
@ -331,19 +331,19 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
/**
* Registers {@code listener} to receive events during the parsing process.
* <p/>
* To support output-preserving grammar transformations (including but not
*
* <p>To support output-preserving grammar transformations (including but not
* limited to left-recursion removal, automated left-factoring, and
* optimized code generation), calls to listener methods during the parse
* may differ substantially from calls made by
* {@link ParseTreeWalker#DEFAULT} used after the parse is complete. In
* particular, rule entry and exit events may occur in a different order
* during the parse than after the parser. In addition, calls to certain
* rule entry methods may be omitted.
* <p/>
* With the following specific exceptions, calls to listener events are
* rule entry methods may be omitted.</p>
*
* <p>With the following specific exceptions, calls to listener events are
* <em>deterministic</em>, i.e. for identical input the calls to listener
* methods will be the same.
* methods will be the same.</p>
*
* <ul>
* <li>Alterations to the grammar used to generate code may change the
@ -372,9 +372,9 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
/**
* Remove {@code listener} from the list of parse listeners.
* <p/>
* If {@code listener} is {@code null} or has not been added as a parse
* listener, this method does nothing.
*
* <p>If {@code listener} is {@code null} or has not been added as a parse
* listener, this method does nothing.</p>
*
* @see #addParseListener
*
@ -479,7 +479,7 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
*
* <pre>
* ParseTree t = parser.expr();
* ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
* ParseTreePattern p = parser.compileParseTreePattern("&lt;ID&gt;+0", MyParser.RULE_expr);
* ParseTreeMatch m = p.match(t);
* String id = m.get("ID");
* </pre>
@ -560,10 +560,10 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
/**
* Consume and return the {@linkplain #getCurrentToken current symbol}.
* <p/>
* E.g., given the following input with {@code A} being the current
*
* <p>E.g., given the following input with {@code A} being the current
* lookahead symbol, this function moves the cursor to {@code B} and returns
* {@code A}.
* {@code A}.</p>
*
* <pre>
* A B

View File

@ -87,8 +87,8 @@ public class RecognitionException extends RuntimeException {
* {@link LexerNoViableAltException} exceptions, this is the
* {@link DecisionState} number. For others, it is the state whose outgoing
* edge we couldn't match.
* <p/>
* If the state number is not known, this method returns -1.
*
* <p>If the state number is not known, this method returns -1.</p>
*/
public int getOffendingState() {
return offendingState;
@ -101,9 +101,9 @@ public class RecognitionException extends RuntimeException {
/**
* Gets the set of input symbols which could potentially follow the
* previously matched symbol at the time this exception was thrown.
* <p/>
* If the set of expected tokens is not known and could not be computed,
* this method returns {@code null}.
*
* <p>If the set of expected tokens is not known and could not be computed,
* this method returns {@code null}.</p>
*
* @return The set of token types that could potentially follow the current
* state in the ATN, or {@code null} if the information is not available.
@ -119,8 +119,8 @@ public class RecognitionException extends RuntimeException {
/**
* Gets the {@link RuleContext} at the time this exception was thrown.
* <p/>
* If the context is not available, this method returns {@code null}.
*
* <p>If the context is not available, this method returns {@code null}.</p>
*
* @return The {@link RuleContext} at the time this exception was thrown.
* If the context is not available, this method returns {@code null}.
@ -133,8 +133,8 @@ public class RecognitionException extends RuntimeException {
/**
* Gets the input stream which is the symbol source for the recognizer where
* this exception was thrown.
* <p/>
* If the input stream is not available, this method returns {@code null}.
*
* <p>If the input stream is not available, this method returns {@code null}.</p>
*
* @return The input stream which is the symbol source for the recognizer
* where this exception was thrown, or {@code null} if the stream is not
@ -156,8 +156,8 @@ public class RecognitionException extends RuntimeException {
/**
* Gets the {@link Recognizer} where this exception occurred.
* <p/>
* If the recognizer is not available, this method returns {@code null}.
*
* <p>If the recognizer is not available, this method returns {@code null}.</p>
*
* @return The recognizer where this exception occurred, or {@code null} if
* the recognizer is not available.

View File

@ -70,8 +70,8 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
/**
* Get a map from token names to token types.
* <p/>
* Used for XPath and tree pattern compilation.
*
* <p>Used for XPath and tree pattern compilation.</p>
*/
@NotNull
public Map<String, Integer> getTokenTypeMap() {
@ -95,8 +95,8 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
/**
* Get a map from rule names to rule indexes.
* <p/>
* Used for XPath and tree pattern compilation.
*
* <p>Used for XPath and tree pattern compilation.</p>
*/
@NotNull
public Map<String, Integer> getRuleIndexMap() {
@ -125,9 +125,9 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
/**
* If this recognizer was generated, it will have a serialized ATN
* representation of the grammar.
* <p/>
* For interpreters, we don't know their serialized ATN despite having
* created the interpreter from it.
*
* <p>For interpreters, we don't know their serialized ATN despite having
* created the interpreter from it.</p>
*/
@NotNull
public String getSerializedATN() {

View File

@ -37,15 +37,15 @@ import org.antlr.v4.runtime.misc.Nullable;
 * and also must reveal its source of characters; {@link CommonToken}'s text is
 * computed from a {@link CharStream}; it only stores indices into the char
* stream.
* <p/>
* Errors from the lexer are never passed to the parser. Either you want to keep
*
* <p>Errors from the lexer are never passed to the parser. Either you want to keep
* going or you do not upon token recognition error. If you do not want to
* continue lexing then you do not want to continue parsing. Just throw an
* exception not under {@link RecognitionException} and Java will naturally toss
* you all the way out of the recognizers. If you want to continue lexing then
* you should not throw an exception to the parser--it has already requested a
* token. Keep lexing until you get a valid one. Just report errors and keep
* going, looking for a valid token.
* going, looking for a valid token.</p>
*/
public interface TokenSource {
/**

View File

@ -52,16 +52,16 @@ public interface TokenStream extends IntStream {
/**
* Gets the {@link Token} at the specified {@code index} in the stream. When
* the preconditions of this method are met, the return value is non-null.
* <p/>
* The preconditions for this method are the same as the preconditions of
*
* <p>The preconditions for this method are the same as the preconditions of
* {@link IntStream#seek}. If the behavior of {@code seek(index)} is
* unspecified for the current state and given {@code index}, then the
* behavior of this method is also unspecified.
* <p/>
* The symbol referred to by {@code index} differs from {@code seek()} only
* behavior of this method is also unspecified.</p>
*
* <p>The symbol referred to by {@code index} differs from {@code seek()} only
* in the case of filtering streams where {@code index} lies before the end
* of the stream. Unlike {@code seek()}, this method does not adjust
* {@code index} to point to a non-ignored symbol.
* {@code index} to point to a non-ignored symbol.</p>
*
 * @throws IllegalArgumentException if {@code index} is less than 0
* @throws UnsupportedOperationException if the stream does not support
@ -86,7 +86,7 @@ public interface TokenStream extends IntStream {
* <pre>
* TokenStream stream = ...;
* String text = "";
* for (int i = interval.a; i <= interval.b; i++) {
* for (int i = interval.a; i &lt;= interval.b; i++) {
* text += stream.get(i).getText();
* }
* </pre>
@ -122,9 +122,9 @@ public interface TokenStream extends IntStream {
* context. This method behaves like the following code, including potential
* exceptions from the call to {@link #getText(Interval)}, but may be
* optimized by the specific implementation.
* </p>
* If {@code ctx.getSourceInterval()} does not return a valid interval of
* tokens provided by this stream, the behavior is unspecified.
*
* <p>If {@code ctx.getSourceInterval()} does not return a valid interval of
* tokens provided by this stream, the behavior is unspecified.</p>
*
* <pre>
* TokenStream stream = ...;
@ -141,20 +141,20 @@ public interface TokenStream extends IntStream {
/**
* Return the text of all tokens in this stream between {@code start} and
* {@code stop} (inclusive).
* <p/>
* If the specified {@code start} or {@code stop} token was not provided by
*
* <p>If the specified {@code start} or {@code stop} token was not provided by
* this stream, or if the {@code stop} occurred before the {@code start}
* token, the behavior is unspecified.
* <p/>
* For streams which ensure that the {@link Token#getTokenIndex} method is
* token, the behavior is unspecified.</p>
*
* <p>For streams which ensure that the {@link Token#getTokenIndex} method is
* accurate for all of its provided tokens, this method behaves like the
* following code. Other streams may implement this method in other ways
* provided the behavior is consistent with this at a high level.
* provided the behavior is consistent with this at a high level.</p>
*
* <pre>
* TokenStream stream = ...;
* String text = "";
* for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
* for (int i = start.getTokenIndex(); i &lt;= stop.getTokenIndex(); i++) {
* text += stream.get(i).getText();
* }
* </pre>

View File

@ -184,11 +184,11 @@ public class TokenStreamRewriter {
/** You may have multiple, named streams of rewrite operations.
* I'm calling these things "programs."
* Maps String (name) -> rewrite (List)
* Maps String (name) &rarr; rewrite (List)
*/
protected final Map<String, List<RewriteOperation>> programs;
/** Map String (program name) -> Integer index */
/** Map String (program name) &rarr; Integer index */
protected final Map<String, Integer> lastRewriteTokenIndexes;
public TokenStreamRewriter(TokenStream tokens) {
@ -456,7 +456,7 @@ public class TokenStreamRewriter {
* 3. throw exception if index in same range as previous replace
*
* Don't actually delete; make op null in list. Easier to walk list.
* Later we can throw as we add to index -> op map.
* Later we can throw as we add to index &rarr; op map.
*
* Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
* inserted stuff would be before the replace range. But, if you

View File

@ -53,16 +53,16 @@ public class UnbufferedCharStream implements CharStream {
/**
* The number of characters currently in {@link #data data}.
* <p/>
* This is not the buffer capacity, that's {@code data.length}.
*
* <p>This is not the buffer capacity, that's {@code data.length}.</p>
*/
protected int n;
/**
* 0..n-1 index into {@link #data data} of next character.
* <p/>
* The {@code LA(1)} character is {@code data[p]}. If {@code p == n}, we are
* out of buffered characters.
*
* <p>The {@code LA(1)} character is {@code data[p]}. If {@code p == n}, we are
* out of buffered characters.</p>
*/
protected int p=0;
@ -214,10 +214,10 @@ public class UnbufferedCharStream implements CharStream {
/**
* Return a marker that we can release later.
* <p/>
* The specific marker value used for this class allows for some level of
*
* <p>The specific marker value used for this class allows for some level of
* protection against misuse where {@code seek()} is called on a mark or
* {@code release()} is called in the wrong order.
* {@code release()} is called in the wrong order.</p>
*/
@Override
public int mark() {

View File

@ -47,16 +47,16 @@ public class UnbufferedTokenStream<T extends Token> implements TokenStream {
/**
* The number of tokens currently in {@link #tokens tokens}.
* <p/>
* This is not the buffer capacity, that's {@code tokens.length}.
*
* <p>This is not the buffer capacity, that's {@code tokens.length}.</p>
*/
protected int n;
/**
* 0..n-1 index into {@link #tokens tokens} of next token.
* <p/>
* The {@code LT(1)} token is {@code tokens[p]}. If {@code p == n}, we are
* out of buffered tokens.
*
* <p>The {@code LT(1)} token is {@code tokens[p]}. If {@code p == n}, we are
* out of buffered tokens.</p>
*/
protected int p=0;
@ -83,9 +83,9 @@ public class UnbufferedTokenStream<T extends Token> implements TokenStream {
* Absolute token index. It's the index of the token about to be read via
* {@code LT(1)}. Goes from 0 to the number of tokens in the entire stream,
* although the stream size is unknown before the end is reached.
* <p/>
* This value is used to set the token indexes if the stream provides tokens
* that implement {@link WritableToken}.
*
* <p>This value is used to set the token indexes if the stream provides tokens
* that implement {@link WritableToken}.</p>
*/
protected int currentTokenIndex = 0;
@ -222,10 +222,10 @@ public class UnbufferedTokenStream<T extends Token> implements TokenStream {
/**
* Return a marker that we can release later.
* <p/>
* The specific marker value used for this class allows for some level of
*
* <p>The specific marker value used for this class allows for some level of
* protection against misuse where {@code seek()} is called on a mark or
* {@code release()} is called in the wrong order.
* {@code release()} is called in the wrong order.</p>
*/
@Override
public int mark() {

View File

@ -167,9 +167,9 @@ public class ATN {
* assumed true). If a path in the ATN exists from the starting state to the
* {@link RuleStopState} of the outermost context without matching any
* symbols, {@link Token#EOF} is added to the returned set.
* <p/>
* If {@code context} is {@code null}, it is treated as
* {@link ParserRuleContext#EMPTY}.
*
* <p>If {@code context} is {@code null}, it is treated as
* {@link ParserRuleContext#EMPTY}.</p>
*
* @param stateNumber the ATN state number
* @param context the full parse context

View File

@ -65,7 +65,7 @@ public class ATNConfig {
* invokes the ATN simulator.
*
* closure() tracks the depth of how far we dip into the
* outer context: depth > 0. Note that it may not be totally
* outer context: depth &gt; 0. Note that it may not be totally
* accurate depth since I don't ever decrement. TODO: make it a boolean then
*/
public int reachesIntoOuterContext;

View File

@ -149,9 +149,9 @@ public class ATNConfigSet implements Set<ATNConfig> {
* {@link ATNConfig#state}, {@code i} is the {@link ATNConfig#alt}, and
* {@code pi} is the {@link ATNConfig#semanticContext}. We use
* {@code (s,i,pi)} as key.
* <p/>
* This method updates {@link #dipsIntoOuterContext} and
* {@link #hasSemanticContext} when necessary.
*
* <p>This method updates {@link #dipsIntoOuterContext} and
* {@link #hasSemanticContext} when necessary.</p>
*/
public boolean add(
@NotNull ATNConfig config,

View File

@ -59,7 +59,7 @@ public class ATNSerializer {
this.tokenNames = tokenNames;
}
/** Serialize state descriptors, edge descriptors, and decision->state map
/** Serialize state descriptors, edge descriptors, and decision&rarr;state map
* into list of ints:
*
* grammar-type, (ANTLRParser.LEXER, ...)

View File

@ -70,19 +70,19 @@ public abstract class ATNSimulator {
* to use only cached nodes/graphs in addDFAState(). We don't want to
* fill this during closure() since there are lots of contexts that
* pop up but are not used ever again. It also greatly slows down closure().
* <p/>
* This cache makes a huge difference in memory and a little bit in speed.
*
* <p>This cache makes a huge difference in memory and a little bit in speed.
* For the Java grammar on java.*, it dropped the memory requirements
* at the end from 25M to 16M. We don't store any of the full context
* graphs in the DFA because they are limited to local context only,
* but apparently there's a lot of repetition there as well. We optimize
* the config contexts before storing the config set in the DFA states
* by literally rebuilding them with cached subgraphs only.
* <p/>
* I tried a cache for use during closure operations, that was
* by literally rebuilding them with cached subgraphs only.</p>
*
* <p>I tried a cache for use during closure operations, that was
* whacked after each adaptivePredict(). It cost a little bit
* more time I think and doesn't save on the overall footprint
* so it's not worth the complexity.
* so it's not worth the complexity.</p>
*/
protected final PredictionContextCache sharedContextCache;

View File

@ -87,11 +87,11 @@ public class LL1Analyzer {
/**
* Compute set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* <p/>
* If {@code ctx} is {@code null} and the end of the rule containing
*
* <p>If {@code ctx} is {@code null} and the end of the rule containing
* {@code s} is reached, {@link Token#EPSILON} is added to the result set.
* If {@code ctx} is not {@code null} and the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.
* reached, {@link Token#EOF} is added to the result set.</p>
*
* @param s the ATN state
* @param ctx the complete parser context, or {@code null} if the context
@ -108,11 +108,11 @@ public class LL1Analyzer {
/**
* Compute set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* <p/>
* If {@code ctx} is {@code null} and the end of the rule containing
*
* <p>If {@code ctx} is {@code null} and the end of the rule containing
* {@code s} is reached, {@link Token#EPSILON} is added to the result set.
* If {@code ctx} is not {@code null} and the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.
* reached, {@link Token#EOF} is added to the result set.</p>
*
* @param s the ATN state
* @param stopState the ATN state to stop at. This can be a
@ -136,12 +136,12 @@ public class LL1Analyzer {
/**
* Compute set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* <p/>
* If {@code ctx} is {@code null} and {@code stopState} or the end of the
*
* <p>If {@code ctx} is {@code null} and {@code stopState} or the end of the
* rule containing {@code s} is reached, {@link Token#EPSILON} is added to
* the result set. If {@code ctx} is not {@code null} and {@code addEOF} is
* {@code true} and {@code stopState} or the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.
* reached, {@link Token#EOF} is added to the result set.</p>
*
* @param s the ATN state.
* @param stopState the ATN state to stop at. This can be a

View File

@ -58,13 +58,13 @@ public class LexerATNSimulator extends ATNSimulator {
* and current character position in that line. Note that the Lexer is
* tracking the starting line and characterization of the token. These
* variables track the "state" of the simulator when it hits an accept state.
* <p/>
* We track these variables separately for the DFA and ATN simulation
*
* <p>We track these variables separately for the DFA and ATN simulation
* because the DFA simulation often has to fail over to the ATN
* simulation. If the ATN simulation fails, we need the DFA to fall
* back to its previously accepted state, if any. If the ATN succeeds,
* then the ATN does the accept and the DFA simulator that invoked it
* can simply return the predicted token type.
* can simply return the predicted token type.</p>
*/
protected static class SimState {
protected int index = -1;
@ -557,15 +557,15 @@ public class LexerATNSimulator extends ATNSimulator {
/**
* Evaluate a predicate specified in the lexer.
* <p/>
* If {@code speculative} is {@code true}, this method was called before
*
* <p>If {@code speculative} is {@code true}, this method was called before
* {@link #consume} for the matched character. This method should call
* {@link #consume} before evaluating the predicate to ensure position
* sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
* and {@link Lexer#getCharPositionInLine}, properly reflect the current
* lexer state. This method should restore {@code input} and the simulator
* to the original state before returning (i.e. undo the actions made by the
* call to {@link #consume}.
* call to {@link #consume}.</p>
*
* @param input The input stream.
* @param ruleIndex The rule containing the predicate.

View File

@ -55,11 +55,11 @@ public interface LexerAction {
* Gets whether the lexer action is position-dependent. Position-dependent
* actions may have different semantics depending on the {@link CharStream}
* index at the time the action is executed.
* <p/>
* Many lexer commands, including {@code type}, {@code skip}, and
*
* <p>Many lexer commands, including {@code type}, {@code skip}, and
* {@code more}, do not check the input index during their execution.
* Actions like this are position-independent, and may be stored more
* efficiently as part of the {@link LexerATNConfig#lexerActionExecutor}.
* efficiently as part of the {@link LexerATNConfig#lexerActionExecutor}.</p>
*
* @return {@code true} if the lexer action semantics can be affected by the
* position of the input {@link CharStream} at the time it is executed;
@ -69,9 +69,9 @@ public interface LexerAction {
/**
* Execute the lexer action in the context of the specified {@link Lexer}.
* <p/>
* For position-dependent actions, the input stream must already be
* positioned correctly prior to calling this method.
*
* <p>For position-dependent actions, the input stream must already be
* positioned correctly prior to calling this method.</p>
*
* @param lexer The lexer instance.
*/

View File

@ -43,10 +43,10 @@ import java.util.Arrays;
/**
* Represents an executor for a sequence of lexer actions which traversed during
* the matching operation of a lexer rule (token).
* <p/>
* The executor tracks position information for position-dependent lexer actions
*
* <p>The executor tracks position information for position-dependent lexer actions
* efficiently, ensuring that actions appearing only at the end of the rule do
* not cause bloating of the {@link DFA} created for the lexer.
* not cause bloating of the {@link DFA} created for the lexer.</p>
*
* @author Sam Harwell
* @since 4.2
@ -104,25 +104,25 @@ public class LexerActionExecutor {
/**
* Creates a {@link LexerActionExecutor} which encodes the current offset
* for position-dependent lexer actions.
* <p/>
* Normally, when the executor encounters lexer actions where
*
* <p>Normally, when the executor encounters lexer actions where
* {@link LexerAction#isPositionDependent} returns {@code true}, it calls
* {@link IntStream#seek} on the input {@link CharStream} to set the input
* position to the <em>end</em> of the current token. This behavior provides
* for efficient DFA representation of lexer actions which appear at the end
* of a lexer rule, even when the lexer rule matches a variable number of
* characters.
* <p/>
* Prior to traversing a match transition in the ATN, the current offset
* characters.</p>
*
* <p>Prior to traversing a match transition in the ATN, the current offset
* from the token start index is assigned to all position-dependent lexer
* actions which have not already been assigned a fixed offset. By storing
* the offsets relative to the token start index, the DFA representation of
* lexer actions which appear in the middle of tokens remains efficient due
* to sharing among tokens of the same length, regardless of their absolute
* position in the input stream.
* <p/>
* If the current executor already has offsets assigned to all
* position-dependent lexer actions, the method returns {@code this}.
* position in the input stream.</p>
*
* <p>If the current executor already has offsets assigned to all
* position-dependent lexer actions, the method returns {@code this}.</p>
*
* @param offset The current offset to assign to all position-dependent
* lexer actions which do not already have offsets assigned.
@ -161,12 +161,12 @@ public class LexerActionExecutor {
/**
* Execute the actions encapsulated by this executor within the context of a
* particular {@link Lexer}.
* <p/>
* This method calls {@link IntStream#seek} to set the position of the
*
* <p>This method calls {@link IntStream#seek} to set the position of the
* {@code input} {@link CharStream} prior to calling
* {@link LexerAction#execute} on a position-dependent action. Before the
* method returns, the input position will be restored to the same position
* it was in when the method was invoked.
* it was in when the method was invoked.</p>
*
* @param lexer The lexer instance.
* @param input The input stream which is the source for the current token.

View File

@ -82,9 +82,9 @@ public final class LexerChannelAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#setChannel} with the
* value provided by {@link #getChannel}.
*
* <p>This action is implemented by calling {@link Lexer#setChannel} with the
* value provided by {@link #getChannel}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -41,10 +41,10 @@ import org.antlr.v4.runtime.misc.NotNull;
* rule and action indexes assigned to the custom action. The implementation of
* a custom action is added to the generated code for the lexer in an override
* of {@link Recognizer#action} when the grammar is compiled.
* <p/>
* This class may represent embedded actions created with the <code>{...}</code>
*
* <p>This class may represent embedded actions created with the <code>{...}</code>
* syntax in ANTLR 4, as well as actions created for lexer commands where the
* command argument could not be evaluated when the grammar was compiled.
* command argument could not be evaluated when the grammar was compiled.</p>
*
* @author Sam Harwell
* @since 4.2
@ -99,10 +99,10 @@ public final class LexerCustomAction implements LexerAction {
* Gets whether the lexer action is position-dependent. Position-dependent
* actions may have different semantics depending on the {@link CharStream}
* index at the time the action is executed.
* <p/>
* Custom actions are position-dependent since they may represent a
*
* <p>Custom actions are position-dependent since they may represent a
* user-defined embedded action which makes calls to methods like
* {@link Lexer#getText}.
* {@link Lexer#getText}.</p>
*
* @return This method returns {@code true}.
*/
@ -113,9 +113,9 @@ public final class LexerCustomAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* Custom actions are implemented by calling {@link Lexer#action} with the
* appropriate rule and action indexes.
*
* <p>Custom actions are implemented by calling {@link Lexer#action} with the
* appropriate rule and action indexes.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -38,12 +38,12 @@ import org.antlr.v4.runtime.misc.NotNull;
/**
* This implementation of {@link LexerAction} is used for tracking input offsets
* for position-dependent actions within a {@link LexerActionExecutor}.
* <p/>
* This action is not serialized as part of the ATN, and is only required for
*
* <p>This action is not serialized as part of the ATN, and is only required for
* position-dependent lexer actions which appear at a location other than the
* end of a rule. For more information about DFA optimizations employed for
* lexer actions, see {@link LexerActionExecutor#append} and
* {@link LexerActionExecutor#fixOffsetBeforeMatch}.
* {@link LexerActionExecutor#fixOffsetBeforeMatch}.</p>
*
* @author Sam Harwell
* @since 4.2
@ -55,9 +55,9 @@ public final class LexerIndexedCustomAction implements LexerAction {
/**
* Constructs a new indexed custom action by associating a character offset
* with a {@link LexerAction}.
* <p/>
* Note: This class is only required for lexer actions for which
* {@link LexerAction#isPositionDependent} returns {@code true}.
*
* <p>Note: This class is only required for lexer actions for which
* {@link LexerAction#isPositionDependent} returns {@code true}.</p>
*
* @param offset The offset into the input {@link CharStream}, relative to
* the token start index, at which the specified lexer action should be
@ -114,9 +114,9 @@ public final class LexerIndexedCustomAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This method calls {@link #execute} on the result of {@link #getAction}
* using the provided {@code lexer}.
*
* <p>This method calls {@link #execute} on the result of {@link #getAction}
* using the provided {@code lexer}.</p>
*/
@Override
public void execute(Lexer lexer) {

View File

@ -81,9 +81,9 @@ public final class LexerModeAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#mode} with the
* value provided by {@link #getMode}.
*
* <p>This action is implemented by calling {@link Lexer#mode} with the
* value provided by {@link #getMode}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -36,9 +36,9 @@ import org.antlr.v4.runtime.misc.NotNull;
/**
* Implements the {@code more} lexer action by calling {@link Lexer#more}.
* <p/>
* The {@code more} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.
*
* <p>The {@code more} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
*
* @author Sam Harwell
* @since 4.2
@ -75,8 +75,8 @@ public final class LexerMoreAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#more}.
*
* <p>This action is implemented by calling {@link Lexer#more}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -36,9 +36,9 @@ import org.antlr.v4.runtime.misc.NotNull;
/**
* Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}.
* <p/>
* The {@code popMode} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.
*
* <p>The {@code popMode} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
*
* @author Sam Harwell
* @since 4.2
@ -75,8 +75,8 @@ public final class LexerPopModeAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#popMode}.
*
* <p>This action is implemented by calling {@link Lexer#popMode}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -81,9 +81,9 @@ public final class LexerPushModeAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#pushMode} with the
* value provided by {@link #getMode}.
*
* <p>This action is implemented by calling {@link Lexer#pushMode} with the
* value provided by {@link #getMode}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -36,9 +36,9 @@ import org.antlr.v4.runtime.misc.NotNull;
/**
* Implements the {@code skip} lexer action by calling {@link Lexer#skip}.
* <p/>
* The {@code skip} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.
*
* <p>The {@code skip} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
*
* @author Sam Harwell
* @since 4.2
@ -75,8 +75,8 @@ public final class LexerSkipAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#skip}.
*
* <p>This action is implemented by calling {@link Lexer#skip}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -80,9 +80,9 @@ public class LexerTypeAction implements LexerAction {
/**
* {@inheritDoc}
* <p/>
* This action is implemented by calling {@link Lexer#setType} with the
* value provided by {@link #getType}.
*
* <p>This action is implemented by calling {@link Lexer#setType} with the
* value provided by {@link #getType}.</p>
*/
@Override
public void execute(@NotNull Lexer lexer) {

View File

@ -228,7 +228,7 @@ import java.util.Set;
must yield a set of ambiguous alternatives that is no larger
than the SLL set. If the LL set is a singleton, then the grammar
is LL but not SLL. If the LL set is the same size as the SLL
set, the decision is SLL. If the LL set has size > 1, then that
set, the decision is SLL. If the LL set has size &gt; 1, then that
decision is truly ambiguous on the current input. If the LL set
is smaller, then the SLL conflict resolution might choose an
alternative that the full LL would rule out as a possibility
@ -271,8 +271,8 @@ public class ParserATNSimulator extends ATNSimulator {
* Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
* isn't synchronized but we're ok since two threads shouldn't reuse same
* parser/atnsim object because it can only handle one input at a time.
* This maps graphs a and b to merged result c. (a,b)->c. We can avoid
* the merge if we ever see a and b again. Note that (b,a)->c should
* This maps graphs a and b to merged result c. (a,b)&rarr;c. We can avoid
* the merge if we ever see a and b again. Note that (b,a)&rarr;c should
* also be examined during cache lookup.
*/
protected DoubleKeyMap<PredictionContext,PredictionContext,PredictionContext> mergeCache;
@ -885,11 +885,11 @@ public class ParserATNSimulator extends ATNSimulator {
* {@code configs} which are in a {@link RuleStopState}. If all
* configurations in {@code configs} are already in a rule stop state, this
* method simply returns {@code configs}.
* <p/>
* When {@code lookToEndOfRule} is true, this method uses
*
* <p>When {@code lookToEndOfRule} is true, this method uses
* {@link ATN#nextTokens} for each configuration in {@code configs} which is
* not already in a rule stop state to see if a rule stop state is reachable
* from the configuration via epsilon-only transitions.
* from the configuration via epsilon-only transitions.</p>
*
* @param configs the configuration set to update
* @param lookToEndOfRule when true, this method checks for rule stop states
@ -1451,7 +1451,7 @@ public class ParserATNSimulator extends ATNSimulator {
we don't consider any conflicts that include alternative 2. So, we
ignore the conflict between alts 1 and 2. We ignore a set of
conflicting alts when there is an intersection with an alternative
associated with a single alt state in the state->config-list map.
associated with a single alt state in the state&rarr;config-list map.
It's also the case that we might have two conflicting configurations but
also a 3rd nonconflicting configuration for a different alternative:
@ -1558,10 +1558,10 @@ public class ParserATNSimulator extends ATNSimulator {
* DFA. If {@code from} is {@code null}, or if {@code t} is outside the
* range of edges that can be represented in the DFA tables, this method
* returns without adding the edge to the DFA.
* <p/>
* If {@code to} is {@code null}, this method returns {@code null}.
*
* <p>If {@code to} is {@code null}, this method returns {@code null}.
* Otherwise, this method returns the {@link DFAState} returned by calling
* {@link #addDFAState} for the {@code to} state.
* {@link #addDFAState} for the {@code to} state.</p>
*
* @param dfa The DFA
* @param from The source state for the edge
@ -1610,9 +1610,9 @@ public class ParserATNSimulator extends ATNSimulator {
* the actual instance stored in the DFA. If a state equivalent to {@code D}
* is already in the DFA, the existing state is returned. Otherwise this
* method returns {@code D} after adding it to the DFA.
* <p/>
* If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and
* does not change the DFA.
*
* <p>If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and
* does not change the DFA.</p>
*
* @param dfa The dfa
* @param D The DFA state to add

View File

@ -73,11 +73,11 @@ public abstract class PredictionContext {
* private int referenceHashCode() {
* int hash = {@link MurmurHash#initialize}({@link #INITIAL_HASH});
*
* for (int i = 0; i < {@link #size()}; i++) {
* for (int i = 0; i &lt; {@link #size()}; i++) {
* hash = {@link MurmurHash#update}(hash, {@link #getParent}(i));
* }
*
* for (int i = 0; i < {@link #size()}; i++) {
* for (int i = 0; i &lt; {@link #size()}; i++) {
* hash = {@link MurmurHash#update}(hash, {@link #getReturnState}(i));
* }
*
@ -203,31 +203,23 @@ public abstract class PredictionContext {
/**
* Merge two {@link SingletonPredictionContext} instances.
*
* <p/>
* <p>Stack tops equal, parents merge is same; return left graph.<br>
* <embed src="images/SingletonMerge_SameRootSamePar.svg" type="image/svg+xml"/></p>
*
* Stack tops equal, parents merge is same; return left graph.<br/>
* <embed src="images/SingletonMerge_SameRootSamePar.svg" type="image/svg+xml"/>
*
* <p/>
*
* Same stack top, parents differ; merge parents giving array node, then
* <p>Same stack top, parents differ; merge parents giving array node, then
* remainders of those graphs. A new root node is created to point to the
* merged parents.<br/>
* <embed src="images/SingletonMerge_SameRootDiffPar.svg" type="image/svg+xml"/>
* merged parents.<br>
* <embed src="images/SingletonMerge_SameRootDiffPar.svg" type="image/svg+xml"/></p>
*
* <p/>
*
* Different stack tops pointing to same parent. Make array node for the
* <p>Different stack tops pointing to same parent. Make array node for the
* root where both element in the root point to the same (original)
* parent.<br/>
* <embed src="images/SingletonMerge_DiffRootSamePar.svg" type="image/svg+xml"/>
* parent.<br>
* <embed src="images/SingletonMerge_DiffRootSamePar.svg" type="image/svg+xml"/></p>
*
* <p/>
*
* Different stack tops pointing to different parents. Make array node for
* <p>Different stack tops pointing to different parents. Make array node for
* the root where each element points to the corresponding original
* parent.<br/>
* <embed src="images/SingletonMerge_DiffRootDiffPar.svg" type="image/svg+xml"/>
* parent.<br>
* <embed src="images/SingletonMerge_DiffRootDiffPar.svg" type="image/svg+xml"/></p>
*
* @param a the first {@link SingletonPredictionContext}
* @param b the second {@link SingletonPredictionContext}
@ -308,43 +300,31 @@ public abstract class PredictionContext {
*
* <h2>Local-Context Merges</h2>
*
* These local-context merge operations are used when {@code rootIsWildcard}
* is true.
* <p>These local-context merge operations are used when {@code rootIsWildcard}
* is true.</p>
*
* <p/>
* <p>{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br>
* <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
*
* {@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br/>
* <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/>
* <p>{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
* {@code #EMPTY}; return left graph.<br>
* <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
*
* <p/>
*
* {@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
* {@code #EMPTY}; return left graph.<br/>
* <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/>
*
* <p/>
*
* Special case of last merge if local context.<br/>
* <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/>
* <p>Special case of last merge if local context.<br>
* <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
*
* <h2>Full-Context Merges</h2>
*
* These full-context merge operations are used when {@code rootIsWildcard}
* is false.
* <p>These full-context merge operations are used when {@code rootIsWildcard}
* is false.</p>
*
* <p/>
* <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
*
* <embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/>
* <p>Must keep all contexts; {@link #EMPTY} in array is a special value (and
* null parent).<br>
* <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
*
* <p/>
*
* Must keep all contexts; {@link #EMPTY} in array is a special value (and
* null parent).<br/>
* <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/>
*
* <p/>
*
* <embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/>
* <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
*
* @param a the first {@link SingletonPredictionContext}
* @param b the second {@link SingletonPredictionContext}
@ -382,31 +362,21 @@ public abstract class PredictionContext {
/**
* Merge two {@link ArrayPredictionContext} instances.
*
* <p/>
* <p>Different tops, different parents.<br>
* <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
*
* Different tops, different parents.<br/>
* <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/>
* <p>Shared top, same parents.<br>
* <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
*
* <p/>
* <p>Shared top, different parents.<br>
* <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
*
* Shared top, same parents.<br/>
* <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/>
* <p>Shared top, all shared parents.<br>
* <embed src="images/ArrayMerge_ShareTopSharePar.svg" type="image/svg+xml"/></p>
*
* <p/>
*
* Shared top, different parents.<br/>
* <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/>
*
* <p/>
*
* Shared top, all shared parents.<br/>
* <embed src="images/ArrayMerge_ShareTopSharePar.svg" type="image/svg+xml"/>
*
* <p/>
*
* Equal tops, merge parents and reduce top to
* {@link SingletonPredictionContext}.<br/>
* <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/>
* <p>Equal tops, merge parents and reduce top to
* {@link SingletonPredictionContext}.<br>
* <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
*/
public static PredictionContext mergeArrays(
ArrayPredictionContext a,

View File

@ -99,135 +99,93 @@ public enum PredictionMode {
/**
* Computes the SLL prediction termination condition.
*
* <p/>
*
* This method computes the SLL prediction termination condition for both of
* the following cases.
* <p>This method computes the SLL prediction termination condition for both of
* the following cases.</p>
*
* <ul>
* <li>The usual SLL+LL fallback upon SLL conflict</li>
* <li>Pure SLL without LL fallback</li>
* </ul>
*
* <p/>
* <p><strong>COMBINED SLL+LL PARSING</strong></p>
*
* <strong>COMBINED SLL+LL PARSING</strong>
*
* <p/>
*
* When LL-fallback is enabled upon SLL conflict, correct predictions are
* <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
* ensured regardless of how the termination condition is computed by this
* method. Due to the substantially higher cost of LL prediction, the
* prediction should only fall back to LL when the additional lookahead
* cannot lead to a unique SLL prediction.
* cannot lead to a unique SLL prediction.</p>
*
* <p/>
*
* Assuming combined SLL+LL parsing, an SLL configuration set with only
* <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
* conflicting subsets should fall back to full LL, even if the
* configuration sets don't resolve to the same alternative (e.g.
* {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
* configuration, SLL could continue with the hopes that more lookahead will
* resolve via one of those non-conflicting configurations.
* resolve via one of those non-conflicting configurations.</p>
*
* <p/>
*
* Here's the prediction termination rule them: SLL (for SLL+LL parsing)
 * <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
* stops when it sees only conflicting configuration subsets. In contrast,
* full LL keeps going when there is uncertainty.
* full LL keeps going when there is uncertainty.</p>
*
* <p/>
* <p><strong>HEURISTIC</strong></p>
*
* <strong>HEURISTIC</strong>
*
* <p/>
*
* As a heuristic, we stop prediction when we see any conflicting subset
* <p>As a heuristic, we stop prediction when we see any conflicting subset
* unless we see a state that only has one alternative associated with it.
* The single-alt-state thing lets prediction continue upon rules like
* (otherwise, it would admit defeat too soon):
* (otherwise, it would admit defeat too soon):</p>
*
* <p/>
* <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}</p>
*
* {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}
*
* <p/>
*
* When the ATN simulation reaches the state before {@code ';'}, it has a
* <p>When the ATN simulation reaches the state before {@code ';'}, it has a
* DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
* {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
 * processing this node because alternative two has another way to continue,
* via {@code [6|2|[]]}.
* via {@code [6|2|[]]}.</p>
*
* <p/>
 * <p>It also lets us continue for this rule:</p>
*
* It also let's us continue for this rule:
* <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}</p>
*
* <p/>
*
* {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}
*
* <p/>
*
* After matching input A, we reach the stop state for rule A, state 1.
* <p>After matching input A, we reach the stop state for rule A, state 1.
* State 8 is the state right before B. Clearly alternatives 1 and 2
* conflict and no amount of further lookahead will separate the two.
* However, alternative 3 will be able to continue and so we do not stop
* working on this state. In the previous example, we're concerned with
* states associated with the conflicting alternatives. Here alt 3 is not
* associated with the conflicting configs, but since we can continue
* looking for input reasonably, don't declare the state done.
* looking for input reasonably, don't declare the state done.</p>
*
* <p/>
* <p><strong>PURE SLL PARSING</strong></p>
*
* <strong>PURE SLL PARSING</strong>
*
* <p/>
*
* To handle pure SLL parsing, all we have to do is make sure that we
* <p>To handle pure SLL parsing, all we have to do is make sure that we
* combine stack contexts for configurations that differ only by semantic
* predicate. From there, we can do the usual SLL termination heuristic.
* predicate. From there, we can do the usual SLL termination heuristic.</p>
*
* <p/>
* <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
*
* <strong>PREDICATES IN SLL+LL PARSING</strong>
*
* <p/>
*
* SLL decisions don't evaluate predicates until after they reach DFA stop
* <p>SLL decisions don't evaluate predicates until after they reach DFA stop
* states because they need to create the DFA cache that works in all
* semantic situations. In contrast, full LL evaluates predicates collected
* during start state computation so it can ignore predicates thereafter.
* This means that SLL termination detection can totally ignore semantic
* predicates.
* predicates.</p>
*
* <p/>
*
* Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
* <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
* semantic predicate contexts so we might see two configurations like the
* following.
* following.</p>
*
* <p/>
* <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
*
* {@code (s, 1, x, {}), (s, 1, x', {p})}
*
* <p/>
*
* Before testing these configurations against others, we have to merge
* <p>Before testing these configurations against others, we have to merge
* {@code x} and {@code x'} (without modifying the existing configurations).
* For example, we test {@code (x+x')==x''} when looking for conflicts in
* the following configurations.
* the following configurations.</p>
*
* <p/>
* <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
*
* {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
*
* <p/>
*
* If the configuration set has predicates (as indicated by
* <p>If the configuration set has predicates (as indicated by
* {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of
* the configurations to strip out all of the predicates so that a standard
* {@link ATNConfigSet} will merge everything ignoring predicates.
* {@link ATNConfigSet} will merge everything ignoring predicates.</p>
*/
public static boolean hasSLLConflictTerminatingPrediction(PredictionMode mode, @NotNull ATNConfigSet configs) {
/* Configs in rule stop states indicate reaching the end of the decision
@ -307,86 +265,62 @@ public enum PredictionMode {
/**
* Full LL prediction termination.
*
* <p/>
*
* Can we stop looking ahead during ATN simulation or is there some
* <p>Can we stop looking ahead during ATN simulation or is there some
* uncertainty as to which alternative we will ultimately pick, after
* consuming more input? Even if there are partial conflicts, we might know
* that everything is going to resolve to the same minimum alternative. That
* means we can stop since no more lookahead will change that fact. On the
* other hand, there might be multiple conflicts that resolve to different
* minimums. That means we need more look ahead to decide which of those
* alternatives we should predict.
* alternatives we should predict.</p>
*
* <p/>
*
* The basic idea is to split the set of configurations {@code C}, into
* <p>The basic idea is to split the set of configurations {@code C}, into
* conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
* non-conflicting configurations. Two configurations conflict if they have
* identical {@link ATNConfig#state} and {@link ATNConfig#context} values
* but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)}
* and {@code (s, j, ctx, _)} for {@code i!=j}.
* and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
*
* <p/>
* <p>Reduce these configuration subsets to the set of possible alternatives.
* You can compute the alternative subsets in one pass as follows:</p>
*
* Reduce these configuration subsets to the set of possible alternatives.
* You can compute the alternative subsets in one pass as follows:
* <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
* {@code C} holding {@code s} and {@code ctx} fixed.</p>
*
* <p/>
*
* {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
* {@code C} holding {@code s} and {@code ctx} fixed.
*
* <p/>
*
* Or in pseudo-code, for each configuration {@code c} in {@code C}:
* <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
*
* <pre>
* map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
* alt and not pred
* </pre>
*
* <p/>
* <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
*
* The values in {@code map} are the set of {@code A_s,ctx} sets.
* <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
* {@code s} and {@code ctx}.</p>
*
* <p/>
*
* If {@code |A_s,ctx|=1} then there is no conflict associated with
* {@code s} and {@code ctx}.
*
* <p/>
*
* Reduce the subsets to singletons by choosing a minimum of each subset. If
* <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
* the union of these alternative subsets is a singleton, then no amount of
* more lookahead will help us. We will always pick that alternative. If,
* however, there is more than one alternative, then we are uncertain which
* alternative to predict and must continue looking for resolution. We may
* or may not discover an ambiguity in the future, even if there are no
* conflicting subsets this round.
* conflicting subsets this round.</p>
*
* <p/>
*
* The biggest sin is to terminate early because it means we've made a
* <p>The biggest sin is to terminate early because it means we've made a
* decision but were uncertain as to the eventual outcome. We haven't used
* enough lookahead. On the other hand, announcing a conflict too late is no
* big deal; you will still have the conflict. It's just inefficient. It
* might even look until the end of file.
* might even look until the end of file.</p>
*
* <p/>
*
* No special consideration for semantic predicates is required because
* <p>No special consideration for semantic predicates is required because
* predicates are evaluated on-the-fly for full LL prediction, ensuring that
* no configuration contains a semantic context during the termination
* check.
* check.</p>
*
* <p/>
* <p><strong>CONFLICTING CONFIGS</strong></p>
*
* <strong>CONFLICTING CONFIGS</strong>
*
* <p/>
*
* Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
* <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
* when {@code i!=j} but {@code x=x'}. Because we merge all
* {@code (s, i, _)} configurations together, that means that there are at
* most {@code n} configurations associated with state {@code s} for
@ -399,38 +333,28 @@ public enum PredictionMode {
* others resolve to {@code min(i)} as well. However, if {@code x} is
* associated with {@code j>i} then at least one stack configuration for
* {@code j} is not in conflict with alternative {@code i}. The algorithm
* should keep going, looking for more lookahead due to the uncertainty.
* should keep going, looking for more lookahead due to the uncertainty.</p>
*
* <p/>
*
* For simplicity, I'm doing a equality check between {@code x} and
 * <p>For simplicity, I'm doing an equality check between {@code x} and
* {@code x'} that lets the algorithm continue to consume lookahead longer
* than necessary. The reason I like the equality is of course the
* simplicity but also because that is the test you need to detect the
* alternatives that are actually in conflict.
* alternatives that are actually in conflict.</p>
*
* <p/>
* <p><strong>CONTINUE/STOP RULE</strong></p>
*
* <strong>CONTINUE/STOP RULE</strong>
*
* <p/>
*
* Continue if union of resolved alternative sets from non-conflicting and
* <p>Continue if union of resolved alternative sets from non-conflicting and
* conflicting alternative subsets has more than one alternative. We are
* uncertain about which alternative to predict.
* uncertain about which alternative to predict.</p>
*
* <p/>
*
* The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
* <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
* alternatives are still in the running for the amount of input we've
 * consumed at this point. The conflicting sets let us strip away
* configurations that won't lead to more states because we resolve
* conflicts to the configuration with a minimum alternate for the
* conflicting set.
* conflicting set.</p>
*
* <p/>
*
* <strong>CASES</strong>
* <p><strong>CASES</strong></p>
*
* <ul>
*
@ -462,28 +386,22 @@ public enum PredictionMode {
*
* </ul>
*
* <strong>EXACT AMBIGUITY DETECTION</strong>
* <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
*
* <p/>
* <p>If all states report the same conflicting set of alternatives, then we
* know we have the exact ambiguity set.</p>
*
* If all states report the same conflicting set of alternatives, then we
* know we have the exact ambiguity set.
* <p><code>|A_<em>i</em>|&gt;1</code> and
* <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
*
* <p/>
*
* <code>|A_<em>i</em>|&gt;1</code> and
* <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.
*
* <p/>
*
* In other words, we continue examining lookahead until all {@code A_i}
* <p>In other words, we continue examining lookahead until all {@code A_i}
* have more than one alternative and all {@code A_i} are the same. If
* {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
* because the resolved set is {@code {1}}. To determine what the real
* ambiguity is, we have to know whether the ambiguity is between one and
* two or one and three so we keep going. We can only stop prediction when
* we need exact ambiguity detection when the sets look like
* {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
* {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
*/
public static int resolvesToJustOneViableAlt(@NotNull Collection<BitSet> altsets) {
return getSingleViableAlt(altsets);

View File

@ -48,9 +48,9 @@ import java.util.Set;
/** A tree structure used to record the semantic context in which
* an ATN configuration is valid. It's either a single predicate,
* a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
* <p/>
* I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
* {@link SemanticContext} within the scope of this outer class.
*
* <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
* {@link SemanticContext} within the scope of this outer class.</p>
*/
public abstract class SemanticContext {
public static final SemanticContext NONE = new Predicate();
@ -63,12 +63,12 @@ public abstract class SemanticContext {
* having to create proper rule-specific context during prediction (as
* opposed to the parser, which creates them naturally). In a practical
* sense, this avoids a cast exception from RuleContext to myruleContext.
* <p/>
* For context dependent predicates, we must pass in a local context so that
*
* <p>For context dependent predicates, we must pass in a local context so that
* references such as $arg evaluate properly as _localctx.arg. We only
* capture context dependent predicates in the context in which we begin
* prediction, so we passed in the outer context here in case of context
* dependent predicate evaluation.
* dependent predicate evaluation.</p>
*/
public abstract boolean eval(Recognizer<?,?> parser, RuleContext outerContext);

View File

@ -39,10 +39,10 @@ public final class StarLoopEntryState extends DecisionState {
/**
* Indicates whether this state can benefit from a precedence DFA during SLL
* decision making.
* <p/>
* This is a computed property that is calculated during ATN deserialization
*
* <p>This is a computed property that is calculated during ATN deserialization
* and stored for use in {@link ParserATNSimulator} and
* {@link ParserInterpreter}.
* {@link ParserInterpreter}.</p>
*
* @see DFA#isPrecedenceDfa()
*/

View File

@ -42,15 +42,15 @@ import java.util.Map;
/** An ATN transition between any two ATN states. Subclasses define
* atom, set, epsilon, action, predicate, rule transitions.
* <p/>
* This is a one way link. It emanates from a state (usually via a list of
* transitions) and has a target state.
* <p/>
* Since we never have to change the ATN transitions once we construct it,
*
* <p>This is a one way link. It emanates from a state (usually via a list of
* transitions) and has a target state.</p>
*
* <p>Since we never have to change the ATN transitions once we construct it,
* we can fix these transitions as specific classes. The DFA transitions
* on the other hand need to update the labels as it adds transitions to
* the states. We'll use the term Edge for the DFA to distinguish them from
* ATN transitions.
* ATN transitions.</p>
*/
public abstract class Transition {
// constants for serialization

View File

@ -62,7 +62,7 @@ public class DFA {
/**
* {@code true} if this DFA is for a precedence decision; otherwise,
* {@code false}. This is the backing field for {@link #isPrecedenceDfa},
* {@link #setPrecedenceDfa}, {@link #hasPrecedenceEdge}.
* {@link #setPrecedenceDfa}.
*/
private volatile boolean precedenceDfa;

View File

@ -52,7 +52,7 @@ import java.util.Set;
* input a1a2..an, the DFA is in a state that represents the
* subset T of the states of the ATN that are reachable from the
* ATN's start state along some path labeled a1a2..an."
* In conventional NFA->DFA conversion, therefore, the subset T
* In conventional NFA&rarr;DFA conversion, therefore, the subset T
* would be a bitset representing the set of states the
* ATN could be in. We need to track the alt predicted by each
* state as well, however. More importantly, we need to maintain
@ -60,14 +60,14 @@ import java.util.Set;
* jump from rule to rule, emulating rule invocations (method calls).
* I have to add a stack to simulate the proper lookahead sequences for
* the underlying LL grammar from which the ATN was derived.
* <p/>
* I use a set of ATNConfig objects not simple states. An ATNConfig
*
* <p>I use a set of ATNConfig objects not simple states. An ATNConfig
* is both a state (ala normal conversion) and a RuleContext describing
* the chain of rules (if any) followed to arrive at that state.
* <p/>
* A DFA state may have multiple references to a particular state,
* the chain of rules (if any) followed to arrive at that state.</p>
*
* <p>A DFA state may have multiple references to a particular state,
* but with different ATN contexts (with same or different alts)
* meaning that state was reached via a different set of rule invocations.
* meaning that state was reached via a different set of rule invocations.</p>
*/
public class DFAState {
public int stateNumber = -1;
@ -104,12 +104,12 @@ public class DFAState {
* {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
* on-the-fly. If this is not null, then {@link #prediction} is
* {@link ATN#INVALID_ALT_NUMBER}.
* <p/>
* We only use these for non-{@link #requiresFullContext} but conflicting states. That
*
* <p>We only use these for non-{@link #requiresFullContext} but conflicting states. That
* means we know from the context (it's $ or we don't dip into outer
* context) that it's an ambiguity not a conflict.
* <p/>
* This list is computed by {@link ParserATNSimulator#predicateDFAState}.
* context) that it's an ambiguity not a conflict.</p>
*
* <p>This list is computed by {@link ParserATNSimulator#predicateDFAState}.</p>
*/
@Nullable
public PredPrediction[] predicates;
@ -160,15 +160,15 @@ public class DFAState {
/**
* Two {@link DFAState} instances are equal if their ATN configuration sets
* are the same. This method is used to see if a state already exists.
* <p/>
* Because the number of alternatives and number of ATN configurations are
*
* <p>Because the number of alternatives and number of ATN configurations are
* finite, there is a finite number of DFA states that can be processed.
* This is necessary to show that the algorithm terminates.
* <p/>
* Cannot test the DFA state numbers here because in
* This is necessary to show that the algorithm terminates.</p>
*
* <p>Cannot test the DFA state numbers here because in
* {@link ParserATNSimulator#addDFAState} we need to know if any other state
* exists that has this exact set of ATN configurations. The
* {@link #stateNumber} is irrelevant.
* {@link #stateNumber} is irrelevant.</p>
*/
@Override
public boolean equals(Object o) {

View File

@ -234,10 +234,10 @@ public class IntegerList {
/**
* Returns the hash code value for this list.
* <p/>
* This implementation uses exactly the code that is used to define the
*
* <p>This implementation uses exactly the code that is used to define the
* list hash function in the documentation for the {@link List#hashCode}
* method.
* method.</p>
*
* @return the hash code value for this list
*/

View File

@ -65,7 +65,7 @@ public class Interval {
}
/** return number of elements between a and b inclusively. x..x is length 1.
* if b < a, then length is 0. 9..10 has length 2.
* if b &lt; a, then length is 0. 9..10 has length 2.
*/
public int length() {
if ( b<a ) return 0;

View File

@ -111,7 +111,7 @@ public class IntervalSet implements IntSet {
}
/** Add interval; i.e., add all integers from a to b to set.
* If b<a, do nothing.
* If b&lt;a, do nothing.
* Keep list in sorted order (by left range value).
* If overlap, combine ranges. For example,
* If this is {1..5, 10..20}, adding 6..7 yields
@ -246,7 +246,7 @@ public class IntervalSet implements IntSet {
return compl;
}
/** Compute this-other via this&~other.
/** Compute this-other via this&amp;~other.
* Return a new set containing all elements in this but not in other.
* other is assumed to be a subset of this;
* anything that is in other but not in this will be ignored.
@ -400,7 +400,7 @@ public class IntervalSet implements IntSet {
return last.b;
}
/** Return minimum element >= 0 */
/** Return minimum element &gt;= 0 */
public int getMinElement() {
if ( isNil() ) {
return Token.INVALID_TYPE;

View File

@ -40,9 +40,9 @@ public final class ObjectEqualityComparator extends AbstractEqualityComparator<O
/**
* {@inheritDoc}
* <p/>
* This implementation returns
* {@code obj.}{@link Object#hashCode hashCode()}.
*
* <p>This implementation returns
* {@code obj.}{@link Object#hashCode hashCode()}.</p>
*/
@Override
public int hashCode(Object obj) {
@ -55,12 +55,12 @@ public final class ObjectEqualityComparator extends AbstractEqualityComparator<O
/**
* {@inheritDoc}
* <p/>
* This implementation relies on object equality. If both objects are
*
* <p>This implementation relies on object equality. If both objects are
* {@code null}, this method returns {@code true}. Otherwise if only
* {@code a} is {@code null}, this method returns {@code false}. Otherwise,
* this method returns the result of
* {@code a.}{@link Object#equals equals}{@code (b)}.
* {@code a.}{@link Object#equals equals}{@code (b)}.</p>
*/
@Override
public boolean equals(Object a, Object b) {

View File

@ -136,8 +136,8 @@ public class Utils {
t.join();
}
/** Convert array of strings to string->index map. Useful for
* converting rulenames to name->ruleindex map.
/** Convert array of strings to string&rarr;index map. Useful for
* converting rulenames to name&rarr;ruleindex map.
*/
public static Map<String, Integer> toMap(String[] keys) {
Map<String, Integer> m = new HashMap<String, Integer>();

View File

@ -35,9 +35,9 @@ import org.antlr.v4.runtime.misc.NotNull;
public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T> {
/**
* {@inheritDoc}
* <p/>
* The default implementation calls {@link ParseTree#accept} on the
* specified tree.
*
* <p>The default implementation calls {@link ParseTree#accept} on the
* specified tree.</p>
*/
@Override
public T visit(@NotNull ParseTree tree) {
@ -46,18 +46,18 @@ public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T>
/**
* {@inheritDoc}
* <p/>
* The default implementation initializes the aggregate result to
*
* <p>The default implementation initializes the aggregate result to
* {@link #defaultResult defaultResult()}. Before visiting each child, it
* calls {@link #shouldVisitNextChild shouldVisitNextChild}; if the result
* is {@code false} no more children are visited and the current aggregate
* result is returned. After visiting a child, the aggregate result is
* updated by calling {@link #aggregateResult aggregateResult} with the
* previous aggregate result and the result of visiting the child.
* <p/>
* The default implementation is not safe for use in visitors that modify
* previous aggregate result and the result of visiting the child.</p>
*
* <p>The default implementation is not safe for use in visitors that modify
* the tree structure. Visitors that modify the tree should override this
* method to behave properly in respect to the specific algorithm in use.
* method to behave properly in respect to the specific algorithm in use.</p>
*/
@Override
public T visitChildren(@NotNull RuleNode node) {
@ -78,9 +78,9 @@ public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T>
/**
* {@inheritDoc}
* <p/>
* The default implementation returns the result of
* {@link #defaultResult defaultResult}.
*
* <p>The default implementation returns the result of
* {@link #defaultResult defaultResult}.</p>
*/
@Override
public T visitTerminal(@NotNull TerminalNode node) {
@ -89,9 +89,9 @@ public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T>
/**
* {@inheritDoc}
* <p/>
* The default implementation returns the result of
* {@link #defaultResult defaultResult}.
*
* <p>The default implementation returns the result of
* {@link #defaultResult defaultResult}.</p>
*/
@Override
public T visitErrorNode(@NotNull ErrorNode node) {
@ -104,8 +104,8 @@ public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T>
* {@link #visitTerminal visitTerminal}, {@link #visitErrorNode visitErrorNode}.
* The default implementation of {@link #visitChildren visitChildren}
* initializes its aggregate result to this value.
* <p/>
* The base implementation returns {@code null}.
*
* <p>The base implementation returns {@code null}.</p>
*
* @return The default value returned by visitor methods.
*/
@ -118,10 +118,10 @@ public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T>
* either all children are visited or {@link #shouldVisitNextChild} returns
* {@code false}, the aggregate value is returned as the result of
* {@link #visitChildren}.
* <p/>
* The default implementation returns {@code nextResult}, meaning
*
* <p>The default implementation returns {@code nextResult}, meaning
* {@link #visitChildren} will return the result of the last child visited
* (or return the initial value if the node has no children).
* (or return the initial value if the node has no children).</p>
*
* @param aggregate The previous aggregate value. In the default
* implementation, the aggregate value is initialized to
@ -143,13 +143,13 @@ public abstract class AbstractParseTreeVisitor<T> implements ParseTreeVisitor<T>
 * value (in the default implementation, the initial value is returned by a
 * call to {@link #defaultResult}). This method is not called after the last
 * child is visited.
* <p/>
* The default implementation always returns {@code true}, indicating that
*
* <p>The default implementation always returns {@code true}, indicating that
* {@code visitChildren} should only return after all children are visited.
* One reason to override this method is to provide a "short circuit"
* evaluation option for situations where the result of visiting a single
* child has the potential to determine the result of the visit operation as
* a whole.
* a whole.</p>
*
* @param node The {@link RuleNode} whose children are currently being
* visited.

View File

@ -38,8 +38,8 @@ import org.antlr.v4.runtime.Token;
* during a parse that makes the data structure look like a simple parse tree.
* This node represents both internal nodes, rule invocations,
* and leaf nodes, token matches.
* <p/>
* The payload is either a {@link Token} or a {@link RuleContext} object.
*
* <p>The payload is either a {@link Token} or a {@link RuleContext} object.</p>
*/
public interface ParseTree extends SyntaxTree {
// the following methods narrow the return type; they are not additional methods

View File

@ -44,8 +44,8 @@ public interface SyntaxTree extends Tree {
* {@link TokenStream} of the first and last token associated with this
* subtree. If this node is a leaf, then the interval represents a single
* token.
* <p/>
* If source interval is unknown, this returns {@link Interval#INVALID}.
*
* <p>If source interval is unknown, this returns {@link Interval#INVALID}.</p>
*/
@NotNull
Interval getSourceInterval();

View File

@ -39,7 +39,7 @@ package org.antlr.v4.runtime.tree.gui;
* for trees. Commands:
*
* <pre>
* $ ttf2tfm /Library/Fonts/Arial\ Black.ttf > metrics
* $ ttf2tfm /Library/Fonts/Arial\ Black.ttf &gt; metrics
* </pre>
*
* Then run metrics into python code after stripping header/footer:
@ -57,11 +57,11 @@ package org.antlr.v4.runtime.tree.gui;
* maxh = 0;
* for line in lines[4:]: # skip header 0..3
* all = line.split(' ')
* words = [x for x in all if len(x)>0]
* words = [x for x in all if len(x)&gt;0]
* ascii = int(words[1], 16)
* height = int(words[8])
* if height>maxh: maxh = height
* if ascii>=128: break
* if height&gt;maxh: maxh = height
* if ascii&gt;=128: break
* print " widths[%d] = %s; // %s" % (ascii, words[3], words[2])
*
* print " maxCharHeight = "+str(maxh)+";"

View File

@ -33,13 +33,13 @@ package org.antlr.v4.runtime.tree.pattern;
/**
* A chunk is either a token tag, a rule tag, or a span of literal text within a
* tree pattern.
* <p/>
* The method {@link ParseTreePatternMatcher#split(String)} returns a list of
*
* <p>The method {@link ParseTreePatternMatcher#split(String)} returns a list of
* chunks in preparation for creating a token stream by
* {@link ParseTreePatternMatcher#tokenize(String)}. From there, we get a parse
* tree from with {@link ParseTreePatternMatcher#compile(String, int)}. These
* chunks are converted to {@link RuleTagToken}, {@link TokenTagToken}, or the
* regular tokens of the text surrounding the tags.
* regular tokens of the text surrounding the tags.</p>
*/
abstract class Chunk {
}

View File

@ -98,14 +98,14 @@ public class ParseTreeMatch {
/**
* Get the last node associated with a specific {@code label}.
* <p/>
* For example, for pattern {@code <id:ID>}, {@code get("id")} returns the
*
* <p>For example, for pattern {@code <id:ID>}, {@code get("id")} returns the
* node matched for that {@code ID}. If more than one node
* matched the specified label, only the last is returned. If there is
* no node associated with the label, this returns {@code null}.
* <p/>
* Pattern tags like {@code <ID>} and {@code <expr>} without labels are
* considered to be labeled with {@code ID} and {@code expr}, respectively.
* no node associated with the label, this returns {@code null}.</p>
*
* <p>Pattern tags like {@code <ID>} and {@code <expr>} without labels are
* considered to be labeled with {@code ID} and {@code expr}, respectively.</p>
*
* @param label The label to check.
*
@ -124,13 +124,13 @@ public class ParseTreeMatch {
/**
* Return all nodes matching a rule or token tag with the specified label.
* <p/>
* If the {@code label} is the name of a parser rule or token in the
*
* <p>If the {@code label} is the name of a parser rule or token in the
* grammar, the resulting list will contain both the parse trees matching
* rule or tags explicitly labeled with the label and the complete set of
* parse trees matching the labeled and unlabeled tags in the pattern for
* the parser rule or token. For example, if {@code label} is {@code "foo"},
* the result will contain <em>all</em> of the following.
* the result will contain <em>all</em> of the following.</p>
*
* <ul>
* <li>Parse tree nodes matching tags of the form {@code <foo:anyRuleName>} and
@ -157,10 +157,10 @@ public class ParseTreeMatch {
/**
* Return a mapping from label &rarr; [list of nodes].
* <p/>
* The map includes special entries corresponding to the names of rules and
*
* <p>The map includes special entries corresponding to the names of rules and
* tokens referenced in tags in the original pattern. For additional
* information, see the description of {@link #getAll(String)}.
* information, see the description of {@link #getAll(String)}.</p>
*
* @return A mapping from labels to parse tree nodes. If the parse tree
* pattern did not contain any rule or token tags, this map will be empty.

View File

@ -53,60 +53,60 @@ import java.util.List;
/**
* A tree pattern matching mechanism for ANTLR {@link ParseTree}s.
* <p/>
* Patterns are strings of source input text with special tags representing
* token or rule references such as:
* <p/>
* {@code <ID> = <expr>;}
* <p/>
* Given a pattern start rule such as {@code statement}, this object constructs
*
* <p>Patterns are strings of source input text with special tags representing
* token or rule references such as:</p>
*
* <p>{@code <ID> = <expr>;}</p>
*
* <p>Given a pattern start rule such as {@code statement}, this object constructs
* a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
* subtree. Then the {@link #match} routines can compare an actual
* {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches
* any {@code ID} token and tag {@code <expr>} references the result of the
* {@code expr} rule (generally an instance of {@code ExprContext}.
* <p/>
* Pattern {@code x = 0;} is a similar pattern that matches the same pattern
 * {@code expr} rule (generally an instance of {@code ExprContext}).</p>
*
* <p>Pattern {@code x = 0;} is a similar pattern that matches the same pattern
* except that it requires the identifier to be {@code x} and the expression to
* be {@code 0}.
* <p/>
* The {@link #matches} routines return {@code true} or {@code false} based
* be {@code 0}.</p>
*
* <p>The {@link #matches} routines return {@code true} or {@code false} based
* upon a match for the tree rooted at the parameter sent in. The
* {@link #match} routines return a {@link ParseTreeMatch} object that
* contains the parse tree, the parse tree pattern, and a map from tag name to
* matched nodes (more below). A subtree that fails to match, returns with
* {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not
* match.
* <p/>
* For efficiency, you can compile a tree pattern in string form to a
* {@link ParseTreePattern} object.
* <p/>
* See {@code TestParseTreeMatcher} for lots of examples.
* match.</p>
*
* <p>For efficiency, you can compile a tree pattern in string form to a
* {@link ParseTreePattern} object.</p>
*
* <p>See {@code TestParseTreeMatcher} for lots of examples.
* {@link ParseTreePattern} has two static helper methods:
* {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that
* are easy to use but not super efficient because they create new
* {@link ParseTreePatternMatcher} objects each time and have to compile the
* pattern in string form before using it.
* <p/>
* The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
* pattern in string form before using it.</p>
*
* <p>The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
* constructor are used to parse the pattern in string form. The lexer converts
* the {@code <ID> = <expr>;} into a sequence of four tokens (assuming lexer
* throws out whitespace or puts it on a hidden channel). Be aware that the
* input stream is reset for the lexer (but not the parser; a
 * {@link ParserInterpreter} is created to parse the input). Any user-defined
* fields you have put into the lexer might get changed when this mechanism asks
* it to scan the pattern string.
* <p/>
* Normally a parser does not accept token {@code <expr>} as a valid
* it to scan the pattern string.</p>
*
* <p>Normally a parser does not accept token {@code <expr>} as a valid
* {@code expr} but, from the parser passed in, we create a special version of
* the underlying grammar representation (an {@link ATN}) that allows imaginary
* tokens representing rules ({@code <expr>}) to match entire rules. We call
* these <em>bypass alternatives</em>.
* <p/>
* Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
* these <em>bypass alternatives</em>.</p>
*
* <p>Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
* by default, but you can set them to whatever you want using
* {@link #setDelimiters}. You must escape both start and stop strings
* {@code \<} and {@code \>}.
* {@code \<} and {@code \>}.</p>
*/
public class ParseTreePatternMatcher {
public static class CannotInvokeStartRule extends RuntimeException {

View File

@ -115,8 +115,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.
*
* <p>Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.</p>
*/
@Override
public int getChannel() {
@ -125,9 +125,9 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* This method returns the rule tag formatted with {@code <} and {@code >}
* delimiters.
*
* <p>This method returns the rule tag formatted with {@code <} and {@code >}
* delimiters.</p>
*/
@Override
public String getText() {
@ -140,9 +140,9 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* Rule tag tokens have types assigned according to the rule bypass
* transitions created during ATN deserialization.
*
* <p>Rule tag tokens have types assigned according to the rule bypass
* transitions created during ATN deserialization.</p>
*/
@Override
public int getType() {
@ -151,8 +151,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns 0.
*
* <p>The implementation for {@link RuleTagToken} always returns 0.</p>
*/
@Override
public int getLine() {
@ -161,8 +161,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns -1.
*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
*/
@Override
public int getCharPositionInLine() {
@ -171,8 +171,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns -1.
*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
*/
@Override
public int getTokenIndex() {
@ -181,8 +181,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns -1.
*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
*/
@Override
public int getStartIndex() {
@ -191,8 +191,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns -1.
*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
*/
@Override
public int getStopIndex() {
@ -201,8 +201,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns {@code null}.
*
* <p>The implementation for {@link RuleTagToken} always returns {@code null}.</p>
*/
@Override
public TokenSource getTokenSource() {
@ -211,8 +211,8 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} always returns {@code null}.
*
* <p>The implementation for {@link RuleTagToken} always returns {@code null}.</p>
*/
@Override
public CharStream getInputStream() {
@ -221,9 +221,9 @@ public class RuleTagToken implements Token {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link RuleTagToken} returns a string of the form
* {@code ruleName:bypassTokenType}.
*
* <p>The implementation for {@link RuleTagToken} returns a string of the form
* {@code ruleName:bypassTokenType}.</p>
*/
@Override
public String toString() {

View File

@ -69,9 +69,9 @@ class TextChunk extends Chunk {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link TextChunk} returns the result of
* {@link #getText()} in single quotes.
*
* <p>The implementation for {@link TextChunk} returns the result of
* {@link #getText()} in single quotes.</p>
*/
@Override
public String toString() {

View File

@ -100,9 +100,9 @@ public class TokenTagToken extends CommonToken {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link TokenTagToken} returns the token tag
* formatted with {@code <} and {@code >} delimiters.
*
* <p>The implementation for {@link TokenTagToken} returns the token tag
* formatted with {@code <} and {@code >} delimiters.</p>
*/
@Override
public String getText() {
@ -115,9 +115,9 @@ public class TokenTagToken extends CommonToken {
/**
* {@inheritDoc}
* <p/>
* The implementation for {@link TokenTagToken} returns a string of the form
* {@code tokenName:type}.
*
* <p>The implementation for {@link TokenTagToken} returns a string of the form
* {@code tokenName:type}.</p>
*/
@Override
public String toString() {