refactored CommonTokenStream.js to use ES6 classes

fix: don't wrap class in an object for export
fix: annotate adjustSeekIndex with Number type to avoid warning
use const for better scoping
use JSDoc
Camilo Roca 2020-03-15 16:10:50 +01:00
parent 5f6b7de72d
commit 181c44fb11
3 changed files with 87 additions and 91 deletions
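
In outline, the change replaces the ES5 constructor-function/prototype pattern with an ES6 class and exports the class directly instead of wrapping it in an object. A condensed before/after sketch of the pattern, abridged from the diff below:

// Before: constructor function, manual prototype chain, wrapped export
function CommonTokenStream(lexer, channel) {
    BufferedTokenStream.call(this, lexer);
    this.channel = channel === undefined ? Token.DEFAULT_CHANNEL : channel;
}
CommonTokenStream.prototype = Object.create(BufferedTokenStream.prototype);
CommonTokenStream.prototype.constructor = CommonTokenStream;
exports.CommonTokenStream = CommonTokenStream;

// After: ES6 class, methods in the class body, direct export
class CommonTokenStream extends BufferedTokenStream {
    constructor(lexer, channel) {
        super(lexer);
        this.channel = channel === undefined ? Token.DEFAULT_CHANNEL : channel;
    }
}
module.exports = CommonTokenStream;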

@@ -214,8 +214,8 @@ class BufferedTokenStream extends TokenStream {
* that
* the seek target is always an on-channel token.</p>
*
* @param i The target token index.
* @return The adjusted target token index.
* @param {Number} i The target token index.
* @return {Number} The adjusted target token index.
*/
adjustSeekIndex(i) {
return i;

@@ -1,57 +1,52 @@
//
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
///
//
// This class extends {@link BufferedTokenStream} with functionality to filter
// token streams to tokens on a particular channel (tokens where
// {@link Token//getChannel} returns a particular value).
//
// <p>
// This token stream provides access to all tokens by index or when calling
// methods like {@link //getText}. The channel filtering is only used for code
// accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and
// {@link //LB}.</p>
//
// <p>
// By default, tokens are placed on the default channel
// ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the
// {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
// call {@link Lexer//setChannel}.
// </p>
//
// <p>
// Note: lexer rules which use the {@code ->skip} lexer command or call
// {@link Lexer//skip} do not produce tokens at all, so input text matched by
// such a rule will not be available as part of the token stream, regardless of
// channel.</p>
///
var Token = require('./Token').Token;
var BufferedTokenStream = require('./BufferedTokenStream');
const Token = require('./Token').Token;
const BufferedTokenStream = require('./BufferedTokenStream');
function CommonTokenStream(lexer, channel) {
BufferedTokenStream.call(this, lexer);
/**
* This class extends {@link BufferedTokenStream} with functionality to filter
* token streams to tokens on a particular channel (tokens where
* {@link Token//getChannel} returns a particular value).
*
* <p>
* This token stream provides access to all tokens by index or when calling
* methods like {@link //getText}. The channel filtering is only used for code
* accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and
* {@link //LB}.</p>
*
* <p>
* By default, tokens are placed on the default channel
* ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the
* {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
* call {@link Lexer//setChannel}.
* </p>
*
* <p>
* Note: lexer rules which use the {@code ->skip} lexer command or call
* {@link Lexer//skip} do not produce tokens at all, so input text matched by
* such a rule will not be available as part of the token stream, regardless of
* channel.</p>
*/
class CommonTokenStream extends BufferedTokenStream {
constructor(lexer, channel) {
super(lexer);
this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel;
return this;
}
CommonTokenStream.prototype = Object.create(BufferedTokenStream.prototype);
CommonTokenStream.prototype.constructor = CommonTokenStream;
CommonTokenStream.prototype.adjustSeekIndex = function(i) {
adjustSeekIndex(i) {
return this.nextTokenOnChannel(i, this.channel);
};
}
CommonTokenStream.prototype.LB = function(k) {
LB(k) {
if (k===0 || this.index-k<0) {
return null;
}
var i = this.index;
var n = 1;
let i = this.index;
let n = 1;
// find k good tokens looking backwards
while (n <= k) {
// skip off-channel tokens
@@ -62,9 +57,9 @@ CommonTokenStream.prototype.LB = function(k) {
return null;
}
return this.tokens[i];
};
}
CommonTokenStream.prototype.LT = function(k) {
LT(k) {
this.lazyInit();
if (k === 0) {
return null;
@@ -72,8 +67,8 @@ CommonTokenStream.prototype.LT = function(k) {
if (k < 0) {
return this.LB(-k);
}
var i = this.index;
var n = 1; // we know tokens[pos] is a good one
let i = this.index;
let n = 1; // we know tokens[pos] is a good one
// find k good tokens
while (n < k) {
// skip off-channel tokens, but make sure to not look past EOF
@@ -83,14 +78,14 @@ CommonTokenStream.prototype.LT = function(k) {
n += 1;
}
return this.tokens[i];
};
}
// Count EOF just once.///
CommonTokenStream.prototype.getNumberOfOnChannelTokens = function() {
var n = 0;
// Count EOF just once.
getNumberOfOnChannelTokens() {
let n = 0;
this.fill();
for (var i =0; i< this.tokens.length;i++) {
var t = this.tokens[i];
for (let i =0; i< this.tokens.length;i++) {
const t = this.tokens[i];
if( t.channel===this.channel) {
n += 1;
}
@@ -99,6 +94,7 @@ CommonTokenStream.prototype.getNumberOfOnChannelTokens = function() {
}
}
return n;
};
}
}
exports.CommonTokenStream = CommonTokenStream;
module.exports = CommonTokenStream;
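
As the class comment above describes, index-based access (this.tokens[i]) sees every token, while the lookahead methods LA/LT/LB only see tokens on the stream's channel. A minimal usage sketch, assuming a generated lexer MyLexer whose grammar routes whitespace to the hidden channel (the lexer and grammar are placeholders, not part of this commit):

const antlr4 = require('antlr4');
const MyLexer = require('./MyLexer');                 // hypothetical generated lexer

const chars = new antlr4.InputStream('a b');          // assume WS -> channel(HIDDEN) in the grammar
const lexer = new MyLexer(chars);
const tokens = new antlr4.CommonTokenStream(lexer);   // channel defaults to Token.DEFAULT_CHANNEL

tokens.fill();
console.log(tokens.tokens.length);                    // counts the hidden whitespace token too
console.log(tokens.LT(1).text, tokens.LT(2).text);    // 'a' 'b': lookahead skips hidden tokens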

@@ -13,7 +13,7 @@ exports.CharStreams = require('./CharStreams');
exports.CommonToken = require('./Token').CommonToken;
exports.InputStream = require('./InputStream').InputStream;
exports.FileStream = require('./FileStream').FileStream;
exports.CommonTokenStream = require('./CommonTokenStream').CommonTokenStream;
exports.CommonTokenStream = require('./CommonTokenStream');
exports.Lexer = require('./Lexer').Lexer;
exports.Parser = require('./Parser').Parser;
var pc = require('./PredictionContext');
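
Because CommonTokenStream.js now assigns the class to module.exports, index.js can re-export the required module directly and consumers see the same shape as before. A quick illustrative check (not part of the diff):

const antlr4 = require('antlr4');
const { CommonTokenStream } = require('antlr4');              // both forms resolve to the class
console.log(antlr4.CommonTokenStream === CommonTokenStream);  // true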