refactored CommonTokenStream.js to use ES6 classes

fix: don't wrap class in an object for export
fix: annotate adjustSeekIndex with Number type to avoid warning
use const for better scoping
use JSDoc
Camilo Roca 2020-03-15 16:10:50 +01:00
parent 5f6b7de72d
commit 181c44fb11
3 changed files with 87 additions and 91 deletions
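
The export change ("don't wrap class in an object for export") shows up at the bottom of the CommonTokenStream.js diff and in index.js below. A minimal before/after sketch of what it means for code that requires the module directly:

// Before this commit the class was a property of the exported object:
// const CommonTokenStream = require('./CommonTokenStream').CommonTokenStream;

// After this commit the module exports the class itself:
const CommonTokenStream = require('./CommonTokenStream');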

BufferedTokenStream.js

@@ -214,8 +214,8 @@ class BufferedTokenStream extends TokenStream {
      * that
      * the seek target is always an on-channel token.</p>
      *
-     * @param i The target token index.
-     * @return The adjusted target token index.
+     * @param {Number} i The target token index.
+     * @return {Number} The adjusted target token index.
      */
     adjustSeekIndex(i) {
         return i;
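
The {Number} annotations are standard JSDoc type syntax, which editors that type-check JavaScript use to infer parameter and return types. A minimal sketch of the same pattern on a hypothetical helper (not part of this commit):

/**
 * Clamp a token index into a valid range.
 * @param {Number} i Candidate token index.
 * @param {Number} max Highest valid index.
 * @return {Number} The clamped index.
 */
function clampIndex(i, max) {
    return Math.min(Math.max(i, 0), max);
}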

CommonTokenStream.js

@@ -1,104 +1,100 @@
-//
 /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
  * Use of this file is governed by the BSD 3-clause license that
  * can be found in the LICENSE.txt file in the project root.
  */
-///
-//
-// This class extends {@link BufferedTokenStream} with functionality to filter
-// token streams to tokens on a particular channel (tokens where
-// {@link Token//getChannel} returns a particular value).
-//
-// <p>
-// This token stream provides access to all tokens by index or when calling
-// methods like {@link //getText}. The channel filtering is only used for code
-// accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and
-// {@link //LB}.</p>
-//
-// <p>
-// By default, tokens are placed on the default channel
-// ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the
-// {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
-// call {@link Lexer//setChannel}.
-// </p>
-//
-// <p>
-// Note: lexer rules which use the {@code ->skip} lexer command or call
-// {@link Lexer//skip} do not produce tokens at all, so input text matched by
-// such a rule will not be available as part of the token stream, regardless of
-// channel.</p>
-///
 
-var Token = require('./Token').Token;
-var BufferedTokenStream = require('./BufferedTokenStream');
+const Token = require('./Token').Token;
+const BufferedTokenStream = require('./BufferedTokenStream');
 
-function CommonTokenStream(lexer, channel) {
-    BufferedTokenStream.call(this, lexer);
-    this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel;
-    return this;
-}
-
-CommonTokenStream.prototype = Object.create(BufferedTokenStream.prototype);
-CommonTokenStream.prototype.constructor = CommonTokenStream;
-
-CommonTokenStream.prototype.adjustSeekIndex = function(i) {
-    return this.nextTokenOnChannel(i, this.channel);
-};
-
-CommonTokenStream.prototype.LB = function(k) {
-    if (k===0 || this.index-k<0) {
-        return null;
-    }
-    var i = this.index;
-    var n = 1;
-    // find k good tokens looking backwards
-    while (n <= k) {
-        // skip off-channel tokens
-        i = this.previousTokenOnChannel(i - 1, this.channel);
-        n += 1;
-    }
-    if (i < 0) {
-        return null;
-    }
-    return this.tokens[i];
-};
-
-CommonTokenStream.prototype.LT = function(k) {
-    this.lazyInit();
-    if (k === 0) {
-        return null;
-    }
-    if (k < 0) {
-        return this.LB(-k);
-    }
-    var i = this.index;
-    var n = 1; // we know tokens[pos] is a good one
-    // find k good tokens
-    while (n < k) {
-        // skip off-channel tokens, but make sure to not look past EOF
-        if (this.sync(i + 1)) {
-            i = this.nextTokenOnChannel(i + 1, this.channel);
-        }
-        n += 1;
-    }
-    return this.tokens[i];
-};
-
-// Count EOF just once.///
-CommonTokenStream.prototype.getNumberOfOnChannelTokens = function() {
-    var n = 0;
-    this.fill();
-    for (var i =0; i< this.tokens.length;i++) {
-        var t = this.tokens[i];
-        if( t.channel===this.channel) {
-            n += 1;
-        }
-        if( t.type===Token.EOF) {
-            break;
-        }
-    }
-    return n;
-};
-
-exports.CommonTokenStream = CommonTokenStream;
+/**
+ * This class extends {@link BufferedTokenStream} with functionality to filter
+ * token streams to tokens on a particular channel (tokens where
+ * {@link Token//getChannel} returns a particular value).
+ *
+ * <p>
+ * This token stream provides access to all tokens by index or when calling
+ * methods like {@link //getText}. The channel filtering is only used for code
+ * accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and
+ * {@link //LB}.</p>
+ *
+ * <p>
+ * By default, tokens are placed on the default channel
+ * ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the
+ * {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
+ * call {@link Lexer//setChannel}.
+ * </p>
+ *
+ * <p>
+ * Note: lexer rules which use the {@code ->skip} lexer command or call
+ * {@link Lexer//skip} do not produce tokens at all, so input text matched by
+ * such a rule will not be available as part of the token stream, regardless of
+ * channel.</p>
+ */
+class CommonTokenStream extends BufferedTokenStream {
+    constructor(lexer, channel) {
+        super(lexer);
+        this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel;
+    }
+
+    adjustSeekIndex(i) {
+        return this.nextTokenOnChannel(i, this.channel);
+    }
+
+    LB(k) {
+        if (k===0 || this.index-k<0) {
+            return null;
+        }
+        let i = this.index;
+        let n = 1;
+        // find k good tokens looking backwards
+        while (n <= k) {
+            // skip off-channel tokens
+            i = this.previousTokenOnChannel(i - 1, this.channel);
+            n += 1;
+        }
+        if (i < 0) {
+            return null;
+        }
+        return this.tokens[i];
+    }
+
+    LT(k) {
+        this.lazyInit();
+        if (k === 0) {
+            return null;
+        }
+        if (k < 0) {
+            return this.LB(-k);
+        }
+        let i = this.index;
+        let n = 1; // we know tokens[pos] is a good one
+        // find k good tokens
+        while (n < k) {
+            // skip off-channel tokens, but make sure to not look past EOF
+            if (this.sync(i + 1)) {
+                i = this.nextTokenOnChannel(i + 1, this.channel);
+            }
+            n += 1;
+        }
+        return this.tokens[i];
+    }
+
+    // Count EOF just once.
+    getNumberOfOnChannelTokens() {
+        let n = 0;
+        this.fill();
+        for (let i =0; i< this.tokens.length;i++) {
+            const t = this.tokens[i];
+            if( t.channel===this.channel) {
+                n += 1;
+            }
+            if( t.type===Token.EOF) {
+                break;
+            }
+        }
+        return n;
+    }
+}
+
+module.exports = CommonTokenStream;
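
As the JSDoc above describes, index-based access still sees every token, while the lookahead methods skip off-channel ones. A usage sketch, assuming the antlr4 package and a generated lexer (MyLexer is hypothetical) whose whitespace rule uses the -> channel(HIDDEN) command:

const antlr4 = require('antlr4');
const MyLexer = require('./MyLexer').MyLexer; // hypothetical generated lexer

const chars = new antlr4.InputStream('a b');
const tokens = new antlr4.CommonTokenStream(new MyLexer(chars));
tokens.fill();

// tokens.tokens still holds the hidden whitespace token by index,
// but the lookahead methods skip it:
console.log(tokens.LT(1).text); // 'a'
console.log(tokens.LT(2).text); // 'b'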

index.js

@@ -13,7 +13,7 @@ exports.CharStreams = require('./CharStreams');
 exports.CommonToken = require('./Token').CommonToken;
 exports.InputStream = require('./InputStream').InputStream;
 exports.FileStream = require('./FileStream').FileStream;
-exports.CommonTokenStream = require('./CommonTokenStream').CommonTokenStream;
+exports.CommonTokenStream = require('./CommonTokenStream');
 exports.Lexer = require('./Lexer').Lexer;
 exports.Parser = require('./Parser').Parser;
 var pc = require('./PredictionContext');
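
Because index.js drops the .CommonTokenStream property access in step with the new module shape, consumers going through the package entry point see no API change; for instance:

const antlr4 = require('antlr4');
// Still the class itself, before and after this commit:
console.log(typeof antlr4.CommonTokenStream); // 'function'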