diff --git a/contributors.txt b/contributors.txt
index f48052823..c54c0279e 100644
--- a/contributors.txt
+++ b/contributors.txt
@@ -210,4 +210,5 @@ YYYY/MM/DD, github id, Full name, email
 2018/11/14, nxtstep, Adriaan (Arjan) Duz, codewithadriaan[et]gmail[dot]com
 2018/11/15, amykyta3, Alex Mykyta, amykyta3@users.noreply.github.com
 2018/11/29, hannemann-tamas, Ralf Hannemann-Tamas, ralf.ht@gmail.com
-2018/12/20, WalterCouto, Walter Couto, WalterCouto@users.noreply.github.com
\ No newline at end of file
+2018/12/20, WalterCouto, Walter Couto, WalterCouto@users.noreply.github.com
+2018/12/23, youkaichao, Kaichao You, youkaichao@gmail.com
diff --git a/runtime/Python2/src/antlr4/TokenStreamRewriter.py b/runtime/Python2/src/antlr4/TokenStreamRewriter.py
index f35fad6c2..77edf6021 100644
--- a/runtime/Python2/src/antlr4/TokenStreamRewriter.py
+++ b/runtime/Python2/src/antlr4/TokenStreamRewriter.py
@@ -88,7 +88,8 @@ class TokenStreamRewriter(object):
     def delete(self, program_name, from_idx, to_idx):
         if isinstance(from_idx, Token):
             self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
-        self.replace(program_name, from_idx, to_idx, "")
+        else:
+            self.replace(program_name, from_idx, to_idx, "")
 
     def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
         return self.lastRewriteTokenIndexes.get(program_name, -1)
@@ -104,7 +105,7 @@ class TokenStreamRewriter(object):
 
     def getText(self, program_name, start, stop):
         """
-        :type interval: Interval.Interval
+        :type interval: IntervalSet.Interval
         :param program_name:
         :param interval:
         :return:
@@ -173,13 +174,13 @@ class TokenStreamRewriter(object):
             if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
                 continue
             prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
-            for i, prevIop in enumerate(prevInserts):
+            for prev_index, prevIop in enumerate(prevInserts):
                 if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
                     iop.text += prevIop.text
-                    rewrites[i] = None
+                    rewrites[prev_index] = None
                 elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
                     iop.text = prevIop.text + iop.text
-                    rewrites[i] = None
+                    rewrites[prev_index] = None
             # look for replaces where iop.index is in range; error
             prevReplaces = [op for op in rewrites[:i] if type(op) is TokenStreamRewriter.ReplaceOp]
             for rop in prevReplaces:
diff --git a/runtime/Python3/src/antlr4/TokenStreamRewriter.py b/runtime/Python3/src/antlr4/TokenStreamRewriter.py
index 347f4f44f..621545c53 100644
--- a/runtime/Python3/src/antlr4/TokenStreamRewriter.py
+++ b/runtime/Python3/src/antlr4/TokenStreamRewriter.py
@@ -41,7 +41,10 @@ class TokenStreamRewriter(object):
         self.insertAfter(token.tokenIndex, text, program_name)
 
     def insertAfter(self, index, text, program_name=DEFAULT_PROGRAM_NAME):
-        self.insertBefore(program_name, index + 1, text)
+        op = self.InsertAfterOp(self.tokens, index + 1, text)
+        rewrites = self.getProgram(program_name)
+        op.instructionIndex = len(rewrites)
+        rewrites.append(op)
 
     def insertBeforeIndex(self, index, text):
         self.insertBefore(self.DEFAULT_PROGRAM_NAME, index, text)
@@ -84,8 +87,9 @@ class TokenStreamRewriter(object):
 
     def delete(self, program_name, from_idx, to_idx):
         if isinstance(from_idx, Token):
-            self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, None)
-        self.replace(program_name, from_idx, to_idx, None)
+            self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
+        else:
+            self.replace(program_name, from_idx, to_idx, "")
 
     def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
         return self.lastRewriteTokenIndexes.get(program_name, -1)
@@ -95,6 +99,9 @@ class TokenStreamRewriter(object):
 
     def getProgram(self, program_name):
         return self.programs.setdefault(program_name, [])
+
+    def getDefaultText(self):
+        return self.getText(self.DEFAULT_PROGRAM_NAME, 0, len(self.tokens.tokens))
 
     def getText(self, program_name, start:int, stop:int):
         """
@@ -114,7 +121,7 @@ class TokenStreamRewriter(object):
         indexToOp = self._reduceToSingleOperationPerIndex(rewrites)
         i = start
         while all((i <= stop, i < len(self.tokens.tokens))):
-            op = indexToOp.get(i)
+            op = indexToOp.pop(i, None)
             token = self.tokens.get(i)
             if op is None:
                 if token.type != Token.EOF: buf.write(token.text)
@@ -134,7 +141,7 @@ class TokenStreamRewriter(object):
             if any((rop is None, not isinstance(rop, TokenStreamRewriter.ReplaceOp))):
                 continue
             # Wipe prior inserts within range
-            inserts = [op for op in rewrites[:i] if isinstance(rop, TokenStreamRewriter.InsertBeforeOp)]
+            inserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
             for iop in inserts:
                 if iop.index == rop.index:
                     rewrites[iop.instructionIndex] = None
@@ -148,7 +155,7 @@ class TokenStreamRewriter(object):
                 if all((prevRop.index >= rop.index, prevRop.last_index <= rop.last_index)):
                     rewrites[prevRop.instructionIndex] = None
                     continue
-                isDisjoint = any((prevRop.last_index<rop.index, prevRop.index>rop))
+                isDisjoint = any((prevRop.last_index<rop.index, prevRop.index>rop.last_index))
                 isSame = all((prevRop.index == rop.index, prevRop.last_index == rop.last_index))
                 if all((prevRop.text is None, rop.text is None, not isDisjoint)):
                     rewrites[prevRop.instructionIndex] = None
@@ -162,11 +169,14 @@ class TokenStreamRewriter(object):
         for i, iop in enumerate(rewrites):
             if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
                 continue
-            prevInserts = [op for op in rewrites[:i] if isinstance(iop, TokenStreamRewriter.InsertBeforeOp)]
-            for prevIop in prevInserts:
-                if prevIop.index == iop.index:
+            prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
+            for prev_index, prevIop in enumerate(prevInserts):
+                if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
                     iop.text += prevIop.text
-                    rewrites[i] = None
+                    rewrites[prev_index] = None
+                elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
+                    iop.text = prevIop.text + iop.text
+                    rewrites[prev_index] = None
             # look for replaces where iop.index is in range; error
             prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
             for rop in prevReplaces:
@@ -174,16 +184,16 @@ class TokenStreamRewriter(object):
                     rop.text = iop.text + rop.text
                     rewrites[i] = None
                     continue
-                if all((iop.index >= rop.index, iop.index <= rop.index)):
+                if all((iop.index >= rop.index, iop.index <= rop.last_index)):
                     raise ValueError("insert op {} within boundaries of previous {}".format(iop, rop))
-            reduced = {}
-            for i, op in enumerate(rewrites):
-                if op is None: continue
-                if reduced.get(op.index): raise ValueError('should be only one op per index')
-                reduced[op.index] = op
+        reduced = {}
+        for i, op in enumerate(rewrites):
+            if op is None: continue
+            if reduced.get(op.index): raise ValueError('should be only one op per index')
+            reduced[op.index] = op
 
-            return reduced
+        return reduced
 
 
     class RewriteOperation(object):
 
@@ -209,7 +219,7 @@ class TokenStreamRewriter(object):
             return self.index
 
         def __str__(self):
-            pass
+            return '<{}@{}:"{}">'.format(self.__class__.__name__, self.tokens.get(self.index), self.text)
 
     class InsertBeforeOp(RewriteOperation):
 
@@ -222,6 +232,9 @@ class TokenStreamRewriter(object):
             buf.write(self.tokens.get(self.index).text)
             return self.index + 1
 
+    class InsertAfterOp(InsertBeforeOp):
+        pass
+
     class ReplaceOp(RewriteOperation):
 
         def __init__(self, from_idx, to_idx, tokens, text):
@@ -231,4 +244,9 @@ class TokenStreamRewriter(object):
         def execute(self, buf):
             if self.text:
                 buf.write(self.text)
-            return self.last_index + 1
\ No newline at end of file
+            return self.last_index + 1
+
+        def __str__(self):
+            if self.text:
+                return '<ReplaceOp@{}..{}:"{}">'.format(self.tokens.get(self.index), self.tokens.get(self.last_index),
+                                                        self.text)
\ No newline at end of file
diff --git a/runtime/Python3/test/TestTokenStreamRewriter.py b/runtime/Python3/test/TestTokenStreamRewriter.py
new file mode 100644
index 000000000..abe311eeb
--- /dev/null
+++ b/runtime/Python3/test/TestTokenStreamRewriter.py
@@ -0,0 +1,525 @@
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+
+import unittest
+
+
+from mocks.TestLexer import TestLexer, TestLexer2
+from antlr4.TokenStreamRewriter import TokenStreamRewriter
+from antlr4.InputStream import InputStream
+from antlr4.CommonTokenStream import CommonTokenStream
+
+
+class TestTokenStreamRewriter(unittest.TestCase):
+    def testInsertBeforeIndexZero(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+        rewriter.insertBeforeIndex(0, '0')
+
+        self.assertEqual(rewriter.getDefaultText(), '0abc')
+
+    def testInsertAfterLastIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+        rewriter.insertAfter(10, 'x')
+
+        self.assertEqual(rewriter.getDefaultText(), 'abcx')
+
+    def test2InsertBeforeAfterMiddleIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'x')
+        rewriter.insertAfter(1, 'x')
+
+        self.assertEqual(rewriter.getDefaultText(), 'axbxc')
+
+    def testReplaceIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(0, 'x')
+
+        self.assertEqual(rewriter.getDefaultText(), 'xbc')
+
+    def testReplaceLastIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(2, 'x')
+
+        self.assertEqual(rewriter.getDefaultText(), 'abx')
+
+    def testReplaceMiddleIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(1, 'x')
+
+        self.assertEqual(rewriter.getDefaultText(), 'axc')
+
+    def testToStringStartStop(self):
+        input = InputStream('x = 3 * 0;')
+        lexer = TestLexer2(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(4, 8, '0')
+
+        self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
+        self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
+        self.assertEqual(rewriter.getText('default', 4, 8), '0')
+
+    def testToStringStartStop2(self):
+        input = InputStream('x = 3 * 0 + 2 * 0;')
+        lexer = TestLexer2(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        self.assertEqual('x = 3 * 0 + 2 * 0;', rewriter.getDefaultText())
+
+        # replace 3 * 0 with 0
+        rewriter.replaceRange(4, 8, '0')
+        self.assertEqual('x = 0 + 2 * 0;', rewriter.getDefaultText())
+        self.assertEqual('x = 0 + 2 * 0;', rewriter.getText('default', 0, 17))
+        self.assertEqual('0', rewriter.getText('default', 4, 8))
+        self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
+        self.assertEqual('2 * 0', rewriter.getText('default', 12, 16))
+
+        rewriter.insertAfter(17, "// comment")
+        self.assertEqual('2 * 0;// comment', rewriter.getText('default', 12, 18))
+
+        self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
+
+    def test2ReplaceMiddleIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(1, 'x')
+        rewriter.replaceIndex(1, 'y')
+
+        self.assertEqual('ayc', rewriter.getDefaultText())
+
+    def test2ReplaceMiddleIndex1InsertBefore(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(0, "_")
+        rewriter.replaceIndex(1, 'x')
+        rewriter.replaceIndex(1, 'y')
+
+        self.assertEqual('_ayc', rewriter.getDefaultText())
+
+    def test2InsertMiddleIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'x')
+        rewriter.insertBeforeIndex(1, 'y')
+
+        self.assertEqual('ayxbc', rewriter.getDefaultText())
+
+    def testReplaceThenDeleteMiddleIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(0, 2, 'x')
+        rewriter.insertBeforeIndex(1, '0')
+
+        with self.assertRaises(ValueError) as ctx:
+            rewriter.getDefaultText()
+        self.assertEqual(
+            'insert op <InsertBeforeOp@[@1,1:1=\'b\',<2>,1:1]:"0"> within boundaries of previous <ReplaceOp@[@0,0:0=\'a\',<1>,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">',
+            str(ctx.exception)
+        )
+
+    def testInsertThenReplaceSameIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(0, '0')
+        rewriter.replaceIndex(0, 'x')
+
+        self.assertEqual('0xbc', rewriter.getDefaultText())
+
+    def test2InsertThenReplaceIndex0(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(0, 'x')
+        rewriter.insertBeforeIndex(0, 'y')
+        rewriter.replaceIndex(0, 'z')
+
+        self.assertEqual('yxzbc', rewriter.getDefaultText())
+
+    def testReplaceThenInsertBeforeLastIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(2, 'x')
+        rewriter.insertBeforeIndex(2, 'y')
+
+        self.assertEqual('abyx', rewriter.getDefaultText())
+
+    def testReplaceThenInsertAfterLastIndex(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(2, 'x')
+        rewriter.insertAfter(2, 'y')
+
+        self.assertEqual('abxy', rewriter.getDefaultText())
+
+    def testReplaceRangeThenInsertAtLeftEdge(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 4, 'x')
+        rewriter.insertBeforeIndex(2, 'y')
+
+        self.assertEqual('abyxba', rewriter.getDefaultText())
+
+    def testReplaceRangeThenInsertAtRightEdge(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 4, 'x')
+        rewriter.insertBeforeIndex(4, 'y')
+
+        with self.assertRaises(ValueError) as ctx:
+            rewriter.getDefaultText()
+        msg = str(ctx.exception)
+        self.assertEqual(
+            "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">",
+            msg
+        )
+
+    def testReplaceRangeThenInsertAfterRightEdge(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 4, 'x')
+        rewriter.insertAfter(4, 'y')
+
+        self.assertEqual('abxyba', rewriter.getDefaultText())
+
+    def testReplaceAll(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(0, 6, 'x')
+
+        self.assertEqual('x', rewriter.getDefaultText())
+
+    def testReplaceSubsetThenFetch(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 4, 'xyz')
+
+        self.assertEqual('abxyzba', rewriter.getDefaultText())
+
+    def testReplaceThenReplaceSuperset(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 4, 'xyz')
+        rewriter.replaceRange(3, 5, 'foo')
+
+        with self.assertRaises(ValueError) as ctx:
+            rewriter.getDefaultText()
+        msg = str(ctx.exception)
+        self.assertEqual(
+            """replace op boundaries of <ReplaceOp@[@3,3:3='c',<3>,1:3]..[@5,5:5='b',<2>,1:5]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
+            msg
+        )
+
+    def testReplaceThenReplaceLowerIndexedSuperset(self):
+        input = InputStream('abcccba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 4, 'xyz')
+        rewriter.replaceRange(1, 3, 'foo')
+
+        with self.assertRaises(ValueError) as ctx:
+            rewriter.getDefaultText()
+        msg = str(ctx.exception)
+        self.assertEqual(
+            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
+            msg
+        )
+
+    def testReplaceSingleMiddleThenOverlappingSuperset(self):
+        input = InputStream('abcba')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceIndex(2, 'xyz')
+        rewriter.replaceRange(0, 3, 'foo')
+
+        self.assertEqual('fooa', rewriter.getDefaultText())
+
+    def testCombineInserts(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(0, 'x')
+        rewriter.insertBeforeIndex(0, 'y')
+
+        self.assertEqual('yxabc', rewriter.getDefaultText())
+
+    def testCombine3Inserts(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'x')
+        rewriter.insertBeforeIndex(0, 'y')
+        rewriter.insertBeforeIndex(1, 'z')
+
+        self.assertEqual('yazxbc', rewriter.getDefaultText())
+
+    def testCombineInsertOnLeftWithReplace(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(0, 2, 'foo')
+        rewriter.insertBeforeIndex(0, 'z')
+
+        self.assertEqual('zfoo', rewriter.getDefaultText())
+
+    def testCombineInsertOnLeftWithDelete(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.delete('default', 0, 2)
+        rewriter.insertBeforeIndex(0, 'z')
+
+        self.assertEqual('z', rewriter.getDefaultText())
+
+    def testDisjointInserts(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'x')
+        rewriter.insertBeforeIndex(2, 'y')
+        rewriter.insertBeforeIndex(0, 'z')
+
+        self.assertEqual('zaxbyc', rewriter.getDefaultText())
+
+    def testOverlappingReplace(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(1, 2, 'foo')
+        rewriter.replaceRange(0, 3, 'bar')
+
+        self.assertEqual('bar', rewriter.getDefaultText())
+
+    def testOverlappingReplace2(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(0, 3, 'bar')
+        rewriter.replaceRange(1, 2, 'foo')
+
+        with self.assertRaises(ValueError) as ctx:
+            rewriter.getDefaultText()
+
+        self.assertEqual(
+            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:2='<EOF>',<-1>,1:3]:"bar">""",
+            str(ctx.exception)
+        )
+
+    def testOverlappingReplace3(self):
+        input = InputStream('abcc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(1, 2, 'foo')
+        rewriter.replaceRange(0, 2, 'bar')
+
+        self.assertEqual('barc', rewriter.getDefaultText())
+
+    def testOverlappingReplace4(self):
+        input = InputStream('abcc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(1, 2, 'foo')
+        rewriter.replaceRange(1, 3, 'bar')
+
+        self.assertEqual('abar', rewriter.getDefaultText())
+
+    def testDropIdenticalReplace(self):
+        input = InputStream('abcc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(1, 2, 'foo')
+        rewriter.replaceRange(1, 2, 'foo')
+
+        self.assertEqual('afooc', rewriter.getDefaultText())
+
+    def testDropPrevCoveredInsert(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'foo')
+        rewriter.replaceRange(1, 2, 'foo')
+
+        self.assertEqual('afoofoo', rewriter.getDefaultText())
+
+    def testLeaveAloneDisjointInsert(self):
+        input = InputStream('abcc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'x')
+        rewriter.replaceRange(2, 3, 'foo')
+
+        self.assertEqual('axbfoo', rewriter.getDefaultText())
+
+    def testLeaveAloneDisjointInsert2(self):
+        input = InputStream('abcc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.replaceRange(2, 3, 'foo')
+        rewriter.insertBeforeIndex(1, 'x')
+
+        self.assertEqual('axbfoo', rewriter.getDefaultText())
+
+    def testInsertBeforeTokenThenDeleteThatToken(self):
+        input = InputStream('abc')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(1, 'foo')
+        rewriter.replaceRange(1, 2, 'foo')
+
+        self.assertEqual('afoofoo', rewriter.getDefaultText())
+
+    def testPreservesOrderOfContiguousInserts(self):
+        """
+        Test for fix for: https://github.com/antlr/antlr4/issues/550
+        """
+        input = InputStream('aa')
+        lexer = TestLexer(input)
+        stream = CommonTokenStream(lexer=lexer)
+        stream.fill()
+        rewriter = TokenStreamRewriter(tokens=stream)
+
+        rewriter.insertBeforeIndex(0, '<b>')
+        rewriter.insertAfter(0, '</b>')
+        rewriter.insertBeforeIndex(1, '<b>')
+        rewriter.insertAfter(1, '</b>')
+
+        self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/test/mocks/TestLexer.py b/runtime/Python3/test/mocks/TestLexer.py
new file mode 100644
index 000000000..9c54007b2
--- /dev/null
+++ b/runtime/Python3/test/mocks/TestLexer.py
@@ -0,0 +1,101 @@
+# Generated from /Users/lyga/Dropbox/code/python/antlr4-learn/test_grammar/T.g4 by ANTLR 4.5.3
+# encoding: utf-8
+from __future__ import print_function
+from antlr4 import *
+from io import StringIO
+
+
+def serializedATN():
+    with StringIO() as buf:
+        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
+        buf.write(u"\5\17\b\1\4\2\t\2\4\3\t\3\4\4\t\4\3\2\3\2\3\3\3\3\3\4")
+        buf.write(u"\3\4\2\2\5\3\3\5\4\7\5\3\2\2\16\2\3\3\2\2\2\2\5\3\2\2")
+        buf.write(u"\2\2\7\3\2\2\2\3\t\3\2\2\2\5\13\3\2\2\2\7\r\3\2\2\2\t")
+        buf.write(u"\n\7c\2\2\n\4\3\2\2\2\13\f\7d\2\2\f\6\3\2\2\2\r\16\7")
+        buf.write(u"e\2\2\16\b\3\2\2\2\3\2\2")
+        return buf.getvalue()
+
+
+class TestLexer(Lexer):
+    atn = ATNDeserializer().deserialize(serializedATN())
+
+    decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
+
+    A = 1
+    B = 2
+    C = 3
+
+    modeNames = [u"DEFAULT_MODE"]
+
+    literalNames = [u"<INVALID>",
+                    u"'a'", u"'b'", u"'c'"]
+
+    symbolicNames = [u"<INVALID>",
+                     u"A", u"B", u"C"]
+
+    ruleNames = [u"A", u"B", u"C"]
+
+    grammarFileName = u"T.g4"
+
+    def __init__(self, input=None):
+        super(TestLexer, self).__init__(input)
+        self.checkVersion("4.7.2")
+        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
+        self._actions = None
+        self._predicates = None
+
+
+def serializedATN2():
+    with StringIO() as buf:
+        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
+        buf.write(u"\t(\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
+        buf.write(u"\7\4\b\t\b\3\2\6\2\23\n\2\r\2\16\2\24\3\3\6\3\30\n\3")
+        buf.write(u"\r\3\16\3\31\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\6\b")
+        buf.write(u"%\n\b\r\b\16\b&\2\2\t\3\3\5\4\7\5\t\6\13\7\r\b\17\t\3")
+        buf.write(u"\2\2*\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2")
+        buf.write(u"\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\3\22\3\2\2\2\5")
+        buf.write(u"\27\3\2\2\2\7\33\3\2\2\2\t\35\3\2\2\2\13\37\3\2\2\2\r")
+        buf.write(u"!\3\2\2\2\17$\3\2\2\2\21\23\4c|\2\22\21\3\2\2\2\23\24")
+        buf.write(u"\3\2\2\2\24\22\3\2\2\2\24\25\3\2\2\2\25\4\3\2\2\2\26")
+        buf.write(u"\30\4\62;\2\27\26\3\2\2\2\30\31\3\2\2\2\31\27\3\2\2\2")
+        buf.write(u"\31\32\3\2\2\2\32\6\3\2\2\2\33\34\7=\2\2\34\b\3\2\2\2")
+        buf.write(u"\35\36\7?\2\2\36\n\3\2\2\2\37 \7-\2\2 \f\3\2\2\2!\"\7")
+        buf.write(u",\2\2\"\16\3\2\2\2#%\7\"\2\2$#\3\2\2\2%&\3\2\2\2&$\3")
+        buf.write(u"\2\2\2&\'\3\2\2\2\'\20\3\2\2\2\6\2\24\31&\2")
+        return buf.getvalue()
+
+
+class TestLexer2(Lexer):
+
+    atn = ATNDeserializer().deserialize(serializedATN2())
+
+    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+
+    ID = 1
+    INT = 2
+    SEMI = 3
+    ASSIGN = 4
+    PLUS = 5
+    MULT = 6
+    WS = 7
+
+    modeNames = [ u"DEFAULT_MODE" ]
+
+    literalNames = [ u"<INVALID>",
+            u"';'", u"'='", u"'+'", u"'*'" ]
+
+    symbolicNames = [ u"<INVALID>",
+            u"ID", u"INT", u"SEMI", u"ASSIGN", u"PLUS", u"MULT", u"WS" ]
+
+    ruleNames = [ u"ID", u"INT", u"SEMI", u"ASSIGN", u"PLUS", u"MULT", u"WS" ]
+
+    grammarFileName = u"T2.g4"
+
+    def __init__(self, input=None):
+        super(TestLexer2, self).__init__(input)
+        self.checkVersion("4.7.2")
+        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
+        self._actions = None
+        self._predicates = None
diff --git a/runtime/Python3/test/mocks/__init__.py b/runtime/Python3/test/mocks/__init__.py
new file mode 100644
index 000000000..e69de29bb
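
Usage sketch (not part of the patch): a minimal example of the Python3 behavior this diff
fixes, assuming the patched antlr4 runtime is importable and it is run from
runtime/Python3/test so the mocks package resolves. Token indices and the expected output
are inferred from the new unit tests above.

    # Exercises the new InsertAfterOp path and the corrected delete().
    from antlr4.InputStream import InputStream
    from antlr4.CommonTokenStream import CommonTokenStream
    from antlr4.TokenStreamRewriter import TokenStreamRewriter
    from mocks.TestLexer import TestLexer  # mock lexer added by this patch

    stream = CommonTokenStream(TestLexer(InputStream('abc')))
    stream.fill()
    rewriter = TokenStreamRewriter(tokens=stream)

    # insertAfter now records a dedicated InsertAfterOp instead of delegating to
    # insertBefore on index + 1, which keeps contiguous inserts in order (issue #550).
    rewriter.insertBeforeIndex(1, 'x')
    rewriter.insertAfter(1, 'y')

    # delete() now takes the integer branch only when the arguments are not Tokens,
    # rather than always falling through to a second replace().
    rewriter.delete('default', 0, 0)

    print(rewriter.getDefaultText())  # expected: 'xbyc'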