update code on updated master

You Kaichao 2018-12-23 18:47:29 +08:00
parent 9f422d87b8
commit 157afd1a8d
6 changed files with 671 additions and 25 deletions

View File

@@ -211,3 +211,4 @@ YYYY/MM/DD, github id, Full name, email
 2018/11/15, amykyta3, Alex Mykyta, amykyta3@users.noreply.github.com
 2018/11/29, hannemann-tamas, Ralf Hannemann-Tamas, ralf.ht@gmail.com
 2018/12/20, WalterCouto, Walter Couto, WalterCouto@users.noreply.github.com
+2018/12/23, youkaichao, Kaichao You, youkaichao@gmail.com

View File

@@ -88,6 +88,7 @@ class TokenStreamRewriter(object):
     def delete(self, program_name, from_idx, to_idx):
         if isinstance(from_idx, Token):
             self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
-        self.replace(program_name, from_idx, to_idx, "")
+        else:
+            self.replace(program_name, from_idx, to_idx, "")

     def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
@@ -104,7 +105,7 @@ class TokenStreamRewriter(object):
     def getText(self, program_name, start, stop):
         """
-        :type interval: Interval.Interval
+        :type interval: IntervalSet.Interval
         :param program_name:
         :param interval:
         :return:
@@ -173,13 +174,13 @@ class TokenStreamRewriter(object):
             if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
                 continue
             prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
-            for i, prevIop in enumerate(prevInserts):
+            for prev_index, prevIop in enumerate(prevInserts):
                 if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
                     iop.text += prevIop.text
-                    rewrites[i] = None
+                    rewrites[prev_index] = None
                 elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
                     iop.text = prevIop.text + iop.text
-                    rewrites[i] = None
+                    rewrites[prev_index] = None
             # look for replaces where iop.index is in range; error
             prevReplaces = [op for op in rewrites[:i] if type(op) is TokenStreamRewriter.ReplaceOp]
             for rop in prevReplaces:
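Context for the loop-variable rename above: when several inserts land on the same token index, _reduceToSingleOperationPerIndex folds them into a single op, and the old code reused the outer loop variable i, so it blanked the wrong slot in rewrites. A minimal sketch of the resulting (now correct) behavior, assuming the updated runtime and the mocks/TestLexer module added later in this commit (import paths are assumptions):

from antlr4.InputStream import InputStream
from antlr4.CommonTokenStream import CommonTokenStream
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from mocks.TestLexer import TestLexer  # single-character 'a'/'b'/'c' lexer mock

stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertBeforeIndex(1, 'x')
rewriter.insertBeforeIndex(1, 'y')  # folded into one op; later text lands leftmost
print(rewriter.getDefaultText())    # -> 'ayxbc' (cf. test2InsertMiddleIndex below)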

View File

@@ -41,7 +41,10 @@ class TokenStreamRewriter(object):
         self.insertAfter(token.tokenIndex, text, program_name)

     def insertAfter(self, index, text, program_name=DEFAULT_PROGRAM_NAME):
-        self.insertBefore(program_name, index + 1, text)
+        op = self.InsertAfterOp(self.tokens, index + 1, text)
+        rewrites = self.getProgram(program_name)
+        op.instructionIndex = len(rewrites)
+        rewrites.append(op)

     def insertBeforeIndex(self, index, text):
         self.insertBefore(self.DEFAULT_PROGRAM_NAME, index, text)
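Giving insertAfter its own InsertAfterOp (rather than rewriting it into an insertBefore at index + 1) is what lets the reducer keep before- and after-inserts aimed at the same gap in their intended order; testPreservesOrderOfContiguousInserts below tracks the upstream report https://github.com/antlr/antlr4/issues/550. A minimal sketch under the same assumptions as the previous snippet:

stream = CommonTokenStream(lexer=TestLexer(InputStream('aa')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertAfter(0, '</b>')       # recorded as an InsertAfterOp at token index 1
rewriter.insertBeforeIndex(1, '<b>')  # an InsertBeforeOp at the same index
print(rewriter.getDefaultText())      # -> 'a</b><b>a': after-text stays left of before-text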
@@ -84,8 +87,9 @@ class TokenStreamRewriter(object):
     def delete(self, program_name, from_idx, to_idx):
         if isinstance(from_idx, Token):
-            self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, None)
-        self.replace(program_name, from_idx, to_idx, None)
+            self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
+        else:
+            self.replace(program_name, from_idx, to_idx, "")

     def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
         return self.lastRewriteTokenIndexes.get(program_name, -1)
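The added else makes delete issue exactly one replace-with-empty-text; previously the second replace ran unconditionally, so a Token-based delete fired twice, the second time with Token objects where integer indices were expected. A quick sketch of the integer path, same assumptions as above:

stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.delete('default', 0, 2)  # delete == replace tokens 0..2 with ''
print(rewriter.getDefaultText())  # -> ''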
@@ -96,6 +100,9 @@ class TokenStreamRewriter(object):
     def getProgram(self, program_name):
         return self.programs.setdefault(program_name, [])

+    def getDefaultText(self):
+        return self.getText(self.DEFAULT_PROGRAM_NAME, 0, len(self.tokens.tokens))
+
     def getText(self, program_name, start:int, stop:int):
         """
         :return:
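getDefaultText is a convenience wrapper the new tests lean on; it is equivalent to calling getText with the default program name and the full token range. Under the same assumptions:

stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceIndex(1, 'x')
# both calls yield 'axc'
assert rewriter.getDefaultText() == rewriter.getText('default', 0, len(stream.tokens))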
@@ -114,7 +121,7 @@ class TokenStreamRewriter(object):
         indexToOp = self._reduceToSingleOperationPerIndex(rewrites)
         i = start
         while all((i <= stop, i < len(self.tokens.tokens))):
-            op = indexToOp.get(i)
+            op = indexToOp.pop(i, None)
             token = self.tokens.get(i)
             if op is None:
                 if token.type != Token.EOF: buf.write(token.text)
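Switching from get to pop consumes each operation as it is applied, so after the walk indexToOp holds only the ops the loop never reached, such as inserts positioned past the last token; those can then be flushed once at the end rather than risk being applied twice. testInsertAfterLastIndex below pins this behavior down:

stream = CommonTokenStream(lexer=TestLexer(InputStream('abc')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.insertAfter(10, 'x')     # index beyond the last token; survives in indexToOp
print(rewriter.getDefaultText())  # -> 'abcx'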
@@ -134,7 +141,7 @@ class TokenStreamRewriter(object):
             if any((rop is None, not isinstance(rop, TokenStreamRewriter.ReplaceOp))):
                 continue
             # Wipe prior inserts within range
-            inserts = [op for op in rewrites[:i] if isinstance(rop, TokenStreamRewriter.InsertBeforeOp)]
+            inserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
             for iop in inserts:
                 if iop.index == rop.index:
                     rewrites[iop.instructionIndex] = None
@@ -148,7 +155,7 @@ class TokenStreamRewriter(object):
                 if all((prevRop.index >= rop.index, prevRop.last_index <= rop.last_index)):
                     rewrites[prevRop.instructionIndex] = None
                     continue
-                isDisjoint = any((prevRop.last_index<rop.index, prevRop.index>rop))
+                isDisjoint = any((prevRop.last_index<rop.index, prevRop.index>rop.last_index))
                 isSame = all((prevRop.index == rop.index, prevRop.last_index == rop.last_index))
                 if all((prevRop.text is None, rop.text is None, not isDisjoint)):
                     rewrites[prevRop.instructionIndex] = None
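In the old line, prevRop.index > rop compared an integer to a whole ReplaceOp, which under Python 3 raises a TypeError the first time two replaces are checked for overlap; comparing against rop.last_index gives the intended disjointness test. The rules the reducer then enforces, per the new tests: a later replace that fully covers an earlier one wins, while a partial overlap raises. Sketch of the covering case, same assumptions:

stream = CommonTokenStream(lexer=TestLexer(InputStream('abcc')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(1, 2, 'foo')
rewriter.replaceRange(1, 3, 'bar')  # superset of the earlier replace: allowed
print(rewriter.getDefaultText())    # -> 'abar' (cf. testOverlappingReplace4 below)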
@@ -162,11 +169,14 @@ class TokenStreamRewriter(object):
         for i, iop in enumerate(rewrites):
             if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
                 continue
-            prevInserts = [op for op in rewrites[:i] if isinstance(iop, TokenStreamRewriter.InsertBeforeOp)]
-            for prevIop in prevInserts:
-                if prevIop.index == iop.index:
+            prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
+            for prev_index, prevIop in enumerate(prevInserts):
+                if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
                     iop.text += prevIop.text
-                    rewrites[i] = None
+                    rewrites[prev_index] = None
+                elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
+                    iop.text = prevIop.text + iop.text
+                    rewrites[prev_index] = None
             # look for replaces where iop.index is in range; error
             prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
             for rop in prevReplaces:
@@ -174,7 +184,7 @@ class TokenStreamRewriter(object):
                     rop.text = iop.text + rop.text
                     rewrites[i] = None
                     continue
-                if all((iop.index >= rop.index, iop.index <= rop.index)):
+                if all((iop.index >= rop.index, iop.index <= rop.last_index)):
                     raise ValueError("insert op {} within boundaries of previous {}".format(iop, rop))

         reduced = {}
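An insert at a replace's left edge is folded into the replace text by the branch just above, but an insert anywhere else inside the replaced range has no well-defined result, so it now raises, using the op dumps supplied by the new __str__ methods below. Triggering it, same assumptions:

stream = CommonTokenStream(lexer=TestLexer(InputStream('abcccba')))
stream.fill()
rewriter = TokenStreamRewriter(tokens=stream)
rewriter.replaceRange(2, 4, 'x')
rewriter.insertBeforeIndex(4, 'y')  # index 4 lies inside the replaced range 2..4
rewriter.getDefaultText()           # raises ValueError: insert op ... within boundaries of previous ...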
@@ -209,7 +219,7 @@ class TokenStreamRewriter(object):
             return self.index

         def __str__(self):
-            pass
+            return '<{}@{}:"{}">'.format(self.__class__.__name__, self.tokens.get(self.index), self.text)

     class InsertBeforeOp(RewriteOperation):
@@ -222,6 +232,9 @@ class TokenStreamRewriter(object):
             buf.write(self.tokens.get(self.index).text)
             return self.index + 1

+    class InsertAfterOp(InsertBeforeOp):
+        pass
+
     class ReplaceOp(RewriteOperation):

         def __init__(self, from_idx, to_idx, tokens, text):
@@ -232,3 +245,8 @@ class TokenStreamRewriter(object):
             if self.text:
                 buf.write(self.text)
             return self.last_index + 1
+
+        def __str__(self):
+            if self.text:
+                return '<ReplaceOp@{}..{}:"{}">'.format(self.tokens.get(self.index), self.tokens.get(self.last_index),
+                                                        self.text)

View File

@@ -0,0 +1,525 @@
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.

import unittest

from mocks.TestLexer import TestLexer, TestLexer2
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from antlr4.InputStream import InputStream
from antlr4.CommonTokenStream import CommonTokenStream


class TestTokenStreamRewriter(unittest.TestCase):

    def testInsertBeforeIndexZero(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '0')

        self.assertEqual(rewriter.getDefaultText(), '0abc')

    def testInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertAfter(10, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'abcx')

    def test2InsertBeforeAfterMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertAfter(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axbxc')

    def testReplaceIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(0, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'xbc')

    def testReplaceLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'abx')

    def testReplaceMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axc')

    def testToStringStartStop(self):
        input = InputStream('x = 3 * 0;')
        lexer = TestLexer2(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(4, 8, '0')

        self.assertEqual(rewriter.getDefaultText(), 'x = 0;')
        self.assertEqual(rewriter.getText('default', 0, 9), 'x = 0;')
        self.assertEqual(rewriter.getText('default', 4, 8), '0')

    def testToStringStartStop2(self):
        input = InputStream('x = 3 * 0 + 2 * 0;')
        lexer = TestLexer2(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        self.assertEqual('x = 3 * 0 + 2 * 0;', rewriter.getDefaultText())

        # replace 3 * 0 with 0
        rewriter.replaceRange(4, 8, '0')
        self.assertEqual('x = 0 + 2 * 0;', rewriter.getDefaultText())
        self.assertEqual('x = 0 + 2 * 0;', rewriter.getText('default', 0, 17))
        self.assertEqual('0', rewriter.getText('default', 4, 8))
        self.assertEqual('x = 0', rewriter.getText('default', 0, 8))
        self.assertEqual('2 * 0', rewriter.getText('default', 12, 16))

        rewriter.insertAfter(17, "// comment")
        self.assertEqual('2 * 0;// comment', rewriter.getText('default', 12, 18))

        self.assertEqual('x = 0', rewriter.getText('default', 0, 8))

    def test2ReplaceMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(1, 'x')
        rewriter.replaceIndex(1, 'y')

        self.assertEqual('ayc', rewriter.getDefaultText())

    def test2ReplaceMiddleIndex1InsertBefore(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, "_")
        rewriter.replaceIndex(1, 'x')
        rewriter.replaceIndex(1, 'y')

        self.assertEqual('_ayc', rewriter.getDefaultText())

    def test2InsertMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertBeforeIndex(1, 'y')

        self.assertEqual('ayxbc', rewriter.getDefaultText())

    def testReplaceThenDeleteMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'x')
        rewriter.insertBeforeIndex(1, '0')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        self.assertEqual(
            'insert op <InsertBeforeOp@[@1,1:1=\'b\',<2>,1:1]:"0"> within boundaries of previous <ReplaceOp@[@0,0:0=\'a\',<1>,1:0]..[@2,2:2=\'c\',<3>,1:2]:"x">',
            str(ctx.exception)
        )

    def testInsertThenReplaceSameIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '0')
        rewriter.replaceIndex(0, 'x')

        self.assertEqual('0xbc', rewriter.getDefaultText())

    def test2InsertThenReplaceIndex0(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, 'x')
        rewriter.insertBeforeIndex(0, 'y')
        rewriter.replaceIndex(0, 'z')

        self.assertEqual('yxzbc', rewriter.getDefaultText())

    def testReplaceThenInsertBeforeLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'x')
        rewriter.insertBeforeIndex(2, 'y')

        self.assertEqual('abyx', rewriter.getDefaultText())

    def testReplaceThenInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'x')
        rewriter.insertAfter(2, 'y')

        self.assertEqual('abxy', rewriter.getDefaultText())

    def testReplaceRangeThenInsertAtLeftEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertBeforeIndex(2, 'y')

        self.assertEqual('abyxba', rewriter.getDefaultText())

    def testReplaceRangeThenInsertAtRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertBeforeIndex(4, 'y')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">",
            msg
        )

    def testReplaceRangeThenInsertAfterRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertAfter(4, 'y')

        self.assertEqual('abxyba', rewriter.getDefaultText())

    def testReplaceAll(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 6, 'x')

        self.assertEqual('x', rewriter.getDefaultText())

    def testReplaceSubsetThenFetch(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')

        self.assertEqual('abxyzba', rewriter.getDefaultText())

    def testReplaceThenReplaceSuperset(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')
        rewriter.replaceRange(3, 5, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@3,3:3='c',<3>,1:3]..[@5,5:5='b',<2>,1:5]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
            msg
        )

    def testReplaceThenReplaceLowerIndexedSuperset(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')
        rewriter.replaceRange(1, 3, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        msg = str(ctx.exception)
        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:"foo"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:"xyz">""",
            msg
        )

    def testReplaceSingleMiddleThenOverlappingSuperset(self):
        input = InputStream('abcba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'xyz')
        rewriter.replaceRange(0, 3, 'foo')

        self.assertEqual('fooa', rewriter.getDefaultText())

    def testCombineInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, 'x')
        rewriter.insertBeforeIndex(0, 'y')

        self.assertEqual('yxabc', rewriter.getDefaultText())

    def testCombine3Inserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertBeforeIndex(0, 'y')
        rewriter.insertBeforeIndex(1, 'z')

        self.assertEqual('yazxbc', rewriter.getDefaultText())

    def testCombineInsertOnLeftWithReplace(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'foo')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('zfoo', rewriter.getDefaultText())

    def testCombineInsertOnLeftWithDelete(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.delete('default', 0, 2)
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('z', rewriter.getDefaultText())

    def testDisjointInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertBeforeIndex(2, 'y')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('zaxbyc', rewriter.getDefaultText())

    def testOverlappingReplace(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(0, 3, 'bar')

        self.assertEqual('bar', rewriter.getDefaultText())

    def testOverlappingReplace2(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 3, 'bar')
        rewriter.replaceRange(1, 2, 'foo')

        with self.assertRaises(ValueError) as ctx:
            rewriter.getDefaultText()
        self.assertEqual(
            """replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:"foo"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:2='<EOF>',<-1>,1:3]:"bar">""",
            str(ctx.exception)
        )

    def testOverlappingReplace3(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(0, 2, 'bar')

        self.assertEqual('barc', rewriter.getDefaultText())

    def testOverlappingReplace4(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 3, 'bar')

        self.assertEqual('abar', rewriter.getDefaultText())

    def testDropIdenticalReplace(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afooc', rewriter.getDefaultText())

    def testDropPrevCoveredInsert(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())

    def testLeaveAloneDisjointInsert(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.replaceRange(2, 3, 'foo')

        self.assertEqual('axbfoo', rewriter.getDefaultText())

    def testLeaveAloneDisjointInsert2(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 3, 'foo')
        rewriter.insertBeforeIndex(1, 'x')

        self.assertEqual('axbfoo', rewriter.getDefaultText())

    def testInsertBeforeTokenThenDeleteThatToken(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())

    def testPreservesOrderOfContiguousInserts(self):
        """
        Test for fix for: https://github.com/antlr/antlr4/issues/550
        """
        input = InputStream('aa')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, '<b>')
        rewriter.insertAfter(0, '</b>')
        rewriter.insertBeforeIndex(1, '<b>')
        rewriter.insertAfter(1, '</b>')

        self.assertEqual('<b>a</b><b>a</b>', rewriter.getDefaultText())


if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,101 @@
# Generated from /Users/lyga/Dropbox/code/python/antlr4-learn/test_grammar/T.g4 by ANTLR 4.5.3
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO


def serializedATN():
    with StringIO() as buf:
        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
        buf.write(u"\5\17\b\1\4\2\t\2\4\3\t\3\4\4\t\4\3\2\3\2\3\3\3\3\3\4")
        buf.write(u"\3\4\2\2\5\3\3\5\4\7\5\3\2\2\16\2\3\3\2\2\2\2\5\3\2\2")
        buf.write(u"\2\2\7\3\2\2\2\3\t\3\2\2\2\5\13\3\2\2\2\7\r\3\2\2\2\t")
        buf.write(u"\n\7c\2\2\n\4\3\2\2\2\13\f\7d\2\2\f\6\3\2\2\2\r\16\7")
        buf.write(u"e\2\2\16\b\3\2\2\2\3\2\2")
        return buf.getvalue()


class TestLexer(Lexer):

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]

    A = 1
    B = 2
    C = 3

    modeNames = [u"DEFAULT_MODE"]

    literalNames = [u"<INVALID>",
                    u"'a'", u"'b'", u"'c'"]

    symbolicNames = [u"<INVALID>",
                     u"A", u"B", u"C"]

    ruleNames = [u"A", u"B", u"C"]

    grammarFileName = u"T.g4"

    def __init__(self, input=None):
        super(TestLexer, self).__init__(input)
        self.checkVersion("4.7.2")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None


def serializedATN2():
    with StringIO() as buf:
        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
        buf.write(u"\t(\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write(u"\7\4\b\t\b\3\2\6\2\23\n\2\r\2\16\2\24\3\3\6\3\30\n\3")
        buf.write(u"\r\3\16\3\31\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\6\b")
        buf.write(u"%\n\b\r\b\16\b&\2\2\t\3\3\5\4\7\5\t\6\13\7\r\b\17\t\3")
        buf.write(u"\2\2*\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2")
        buf.write(u"\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\3\22\3\2\2\2\5")
        buf.write(u"\27\3\2\2\2\7\33\3\2\2\2\t\35\3\2\2\2\13\37\3\2\2\2\r")
        buf.write(u"!\3\2\2\2\17$\3\2\2\2\21\23\4c|\2\22\21\3\2\2\2\23\24")
        buf.write(u"\3\2\2\2\24\22\3\2\2\2\24\25\3\2\2\2\25\4\3\2\2\2\26")
        buf.write(u"\30\4\62;\2\27\26\3\2\2\2\30\31\3\2\2\2\31\27\3\2\2\2")
        buf.write(u"\31\32\3\2\2\2\32\6\3\2\2\2\33\34\7=\2\2\34\b\3\2\2\2")
        buf.write(u"\35\36\7?\2\2\36\n\3\2\2\2\37 \7-\2\2 \f\3\2\2\2!\"\7")
        buf.write(u",\2\2\"\16\3\2\2\2#%\7\"\2\2$#\3\2\2\2%&\3\2\2\2&$\3")
        buf.write(u"\2\2\2&\'\3\2\2\2\'\20\3\2\2\2\6\2\24\31&\2")
        return buf.getvalue()


class TestLexer2(Lexer):

    atn = ATNDeserializer().deserialize(serializedATN2())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    ID = 1
    INT = 2
    SEMI = 3
    ASSIGN = 4
    PLUS = 5
    MULT = 6
    WS = 7

    modeNames = [ u"DEFAULT_MODE" ]

    literalNames = [ u"<INVALID>",
            u"';'", u"'='", u"'+'", u"'*'" ]

    symbolicNames = [ u"<INVALID>",
            u"ID", u"INT", u"SEMI", u"ASSIGN", u"PLUS", u"MULT", u"WS" ]

    ruleNames = [ u"ID", u"INT", u"SEMI", u"ASSIGN", u"PLUS", u"MULT", u"WS" ]

    grammarFileName = u"T2.g4"

    def __init__(self, input=None):
        super(TestLexer2, self).__init__(input)
        self.checkVersion("4.7.2")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None

View File