From ea20af25ab1ff91c04faaf48c5a27bc01908aaa1 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Tue, 14 Oct 2014 00:17:03 +0800 Subject: [PATCH 01/26] work in progress --- .../v4/testgen/CompositeLexerTestMethod.java | 35 + .../v4/testgen/CompositeParserTestMethod.java | 35 + tool/test/org/antlr/v4/testgen/Generator.java | 540 +++ tool/test/org/antlr/v4/testgen/Grammar.java | 51 + .../org/antlr/v4/testgen/LexerTestMethod.java | 15 + .../antlr/v4/testgen/ParserTestMethod.java | 13 + tool/test/org/antlr/v4/testgen/TestFile.java | 82 + .../test/org/antlr/v4/testgen/TestMethod.java | 33 + .../LexerDelegatorInvokesDelegateRule.st | 4 + .../LexerDelegatorInvokesDelegateRule_S.st | 3 + .../LexerDelegatorRuleOverridesDelegate.st | 4 + .../LexerDelegatorRuleOverridesDelegate_S.st | 3 + .../BringInLiteralsFromDelegate.st | 4 + .../BringInLiteralsFromDelegate_S.st | 2 + .../CombinedImportsCombined.st | 3 + .../CombinedImportsCombined_S.st | 5 + .../DelegatesSeeSameTokenType.st | 17 + .../DelegatesSeeSameTokenType_S.st | 3 + .../DelegatesSeeSameTokenType_T.st | 3 + .../DelegatorAccessesDelegateMembers.st | 4 + .../DelegatorAccessesDelegateMembers_S.st | 5 + .../DelegatorInvokesDelegateRule.st | 5 + .../DelegatorInvokesDelegateRuleWithArgs.st | 5 + .../DelegatorInvokesDelegateRuleWithArgs_S.st | 2 + ...atorInvokesDelegateRuleWithReturnStruct.st | 5 + ...orInvokesDelegateRuleWithReturnStruct_S.st | 2 + .../DelegatorInvokesDelegateRule_S.st | 2 + ...egatorInvokesFirstVersionOfDelegateRule.st | 5 + ...atorInvokesFirstVersionOfDelegateRule_S.st | 3 + ...atorInvokesFirstVersionOfDelegateRule_T.st | 2 + .../DelegatorRuleOverridesDelegate.st | 4 + .../DelegatorRuleOverridesDelegate_S.st | 3 + .../DelegatorRuleOverridesDelegates.st | 4 + .../DelegatorRuleOverridesDelegates_S.st | 4 + .../DelegatorRuleOverridesDelegates_T.st | 3 + ...legatorRuleOverridesLookaheadInDelegate.st | 7 + ...gatorRuleOverridesLookaheadInDelegate_S.st | 7 + .../ImportLexerWithOnlyFragmentRules.st | 4 + .../ImportLexerWithOnlyFragmentRules_S.st | 6 + .../ImportedGrammarWithEmptyOptions.st | 5 + .../ImportedGrammarWithEmptyOptions_S.st | 3 + .../ImportedRuleWithAction.st | 5 + .../ImportedRuleWithAction_S.st | 2 + .../CompositeParsers/KeywordVSIDOrder.st | 5 + .../CompositeParsers/KeywordVSIDOrder_S.st | 2 + .../AmbigYieldsCtxSensitiveDFA.st | 5 + .../FullContextParsing/AmbiguityNoLoop.st | 12 + .../FullContextParsing/CtxSensitiveDFA.st | 9 + .../CtxSensitiveDFATwoDiffInput.st | 9 + .../FullContextParsing/ExprAmbiguity.st | 13 + .../FullContextIF_THEN_ELSEParse.st | 10 + .../LoopsSimulateTailRecursion.st | 15 + .../SLLSeesEOFInLLGrammar.st | 9 + .../grammars/LexerExec/ActionPlacement.st | 8 + .../v4/testgen/grammars/LexerExec/CharSet.st | 3 + .../grammars/LexerExec/CharSetInSet.st | 4 + .../testgen/grammars/LexerExec/CharSetNot.st | 3 + .../testgen/grammars/LexerExec/CharSetPlus.st | 3 + .../grammars/LexerExec/CharSetRange.st | 4 + .../LexerExec/CharSetWithEscapedChar.st | 3 + .../LexerExec/CharSetWithMissingEndRange.st | 3 + .../LexerExec/CharSetWithMissingEscapeChar.st | 3 + .../grammars/LexerExec/CharSetWithQuote.st | 3 + .../LexerExec/CharSetWithReversedRange.st | 3 + .../testgen/grammars/LexerExec/EOFByItself.st | 3 + .../LexerExec/EOFSuffixInFirstRule.st | 4 + .../grammars/LexerExec/GreedyClosure.st | 3 + .../grammars/LexerExec/GreedyConfigs.st | 4 + .../grammars/LexerExec/GreedyOptional.st | 3 + .../LexerExec/GreedyPositiveClosure.st | 3 + .../v4/testgen/grammars/LexerExec/HexVsID.st | 8 + 
.../testgen/grammars/LexerExec/KeywordID.st | 4 + .../testgen/grammars/LexerExec/LargeLexer.st | 4002 +++++++++++++++++ .../grammars/LexerExec/NonGreedyClosure.st | 3 + .../grammars/LexerExec/NonGreedyConfigs.st | 4 + .../grammars/LexerExec/NonGreedyOptional.st | 3 + .../LexerExec/NonGreedyPositiveClosure.st | 3 + .../LexerExec/NonGreedyTermination.st | 2 + .../testgen/grammars/LexerExec/Parentheses.st | 7 + .../LexerExec/PositionAdjustingLexer.st | 34 + .../grammars/LexerExec/QuoteTranslation.st | 2 + .../RecursiveLexerRuleRefWithWildcardPlus.ST | 3 + .../RecursiveLexerRuleRefWithWildcardStar.st | 3 + .../RefToRuleDoesNotSetTokenNorEmitAnother.st | 4 + .../v4/testgen/grammars/LexerExec/Slashes.st | 6 + .../v4/testgen/grammars/ParserExec/APlus.st | 6 + .../v4/testgen/grammars/ParserExec/AStar.st | 6 + .../testgen/grammars/ParserExec/AorAPlus.st | 6 + .../testgen/grammars/ParserExec/AorAStar.st | 6 + .../v4/testgen/grammars/ParserExec/AorB.st | 9 + .../testgen/grammars/ParserExec/AorBPlus.st | 8 + .../testgen/grammars/ParserExec/AorBStar.st | 8 + .../v4/testgen/grammars/ParserExec/Basic.st | 7 + .../ParserExec/IfIfElseGreedyBinding1.st | 8 + .../ParserExec/IfIfElseGreedyBinding2.st | 8 + .../ParserExec/IfIfElseNonGreedyBinding1.st | 8 + .../ParserExec/IfIfElseNonGreedyBinding2.st | 8 + .../grammars/ParserExec/LL1OptionalBlock.st | 7 + .../LabelAliasingAcrossLabeledAlternatives.st | 8 + .../v4/testgen/grammars/ParserExec/Labels.st | 6 + .../ParserExec/ListLabelForClosureContext.st | 20 + .../grammars/ParserExec/ListLabelsOnSet.st | 7 + .../testgen/grammars/ParserExec/Optional.st | 4 + .../grammars/ParserExec/PredicatedIfIfElse.st | 7 + .../grammars/ParserExec/PredictionIssue334.st | 14 + .../ParserExec/StartRuleWithoutEOF.st | 6 + 106 files changed, 5356 insertions(+) create mode 100644 tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/Generator.java create mode 100644 tool/test/org/antlr/v4/testgen/Grammar.java create mode 100644 tool/test/org/antlr/v4/testgen/LexerTestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/ParserTestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/TestFile.java create mode 100644 tool/test/org/antlr/v4/testgen/TestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st create mode 100644 
tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder_S.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbiguityNoLoop.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/ExprAmbiguity.st create 
mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetInSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetNot.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetPlus.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetRange.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithEscapedChar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEndRange.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEscapeChar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithQuote.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithReversedRange.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFByItself.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFSuffixInFirstRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyClosure.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyOptional.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyPositiveClosure.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyClosure.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyOptional.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyPositiveClosure.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/Parentheses.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/PositionAdjustingLexer.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/QuoteTranslation.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/Slashes.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/APlus.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/AStar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAPlus.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAStar.st create mode 100644 
tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorB.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBPlus.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBStar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/Basic.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding1.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/LL1OptionalBlock.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/Labels.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelsOnSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/Optional.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredictionIssue334.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st diff --git a/tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java b/tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java new file mode 100644 index 000000000..2c978c9b8 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java @@ -0,0 +1,35 @@ +package org.antlr.v4.testgen; + +import java.io.File; + +import org.stringtemplate.v4.STGroup; + +public class CompositeLexerTestMethod extends LexerTestMethod { + + public Grammar[] slaveGrammars; + + public CompositeLexerTestMethod(String name, String grammarName, + String input, String expectedOutput, + String expectedErrors, String ... slaves) { + super(name, grammarName, input, expectedOutput, expectedErrors, null); + this.slaveGrammars = new Grammar[slaves.length]; + for(int i=0;i configs = readConfigs(); + File source = configs.get("Source"); + for(Map.Entry item : configs.entrySet()) { + if("Source".equals(item.getKey())) + continue; + Generator gen = new Generator(item.getKey(), source, item.getValue()); + gen.generateTests(); + } + } + + private static Map readConfigs() throws Exception { + Map configs = new HashMap(); + configs.put("Source", readGrammarDir()); // source of test templates + configs.put("Firefox", readFirefoxDir()); // generated Firefox tests + return configs; + } + + private static File readFirefoxDir() { + // TODO read from env variable + return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/js/test/firefox"); + } + + private static File readGrammarDir() throws Exception { + File parent = readThisDir(); + return new File(parent, "grammars"); + } + + private static File readThisDir() throws Exception { + String className = Generator.class.getName().replace(".", "/"); + className = className.substring(0, className.lastIndexOf("/") + 1); + URL url = ClassLoader.getSystemResource(className); + return new File(url.toURI()); + } + + public static String escape(String s) { + return s==null ? 
null : s.replace("\\","\\\\").replace("\n", "\\n").replace("\"","\\\""); + } + + String target; + File input; + File output; + STGroup group; + + public Generator(String target, File input, File output) { + this.target = target; + this.input = input; + this.output = output; + } + + private void generateTests() throws Exception { + this.group = readTemplates(); + Collection tests = buildTests(); + for(TestFile test : tests) { + String code = generateTestCode(test); + writeTestFile(test, code); + } + } + + private STGroup readTemplates() throws Exception { + if(!output.exists()) + throw new FileNotFoundException(output.getAbsolutePath()); + String name = target + ".test.stg"; + File file = new File(output, name); + if(!file.exists()) + throw new FileNotFoundException(file.getAbsolutePath()); + return new STGroupFile(file.getAbsolutePath()); + } + + private String generateTestCode(TestFile test) throws Exception { + test.generateUnitTests(group); + ST template = group.getInstanceOf("TestFile"); + template.add("file", test); + return template.render(); + } + + private void writeTestFile(TestFile test, String code) throws Exception { + File file = new File(output, "Test" + test.getName() + ".java"); + OutputStream stream = new FileOutputStream(file); + try { + stream.write(code.getBytes()); + } finally { + stream.close(); + } + } + + private Collection buildTests() throws Exception { + List list = new ArrayList(); + list.add(buildLexerExec()); + list.add(buildParserExec()); + list.add(buildCompositeLexers()); + list.add(buildCompositeParsers()); + list.add(buildFullContextParsing()); + return list; + + } + + private TestFile buildFullContextParsing() throws Exception { + TestFile file = new TestFile("FullContextParsing"); + file.addParserTest(input, "AmbigYieldsCtxSensitiveDFA", "T", "s", "abc", + "Decision 0:\n" + + "s0-ID->:s1^=>1\n", + "line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", null); + file.addParserTest(input, "CtxSensitiveDFA", "T", "s", "$ 34 abc", + "Decision 1:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n", + "line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + + "line 1:2 reportContextSensitivity d=1 (e), input='34'\n", 1); + file.addParserTest(input, "CtxSensitiveDFA", "T", "s", "@ 34 abc", + "Decision 1:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n", + "line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + + "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", 2); + file.addParserTest(input, "CtxSensitiveDFATwoDiffInput", "T", "s", + "$ 34 abc @ 34 abc", + "Decision 2:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n", + "line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" + + "line 1:2 reportContextSensitivity d=2 (e), input='34'\n" + + "line 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\n" + + "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", null); + file.addParserTest(input, "SLLSeesEOFInLLGrammar", "T", "s", + "34 abc", + "Decision 0:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n", + "line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" + + "line 1:0 reportContextSensitivity d=0 (e), input='34'\n", null); + file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then return }", + "Decision 1:\n" + + "s0-'}'->:s1=>2\n", null, 1); + file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then return else foo }", + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n", + "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 
reportContextSensitivity d=1 (stat), input='else'\n", 2); + file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then if y then return else foo }", + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n" + + "s0-'}'->:s2=>2\n", + "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 3); + // should not be ambiguous because the second 'else bar' clearly + // indicates that the first else should match to the innermost if. + // LL_EXACT_AMBIG_DETECTION makes us keep going to resolve + file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then if y then return else foo else bar }", + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n", + "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" + + "line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", 4); + file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then return else foo\n" + + "if x then if y then return else foo }", + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n" + + "s0-'}'->:s2=>2\n", + "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 5); + file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then return else foo\n" + + "if x then if y then return else foo }", + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n" + + "s0-'}'->:s2=>2\n", + "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 6); + file.addParserTest(input, "LoopsSimulateTailRecursion", "T", "prog", + "a(i)<-x", + "pass: a(i)<-x\n", + "line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" + + "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n", null); + file.addParserTest(input, "AmbiguityNoLoop", "T", "prog", + "a@", + "alt 1\n", + "line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" + + "line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" + + "line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n", null); + file.addParserTest(input, "ExprAmbiguity", "T", "s", + "a+b", + "(expr a + (expr b))\n", + "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n", 1); + file.addParserTest(input, "ExprAmbiguity", "T", "s", + "a+b*c", + "(expr a + (expr b * (expr c)))\n", + "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + + "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + + "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n", 2); + return file; + } + + private TestFile buildCompositeLexers() throws Exception { + TestFile file = new TestFile("CompositeLexers"); + file.addCompositeLexerTest(input, "LexerDelegatorInvokesDelegateRule", "M", "abc", + "S.A\n" + + 
"[@0,0:0='a',<3>,1:0]\n" + + "[@1,1:1='b',<1>,1:1]\n" + + "[@2,2:2='c',<4>,1:2]\n" + + "[@3,3:2='',<-1>,1:3]\n", null, "S"); + file.addCompositeLexerTest(input, "LexerDelegatorRuleOverridesDelegate", "M", "ab", + "M.A\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", null, "S"); + return file; + } + + private TestFile buildLexerExec() throws Exception { + TestFile file = new TestFile("LexerExec"); + file.addLexerTest(input, "QuoteTranslation", "L", "\"", + "[@0,0:0='\"',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n", null); + file.addLexerTest(input, "RefToRuleDoesNotSetTokenNorEmitAnother", "L", "34 -21 3", + "[@0,0:1='34',<2>,1:0]\n" + + "[@1,3:5='-21',<1>,1:3]\n" + + "[@2,7:7='3',<2>,1:7]\n" + + "[@3,8:7='',<-1>,1:8]\n", null); + file.addLexerTest(input, "Slashes", "L", "\\ / \\/ /\\", + "[@0,0:0='\\',<1>,1:0]\n" + + "[@1,2:2='/',<2>,1:2]\n" + + "[@2,4:5='\\/',<3>,1:4]\n" + + "[@3,7:8='/\\',<4>,1:7]\n" + + "[@4,9:8='',<-1>,1:9]\n", null); + file.addLexerTest(input, "Parentheses", "L", "-.-.-!", + "[@0,0:4='-.-.-',<1>,1:0]\n" + + "[@1,5:5='!',<3>,1:5]\n" + + "[@2,6:5='',<-1>,1:6]\n", null); + file.addLexerTest(input, "NonGreedyTermination", "L", "\"hi\"\"mom\"", + "[@0,0:3='\"hi\"',<1>,1:0]\n" + + "[@1,4:8='\"mom\"',<1>,1:4]\n" + + "[@2,9:8='',<-1>,1:9]\n", null, 1); + file.addLexerTest(input, "NonGreedyTermination", "L", "\"\"\"mom\"", + "[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", null, 2); + file.addLexerTest(input, "GreedyOptional", "L", "//blah\n//blah\n", + "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", null); + file.addLexerTest(input, "NonGreedyOptional", "L", "//blah\n//blah\n", + "[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", null); + file.addLexerTest(input, "GreedyClosure", "L", "//blah\n//blah\n", + "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", null); + file.addLexerTest(input, "NonGreedyClosure", "L", "//blah\n//blah\n", + "[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", null); + file.addLexerTest(input, "GreedyPositiveClosure", "L", "//blah\n//blah\n", + "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", null); + file.addLexerTest(input, "NonGreedyPositiveClosure", "L", "//blah\n//blah\n", + "[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", null); + file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardStar", "L", + "/* ick */\n" + + "/* /* */\n" + + "/* /*nested*/ */\n", + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,9:9='\\n',<2>,1:9]\n" + + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,35:35='\\n',<2>,3:16]\n" + + "[@4,36:35='',<-1>,4:17]\n", null, 1); + file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardStar", "L", + "/* ick */x\n" + + "/* /* */x\n" + + "/* /*nested*/ */x\n", + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,10:10='\\n',<2>,1:10]\n" + + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,38:38='\\n',<2>,3:17]\n" + + "[@4,39:38='',<-1>,4:18]\n", + "line 1:9 token recognition error at: 'x'\n" + + "line 3:16 token recognition error at: 'x'\n", 2); + file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardPlus", "L", + "/* ick */\n" + + "/* /* */\n" + + "/* /*nested*/ */\n", + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,9:9='\\n',<2>,1:9]\n" + + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,35:35='\\n',<2>,3:16]\n" + + 
"[@4,36:35='',<-1>,4:17]\n", null, 1); + file.addLexerTest(input, "RecursiveLexerRuleRefWithWildcardPlus", "L", + "/* ick */x\n" + + "/* /* */x\n" + + "/* /*nested*/ */x\n", + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,10:10='\\n',<2>,1:10]\n" + + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,38:38='\\n',<2>,3:17]\n" + + "[@4,39:38='',<-1>,4:18]\n", + "line 1:9 token recognition error at: 'x'\n" + + "line 3:16 token recognition error at: 'x'\n", 2); + file.addLexerTest(input, "ActionPlacement", "L", "ab", + "stuff0: \n" + + "stuff1: a\n" + + "stuff2: ab\n" + + "ab\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", null); + file.addLexerTest(input, "GreedyConfigs", "L", "ab", + "ab\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", null); + file.addLexerTest(input, "NonGreedyConfigs", "L", "qb", + "a\n" + + "b\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,1:1='b',<3>,1:1]\n" + + "[@2,2:1='',<-1>,1:2]\n", null); + file.addLexerTest(input, "KeywordID", "L", "end eend ending a", + "[@0,0:2='end',<1>,1:0]\n" + + "[@1,3:3=' ',<3>,1:3]\n" + + "[@2,4:7='eend',<2>,1:4]\n" + + "[@3,8:8=' ',<3>,1:8]\n" + + "[@4,9:14='ending',<2>,1:9]\n" + + "[@5,15:15=' ',<3>,1:15]\n" + + "[@6,16:16='a',<2>,1:16]\n" + + "[@7,17:16='',<-1>,1:17]\n", null); + file.addLexerTest(input, "HexVsID", "L", "x 0 1 a.b a.l", + "[@0,0:0='x',<5>,1:0]\n" + + "[@1,1:1=' ',<6>,1:1]\n" + + "[@2,2:2='0',<2>,1:2]\n" + + "[@3,3:3=' ',<6>,1:3]\n" + + "[@4,4:4='1',<2>,1:4]\n" + + "[@5,5:5=' ',<6>,1:5]\n" + + "[@6,6:6='a',<5>,1:6]\n" + + "[@7,7:7='.',<4>,1:7]\n" + + "[@8,8:8='b',<5>,1:8]\n" + + "[@9,9:9=' ',<6>,1:9]\n" + + "[@10,10:10='a',<5>,1:10]\n" + + "[@11,11:11='.',<4>,1:11]\n" + + "[@12,12:12='l',<5>,1:12]\n" + + "[@13,13:12='',<-1>,1:13]\n",null); + file.addLexerTest(input, "EOFByItself", "L", "", + "[@0,0:-1='',<1>,1:0]\n" + + "[@1,0:-1='',<-1>,1:0]\n", null); + file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "", + "[@0,0:-1='',<-1>,1:0]\n", null, 1); + file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "a", + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n", null, 2); + file.addLexerTest(input, "CharSet", "L", "34\r\n 34", + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,5:6='34',<1>,2:1]\n" + + "[@2,7:6='',<-1>,2:3]\n", null); + file.addLexerTest(input, "CharSetPlus", "L", "34\r\n 34", + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,5:6='34',<1>,2:1]\n" + + "[@2,7:6='',<-1>,2:3]\n", null); + file.addLexerTest(input, "CharSetNot", "L", "xaf", + "I\n" + + "[@0,0:2='xaf',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", null); + file.addLexerTest(input, "CharSetInSet", "L", "a x", + "I\n" + + "I\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,2:2='x',<1>,1:2]\n" + + "[@2,3:2='',<-1>,1:3]\n", null); + file.addLexerTest(input, "CharSetRange", "L", "34\r 34 a2 abc \n ", + "I\n" + + "I\n" + + "ID\n" + + "ID\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,4:5='34',<1>,1:4]\n" + + "[@2,7:8='a2',<2>,1:7]\n" + + "[@3,10:12='abc',<2>,1:10]\n" + + "[@4,18:17='',<-1>,2:3]\n", null); + file.addLexerTest(input, "CharSetWithMissingEndRange", "L", "00\r\n", + "I\n" + + "[@0,0:1='00',<1>,1:0]\n" + + "[@1,4:3='',<-1>,2:0]\n", null); + file.addLexerTest(input, "CharSetWithMissingEscapeChar", "L", "34 ", + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", null); + file.addLexerTest(input, "CharSetWithEscapedChar", "L", "- ] ", + "DASHBRACK\n" + + "DASHBRACK\n" + + "[@0,0:0='-',<1>,1:0]\n" + + "[@1,2:2=']',<1>,1:2]\n" + + "[@2,4:3='',<-1>,1:4]\n", null); + file.addLexerTest(input, 
"CharSetWithReversedRange", "L", "9", + "A\n" + + "[@0,0:0='9',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n", null); + file.addLexerTest(input, "CharSetWithQuote", "L", "b\"a", + "A\n" + + "[@0,0:2='b\"a',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", null, 1); + file.addLexerTest(input, "CharSetWithQuote", "L", "b\"\\a", + "A\n" + + "[@0,0:3='b\"\\a',<1>,1:0]\n" + + "[@1,4:3='',<-1>,1:4]\n", null, 2); + final int TOKENS = 4; + final int LABEL = 5; + final int IDENTIFIER = 6; + file.addLexerTest(input, "PositionAdjustingLexer", "L", + "tokens\n" + + "tokens {\n" + + "notLabel\n" + + "label1 =\n" + + "label2 +=\n" + + "notLabel\n", + "[@0,0:5='tokens',<" + IDENTIFIER + ">,1:0]\n" + + "[@1,7:12='tokens',<" + TOKENS + ">,2:0]\n" + + "[@2,14:14='{',<3>,2:7]\n" + + "[@3,16:23='notLabel',<" + IDENTIFIER + ">,3:0]\n" + + "[@4,25:30='label1',<" + LABEL + ">,4:0]\n" + + "[@5,32:32='=',<1>,4:7]\n" + + "[@6,34:39='label2',<" + LABEL + ">,5:0]\n" + + "[@7,41:42='+=',<2>,5:7]\n" + + "[@8,44:51='notLabel',<" + IDENTIFIER + ">,6:0]\n" + + "[@9,53:52='',<-1>,7:0]\n", null); + file.addLexerTest(input, "LargeLexer", "L", "KW400", + "[@0,0:4='KW400',<402>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n", null); + return file; + } + + private TestFile buildCompositeParsers() throws Exception { + TestFile file = new TestFile("CompositeParsers"); + file.importErrorQueue = true; + file.importGrammar = true; + file.addCompositeParserTest(input, "DelegatorInvokesDelegateRule", "M", "s", "b", "S.a\n", null, "S"); + file.addCompositeParserTest(input, "BringInLiteralsFromDelegate", "M", "s", "=a", "S.a\n", null, "S"); + file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithArgs", "M", "s", "a", "S.a1000\n", null, "S"); + file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithReturnStruct", "M", "s", "b", "S.ab\n", null, "S"); + file.addCompositeParserTest(input, "DelegatorAccessesDelegateMembers", "M", "s", "b", "foo\n", null, "S"); + file.addCompositeParserTest(input, "DelegatorInvokesFirstVersionOfDelegateRule", "M", "s", "b", "S.a\n", null, "S", "T"); + CompositeParserTestMethod ct = file.addCompositeParserTest(input, "DelegatesSeeSameTokenType", "M", "s", "aa", "S.x\nT.y\n", null, "S", "T"); + ct.afterGrammar = "writeFile(tmpdir, \"M.g4\", grammar);\n" + + "ErrorQueue equeue = new ErrorQueue();\n" + + "Grammar g = new Grammar(tmpdir+\"/M.g4\", grammar, equeue);\n" + + "String expectedTokenIDToTypeMap = \"{EOF=-1, B=1, A=2, C=3, WS=4}\";\n" + + "String expectedStringLiteralToTypeMap = \"{'a'=2, 'b'=1, 'c'=3}\";\n" + + "String expectedTypeToTokenList = \"[B, A, C, WS]\";\n" + + "assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString());\n" + + "assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString());\n" + + "assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString());\n" + + "assertEquals(\"unexpected errors: \"+equeue, 0, equeue.errors.size());\n"; + ct = file.addCompositeParserTest(input, "CombinedImportsCombined", "M", "s", "x 34 9", "S.x\n", null, "S"); + ct.afterGrammar = "writeFile(tmpdir, \"M.g4\", grammar);\n" + + "ErrorQueue equeue = new ErrorQueue();\n" + + "new Grammar(tmpdir+\"/M.g4\", grammar, equeue);\n" + + "assertEquals(\"unexpected errors: \" + equeue, 0, equeue.errors.size());\n"; + file.addCompositeParserTest(input, "DelegatorRuleOverridesDelegate", "M", "a", "c", "S.a\n", null, "S"); + file.addCompositeParserTest(input, "DelegatorRuleOverridesLookaheadInDelegate", "M", "prog", "float x = 3;", "Decl: floatx=3;\n", null, 
"S"); + file.addCompositeParserTest(input, "DelegatorRuleOverridesDelegates", "M", "a", "c", "M.b\nS.a\n", null, "S", "T"); + file.addCompositeParserTest(input, "KeywordVSIDOrder", "M", "a", "abc", + "M.A\n" + + "M.a: [@0,0:2='abc',<1>,1:0]\n", null, "S"); + file.addCompositeParserTest(input, "ImportedRuleWithAction", "M", "s", "b", "", null, "S"); + file.addCompositeParserTest(input, "ImportedGrammarWithEmptyOptions", "M", "s", "b", "", null, "S"); + file.addCompositeParserTest(input, "ImportLexerWithOnlyFragmentRules", "M", "program", "test test", "", null, "S"); + return file; + } + + private TestFile buildParserExec() throws Exception { + TestFile file = new TestFile("ParserExec"); + file.addParserTest(input, "Labels", "T", "a", "abc 34", "", null); + file.addParserTest(input, "ListLabelsOnSet", "T", "a", "abc 34", "", null); + file.addParserTest(input, "AorB", "T", "a", "34", "alt 2\n", null); + file.addParserTest(input, "Basic", "T", "a", "abc 34", "abc34\n", null); + file.addParserTest(input, "APlus", "T", "a", "a b c", "abc\n", null); + file.addParserTest(input, "AorAPlus", "T", "a", "a b c", "abc\n", null); + file.addParserTest(input, "IfIfElseGreedyBinding1", "T", "start", + "if y if y x else x", "if y x else x\nif y if y x else x\n", null); + file.addParserTest(input, "IfIfElseGreedyBinding2", "T", "start", + "if y if y x else x", "if y x else x\nif y if y x else x\n", null); + file.addParserTest(input, "IfIfElseNonGreedyBinding1", "T", "start", + "if y if y x else x", "if y x\nif y if y x else x\n", null); + file.addParserTest(input, "IfIfElseNonGreedyBinding2", "T", "start", + "if y if y x else x", "if y x\nif y if y x else x\n", null); + file.addParserTest(input, "AStar", "T", "a", "", "\n", null, 1); + file.addParserTest(input, "AStar", "T", "a", "a b c", "abc\n", null, 2); + file.addParserTest(input, "LL1OptionalBlock", "T", "a", "", "\n", null, 1); + file.addParserTest(input, "LL1OptionalBlock", "T", "a", "a", "a\n", null, 2); + file.addParserTest(input, "AorAStar", "T", "a", "", "\n", null, 1); + file.addParserTest(input, "AorAStar", "T", "a", "a b c", "abc\n", null, 2); + file.addParserTest(input, "AorBPlus", "T", "a", "a 34 c", "a34c\n", null); + file.addParserTest(input, "AorBStar", "T", "a", "", "\n", null, 1); + file.addParserTest(input, "AorBStar", "T", "a", "a 34 c", "a34c\n", null, 2); + file.addParserTest(input, "Optional", "T", "stat", "x", "", null, 1); + file.addParserTest(input, "Optional", "T", "stat", "if x", "", null, 2); + file.addParserTest(input, "Optional", "T", "stat", "if x else x", "", null, 3); + file.addParserTest(input, "Optional", "T", "stat", "if if x else x", "", null, 4); + file.addParserTest(input, "PredicatedIfIfElse", "T", "s", "if x if x a else b", "", null); + /* file.addTest(input, "StartRuleWithoutEOF", "T", "s", "abc 34", + "Decision 0:\n" + "s0-ID->s1\n" + "s1-INT->s2\n" + "s2-EOF->:s3=>1\n", null); */ + file.addParserTest(input, "LabelAliasingAcrossLabeledAlternatives", "T", "start", "xy", "x\ny\n", null); + file.addParserTest(input, "PredictionIssue334", "T", "file_", "a", "(file_ (item a) )\n", null); + file.addParserTest(input, "ListLabelForClosureContext", "T", "expression", "a", "", null); + return file; + } + + + +} diff --git a/tool/test/org/antlr/v4/testgen/Grammar.java b/tool/test/org/antlr/v4/testgen/Grammar.java new file mode 100644 index 000000000..4b159cf78 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/Grammar.java @@ -0,0 +1,51 @@ +package org.antlr.v4.testgen; + +import java.io.File; +import 
java.io.FileInputStream; +import java.io.InputStream; + +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; + +public class Grammar { + + public String fileName; + public String grammarName; + public String[] lines; + public ST template; + + public Grammar(String fileName, String grammarName) { + this.fileName = fileName; + this.grammarName = grammarName; + } + + public void load(File grammarDir) throws Exception { + template = loadGrammar(grammarDir, fileName); + } + + protected ST loadGrammar(File grammarDir, String grammarFileName) throws Exception { + File file = new File(grammarDir, grammarFileName + ".st"); + InputStream input = new FileInputStream(file); + try { + byte[] data = new byte[(int)file.length()]; + int next = 0; + while(input.available()>0) { + int read = input.read(data, next, data.length - next); + next += read; + } + String s = new String(data); + return new ST(s); + } finally { + input.close(); + } + } + + public void generate(STGroup group) { + template.add("grammarName", grammarName); + template.groupThatCreatedThisInstance = group; // so templates get interpreted + lines = template.render().split("\n"); + for(int i=0;i unitTests = new ArrayList(); + public String name; + public List tests = new ArrayList(); + public boolean importErrorQueue = false; + public boolean importGrammar = false; + + public TestFile(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void addParserTest(File grammarDir, String name, String grammarName, String methodName, + String input, String expectedOutput, String expectedErrors) throws Exception { + addParserTest( grammarDir, name, grammarName, methodName, input, expectedOutput, expectedErrors, null); + } + + public ParserTestMethod addParserTest(File grammarDir, String name, String grammarName, String methodName, + String input, String expectedOutput, String expectedErrors, Integer index) throws Exception { + ParserTestMethod tm = new ParserTestMethod(name, grammarName, methodName, input, expectedOutput, expectedErrors, index); + tm.loadGrammars(grammarDir, this.name); + unitTests.add(tm); + return tm; + } + + public CompositeParserTestMethod addCompositeParserTest(File grammarDir, String name, String grammarName, String methodName, + String input, String expectedOutput, String expectedErrors, String ... slaves) throws Exception { + CompositeParserTestMethod tm = new CompositeParserTestMethod(name, grammarName, methodName, input, expectedOutput, expectedErrors, slaves); + tm.loadGrammars(grammarDir, this.name); + unitTests.add(tm); + return tm; + } + + public LexerTestMethod addLexerTest(File grammarDir, String name, String grammarName, + String input, String expectedOutput, String expectedErrors) throws Exception { + return addLexerTest(grammarDir, name, grammarName, input, expectedOutput, expectedErrors, null); + } + + public LexerTestMethod addLexerTest(File grammarDir, String name, String grammarName, + String input, String expectedOutput, String expectedErrors, Integer index) throws Exception { + LexerTestMethod tm = new LexerTestMethod(name, grammarName, input, expectedOutput, expectedErrors, index); + tm.loadGrammars(grammarDir, this.name); + unitTests.add(tm); + return tm; + } + + public CompositeLexerTestMethod addCompositeLexerTest(File grammarDir, String name, String grammarName, + String input, String expectedOutput, String expectedErrors, String ... 
slaves) throws Exception { + CompositeLexerTestMethod tm = new CompositeLexerTestMethod(name, grammarName, input, expectedOutput, expectedErrors, slaves); + tm.loadGrammars(grammarDir, this.name); + unitTests.add(tm); + return tm; + } + + public void generateUnitTests(STGroup group) { + for(TestMethod tm : unitTests) { + tm.generateGrammars(group); + String name = tm.getClass().getSimpleName(); + ST template = group.getInstanceOf(name); + template.add("test", tm); + tests.add(template.render()); + } + } + + + + + +} diff --git a/tool/test/org/antlr/v4/testgen/TestMethod.java b/tool/test/org/antlr/v4/testgen/TestMethod.java new file mode 100644 index 000000000..ef764fe2c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/TestMethod.java @@ -0,0 +1,33 @@ +package org.antlr.v4.testgen; + +import java.io.File; + +import org.stringtemplate.v4.STGroup; + +public abstract class TestMethod { + + public String name; + public Grammar grammar; + public String afterGrammar; + public String input; + public String expectedOutput; + public String expectedErrors; + + protected TestMethod(String name, String grammarName, String input, + String expectedOutput, String expectedErrors, Integer index) { + this.name = name + (index==null ? "" : "_" + index); + this.grammar = new Grammar(name, grammarName); + this.input = Generator.escape(input); + this.expectedOutput = Generator.escape(expectedOutput); + this.expectedErrors = Generator.escape(expectedErrors); + } + + public void loadGrammars(File grammarDir, String testFileName) throws Exception { + grammar.load(new File(grammarDir, testFileName)); + } + + public void generateGrammars(STGroup group) { + grammar.generate(group); + } + +} diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st new file mode 100644 index 000000000..9e984e32d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st @@ -0,0 +1,4 @@ +lexer grammar M; +import S; +B : 'b'; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st new file mode 100644 index 000000000..902a03a03 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st @@ -0,0 +1,3 @@ +lexer grammar S; +A : 'a' {}; +C : 'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st new file mode 100644 index 000000000..3db9d0e88 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st @@ -0,0 +1,4 @@ +lexer grammar M; +import S; +A : 'a' B {}; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st new file mode 100644 index 000000000..272238968 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st @@ -0,0 +1,3 @@ +lexer grammar S; +A : 'a' {}; +B : 'b' {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st 
b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st new file mode 100644 index 000000000..363b5bac3 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st @@ -0,0 +1,4 @@ +grammar M; +import S; +s : a ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st new file mode 100644 index 000000000..ed4b76ea8 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st @@ -0,0 +1,2 @@ +parser grammar S; +a : '=' 'a' {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined.st new file mode 100644 index 000000000..1609afb7b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined.st @@ -0,0 +1,3 @@ +grammar M; +import S; +s : x INT; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined_S.st new file mode 100644 index 000000000..b2b4b7452 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined_S.st @@ -0,0 +1,5 @@ +parser grammar S; +tokens { A, B, C } +x : 'x' INT {}; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st new file mode 100644 index 000000000..dcd2e01b4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st @@ -0,0 +1,17 @@ +// The lexer will create rules to match letters a, b, c. +// The associated token types A, B, C must have the same value +// and all import'd parsers. 
Since ANTLR regenerates all imports +// for use with the delegator M, it can generate the same token type +// mapping in each parser: +// public static final int C=6; +// public static final int EOF=-1; +// public static final int B=5; +// public static final int WS=7; +// public static final int A=4; +grammar M; +import S,T; +s : x y ; // matches AA, which should be 'aa' +B : 'b' ; // another order: B, A, C +A : 'a' ; +C : 'c' ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st new file mode 100644 index 000000000..4ed0581fe --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st @@ -0,0 +1,3 @@ +parser grammar S; +tokens { A, B, C } +x : A {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st new file mode 100644 index 000000000..817f5ccf2 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st @@ -0,0 +1,3 @@ +parser grammar S; +tokens { C, B, A } // reverse order +y : A {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st new file mode 100644 index 000000000..7b2eacb7a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st @@ -0,0 +1,4 @@ +grammar M; // uses no rules from the import +import S; +s : 'b'{}; // gS is import pointer +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st new file mode 100644 index 000000000..3450c4f8b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st @@ -0,0 +1,5 @@ +parser grammar S; +@members { + +} +a : B; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st new file mode 100644 index 000000000..0b8891303 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st @@ -0,0 +1,5 @@ +grammar M; +import S; +s : a ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st new file mode 100644 index 000000000..cff94f4a8 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st @@ -0,0 +1,5 @@ +grammar M; +import S; +s : label=a[3] {} ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st new file mode 100644 index 000000000..7b965e623 --- /dev/null +++ 
b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st @@ -0,0 +1,2 @@ +parser grammar S; +a[int x] returns [int y] : B {;$y=1000;}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st new file mode 100644 index 000000000..a42f67f68 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st @@ -0,0 +1,5 @@ +grammar M; +import S; +s : a {} ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st new file mode 100644 index 000000000..9a2e9c2d0 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st @@ -0,0 +1,2 @@ +parser grammar S; +A : B {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st new file mode 100644 index 000000000..21c07ab4f --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st @@ -0,0 +1,2 @@ +parser grammar S; +a : B {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st new file mode 100644 index 000000000..3bbf0d2e5 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st @@ -0,0 +1,5 @@ +grammar M; +import S,T; +s : a ; +B : 'b' ; // defines B from inherited token space +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st new file mode 100644 index 000000000..e729bc983 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st @@ -0,0 +1,3 @@ +parser grammar S; +a : B {}; +b : B; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st new file mode 100644 index 000000000..259014348 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st @@ -0,0 +1,2 @@ +parser grammar T; +a : B {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st new file mode 100644 index 000000000..89a08234c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st @@ -0,0 +1,4 @@ +grammar M; +import S; +b : 'b'|'c'; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st 
b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st new file mode 100644 index 000000000..4354737ad --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st @@ -0,0 +1,3 @@ +parser grammar S; +a : b {}; +b : B ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st new file mode 100644 index 000000000..d00bb36a3 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st @@ -0,0 +1,4 @@ +grammar M; +import S, T; +b : 'b'|'c' {}|B|A; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st new file mode 100644 index 000000000..2ac580483 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st @@ -0,0 +1,4 @@ +parser grammar S; +a : b {}; +b : 'b' ; + \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st new file mode 100644 index 000000000..57893ca2e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st @@ -0,0 +1,3 @@ +parser grammar S; +tokens { A } +b : 'b' {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st new file mode 100644 index 000000000..6bc0e69e1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st @@ -0,0 +1,7 @@ +grammar M; +import S; +prog : decl ; +type_ : 'int' | 'float' ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st new file mode 100644 index 000000000..8c59175f8 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st @@ -0,0 +1,7 @@ +parser grammar S; +type_ : 'int' ; +decl : type_ ID ';' + | type_ ID init ';' { + + }; +init : '=' INT; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st new file mode 100644 index 000000000..ed0547f02 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st @@ -0,0 +1,4 @@ +grammar M; +import S; +program : 'test' 'test'; +WS : (UNICODE_CLASS_Zs)+ -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st new file mode 100644 index 000000000..dab7479fe --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st @@ -0,0 +1,6 @@ +lexer grammar S; 
+fragment +UNICODE_CLASS_Zs : '\u0020' | '\u00A0' | '\u1680' | '\u180E' + | '\u2000'..'\u200A' + | '\u202F' | '\u205F' | '\u3000' + ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st new file mode 100644 index 000000000..46c443c69 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st @@ -0,0 +1,5 @@ +grammar M; +import S; +s : a; +B : 'b'; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st new file mode 100644 index 000000000..e34c7abe2 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st @@ -0,0 +1,3 @@ +parser grammar S; +options {} +a : B; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction.st new file mode 100644 index 000000000..46c443c69 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction.st @@ -0,0 +1,5 @@ +grammar M; +import S; +s : a; +B : 'b'; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction_S.st new file mode 100644 index 000000000..e17380340 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction_S.st @@ -0,0 +1,2 @@ +parser grammar S; +a @after {} : B; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder.st new file mode 100644 index 000000000..6e137e60e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder.st @@ -0,0 +1,5 @@ +grammar M; +import S; +a : A {}; +A : 'abc' {}; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder_S.st new file mode 100644 index 000000000..8808195c1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder_S.st @@ -0,0 +1,2 @@ +lexer grammar S; +ID : 'a'..'z'+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st new file mode 100644 index 000000000..d404d7422 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st @@ -0,0 +1,5 @@ +grammar ; +s @after {} + : ID | ID {} ; +ID : 'a'..'z'+; +WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbiguityNoLoop.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbiguityNoLoop.st new file mode 100644 index 000000000..867aa162c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbiguityNoLoop.st @@ -0,0 +1,12 @@ +grammar ; +prog +@init {} + : expr expr {} + | expr + ; +expr: '@' + | ID '@' + | ID + ; +ID : [a-z]+ ; +WS : [ \r\n\t]+ -> skip ; diff --git
a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st new file mode 100644 index 000000000..2e8c5ed26 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st @@ -0,0 +1,9 @@ +grammar ; +s @after {} + : '$' a | '@' b ; +a : e ID ; +b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st new file mode 100644 index 000000000..52378f8dd --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st @@ -0,0 +1,9 @@ +grammar ; +s @after {} + : ('$' a | '@' b)+ ; +a : e ID ; +b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/ExprAmbiguity.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/ExprAmbiguity.st new file mode 100644 index 000000000..da23906ab --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/ExprAmbiguity.st @@ -0,0 +1,13 @@ +grammar ; +s +@init {} +: expr[0] {}; + expr[int _p] + : ID + ( + {5 >= $_p}? '*' expr[6] + | {4 >= $_p}? '+' expr[5] + )* + ; +ID : [a-zA-Z]+ ; +WS : [ \r\n\t]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st new file mode 100644 index 000000000..b331d468d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st @@ -0,0 +1,10 @@ +grammar ; +s +@init {} +@after {} + : '{' stat* '}' ; +stat: 'if' ID 'then' stat ('else' ID)? 
+ | 'return' + ; +ID : 'a'..'z'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st new file mode 100644 index 000000000..09883ac5c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st @@ -0,0 +1,15 @@ +grammar ; +prog +@init {} + : expr_or_assign*; +expr_or_assign + : expr '++' {} + | expr {} + ; +expr: expr_primary ('\<-' ID)?; +expr_primary + : '(' ID ')' + | ID '(' ID ')' + | ID + ; +ID : [a-z]+ ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st new file mode 100644 index 000000000..ccacc60c5 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st @@ -0,0 +1,9 @@ +grammar ; +s @after {} + : a; +a : e ID ; +b : e INT ID ; +e : INT | ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st new file mode 100644 index 000000000..ad130a0fa --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st @@ -0,0 +1,8 @@ +lexer grammar ; +I : ({} 'a' +| {} + 'a' {} + 'b' {}) + {} ; +WS : (' '|'\n') -> skip ; +J : .; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSet.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSet.st new file mode 100644 index 000000000..6dc908042 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSet.st @@ -0,0 +1,3 @@ +lexer grammar ; +I : '0'..'9'+ {} ; +WS : [ \n\u000D] -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetInSet.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetInSet.st new file mode 100644 index 000000000..7f86ef615 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetInSet.st @@ -0,0 +1,4 @@ +lexer grammar ; +I : (~[ab \\n]|'a') {} ; +WS : [ \n\u000D]+ -> skip ; + \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetNot.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetNot.st new file mode 100644 index 000000000..8c8a4d43a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetNot.st @@ -0,0 +1,3 @@ +lexer grammar ; +I : ~[ab \n] ~[ \ncd]* {} ; +WS : [ \n\u000D]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetPlus.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetPlus.st new file mode 100644 index 000000000..cc1ad08c3 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetPlus.st @@ -0,0 +1,3 @@ +lexer grammar ; +I : '0'..'9'+ {} ; +WS : [ \n\u000D]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetRange.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetRange.st new file mode 100644 index 000000000..9d49c4d11 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetRange.st @@ -0,0 +1,4 @@ +lexer grammar ; +I : [0-9]+ {} ; +ID : [a-zA-Z] [a-zA-Z0-9]* {} ; +WS : [ \n\u0009\r]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithEscapedChar.st
b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithEscapedChar.st new file mode 100644 index 000000000..e5972371e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithEscapedChar.st @@ -0,0 +1,3 @@ +lexer grammar ; +DASHBRACK : [\\-\]]+ {} ; +WS : [ \u]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEndRange.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEndRange.st new file mode 100644 index 000000000..f362a75a2 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEndRange.st @@ -0,0 +1,3 @@ +lexer grammar ; +I : [0-]+ {} ; +WS : [ \n\u000D]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEscapeChar.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEscapeChar.st new file mode 100644 index 000000000..6340980ac --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEscapeChar.st @@ -0,0 +1,3 @@ +lexer grammar ; +I : [0-9]+ {} ; +WS : [ \u]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithQuote.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithQuote.st new file mode 100644 index 000000000..abac47e4a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithQuote.st @@ -0,0 +1,3 @@ +lexer grammar ; +A : ["a-z]+ {} ; +WS : [ \n\t]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithReversedRange.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithReversedRange.st new file mode 100644 index 000000000..f01b9afd1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithReversedRange.st @@ -0,0 +1,3 @@ +lexer grammar ; +A : [z-a9]+ {} ; +WS : [ \u]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFByItself.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFByItself.st new file mode 100644 index 000000000..ca92cc56d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFByItself.st @@ -0,0 +1,3 @@ +lexer grammar ; +DONE : EOF ; +A : 'a'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFSuffixInFirstRule.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFSuffixInFirstRule.st new file mode 100644 index 000000000..374abcfa3 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFSuffixInFirstRule.st @@ -0,0 +1,4 @@ +lexer grammar ; +A : 'a' EOF ; +B : 'a'; +C : 'c'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyClosure.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyClosure.st new file mode 100644 index 000000000..fca53f1f7 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyClosure.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : '//' .*? 
'\n' CMT*; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st new file mode 100644 index 000000000..1cc91df27 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st @@ -0,0 +1,4 @@ +lexer grammar ; +I : ('a' | 'ab') {} ; +WS : (' '|'\n') -> skip ; +J : .; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyOptional.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyOptional.st new file mode 100644 index 000000000..62435a715 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyOptional.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : '//' .*? '\n' CMT?; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyPositiveClosure.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyPositiveClosure.st new file mode 100644 index 000000000..1de332e41 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyPositiveClosure.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : ('//' .*? '\n')+; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st new file mode 100644 index 000000000..028ebabb4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st @@ -0,0 +1,8 @@ +lexer grammar ; +HexLiteral : '0' ('x'|'X') HexDigit+ ; +DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ; +FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ; +DOT : '.' ; +ID : 'a'..'z'+ ; +fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st new file mode 100644 index 000000000..6c4987470 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st @@ -0,0 +1,4 @@ +lexer grammar ; +KEND : 'end' ; // has priority +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st new file mode 100644 index 000000000..5a040ea9f --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st @@ -0,0 +1,4002 @@ +lexer grammar ; +WS : [ \t\r\n]+ -> skip; +KW0 : 'KW0'; +KW1 : 'KW1'; +KW2 : 'KW2'; +KW3 : 'KW3'; +KW4 : 'KW4'; +KW5 : 'KW5'; +KW6 : 'KW6'; +KW7 : 'KW7'; +KW8 : 'KW8'; +KW9 : 'KW9'; +KW10 : 'KW10'; +KW11 : 'KW11'; +KW12 : 'KW12'; +KW13 : 'KW13'; +KW14 : 'KW14'; +KW15 : 'KW15'; +KW16 : 'KW16'; +KW17 : 'KW17'; +KW18 : 'KW18'; +KW19 : 'KW19'; +KW20 : 'KW20'; +KW21 : 'KW21'; +KW22 : 'KW22'; +KW23 : 'KW23'; +KW24 : 'KW24'; +KW25 : 'KW25'; +KW26 : 'KW26'; +KW27 : 'KW27'; +KW28 : 'KW28'; +KW29 : 'KW29'; +KW30 : 'KW30'; +KW31 : 'KW31'; +KW32 : 'KW32'; +KW33 : 'KW33'; +KW34 : 'KW34'; +KW35 : 'KW35'; +KW36 : 'KW36'; +KW37 : 'KW37'; +KW38 : 'KW38'; +KW39 : 'KW39'; +KW40 : 'KW40'; +KW41 : 'KW41'; +KW42 : 'KW42'; +KW43 : 'KW43'; +KW44 : 'KW44'; +KW45 : 'KW45'; +KW46 : 'KW46'; +KW47 : 'KW47'; +KW48 : 'KW48'; +KW49 : 'KW49'; +KW50 : 'KW50'; +KW51 : 'KW51'; +KW52 : 'KW52'; +KW53 : 'KW53'; +KW54 : 'KW54'; +KW55 : 'KW55'; +KW56 : 'KW56'; +KW57 : 'KW57'; +KW58 : 'KW58'; +KW59 : 'KW59'; +KW60 : 'KW60'; +KW61 : 'KW61'; +KW62 : 'KW62'; +KW63 : 'KW63'; +KW64 : 'KW64'; +KW65 : 'KW65'; +KW66 : 'KW66'; +KW67 : 'KW67'; +KW68 : 'KW68'; +KW69 : 'KW69'; +KW70 : 'KW70'; 
+KW71 : 'KW71'; +KW72 : 'KW72'; +KW73 : 'KW73'; +KW74 : 'KW74'; +KW75 : 'KW75'; +KW76 : 'KW76'; +KW77 : 'KW77'; +KW78 : 'KW78'; +KW79 : 'KW79'; +KW80 : 'KW80'; +KW81 : 'KW81'; +KW82 : 'KW82'; +KW83 : 'KW83'; +KW84 : 'KW84'; +KW85 : 'KW85'; +KW86 : 'KW86'; +KW87 : 'KW87'; +KW88 : 'KW88'; +KW89 : 'KW89'; +KW90 : 'KW90'; +KW91 : 'KW91'; +KW92 : 'KW92'; +KW93 : 'KW93'; +KW94 : 'KW94'; +KW95 : 'KW95'; +KW96 : 'KW96'; +KW97 : 'KW97'; +KW98 : 'KW98'; +KW99 : 'KW99'; +KW100 : 'KW100'; +KW101 : 'KW101'; +KW102 : 'KW102'; +KW103 : 'KW103'; +KW104 : 'KW104'; +KW105 : 'KW105'; +KW106 : 'KW106'; +KW107 : 'KW107'; +KW108 : 'KW108'; +KW109 : 'KW109'; +KW110 : 'KW110'; +KW111 : 'KW111'; +KW112 : 'KW112'; +KW113 : 'KW113'; +KW114 : 'KW114'; +KW115 : 'KW115'; +KW116 : 'KW116'; +KW117 : 'KW117'; +KW118 : 'KW118'; +KW119 : 'KW119'; +KW120 : 'KW120'; +KW121 : 'KW121'; +KW122 : 'KW122'; +KW123 : 'KW123'; +KW124 : 'KW124'; +KW125 : 'KW125'; +KW126 : 'KW126'; +KW127 : 'KW127'; +KW128 : 'KW128'; +KW129 : 'KW129'; +KW130 : 'KW130'; +KW131 : 'KW131'; +KW132 : 'KW132'; +KW133 : 'KW133'; +KW134 : 'KW134'; +KW135 : 'KW135'; +KW136 : 'KW136'; +KW137 : 'KW137'; +KW138 : 'KW138'; +KW139 : 'KW139'; +KW140 : 'KW140'; +KW141 : 'KW141'; +KW142 : 'KW142'; +KW143 : 'KW143'; +KW144 : 'KW144'; +KW145 : 'KW145'; +KW146 : 'KW146'; +KW147 : 'KW147'; +KW148 : 'KW148'; +KW149 : 'KW149'; +KW150 : 'KW150'; +KW151 : 'KW151'; +KW152 : 'KW152'; +KW153 : 'KW153'; +KW154 : 'KW154'; +KW155 : 'KW155'; +KW156 : 'KW156'; +KW157 : 'KW157'; +KW158 : 'KW158'; +KW159 : 'KW159'; +KW160 : 'KW160'; +KW161 : 'KW161'; +KW162 : 'KW162'; +KW163 : 'KW163'; +KW164 : 'KW164'; +KW165 : 'KW165'; +KW166 : 'KW166'; +KW167 : 'KW167'; +KW168 : 'KW168'; +KW169 : 'KW169'; +KW170 : 'KW170'; +KW171 : 'KW171'; +KW172 : 'KW172'; +KW173 : 'KW173'; +KW174 : 'KW174'; +KW175 : 'KW175'; +KW176 : 'KW176'; +KW177 : 'KW177'; +KW178 : 'KW178'; +KW179 : 'KW179'; +KW180 : 'KW180'; +KW181 : 'KW181'; +KW182 : 'KW182'; +KW183 : 'KW183'; +KW184 : 'KW184'; +KW185 : 'KW185'; +KW186 : 'KW186'; +KW187 : 'KW187'; +KW188 : 'KW188'; +KW189 : 'KW189'; +KW190 : 'KW190'; +KW191 : 'KW191'; +KW192 : 'KW192'; +KW193 : 'KW193'; +KW194 : 'KW194'; +KW195 : 'KW195'; +KW196 : 'KW196'; +KW197 : 'KW197'; +KW198 : 'KW198'; +KW199 : 'KW199'; +KW200 : 'KW200'; +KW201 : 'KW201'; +KW202 : 'KW202'; +KW203 : 'KW203'; +KW204 : 'KW204'; +KW205 : 'KW205'; +KW206 : 'KW206'; +KW207 : 'KW207'; +KW208 : 'KW208'; +KW209 : 'KW209'; +KW210 : 'KW210'; +KW211 : 'KW211'; +KW212 : 'KW212'; +KW213 : 'KW213'; +KW214 : 'KW214'; +KW215 : 'KW215'; +KW216 : 'KW216'; +KW217 : 'KW217'; +KW218 : 'KW218'; +KW219 : 'KW219'; +KW220 : 'KW220'; +KW221 : 'KW221'; +KW222 : 'KW222'; +KW223 : 'KW223'; +KW224 : 'KW224'; +KW225 : 'KW225'; +KW226 : 'KW226'; +KW227 : 'KW227'; +KW228 : 'KW228'; +KW229 : 'KW229'; +KW230 : 'KW230'; +KW231 : 'KW231'; +KW232 : 'KW232'; +KW233 : 'KW233'; +KW234 : 'KW234'; +KW235 : 'KW235'; +KW236 : 'KW236'; +KW237 : 'KW237'; +KW238 : 'KW238'; +KW239 : 'KW239'; +KW240 : 'KW240'; +KW241 : 'KW241'; +KW242 : 'KW242'; +KW243 : 'KW243'; +KW244 : 'KW244'; +KW245 : 'KW245'; +KW246 : 'KW246'; +KW247 : 'KW247'; +KW248 : 'KW248'; +KW249 : 'KW249'; +KW250 : 'KW250'; +KW251 : 'KW251'; +KW252 : 'KW252'; +KW253 : 'KW253'; +KW254 : 'KW254'; +KW255 : 'KW255'; +KW256 : 'KW256'; +KW257 : 'KW257'; +KW258 : 'KW258'; +KW259 : 'KW259'; +KW260 : 'KW260'; +KW261 : 'KW261'; +KW262 : 'KW262'; +KW263 : 'KW263'; +KW264 : 'KW264'; +KW265 : 'KW265'; +KW266 : 'KW266'; +KW267 : 'KW267'; +KW268 : 'KW268'; +KW269 : 'KW269'; +KW270 : 'KW270'; +KW271 : 
'KW271'; +KW272 : 'KW272'; +KW273 : 'KW273'; +KW274 : 'KW274'; +KW275 : 'KW275'; +KW276 : 'KW276'; +KW277 : 'KW277'; +KW278 : 'KW278'; +KW279 : 'KW279'; +KW280 : 'KW280'; +KW281 : 'KW281'; +KW282 : 'KW282'; +KW283 : 'KW283'; +KW284 : 'KW284'; +KW285 : 'KW285'; +KW286 : 'KW286'; +KW287 : 'KW287'; +KW288 : 'KW288'; +KW289 : 'KW289'; +KW290 : 'KW290'; +KW291 : 'KW291'; +KW292 : 'KW292'; +KW293 : 'KW293'; +KW294 : 'KW294'; +KW295 : 'KW295'; +KW296 : 'KW296'; +KW297 : 'KW297'; +KW298 : 'KW298'; +KW299 : 'KW299'; +KW300 : 'KW300'; +KW301 : 'KW301'; +KW302 : 'KW302'; +KW303 : 'KW303'; +KW304 : 'KW304'; +KW305 : 'KW305'; +KW306 : 'KW306'; +KW307 : 'KW307'; +KW308 : 'KW308'; +KW309 : 'KW309'; +KW310 : 'KW310'; +KW311 : 'KW311'; +KW312 : 'KW312'; +KW313 : 'KW313'; +KW314 : 'KW314'; +KW315 : 'KW315'; +KW316 : 'KW316'; +KW317 : 'KW317'; +KW318 : 'KW318'; +KW319 : 'KW319'; +KW320 : 'KW320'; +KW321 : 'KW321'; +KW322 : 'KW322'; +KW323 : 'KW323'; +KW324 : 'KW324'; +KW325 : 'KW325'; +KW326 : 'KW326'; +KW327 : 'KW327'; +KW328 : 'KW328'; +KW329 : 'KW329'; +KW330 : 'KW330'; +KW331 : 'KW331'; +KW332 : 'KW332'; +KW333 : 'KW333'; +KW334 : 'KW334'; +KW335 : 'KW335'; +KW336 : 'KW336'; +KW337 : 'KW337'; +KW338 : 'KW338'; +KW339 : 'KW339'; +KW340 : 'KW340'; +KW341 : 'KW341'; +KW342 : 'KW342'; +KW343 : 'KW343'; +KW344 : 'KW344'; +KW345 : 'KW345'; +KW346 : 'KW346'; +KW347 : 'KW347'; +KW348 : 'KW348'; +KW349 : 'KW349'; +KW350 : 'KW350'; +KW351 : 'KW351'; +KW352 : 'KW352'; +KW353 : 'KW353'; +KW354 : 'KW354'; +KW355 : 'KW355'; +KW356 : 'KW356'; +KW357 : 'KW357'; +KW358 : 'KW358'; +KW359 : 'KW359'; +KW360 : 'KW360'; +KW361 : 'KW361'; +KW362 : 'KW362'; +KW363 : 'KW363'; +KW364 : 'KW364'; +KW365 : 'KW365'; +KW366 : 'KW366'; +KW367 : 'KW367'; +KW368 : 'KW368'; +KW369 : 'KW369'; +KW370 : 'KW370'; +KW371 : 'KW371'; +KW372 : 'KW372'; +KW373 : 'KW373'; +KW374 : 'KW374'; +KW375 : 'KW375'; +KW376 : 'KW376'; +KW377 : 'KW377'; +KW378 : 'KW378'; +KW379 : 'KW379'; +KW380 : 'KW380'; +KW381 : 'KW381'; +KW382 : 'KW382'; +KW383 : 'KW383'; +KW384 : 'KW384'; +KW385 : 'KW385'; +KW386 : 'KW386'; +KW387 : 'KW387'; +KW388 : 'KW388'; +KW389 : 'KW389'; +KW390 : 'KW390'; +KW391 : 'KW391'; +KW392 : 'KW392'; +KW393 : 'KW393'; +KW394 : 'KW394'; +KW395 : 'KW395'; +KW396 : 'KW396'; +KW397 : 'KW397'; +KW398 : 'KW398'; +KW399 : 'KW399'; +KW400 : 'KW400'; +KW401 : 'KW401'; +KW402 : 'KW402'; +KW403 : 'KW403'; +KW404 : 'KW404'; +KW405 : 'KW405'; +KW406 : 'KW406'; +KW407 : 'KW407'; +KW408 : 'KW408'; +KW409 : 'KW409'; +KW410 : 'KW410'; +KW411 : 'KW411'; +KW412 : 'KW412'; +KW413 : 'KW413'; +KW414 : 'KW414'; +KW415 : 'KW415'; +KW416 : 'KW416'; +KW417 : 'KW417'; +KW418 : 'KW418'; +KW419 : 'KW419'; +KW420 : 'KW420'; +KW421 : 'KW421'; +KW422 : 'KW422'; +KW423 : 'KW423'; +KW424 : 'KW424'; +KW425 : 'KW425'; +KW426 : 'KW426'; +KW427 : 'KW427'; +KW428 : 'KW428'; +KW429 : 'KW429'; +KW430 : 'KW430'; +KW431 : 'KW431'; +KW432 : 'KW432'; +KW433 : 'KW433'; +KW434 : 'KW434'; +KW435 : 'KW435'; +KW436 : 'KW436'; +KW437 : 'KW437'; +KW438 : 'KW438'; +KW439 : 'KW439'; +KW440 : 'KW440'; +KW441 : 'KW441'; +KW442 : 'KW442'; +KW443 : 'KW443'; +KW444 : 'KW444'; +KW445 : 'KW445'; +KW446 : 'KW446'; +KW447 : 'KW447'; +KW448 : 'KW448'; +KW449 : 'KW449'; +KW450 : 'KW450'; +KW451 : 'KW451'; +KW452 : 'KW452'; +KW453 : 'KW453'; +KW454 : 'KW454'; +KW455 : 'KW455'; +KW456 : 'KW456'; +KW457 : 'KW457'; +KW458 : 'KW458'; +KW459 : 'KW459'; +KW460 : 'KW460'; +KW461 : 'KW461'; +KW462 : 'KW462'; +KW463 : 'KW463'; +KW464 : 'KW464'; +KW465 : 'KW465'; +KW466 : 'KW466'; +KW467 : 'KW467'; +KW468 : 'KW468'; 
+KW469 : 'KW469'; +KW470 : 'KW470'; +KW471 : 'KW471'; +KW472 : 'KW472'; +KW473 : 'KW473'; +KW474 : 'KW474'; +KW475 : 'KW475'; +KW476 : 'KW476'; +KW477 : 'KW477'; +KW478 : 'KW478'; +KW479 : 'KW479'; +KW480 : 'KW480'; +KW481 : 'KW481'; +KW482 : 'KW482'; +KW483 : 'KW483'; +KW484 : 'KW484'; +KW485 : 'KW485'; +KW486 : 'KW486'; +KW487 : 'KW487'; +KW488 : 'KW488'; +KW489 : 'KW489'; +KW490 : 'KW490'; +KW491 : 'KW491'; +KW492 : 'KW492'; +KW493 : 'KW493'; +KW494 : 'KW494'; +KW495 : 'KW495'; +KW496 : 'KW496'; +KW497 : 'KW497'; +KW498 : 'KW498'; +KW499 : 'KW499'; +KW500 : 'KW500'; +KW501 : 'KW501'; +KW502 : 'KW502'; +KW503 : 'KW503'; +KW504 : 'KW504'; +KW505 : 'KW505'; +KW506 : 'KW506'; +KW507 : 'KW507'; +KW508 : 'KW508'; +KW509 : 'KW509'; +KW510 : 'KW510'; +KW511 : 'KW511'; +KW512 : 'KW512'; +KW513 : 'KW513'; +KW514 : 'KW514'; +KW515 : 'KW515'; +KW516 : 'KW516'; +KW517 : 'KW517'; +KW518 : 'KW518'; +KW519 : 'KW519'; +KW520 : 'KW520'; +KW521 : 'KW521'; +KW522 : 'KW522'; +KW523 : 'KW523'; +KW524 : 'KW524'; +KW525 : 'KW525'; +KW526 : 'KW526'; +KW527 : 'KW527'; +KW528 : 'KW528'; +KW529 : 'KW529'; +KW530 : 'KW530'; +KW531 : 'KW531'; +KW532 : 'KW532'; +KW533 : 'KW533'; +KW534 : 'KW534'; +KW535 : 'KW535'; +KW536 : 'KW536'; +KW537 : 'KW537'; +KW538 : 'KW538'; +KW539 : 'KW539'; +KW540 : 'KW540'; +KW541 : 'KW541'; +KW542 : 'KW542'; +KW543 : 'KW543'; +KW544 : 'KW544'; +KW545 : 'KW545'; +KW546 : 'KW546'; +KW547 : 'KW547'; +KW548 : 'KW548'; +KW549 : 'KW549'; +KW550 : 'KW550'; +KW551 : 'KW551'; +KW552 : 'KW552'; +KW553 : 'KW553'; +KW554 : 'KW554'; +KW555 : 'KW555'; +KW556 : 'KW556'; +KW557 : 'KW557'; +KW558 : 'KW558'; +KW559 : 'KW559'; +KW560 : 'KW560'; +KW561 : 'KW561'; +KW562 : 'KW562'; +KW563 : 'KW563'; +KW564 : 'KW564'; +KW565 : 'KW565'; +KW566 : 'KW566'; +KW567 : 'KW567'; +KW568 : 'KW568'; +KW569 : 'KW569'; +KW570 : 'KW570'; +KW571 : 'KW571'; +KW572 : 'KW572'; +KW573 : 'KW573'; +KW574 : 'KW574'; +KW575 : 'KW575'; +KW576 : 'KW576'; +KW577 : 'KW577'; +KW578 : 'KW578'; +KW579 : 'KW579'; +KW580 : 'KW580'; +KW581 : 'KW581'; +KW582 : 'KW582'; +KW583 : 'KW583'; +KW584 : 'KW584'; +KW585 : 'KW585'; +KW586 : 'KW586'; +KW587 : 'KW587'; +KW588 : 'KW588'; +KW589 : 'KW589'; +KW590 : 'KW590'; +KW591 : 'KW591'; +KW592 : 'KW592'; +KW593 : 'KW593'; +KW594 : 'KW594'; +KW595 : 'KW595'; +KW596 : 'KW596'; +KW597 : 'KW597'; +KW598 : 'KW598'; +KW599 : 'KW599'; +KW600 : 'KW600'; +KW601 : 'KW601'; +KW602 : 'KW602'; +KW603 : 'KW603'; +KW604 : 'KW604'; +KW605 : 'KW605'; +KW606 : 'KW606'; +KW607 : 'KW607'; +KW608 : 'KW608'; +KW609 : 'KW609'; +KW610 : 'KW610'; +KW611 : 'KW611'; +KW612 : 'KW612'; +KW613 : 'KW613'; +KW614 : 'KW614'; +KW615 : 'KW615'; +KW616 : 'KW616'; +KW617 : 'KW617'; +KW618 : 'KW618'; +KW619 : 'KW619'; +KW620 : 'KW620'; +KW621 : 'KW621'; +KW622 : 'KW622'; +KW623 : 'KW623'; +KW624 : 'KW624'; +KW625 : 'KW625'; +KW626 : 'KW626'; +KW627 : 'KW627'; +KW628 : 'KW628'; +KW629 : 'KW629'; +KW630 : 'KW630'; +KW631 : 'KW631'; +KW632 : 'KW632'; +KW633 : 'KW633'; +KW634 : 'KW634'; +KW635 : 'KW635'; +KW636 : 'KW636'; +KW637 : 'KW637'; +KW638 : 'KW638'; +KW639 : 'KW639'; +KW640 : 'KW640'; +KW641 : 'KW641'; +KW642 : 'KW642'; +KW643 : 'KW643'; +KW644 : 'KW644'; +KW645 : 'KW645'; +KW646 : 'KW646'; +KW647 : 'KW647'; +KW648 : 'KW648'; +KW649 : 'KW649'; +KW650 : 'KW650'; +KW651 : 'KW651'; +KW652 : 'KW652'; +KW653 : 'KW653'; +KW654 : 'KW654'; +KW655 : 'KW655'; +KW656 : 'KW656'; +KW657 : 'KW657'; +KW658 : 'KW658'; +KW659 : 'KW659'; +KW660 : 'KW660'; +KW661 : 'KW661'; +KW662 : 'KW662'; +KW663 : 'KW663'; +KW664 : 'KW664'; +KW665 : 'KW665'; +KW666 : 
'KW666'; +KW667 : 'KW667'; +KW668 : 'KW668'; +KW669 : 'KW669'; +KW670 : 'KW670'; +KW671 : 'KW671'; +KW672 : 'KW672'; +KW673 : 'KW673'; +KW674 : 'KW674'; +KW675 : 'KW675'; +KW676 : 'KW676'; +KW677 : 'KW677'; +KW678 : 'KW678'; +KW679 : 'KW679'; +KW680 : 'KW680'; +KW681 : 'KW681'; +KW682 : 'KW682'; +KW683 : 'KW683'; +KW684 : 'KW684'; +KW685 : 'KW685'; +KW686 : 'KW686'; +KW687 : 'KW687'; +KW688 : 'KW688'; +KW689 : 'KW689'; +KW690 : 'KW690'; +KW691 : 'KW691'; +KW692 : 'KW692'; +KW693 : 'KW693'; +KW694 : 'KW694'; +KW695 : 'KW695'; +KW696 : 'KW696'; +KW697 : 'KW697'; +KW698 : 'KW698'; +KW699 : 'KW699'; +KW700 : 'KW700'; +KW701 : 'KW701'; +KW702 : 'KW702'; +KW703 : 'KW703'; +KW704 : 'KW704'; +KW705 : 'KW705'; +KW706 : 'KW706'; +KW707 : 'KW707'; +KW708 : 'KW708'; +KW709 : 'KW709'; +KW710 : 'KW710'; +KW711 : 'KW711'; +KW712 : 'KW712'; +KW713 : 'KW713'; +KW714 : 'KW714'; +KW715 : 'KW715'; +KW716 : 'KW716'; +KW717 : 'KW717'; +KW718 : 'KW718'; +KW719 : 'KW719'; +KW720 : 'KW720'; +KW721 : 'KW721'; +KW722 : 'KW722'; +KW723 : 'KW723'; +KW724 : 'KW724'; +KW725 : 'KW725'; +KW726 : 'KW726'; +KW727 : 'KW727'; +KW728 : 'KW728'; +KW729 : 'KW729'; +KW730 : 'KW730'; +KW731 : 'KW731'; +KW732 : 'KW732'; +KW733 : 'KW733'; +KW734 : 'KW734'; +KW735 : 'KW735'; +KW736 : 'KW736'; +KW737 : 'KW737'; +KW738 : 'KW738'; +KW739 : 'KW739'; +KW740 : 'KW740'; +KW741 : 'KW741'; +KW742 : 'KW742'; +KW743 : 'KW743'; +KW744 : 'KW744'; +KW745 : 'KW745'; +KW746 : 'KW746'; +KW747 : 'KW747'; +KW748 : 'KW748'; +KW749 : 'KW749'; +KW750 : 'KW750'; +KW751 : 'KW751'; +KW752 : 'KW752'; +KW753 : 'KW753'; +KW754 : 'KW754'; +KW755 : 'KW755'; +KW756 : 'KW756'; +KW757 : 'KW757'; +KW758 : 'KW758'; +KW759 : 'KW759'; +KW760 : 'KW760'; +KW761 : 'KW761'; +KW762 : 'KW762'; +KW763 : 'KW763'; +KW764 : 'KW764'; +KW765 : 'KW765'; +KW766 : 'KW766'; +KW767 : 'KW767'; +KW768 : 'KW768'; +KW769 : 'KW769'; +KW770 : 'KW770'; +KW771 : 'KW771'; +KW772 : 'KW772'; +KW773 : 'KW773'; +KW774 : 'KW774'; +KW775 : 'KW775'; +KW776 : 'KW776'; +KW777 : 'KW777'; +KW778 : 'KW778'; +KW779 : 'KW779'; +KW780 : 'KW780'; +KW781 : 'KW781'; +KW782 : 'KW782'; +KW783 : 'KW783'; +KW784 : 'KW784'; +KW785 : 'KW785'; +KW786 : 'KW786'; +KW787 : 'KW787'; +KW788 : 'KW788'; +KW789 : 'KW789'; +KW790 : 'KW790'; +KW791 : 'KW791'; +KW792 : 'KW792'; +KW793 : 'KW793'; +KW794 : 'KW794'; +KW795 : 'KW795'; +KW796 : 'KW796'; +KW797 : 'KW797'; +KW798 : 'KW798'; +KW799 : 'KW799'; +KW800 : 'KW800'; +KW801 : 'KW801'; +KW802 : 'KW802'; +KW803 : 'KW803'; +KW804 : 'KW804'; +KW805 : 'KW805'; +KW806 : 'KW806'; +KW807 : 'KW807'; +KW808 : 'KW808'; +KW809 : 'KW809'; +KW810 : 'KW810'; +KW811 : 'KW811'; +KW812 : 'KW812'; +KW813 : 'KW813'; +KW814 : 'KW814'; +KW815 : 'KW815'; +KW816 : 'KW816'; +KW817 : 'KW817'; +KW818 : 'KW818'; +KW819 : 'KW819'; +KW820 : 'KW820'; +KW821 : 'KW821'; +KW822 : 'KW822'; +KW823 : 'KW823'; +KW824 : 'KW824'; +KW825 : 'KW825'; +KW826 : 'KW826'; +KW827 : 'KW827'; +KW828 : 'KW828'; +KW829 : 'KW829'; +KW830 : 'KW830'; +KW831 : 'KW831'; +KW832 : 'KW832'; +KW833 : 'KW833'; +KW834 : 'KW834'; +KW835 : 'KW835'; +KW836 : 'KW836'; +KW837 : 'KW837'; +KW838 : 'KW838'; +KW839 : 'KW839'; +KW840 : 'KW840'; +KW841 : 'KW841'; +KW842 : 'KW842'; +KW843 : 'KW843'; +KW844 : 'KW844'; +KW845 : 'KW845'; +KW846 : 'KW846'; +KW847 : 'KW847'; +KW848 : 'KW848'; +KW849 : 'KW849'; +KW850 : 'KW850'; +KW851 : 'KW851'; +KW852 : 'KW852'; +KW853 : 'KW853'; +KW854 : 'KW854'; +KW855 : 'KW855'; +KW856 : 'KW856'; +KW857 : 'KW857'; +KW858 : 'KW858'; +KW859 : 'KW859'; +KW860 : 'KW860'; +KW861 : 'KW861'; +KW862 : 'KW862'; +KW863 : 'KW863'; 
+KW864 : 'KW864'; +KW865 : 'KW865'; +KW866 : 'KW866'; +KW867 : 'KW867'; +KW868 : 'KW868'; +KW869 : 'KW869'; +KW870 : 'KW870'; +KW871 : 'KW871'; +KW872 : 'KW872'; +KW873 : 'KW873'; +KW874 : 'KW874'; +KW875 : 'KW875'; +KW876 : 'KW876'; +KW877 : 'KW877'; +KW878 : 'KW878'; +KW879 : 'KW879'; +KW880 : 'KW880'; +KW881 : 'KW881'; +KW882 : 'KW882'; +KW883 : 'KW883'; +KW884 : 'KW884'; +KW885 : 'KW885'; +KW886 : 'KW886'; +KW887 : 'KW887'; +KW888 : 'KW888'; +KW889 : 'KW889'; +KW890 : 'KW890'; +KW891 : 'KW891'; +KW892 : 'KW892'; +KW893 : 'KW893'; +KW894 : 'KW894'; +KW895 : 'KW895'; +KW896 : 'KW896'; +KW897 : 'KW897'; +KW898 : 'KW898'; +KW899 : 'KW899'; +KW900 : 'KW900'; +KW901 : 'KW901'; +KW902 : 'KW902'; +KW903 : 'KW903'; +KW904 : 'KW904'; +KW905 : 'KW905'; +KW906 : 'KW906'; +KW907 : 'KW907'; +KW908 : 'KW908'; +KW909 : 'KW909'; +KW910 : 'KW910'; +KW911 : 'KW911'; +KW912 : 'KW912'; +KW913 : 'KW913'; +KW914 : 'KW914'; +KW915 : 'KW915'; +KW916 : 'KW916'; +KW917 : 'KW917'; +KW918 : 'KW918'; +KW919 : 'KW919'; +KW920 : 'KW920'; +KW921 : 'KW921'; +KW922 : 'KW922'; +KW923 : 'KW923'; +KW924 : 'KW924'; +KW925 : 'KW925'; +KW926 : 'KW926'; +KW927 : 'KW927'; +KW928 : 'KW928'; +KW929 : 'KW929'; +KW930 : 'KW930'; +KW931 : 'KW931'; +KW932 : 'KW932'; +KW933 : 'KW933'; +KW934 : 'KW934'; +KW935 : 'KW935'; +KW936 : 'KW936'; +KW937 : 'KW937'; +KW938 : 'KW938'; +KW939 : 'KW939'; +KW940 : 'KW940'; +KW941 : 'KW941'; +KW942 : 'KW942'; +KW943 : 'KW943'; +KW944 : 'KW944'; +KW945 : 'KW945'; +KW946 : 'KW946'; +KW947 : 'KW947'; +KW948 : 'KW948'; +KW949 : 'KW949'; +KW950 : 'KW950'; +KW951 : 'KW951'; +KW952 : 'KW952'; +KW953 : 'KW953'; +KW954 : 'KW954'; +KW955 : 'KW955'; +KW956 : 'KW956'; +KW957 : 'KW957'; +KW958 : 'KW958'; +KW959 : 'KW959'; +KW960 : 'KW960'; +KW961 : 'KW961'; +KW962 : 'KW962'; +KW963 : 'KW963'; +KW964 : 'KW964'; +KW965 : 'KW965'; +KW966 : 'KW966'; +KW967 : 'KW967'; +KW968 : 'KW968'; +KW969 : 'KW969'; +KW970 : 'KW970'; +KW971 : 'KW971'; +KW972 : 'KW972'; +KW973 : 'KW973'; +KW974 : 'KW974'; +KW975 : 'KW975'; +KW976 : 'KW976'; +KW977 : 'KW977'; +KW978 : 'KW978'; +KW979 : 'KW979'; +KW980 : 'KW980'; +KW981 : 'KW981'; +KW982 : 'KW982'; +KW983 : 'KW983'; +KW984 : 'KW984'; +KW985 : 'KW985'; +KW986 : 'KW986'; +KW987 : 'KW987'; +KW988 : 'KW988'; +KW989 : 'KW989'; +KW990 : 'KW990'; +KW991 : 'KW991'; +KW992 : 'KW992'; +KW993 : 'KW993'; +KW994 : 'KW994'; +KW995 : 'KW995'; +KW996 : 'KW996'; +KW997 : 'KW997'; +KW998 : 'KW998'; +KW999 : 'KW999'; +KW1000 : 'KW1000'; +KW1001 : 'KW1001'; +KW1002 : 'KW1002'; +KW1003 : 'KW1003'; +KW1004 : 'KW1004'; +KW1005 : 'KW1005'; +KW1006 : 'KW1006'; +KW1007 : 'KW1007'; +KW1008 : 'KW1008'; +KW1009 : 'KW1009'; +KW1010 : 'KW1010'; +KW1011 : 'KW1011'; +KW1012 : 'KW1012'; +KW1013 : 'KW1013'; +KW1014 : 'KW1014'; +KW1015 : 'KW1015'; +KW1016 : 'KW1016'; +KW1017 : 'KW1017'; +KW1018 : 'KW1018'; +KW1019 : 'KW1019'; +KW1020 : 'KW1020'; +KW1021 : 'KW1021'; +KW1022 : 'KW1022'; +KW1023 : 'KW1023'; +KW1024 : 'KW1024'; +KW1025 : 'KW1025'; +KW1026 : 'KW1026'; +KW1027 : 'KW1027'; +KW1028 : 'KW1028'; +KW1029 : 'KW1029'; +KW1030 : 'KW1030'; +KW1031 : 'KW1031'; +KW1032 : 'KW1032'; +KW1033 : 'KW1033'; +KW1034 : 'KW1034'; +KW1035 : 'KW1035'; +KW1036 : 'KW1036'; +KW1037 : 'KW1037'; +KW1038 : 'KW1038'; +KW1039 : 'KW1039'; +KW1040 : 'KW1040'; +KW1041 : 'KW1041'; +KW1042 : 'KW1042'; +KW1043 : 'KW1043'; +KW1044 : 'KW1044'; +KW1045 : 'KW1045'; +KW1046 : 'KW1046'; +KW1047 : 'KW1047'; +KW1048 : 'KW1048'; +KW1049 : 'KW1049'; +KW1050 : 'KW1050'; +KW1051 : 'KW1051'; +KW1052 : 'KW1052'; +KW1053 : 'KW1053'; +KW1054 : 'KW1054'; 
+KW1055 : 'KW1055'; +KW1056 : 'KW1056'; +KW1057 : 'KW1057'; +KW1058 : 'KW1058'; +KW1059 : 'KW1059'; +KW1060 : 'KW1060'; +KW1061 : 'KW1061'; +KW1062 : 'KW1062'; +KW1063 : 'KW1063'; +KW1064 : 'KW1064'; +KW1065 : 'KW1065'; +KW1066 : 'KW1066'; +KW1067 : 'KW1067'; +KW1068 : 'KW1068'; +KW1069 : 'KW1069'; +KW1070 : 'KW1070'; +KW1071 : 'KW1071'; +KW1072 : 'KW1072'; +KW1073 : 'KW1073'; +KW1074 : 'KW1074'; +KW1075 : 'KW1075'; +KW1076 : 'KW1076'; +KW1077 : 'KW1077'; +KW1078 : 'KW1078'; +KW1079 : 'KW1079'; +KW1080 : 'KW1080'; +KW1081 : 'KW1081'; +KW1082 : 'KW1082'; +KW1083 : 'KW1083'; +KW1084 : 'KW1084'; +KW1085 : 'KW1085'; +KW1086 : 'KW1086'; +KW1087 : 'KW1087'; +KW1088 : 'KW1088'; +KW1089 : 'KW1089'; +KW1090 : 'KW1090'; +KW1091 : 'KW1091'; +KW1092 : 'KW1092'; +KW1093 : 'KW1093'; +KW1094 : 'KW1094'; +KW1095 : 'KW1095'; +KW1096 : 'KW1096'; +KW1097 : 'KW1097'; +KW1098 : 'KW1098'; +KW1099 : 'KW1099'; +KW1100 : 'KW1100'; +KW1101 : 'KW1101'; +KW1102 : 'KW1102'; +KW1103 : 'KW1103'; +KW1104 : 'KW1104'; +KW1105 : 'KW1105'; +KW1106 : 'KW1106'; +KW1107 : 'KW1107'; +KW1108 : 'KW1108'; +KW1109 : 'KW1109'; +KW1110 : 'KW1110'; +KW1111 : 'KW1111'; +KW1112 : 'KW1112'; +KW1113 : 'KW1113'; +KW1114 : 'KW1114'; +KW1115 : 'KW1115'; +KW1116 : 'KW1116'; +KW1117 : 'KW1117'; +KW1118 : 'KW1118'; +KW1119 : 'KW1119'; +KW1120 : 'KW1120'; +KW1121 : 'KW1121'; +KW1122 : 'KW1122'; +KW1123 : 'KW1123'; +KW1124 : 'KW1124'; +KW1125 : 'KW1125'; +KW1126 : 'KW1126'; +KW1127 : 'KW1127'; +KW1128 : 'KW1128'; +KW1129 : 'KW1129'; +KW1130 : 'KW1130'; +KW1131 : 'KW1131'; +KW1132 : 'KW1132'; +KW1133 : 'KW1133'; +KW1134 : 'KW1134'; +KW1135 : 'KW1135'; +KW1136 : 'KW1136'; +KW1137 : 'KW1137'; +KW1138 : 'KW1138'; +KW1139 : 'KW1139'; +KW1140 : 'KW1140'; +KW1141 : 'KW1141'; +KW1142 : 'KW1142'; +KW1143 : 'KW1143'; +KW1144 : 'KW1144'; +KW1145 : 'KW1145'; +KW1146 : 'KW1146'; +KW1147 : 'KW1147'; +KW1148 : 'KW1148'; +KW1149 : 'KW1149'; +KW1150 : 'KW1150'; +KW1151 : 'KW1151'; +KW1152 : 'KW1152'; +KW1153 : 'KW1153'; +KW1154 : 'KW1154'; +KW1155 : 'KW1155'; +KW1156 : 'KW1156'; +KW1157 : 'KW1157'; +KW1158 : 'KW1158'; +KW1159 : 'KW1159'; +KW1160 : 'KW1160'; +KW1161 : 'KW1161'; +KW1162 : 'KW1162'; +KW1163 : 'KW1163'; +KW1164 : 'KW1164'; +KW1165 : 'KW1165'; +KW1166 : 'KW1166'; +KW1167 : 'KW1167'; +KW1168 : 'KW1168'; +KW1169 : 'KW1169'; +KW1170 : 'KW1170'; +KW1171 : 'KW1171'; +KW1172 : 'KW1172'; +KW1173 : 'KW1173'; +KW1174 : 'KW1174'; +KW1175 : 'KW1175'; +KW1176 : 'KW1176'; +KW1177 : 'KW1177'; +KW1178 : 'KW1178'; +KW1179 : 'KW1179'; +KW1180 : 'KW1180'; +KW1181 : 'KW1181'; +KW1182 : 'KW1182'; +KW1183 : 'KW1183'; +KW1184 : 'KW1184'; +KW1185 : 'KW1185'; +KW1186 : 'KW1186'; +KW1187 : 'KW1187'; +KW1188 : 'KW1188'; +KW1189 : 'KW1189'; +KW1190 : 'KW1190'; +KW1191 : 'KW1191'; +KW1192 : 'KW1192'; +KW1193 : 'KW1193'; +KW1194 : 'KW1194'; +KW1195 : 'KW1195'; +KW1196 : 'KW1196'; +KW1197 : 'KW1197'; +KW1198 : 'KW1198'; +KW1199 : 'KW1199'; +KW1200 : 'KW1200'; +KW1201 : 'KW1201'; +KW1202 : 'KW1202'; +KW1203 : 'KW1203'; +KW1204 : 'KW1204'; +KW1205 : 'KW1205'; +KW1206 : 'KW1206'; +KW1207 : 'KW1207'; +KW1208 : 'KW1208'; +KW1209 : 'KW1209'; +KW1210 : 'KW1210'; +KW1211 : 'KW1211'; +KW1212 : 'KW1212'; +KW1213 : 'KW1213'; +KW1214 : 'KW1214'; +KW1215 : 'KW1215'; +KW1216 : 'KW1216'; +KW1217 : 'KW1217'; +KW1218 : 'KW1218'; +KW1219 : 'KW1219'; +KW1220 : 'KW1220'; +KW1221 : 'KW1221'; +KW1222 : 'KW1222'; +KW1223 : 'KW1223'; +KW1224 : 'KW1224'; +KW1225 : 'KW1225'; +KW1226 : 'KW1226'; +KW1227 : 'KW1227'; +KW1228 : 'KW1228'; +KW1229 : 'KW1229'; +KW1230 : 'KW1230'; +KW1231 : 'KW1231'; +KW1232 : 
'KW1232'; +KW1233 : 'KW1233'; +KW1234 : 'KW1234'; +KW1235 : 'KW1235'; +KW1236 : 'KW1236'; +KW1237 : 'KW1237'; +KW1238 : 'KW1238'; +KW1239 : 'KW1239'; +KW1240 : 'KW1240'; +KW1241 : 'KW1241'; +KW1242 : 'KW1242'; +KW1243 : 'KW1243'; +KW1244 : 'KW1244'; +KW1245 : 'KW1245'; +KW1246 : 'KW1246'; +KW1247 : 'KW1247'; +KW1248 : 'KW1248'; +KW1249 : 'KW1249'; +KW1250 : 'KW1250'; +KW1251 : 'KW1251'; +KW1252 : 'KW1252'; +KW1253 : 'KW1253'; +KW1254 : 'KW1254'; +KW1255 : 'KW1255'; +KW1256 : 'KW1256'; +KW1257 : 'KW1257'; +KW1258 : 'KW1258'; +KW1259 : 'KW1259'; +KW1260 : 'KW1260'; +KW1261 : 'KW1261'; +KW1262 : 'KW1262'; +KW1263 : 'KW1263'; +KW1264 : 'KW1264'; +KW1265 : 'KW1265'; +KW1266 : 'KW1266'; +KW1267 : 'KW1267'; +KW1268 : 'KW1268'; +KW1269 : 'KW1269'; +KW1270 : 'KW1270'; +KW1271 : 'KW1271'; +KW1272 : 'KW1272'; +KW1273 : 'KW1273'; +KW1274 : 'KW1274'; +KW1275 : 'KW1275'; +KW1276 : 'KW1276'; +KW1277 : 'KW1277'; +KW1278 : 'KW1278'; +KW1279 : 'KW1279'; +KW1280 : 'KW1280'; +KW1281 : 'KW1281'; +KW1282 : 'KW1282'; +KW1283 : 'KW1283'; +KW1284 : 'KW1284'; +KW1285 : 'KW1285'; +KW1286 : 'KW1286'; +KW1287 : 'KW1287'; +KW1288 : 'KW1288'; +KW1289 : 'KW1289'; +KW1290 : 'KW1290'; +KW1291 : 'KW1291'; +KW1292 : 'KW1292'; +KW1293 : 'KW1293'; +KW1294 : 'KW1294'; +KW1295 : 'KW1295'; +KW1296 : 'KW1296'; +KW1297 : 'KW1297'; +KW1298 : 'KW1298'; +KW1299 : 'KW1299'; +KW1300 : 'KW1300'; +KW1301 : 'KW1301'; +KW1302 : 'KW1302'; +KW1303 : 'KW1303'; +KW1304 : 'KW1304'; +KW1305 : 'KW1305'; +KW1306 : 'KW1306'; +KW1307 : 'KW1307'; +KW1308 : 'KW1308'; +KW1309 : 'KW1309'; +KW1310 : 'KW1310'; +KW1311 : 'KW1311'; +KW1312 : 'KW1312'; +KW1313 : 'KW1313'; +KW1314 : 'KW1314'; +KW1315 : 'KW1315'; +KW1316 : 'KW1316'; +KW1317 : 'KW1317'; +KW1318 : 'KW1318'; +KW1319 : 'KW1319'; +KW1320 : 'KW1320'; +KW1321 : 'KW1321'; +KW1322 : 'KW1322'; +KW1323 : 'KW1323'; +KW1324 : 'KW1324'; +KW1325 : 'KW1325'; +KW1326 : 'KW1326'; +KW1327 : 'KW1327'; +KW1328 : 'KW1328'; +KW1329 : 'KW1329'; +KW1330 : 'KW1330'; +KW1331 : 'KW1331'; +KW1332 : 'KW1332'; +KW1333 : 'KW1333'; +KW1334 : 'KW1334'; +KW1335 : 'KW1335'; +KW1336 : 'KW1336'; +KW1337 : 'KW1337'; +KW1338 : 'KW1338'; +KW1339 : 'KW1339'; +KW1340 : 'KW1340'; +KW1341 : 'KW1341'; +KW1342 : 'KW1342'; +KW1343 : 'KW1343'; +KW1344 : 'KW1344'; +KW1345 : 'KW1345'; +KW1346 : 'KW1346'; +KW1347 : 'KW1347'; +KW1348 : 'KW1348'; +KW1349 : 'KW1349'; +KW1350 : 'KW1350'; +KW1351 : 'KW1351'; +KW1352 : 'KW1352'; +KW1353 : 'KW1353'; +KW1354 : 'KW1354'; +KW1355 : 'KW1355'; +KW1356 : 'KW1356'; +KW1357 : 'KW1357'; +KW1358 : 'KW1358'; +KW1359 : 'KW1359'; +KW1360 : 'KW1360'; +KW1361 : 'KW1361'; +KW1362 : 'KW1362'; +KW1363 : 'KW1363'; +KW1364 : 'KW1364'; +KW1365 : 'KW1365'; +KW1366 : 'KW1366'; +KW1367 : 'KW1367'; +KW1368 : 'KW1368'; +KW1369 : 'KW1369'; +KW1370 : 'KW1370'; +KW1371 : 'KW1371'; +KW1372 : 'KW1372'; +KW1373 : 'KW1373'; +KW1374 : 'KW1374'; +KW1375 : 'KW1375'; +KW1376 : 'KW1376'; +KW1377 : 'KW1377'; +KW1378 : 'KW1378'; +KW1379 : 'KW1379'; +KW1380 : 'KW1380'; +KW1381 : 'KW1381'; +KW1382 : 'KW1382'; +KW1383 : 'KW1383'; +KW1384 : 'KW1384'; +KW1385 : 'KW1385'; +KW1386 : 'KW1386'; +KW1387 : 'KW1387'; +KW1388 : 'KW1388'; +KW1389 : 'KW1389'; +KW1390 : 'KW1390'; +KW1391 : 'KW1391'; +KW1392 : 'KW1392'; +KW1393 : 'KW1393'; +KW1394 : 'KW1394'; +KW1395 : 'KW1395'; +KW1396 : 'KW1396'; +KW1397 : 'KW1397'; +KW1398 : 'KW1398'; +KW1399 : 'KW1399'; +KW1400 : 'KW1400'; +KW1401 : 'KW1401'; +KW1402 : 'KW1402'; +KW1403 : 'KW1403'; +KW1404 : 'KW1404'; +KW1405 : 'KW1405'; +KW1406 : 'KW1406'; +KW1407 : 'KW1407'; +KW1408 : 'KW1408'; +KW1409 : 'KW1409'; 
+KW1410 : 'KW1410'; +KW1411 : 'KW1411'; +KW1412 : 'KW1412'; +KW1413 : 'KW1413'; +KW1414 : 'KW1414'; +KW1415 : 'KW1415'; +KW1416 : 'KW1416'; +KW1417 : 'KW1417'; +KW1418 : 'KW1418'; +KW1419 : 'KW1419'; +KW1420 : 'KW1420'; +KW1421 : 'KW1421'; +KW1422 : 'KW1422'; +KW1423 : 'KW1423'; +KW1424 : 'KW1424'; +KW1425 : 'KW1425'; +KW1426 : 'KW1426'; +KW1427 : 'KW1427'; +KW1428 : 'KW1428'; +KW1429 : 'KW1429'; +KW1430 : 'KW1430'; +KW1431 : 'KW1431'; +KW1432 : 'KW1432'; +KW1433 : 'KW1433'; +KW1434 : 'KW1434'; +KW1435 : 'KW1435'; +KW1436 : 'KW1436'; +KW1437 : 'KW1437'; +KW1438 : 'KW1438'; +KW1439 : 'KW1439'; +KW1440 : 'KW1440'; +KW1441 : 'KW1441'; +KW1442 : 'KW1442'; +KW1443 : 'KW1443'; +KW1444 : 'KW1444'; +KW1445 : 'KW1445'; +KW1446 : 'KW1446'; +KW1447 : 'KW1447'; +KW1448 : 'KW1448'; +KW1449 : 'KW1449'; +KW1450 : 'KW1450'; +KW1451 : 'KW1451'; +KW1452 : 'KW1452'; +KW1453 : 'KW1453'; +KW1454 : 'KW1454'; +KW1455 : 'KW1455'; +KW1456 : 'KW1456'; +KW1457 : 'KW1457'; +KW1458 : 'KW1458'; +KW1459 : 'KW1459'; +KW1460 : 'KW1460'; +KW1461 : 'KW1461'; +KW1462 : 'KW1462'; +KW1463 : 'KW1463'; +KW1464 : 'KW1464'; +KW1465 : 'KW1465'; +KW1466 : 'KW1466'; +KW1467 : 'KW1467'; +KW1468 : 'KW1468'; +KW1469 : 'KW1469'; +KW1470 : 'KW1470'; +KW1471 : 'KW1471'; +KW1472 : 'KW1472'; +KW1473 : 'KW1473'; +KW1474 : 'KW1474'; +KW1475 : 'KW1475'; +KW1476 : 'KW1476'; +KW1477 : 'KW1477'; +KW1478 : 'KW1478'; +KW1479 : 'KW1479'; +KW1480 : 'KW1480'; +KW1481 : 'KW1481'; +KW1482 : 'KW1482'; +KW1483 : 'KW1483'; +KW1484 : 'KW1484'; +KW1485 : 'KW1485'; +KW1486 : 'KW1486'; +KW1487 : 'KW1487'; +KW1488 : 'KW1488'; +KW1489 : 'KW1489'; +KW1490 : 'KW1490'; +KW1491 : 'KW1491'; +KW1492 : 'KW1492'; +KW1493 : 'KW1493'; +KW1494 : 'KW1494'; +KW1495 : 'KW1495'; +KW1496 : 'KW1496'; +KW1497 : 'KW1497'; +KW1498 : 'KW1498'; +KW1499 : 'KW1499'; +KW1500 : 'KW1500'; +KW1501 : 'KW1501'; +KW1502 : 'KW1502'; +KW1503 : 'KW1503'; +KW1504 : 'KW1504'; +KW1505 : 'KW1505'; +KW1506 : 'KW1506'; +KW1507 : 'KW1507'; +KW1508 : 'KW1508'; +KW1509 : 'KW1509'; +KW1510 : 'KW1510'; +KW1511 : 'KW1511'; +KW1512 : 'KW1512'; +KW1513 : 'KW1513'; +KW1514 : 'KW1514'; +KW1515 : 'KW1515'; +KW1516 : 'KW1516'; +KW1517 : 'KW1517'; +KW1518 : 'KW1518'; +KW1519 : 'KW1519'; +KW1520 : 'KW1520'; +KW1521 : 'KW1521'; +KW1522 : 'KW1522'; +KW1523 : 'KW1523'; +KW1524 : 'KW1524'; +KW1525 : 'KW1525'; +KW1526 : 'KW1526'; +KW1527 : 'KW1527'; +KW1528 : 'KW1528'; +KW1529 : 'KW1529'; +KW1530 : 'KW1530'; +KW1531 : 'KW1531'; +KW1532 : 'KW1532'; +KW1533 : 'KW1533'; +KW1534 : 'KW1534'; +KW1535 : 'KW1535'; +KW1536 : 'KW1536'; +KW1537 : 'KW1537'; +KW1538 : 'KW1538'; +KW1539 : 'KW1539'; +KW1540 : 'KW1540'; +KW1541 : 'KW1541'; +KW1542 : 'KW1542'; +KW1543 : 'KW1543'; +KW1544 : 'KW1544'; +KW1545 : 'KW1545'; +KW1546 : 'KW1546'; +KW1547 : 'KW1547'; +KW1548 : 'KW1548'; +KW1549 : 'KW1549'; +KW1550 : 'KW1550'; +KW1551 : 'KW1551'; +KW1552 : 'KW1552'; +KW1553 : 'KW1553'; +KW1554 : 'KW1554'; +KW1555 : 'KW1555'; +KW1556 : 'KW1556'; +KW1557 : 'KW1557'; +KW1558 : 'KW1558'; +KW1559 : 'KW1559'; +KW1560 : 'KW1560'; +KW1561 : 'KW1561'; +KW1562 : 'KW1562'; +KW1563 : 'KW1563'; +KW1564 : 'KW1564'; +KW1565 : 'KW1565'; +KW1566 : 'KW1566'; +KW1567 : 'KW1567'; +KW1568 : 'KW1568'; +KW1569 : 'KW1569'; +KW1570 : 'KW1570'; +KW1571 : 'KW1571'; +KW1572 : 'KW1572'; +KW1573 : 'KW1573'; +KW1574 : 'KW1574'; +KW1575 : 'KW1575'; +KW1576 : 'KW1576'; +KW1577 : 'KW1577'; +KW1578 : 'KW1578'; +KW1579 : 'KW1579'; +KW1580 : 'KW1580'; +KW1581 : 'KW1581'; +KW1582 : 'KW1582'; +KW1583 : 'KW1583'; +KW1584 : 'KW1584'; +KW1585 : 'KW1585'; +KW1586 : 'KW1586'; +KW1587 : 
'KW1587'; +KW1588 : 'KW1588'; +KW1589 : 'KW1589'; +KW1590 : 'KW1590'; +KW1591 : 'KW1591'; +KW1592 : 'KW1592'; +KW1593 : 'KW1593'; +KW1594 : 'KW1594'; +KW1595 : 'KW1595'; +KW1596 : 'KW1596'; +KW1597 : 'KW1597'; +KW1598 : 'KW1598'; +KW1599 : 'KW1599'; +KW1600 : 'KW1600'; +KW1601 : 'KW1601'; +KW1602 : 'KW1602'; +KW1603 : 'KW1603'; +KW1604 : 'KW1604'; +KW1605 : 'KW1605'; +KW1606 : 'KW1606'; +KW1607 : 'KW1607'; +KW1608 : 'KW1608'; +KW1609 : 'KW1609'; +KW1610 : 'KW1610'; +KW1611 : 'KW1611'; +KW1612 : 'KW1612'; +KW1613 : 'KW1613'; +KW1614 : 'KW1614'; +KW1615 : 'KW1615'; +KW1616 : 'KW1616'; +KW1617 : 'KW1617'; +KW1618 : 'KW1618'; +KW1619 : 'KW1619'; +KW1620 : 'KW1620'; +KW1621 : 'KW1621'; +KW1622 : 'KW1622'; +KW1623 : 'KW1623'; +KW1624 : 'KW1624'; +KW1625 : 'KW1625'; +KW1626 : 'KW1626'; +KW1627 : 'KW1627'; +KW1628 : 'KW1628'; +KW1629 : 'KW1629'; +KW1630 : 'KW1630'; +KW1631 : 'KW1631'; +KW1632 : 'KW1632'; +KW1633 : 'KW1633'; +KW1634 : 'KW1634'; +KW1635 : 'KW1635'; +KW1636 : 'KW1636'; +KW1637 : 'KW1637'; +KW1638 : 'KW1638'; +KW1639 : 'KW1639'; +KW1640 : 'KW1640'; +KW1641 : 'KW1641'; +KW1642 : 'KW1642'; +KW1643 : 'KW1643'; +KW1644 : 'KW1644'; +KW1645 : 'KW1645'; +KW1646 : 'KW1646'; +KW1647 : 'KW1647'; +KW1648 : 'KW1648'; +KW1649 : 'KW1649'; +KW1650 : 'KW1650'; +KW1651 : 'KW1651'; +KW1652 : 'KW1652'; +KW1653 : 'KW1653'; +KW1654 : 'KW1654'; +KW1655 : 'KW1655'; +KW1656 : 'KW1656'; +KW1657 : 'KW1657'; +KW1658 : 'KW1658'; +KW1659 : 'KW1659'; +KW1660 : 'KW1660'; +KW1661 : 'KW1661'; +KW1662 : 'KW1662'; +KW1663 : 'KW1663'; +KW1664 : 'KW1664'; +KW1665 : 'KW1665'; +KW1666 : 'KW1666'; +KW1667 : 'KW1667'; +KW1668 : 'KW1668'; +KW1669 : 'KW1669'; +KW1670 : 'KW1670'; +KW1671 : 'KW1671'; +KW1672 : 'KW1672'; +KW1673 : 'KW1673'; +KW1674 : 'KW1674'; +KW1675 : 'KW1675'; +KW1676 : 'KW1676'; +KW1677 : 'KW1677'; +KW1678 : 'KW1678'; +KW1679 : 'KW1679'; +KW1680 : 'KW1680'; +KW1681 : 'KW1681'; +KW1682 : 'KW1682'; +KW1683 : 'KW1683'; +KW1684 : 'KW1684'; +KW1685 : 'KW1685'; +KW1686 : 'KW1686'; +KW1687 : 'KW1687'; +KW1688 : 'KW1688'; +KW1689 : 'KW1689'; +KW1690 : 'KW1690'; +KW1691 : 'KW1691'; +KW1692 : 'KW1692'; +KW1693 : 'KW1693'; +KW1694 : 'KW1694'; +KW1695 : 'KW1695'; +KW1696 : 'KW1696'; +KW1697 : 'KW1697'; +KW1698 : 'KW1698'; +KW1699 : 'KW1699'; +KW1700 : 'KW1700'; +KW1701 : 'KW1701'; +KW1702 : 'KW1702'; +KW1703 : 'KW1703'; +KW1704 : 'KW1704'; +KW1705 : 'KW1705'; +KW1706 : 'KW1706'; +KW1707 : 'KW1707'; +KW1708 : 'KW1708'; +KW1709 : 'KW1709'; +KW1710 : 'KW1710'; +KW1711 : 'KW1711'; +KW1712 : 'KW1712'; +KW1713 : 'KW1713'; +KW1714 : 'KW1714'; +KW1715 : 'KW1715'; +KW1716 : 'KW1716'; +KW1717 : 'KW1717'; +KW1718 : 'KW1718'; +KW1719 : 'KW1719'; +KW1720 : 'KW1720'; +KW1721 : 'KW1721'; +KW1722 : 'KW1722'; +KW1723 : 'KW1723'; +KW1724 : 'KW1724'; +KW1725 : 'KW1725'; +KW1726 : 'KW1726'; +KW1727 : 'KW1727'; +KW1728 : 'KW1728'; +KW1729 : 'KW1729'; +KW1730 : 'KW1730'; +KW1731 : 'KW1731'; +KW1732 : 'KW1732'; +KW1733 : 'KW1733'; +KW1734 : 'KW1734'; +KW1735 : 'KW1735'; +KW1736 : 'KW1736'; +KW1737 : 'KW1737'; +KW1738 : 'KW1738'; +KW1739 : 'KW1739'; +KW1740 : 'KW1740'; +KW1741 : 'KW1741'; +KW1742 : 'KW1742'; +KW1743 : 'KW1743'; +KW1744 : 'KW1744'; +KW1745 : 'KW1745'; +KW1746 : 'KW1746'; +KW1747 : 'KW1747'; +KW1748 : 'KW1748'; +KW1749 : 'KW1749'; +KW1750 : 'KW1750'; +KW1751 : 'KW1751'; +KW1752 : 'KW1752'; +KW1753 : 'KW1753'; +KW1754 : 'KW1754'; +KW1755 : 'KW1755'; +KW1756 : 'KW1756'; +KW1757 : 'KW1757'; +KW1758 : 'KW1758'; +KW1759 : 'KW1759'; +KW1760 : 'KW1760'; +KW1761 : 'KW1761'; +KW1762 : 'KW1762'; +KW1763 : 'KW1763'; +KW1764 : 'KW1764'; 
+KW1765 : 'KW1765'; +KW1766 : 'KW1766'; +KW1767 : 'KW1767'; +KW1768 : 'KW1768'; +KW1769 : 'KW1769'; +KW1770 : 'KW1770'; +KW1771 : 'KW1771'; +KW1772 : 'KW1772'; +KW1773 : 'KW1773'; +KW1774 : 'KW1774'; +KW1775 : 'KW1775'; +KW1776 : 'KW1776'; +KW1777 : 'KW1777'; +KW1778 : 'KW1778'; +KW1779 : 'KW1779'; +KW1780 : 'KW1780'; +KW1781 : 'KW1781'; +KW1782 : 'KW1782'; +KW1783 : 'KW1783'; +KW1784 : 'KW1784'; +KW1785 : 'KW1785'; +KW1786 : 'KW1786'; +KW1787 : 'KW1787'; +KW1788 : 'KW1788'; +KW1789 : 'KW1789'; +KW1790 : 'KW1790'; +KW1791 : 'KW1791'; +KW1792 : 'KW1792'; +KW1793 : 'KW1793'; +KW1794 : 'KW1794'; +KW1795 : 'KW1795'; +KW1796 : 'KW1796'; +KW1797 : 'KW1797'; +KW1798 : 'KW1798'; +KW1799 : 'KW1799'; +KW1800 : 'KW1800'; +KW1801 : 'KW1801'; +KW1802 : 'KW1802'; +KW1803 : 'KW1803'; +KW1804 : 'KW1804'; +KW1805 : 'KW1805'; +KW1806 : 'KW1806'; +KW1807 : 'KW1807'; +KW1808 : 'KW1808'; +KW1809 : 'KW1809'; +KW1810 : 'KW1810'; +KW1811 : 'KW1811'; +KW1812 : 'KW1812'; +KW1813 : 'KW1813'; +KW1814 : 'KW1814'; +KW1815 : 'KW1815'; +KW1816 : 'KW1816'; +KW1817 : 'KW1817'; +KW1818 : 'KW1818'; +KW1819 : 'KW1819'; +KW1820 : 'KW1820'; +KW1821 : 'KW1821'; +KW1822 : 'KW1822'; +KW1823 : 'KW1823'; +KW1824 : 'KW1824'; +KW1825 : 'KW1825'; +KW1826 : 'KW1826'; +KW1827 : 'KW1827'; +KW1828 : 'KW1828'; +KW1829 : 'KW1829'; +KW1830 : 'KW1830'; +KW1831 : 'KW1831'; +KW1832 : 'KW1832'; +KW1833 : 'KW1833'; +KW1834 : 'KW1834'; +KW1835 : 'KW1835'; +KW1836 : 'KW1836'; +KW1837 : 'KW1837'; +KW1838 : 'KW1838'; +KW1839 : 'KW1839'; +KW1840 : 'KW1840'; +KW1841 : 'KW1841'; +KW1842 : 'KW1842'; +KW1843 : 'KW1843'; +KW1844 : 'KW1844'; +KW1845 : 'KW1845'; +KW1846 : 'KW1846'; +KW1847 : 'KW1847'; +KW1848 : 'KW1848'; +KW1849 : 'KW1849'; +KW1850 : 'KW1850'; +KW1851 : 'KW1851'; +KW1852 : 'KW1852'; +KW1853 : 'KW1853'; +KW1854 : 'KW1854'; +KW1855 : 'KW1855'; +KW1856 : 'KW1856'; +KW1857 : 'KW1857'; +KW1858 : 'KW1858'; +KW1859 : 'KW1859'; +KW1860 : 'KW1860'; +KW1861 : 'KW1861'; +KW1862 : 'KW1862'; +KW1863 : 'KW1863'; +KW1864 : 'KW1864'; +KW1865 : 'KW1865'; +KW1866 : 'KW1866'; +KW1867 : 'KW1867'; +KW1868 : 'KW1868'; +KW1869 : 'KW1869'; +KW1870 : 'KW1870'; +KW1871 : 'KW1871'; +KW1872 : 'KW1872'; +KW1873 : 'KW1873'; +KW1874 : 'KW1874'; +KW1875 : 'KW1875'; +KW1876 : 'KW1876'; +KW1877 : 'KW1877'; +KW1878 : 'KW1878'; +KW1879 : 'KW1879'; +KW1880 : 'KW1880'; +KW1881 : 'KW1881'; +KW1882 : 'KW1882'; +KW1883 : 'KW1883'; +KW1884 : 'KW1884'; +KW1885 : 'KW1885'; +KW1886 : 'KW1886'; +KW1887 : 'KW1887'; +KW1888 : 'KW1888'; +KW1889 : 'KW1889'; +KW1890 : 'KW1890'; +KW1891 : 'KW1891'; +KW1892 : 'KW1892'; +KW1893 : 'KW1893'; +KW1894 : 'KW1894'; +KW1895 : 'KW1895'; +KW1896 : 'KW1896'; +KW1897 : 'KW1897'; +KW1898 : 'KW1898'; +KW1899 : 'KW1899'; +KW1900 : 'KW1900'; +KW1901 : 'KW1901'; +KW1902 : 'KW1902'; +KW1903 : 'KW1903'; +KW1904 : 'KW1904'; +KW1905 : 'KW1905'; +KW1906 : 'KW1906'; +KW1907 : 'KW1907'; +KW1908 : 'KW1908'; +KW1909 : 'KW1909'; +KW1910 : 'KW1910'; +KW1911 : 'KW1911'; +KW1912 : 'KW1912'; +KW1913 : 'KW1913'; +KW1914 : 'KW1914'; +KW1915 : 'KW1915'; +KW1916 : 'KW1916'; +KW1917 : 'KW1917'; +KW1918 : 'KW1918'; +KW1919 : 'KW1919'; +KW1920 : 'KW1920'; +KW1921 : 'KW1921'; +KW1922 : 'KW1922'; +KW1923 : 'KW1923'; +KW1924 : 'KW1924'; +KW1925 : 'KW1925'; +KW1926 : 'KW1926'; +KW1927 : 'KW1927'; +KW1928 : 'KW1928'; +KW1929 : 'KW1929'; +KW1930 : 'KW1930'; +KW1931 : 'KW1931'; +KW1932 : 'KW1932'; +KW1933 : 'KW1933'; +KW1934 : 'KW1934'; +KW1935 : 'KW1935'; +KW1936 : 'KW1936'; +KW1937 : 'KW1937'; +KW1938 : 'KW1938'; +KW1939 : 'KW1939'; +KW1940 : 'KW1940'; +KW1941 : 'KW1941'; +KW1942 : 
'KW1942'; +KW1943 : 'KW1943'; +KW1944 : 'KW1944'; +KW1945 : 'KW1945'; +KW1946 : 'KW1946'; +KW1947 : 'KW1947'; +KW1948 : 'KW1948'; +KW1949 : 'KW1949'; +KW1950 : 'KW1950'; +KW1951 : 'KW1951'; +KW1952 : 'KW1952'; +KW1953 : 'KW1953'; +KW1954 : 'KW1954'; +KW1955 : 'KW1955'; +KW1956 : 'KW1956'; +KW1957 : 'KW1957'; +KW1958 : 'KW1958'; +KW1959 : 'KW1959'; +KW1960 : 'KW1960'; +KW1961 : 'KW1961'; +KW1962 : 'KW1962'; +KW1963 : 'KW1963'; +KW1964 : 'KW1964'; +KW1965 : 'KW1965'; +KW1966 : 'KW1966'; +KW1967 : 'KW1967'; +KW1968 : 'KW1968'; +KW1969 : 'KW1969'; +KW1970 : 'KW1970'; +KW1971 : 'KW1971'; +KW1972 : 'KW1972'; +KW1973 : 'KW1973'; +KW1974 : 'KW1974'; +KW1975 : 'KW1975'; +KW1976 : 'KW1976'; +KW1977 : 'KW1977'; +KW1978 : 'KW1978'; +KW1979 : 'KW1979'; +KW1980 : 'KW1980'; +KW1981 : 'KW1981'; +KW1982 : 'KW1982'; +KW1983 : 'KW1983'; +KW1984 : 'KW1984'; +KW1985 : 'KW1985'; +KW1986 : 'KW1986'; +KW1987 : 'KW1987'; +KW1988 : 'KW1988'; +KW1989 : 'KW1989'; +KW1990 : 'KW1990'; +KW1991 : 'KW1991'; +KW1992 : 'KW1992'; +KW1993 : 'KW1993'; +KW1994 : 'KW1994'; +KW1995 : 'KW1995'; +KW1996 : 'KW1996'; +KW1997 : 'KW1997'; +KW1998 : 'KW1998'; +KW1999 : 'KW1999'; +KW2000 : 'KW2000'; +KW2001 : 'KW2001'; +KW2002 : 'KW2002'; +KW2003 : 'KW2003'; +KW2004 : 'KW2004'; +KW2005 : 'KW2005'; +KW2006 : 'KW2006'; +KW2007 : 'KW2007'; +KW2008 : 'KW2008'; +KW2009 : 'KW2009'; +KW2010 : 'KW2010'; +KW2011 : 'KW2011'; +KW2012 : 'KW2012'; +KW2013 : 'KW2013'; +KW2014 : 'KW2014'; +KW2015 : 'KW2015'; +KW2016 : 'KW2016'; +KW2017 : 'KW2017'; +KW2018 : 'KW2018'; +KW2019 : 'KW2019'; +KW2020 : 'KW2020'; +KW2021 : 'KW2021'; +KW2022 : 'KW2022'; +KW2023 : 'KW2023'; +KW2024 : 'KW2024'; +KW2025 : 'KW2025'; +KW2026 : 'KW2026'; +KW2027 : 'KW2027'; +KW2028 : 'KW2028'; +KW2029 : 'KW2029'; +KW2030 : 'KW2030'; +KW2031 : 'KW2031'; +KW2032 : 'KW2032'; +KW2033 : 'KW2033'; +KW2034 : 'KW2034'; +KW2035 : 'KW2035'; +KW2036 : 'KW2036'; +KW2037 : 'KW2037'; +KW2038 : 'KW2038'; +KW2039 : 'KW2039'; +KW2040 : 'KW2040'; +KW2041 : 'KW2041'; +KW2042 : 'KW2042'; +KW2043 : 'KW2043'; +KW2044 : 'KW2044'; +KW2045 : 'KW2045'; +KW2046 : 'KW2046'; +KW2047 : 'KW2047'; +KW2048 : 'KW2048'; +KW2049 : 'KW2049'; +KW2050 : 'KW2050'; +KW2051 : 'KW2051'; +KW2052 : 'KW2052'; +KW2053 : 'KW2053'; +KW2054 : 'KW2054'; +KW2055 : 'KW2055'; +KW2056 : 'KW2056'; +KW2057 : 'KW2057'; +KW2058 : 'KW2058'; +KW2059 : 'KW2059'; +KW2060 : 'KW2060'; +KW2061 : 'KW2061'; +KW2062 : 'KW2062'; +KW2063 : 'KW2063'; +KW2064 : 'KW2064'; +KW2065 : 'KW2065'; +KW2066 : 'KW2066'; +KW2067 : 'KW2067'; +KW2068 : 'KW2068'; +KW2069 : 'KW2069'; +KW2070 : 'KW2070'; +KW2071 : 'KW2071'; +KW2072 : 'KW2072'; +KW2073 : 'KW2073'; +KW2074 : 'KW2074'; +KW2075 : 'KW2075'; +KW2076 : 'KW2076'; +KW2077 : 'KW2077'; +KW2078 : 'KW2078'; +KW2079 : 'KW2079'; +KW2080 : 'KW2080'; +KW2081 : 'KW2081'; +KW2082 : 'KW2082'; +KW2083 : 'KW2083'; +KW2084 : 'KW2084'; +KW2085 : 'KW2085'; +KW2086 : 'KW2086'; +KW2087 : 'KW2087'; +KW2088 : 'KW2088'; +KW2089 : 'KW2089'; +KW2090 : 'KW2090'; +KW2091 : 'KW2091'; +KW2092 : 'KW2092'; +KW2093 : 'KW2093'; +KW2094 : 'KW2094'; +KW2095 : 'KW2095'; +KW2096 : 'KW2096'; +KW2097 : 'KW2097'; +KW2098 : 'KW2098'; +KW2099 : 'KW2099'; +KW2100 : 'KW2100'; +KW2101 : 'KW2101'; +KW2102 : 'KW2102'; +KW2103 : 'KW2103'; +KW2104 : 'KW2104'; +KW2105 : 'KW2105'; +KW2106 : 'KW2106'; +KW2107 : 'KW2107'; +KW2108 : 'KW2108'; +KW2109 : 'KW2109'; +KW2110 : 'KW2110'; +KW2111 : 'KW2111'; +KW2112 : 'KW2112'; +KW2113 : 'KW2113'; +KW2114 : 'KW2114'; +KW2115 : 'KW2115'; +KW2116 : 'KW2116'; +KW2117 : 'KW2117'; +KW2118 : 'KW2118'; +KW2119 : 'KW2119'; 
+KW2120 : 'KW2120'; +KW2121 : 'KW2121'; +KW2122 : 'KW2122'; +KW2123 : 'KW2123'; +KW2124 : 'KW2124'; +KW2125 : 'KW2125'; +KW2126 : 'KW2126'; +KW2127 : 'KW2127'; +KW2128 : 'KW2128'; +KW2129 : 'KW2129'; +KW2130 : 'KW2130'; +KW2131 : 'KW2131'; +KW2132 : 'KW2132'; +KW2133 : 'KW2133'; +KW2134 : 'KW2134'; +KW2135 : 'KW2135'; +KW2136 : 'KW2136'; +KW2137 : 'KW2137'; +KW2138 : 'KW2138'; +KW2139 : 'KW2139'; +KW2140 : 'KW2140'; +KW2141 : 'KW2141'; +KW2142 : 'KW2142'; +KW2143 : 'KW2143'; +KW2144 : 'KW2144'; +KW2145 : 'KW2145'; +KW2146 : 'KW2146'; +KW2147 : 'KW2147'; +KW2148 : 'KW2148'; +KW2149 : 'KW2149'; +KW2150 : 'KW2150'; +KW2151 : 'KW2151'; +KW2152 : 'KW2152'; +KW2153 : 'KW2153'; +KW2154 : 'KW2154'; +KW2155 : 'KW2155'; +KW2156 : 'KW2156'; +KW2157 : 'KW2157'; +KW2158 : 'KW2158'; +KW2159 : 'KW2159'; +KW2160 : 'KW2160'; +KW2161 : 'KW2161'; +KW2162 : 'KW2162'; +KW2163 : 'KW2163'; +KW2164 : 'KW2164'; +KW2165 : 'KW2165'; +KW2166 : 'KW2166'; +KW2167 : 'KW2167'; +KW2168 : 'KW2168'; +KW2169 : 'KW2169'; +KW2170 : 'KW2170'; +KW2171 : 'KW2171'; +KW2172 : 'KW2172'; +KW2173 : 'KW2173'; +KW2174 : 'KW2174'; +KW2175 : 'KW2175'; +KW2176 : 'KW2176'; +KW2177 : 'KW2177'; +KW2178 : 'KW2178'; +KW2179 : 'KW2179'; +KW2180 : 'KW2180'; +KW2181 : 'KW2181'; +KW2182 : 'KW2182'; +KW2183 : 'KW2183'; +KW2184 : 'KW2184'; +KW2185 : 'KW2185'; +KW2186 : 'KW2186'; +KW2187 : 'KW2187'; +KW2188 : 'KW2188'; +KW2189 : 'KW2189'; +KW2190 : 'KW2190'; +KW2191 : 'KW2191'; +KW2192 : 'KW2192'; +KW2193 : 'KW2193'; +KW2194 : 'KW2194'; +KW2195 : 'KW2195'; +KW2196 : 'KW2196'; +KW2197 : 'KW2197'; +KW2198 : 'KW2198'; +KW2199 : 'KW2199'; +KW2200 : 'KW2200'; +KW2201 : 'KW2201'; +KW2202 : 'KW2202'; +KW2203 : 'KW2203'; +KW2204 : 'KW2204'; +KW2205 : 'KW2205'; +KW2206 : 'KW2206'; +KW2207 : 'KW2207'; +KW2208 : 'KW2208'; +KW2209 : 'KW2209'; +KW2210 : 'KW2210'; +KW2211 : 'KW2211'; +KW2212 : 'KW2212'; +KW2213 : 'KW2213'; +KW2214 : 'KW2214'; +KW2215 : 'KW2215'; +KW2216 : 'KW2216'; +KW2217 : 'KW2217'; +KW2218 : 'KW2218'; +KW2219 : 'KW2219'; +KW2220 : 'KW2220'; +KW2221 : 'KW2221'; +KW2222 : 'KW2222'; +KW2223 : 'KW2223'; +KW2224 : 'KW2224'; +KW2225 : 'KW2225'; +KW2226 : 'KW2226'; +KW2227 : 'KW2227'; +KW2228 : 'KW2228'; +KW2229 : 'KW2229'; +KW2230 : 'KW2230'; +KW2231 : 'KW2231'; +KW2232 : 'KW2232'; +KW2233 : 'KW2233'; +KW2234 : 'KW2234'; +KW2235 : 'KW2235'; +KW2236 : 'KW2236'; +KW2237 : 'KW2237'; +KW2238 : 'KW2238'; +KW2239 : 'KW2239'; +KW2240 : 'KW2240'; +KW2241 : 'KW2241'; +KW2242 : 'KW2242'; +KW2243 : 'KW2243'; +KW2244 : 'KW2244'; +KW2245 : 'KW2245'; +KW2246 : 'KW2246'; +KW2247 : 'KW2247'; +KW2248 : 'KW2248'; +KW2249 : 'KW2249'; +KW2250 : 'KW2250'; +KW2251 : 'KW2251'; +KW2252 : 'KW2252'; +KW2253 : 'KW2253'; +KW2254 : 'KW2254'; +KW2255 : 'KW2255'; +KW2256 : 'KW2256'; +KW2257 : 'KW2257'; +KW2258 : 'KW2258'; +KW2259 : 'KW2259'; +KW2260 : 'KW2260'; +KW2261 : 'KW2261'; +KW2262 : 'KW2262'; +KW2263 : 'KW2263'; +KW2264 : 'KW2264'; +KW2265 : 'KW2265'; +KW2266 : 'KW2266'; +KW2267 : 'KW2267'; +KW2268 : 'KW2268'; +KW2269 : 'KW2269'; +KW2270 : 'KW2270'; +KW2271 : 'KW2271'; +KW2272 : 'KW2272'; +KW2273 : 'KW2273'; +KW2274 : 'KW2274'; +KW2275 : 'KW2275'; +KW2276 : 'KW2276'; +KW2277 : 'KW2277'; +KW2278 : 'KW2278'; +KW2279 : 'KW2279'; +KW2280 : 'KW2280'; +KW2281 : 'KW2281'; +KW2282 : 'KW2282'; +KW2283 : 'KW2283'; +KW2284 : 'KW2284'; +KW2285 : 'KW2285'; +KW2286 : 'KW2286'; +KW2287 : 'KW2287'; +KW2288 : 'KW2288'; +KW2289 : 'KW2289'; +KW2290 : 'KW2290'; +KW2291 : 'KW2291'; +KW2292 : 'KW2292'; +KW2293 : 'KW2293'; +KW2294 : 'KW2294'; +KW2295 : 'KW2295'; +KW2296 : 'KW2296'; +KW2297 : 
'KW2297'; +KW2298 : 'KW2298'; +KW2299 : 'KW2299'; +KW2300 : 'KW2300'; +KW2301 : 'KW2301'; +KW2302 : 'KW2302'; +KW2303 : 'KW2303'; +KW2304 : 'KW2304'; +KW2305 : 'KW2305'; +KW2306 : 'KW2306'; +KW2307 : 'KW2307'; +KW2308 : 'KW2308'; +KW2309 : 'KW2309'; +KW2310 : 'KW2310'; +KW2311 : 'KW2311'; +KW2312 : 'KW2312'; +KW2313 : 'KW2313'; +KW2314 : 'KW2314'; +KW2315 : 'KW2315'; +KW2316 : 'KW2316'; +KW2317 : 'KW2317'; +KW2318 : 'KW2318'; +KW2319 : 'KW2319'; +KW2320 : 'KW2320'; +KW2321 : 'KW2321'; +KW2322 : 'KW2322'; +KW2323 : 'KW2323'; +KW2324 : 'KW2324'; +KW2325 : 'KW2325'; +KW2326 : 'KW2326'; +KW2327 : 'KW2327'; +KW2328 : 'KW2328'; +KW2329 : 'KW2329'; +KW2330 : 'KW2330'; +KW2331 : 'KW2331'; +KW2332 : 'KW2332'; +KW2333 : 'KW2333'; +KW2334 : 'KW2334'; +KW2335 : 'KW2335'; +KW2336 : 'KW2336'; +KW2337 : 'KW2337'; +KW2338 : 'KW2338'; +KW2339 : 'KW2339'; +KW2340 : 'KW2340'; +KW2341 : 'KW2341'; +KW2342 : 'KW2342'; +KW2343 : 'KW2343'; +KW2344 : 'KW2344'; +KW2345 : 'KW2345'; +KW2346 : 'KW2346'; +KW2347 : 'KW2347'; +KW2348 : 'KW2348'; +KW2349 : 'KW2349'; +KW2350 : 'KW2350'; +KW2351 : 'KW2351'; +KW2352 : 'KW2352'; +KW2353 : 'KW2353'; +KW2354 : 'KW2354'; +KW2355 : 'KW2355'; +KW2356 : 'KW2356'; +KW2357 : 'KW2357'; +KW2358 : 'KW2358'; +KW2359 : 'KW2359'; +KW2360 : 'KW2360'; +KW2361 : 'KW2361'; +KW2362 : 'KW2362'; +KW2363 : 'KW2363'; +KW2364 : 'KW2364'; +KW2365 : 'KW2365'; +KW2366 : 'KW2366'; +KW2367 : 'KW2367'; +KW2368 : 'KW2368'; +KW2369 : 'KW2369'; +KW2370 : 'KW2370'; +KW2371 : 'KW2371'; +KW2372 : 'KW2372'; +KW2373 : 'KW2373'; +KW2374 : 'KW2374'; +KW2375 : 'KW2375'; +KW2376 : 'KW2376'; +KW2377 : 'KW2377'; +KW2378 : 'KW2378'; +KW2379 : 'KW2379'; +KW2380 : 'KW2380'; +KW2381 : 'KW2381'; +KW2382 : 'KW2382'; +KW2383 : 'KW2383'; +KW2384 : 'KW2384'; +KW2385 : 'KW2385'; +KW2386 : 'KW2386'; +KW2387 : 'KW2387'; +KW2388 : 'KW2388'; +KW2389 : 'KW2389'; +KW2390 : 'KW2390'; +KW2391 : 'KW2391'; +KW2392 : 'KW2392'; +KW2393 : 'KW2393'; +KW2394 : 'KW2394'; +KW2395 : 'KW2395'; +KW2396 : 'KW2396'; +KW2397 : 'KW2397'; +KW2398 : 'KW2398'; +KW2399 : 'KW2399'; +KW2400 : 'KW2400'; +KW2401 : 'KW2401'; +KW2402 : 'KW2402'; +KW2403 : 'KW2403'; +KW2404 : 'KW2404'; +KW2405 : 'KW2405'; +KW2406 : 'KW2406'; +KW2407 : 'KW2407'; +KW2408 : 'KW2408'; +KW2409 : 'KW2409'; +KW2410 : 'KW2410'; +KW2411 : 'KW2411'; +KW2412 : 'KW2412'; +KW2413 : 'KW2413'; +KW2414 : 'KW2414'; +KW2415 : 'KW2415'; +KW2416 : 'KW2416'; +KW2417 : 'KW2417'; +KW2418 : 'KW2418'; +KW2419 : 'KW2419'; +KW2420 : 'KW2420'; +KW2421 : 'KW2421'; +KW2422 : 'KW2422'; +KW2423 : 'KW2423'; +KW2424 : 'KW2424'; +KW2425 : 'KW2425'; +KW2426 : 'KW2426'; +KW2427 : 'KW2427'; +KW2428 : 'KW2428'; +KW2429 : 'KW2429'; +KW2430 : 'KW2430'; +KW2431 : 'KW2431'; +KW2432 : 'KW2432'; +KW2433 : 'KW2433'; +KW2434 : 'KW2434'; +KW2435 : 'KW2435'; +KW2436 : 'KW2436'; +KW2437 : 'KW2437'; +KW2438 : 'KW2438'; +KW2439 : 'KW2439'; +KW2440 : 'KW2440'; +KW2441 : 'KW2441'; +KW2442 : 'KW2442'; +KW2443 : 'KW2443'; +KW2444 : 'KW2444'; +KW2445 : 'KW2445'; +KW2446 : 'KW2446'; +KW2447 : 'KW2447'; +KW2448 : 'KW2448'; +KW2449 : 'KW2449'; +KW2450 : 'KW2450'; +KW2451 : 'KW2451'; +KW2452 : 'KW2452'; +KW2453 : 'KW2453'; +KW2454 : 'KW2454'; +KW2455 : 'KW2455'; +KW2456 : 'KW2456'; +KW2457 : 'KW2457'; +KW2458 : 'KW2458'; +KW2459 : 'KW2459'; +KW2460 : 'KW2460'; +KW2461 : 'KW2461'; +KW2462 : 'KW2462'; +KW2463 : 'KW2463'; +KW2464 : 'KW2464'; +KW2465 : 'KW2465'; +KW2466 : 'KW2466'; +KW2467 : 'KW2467'; +KW2468 : 'KW2468'; +KW2469 : 'KW2469'; +KW2470 : 'KW2470'; +KW2471 : 'KW2471'; +KW2472 : 'KW2472'; +KW2473 : 'KW2473'; +KW2474 : 'KW2474'; 
+KW2475 : 'KW2475'; +KW2476 : 'KW2476'; +KW2477 : 'KW2477'; +KW2478 : 'KW2478'; +KW2479 : 'KW2479'; +KW2480 : 'KW2480'; +KW2481 : 'KW2481'; +KW2482 : 'KW2482'; +KW2483 : 'KW2483'; +KW2484 : 'KW2484'; +KW2485 : 'KW2485'; +KW2486 : 'KW2486'; +KW2487 : 'KW2487'; +KW2488 : 'KW2488'; +KW2489 : 'KW2489'; +KW2490 : 'KW2490'; +KW2491 : 'KW2491'; +KW2492 : 'KW2492'; +KW2493 : 'KW2493'; +KW2494 : 'KW2494'; +KW2495 : 'KW2495'; +KW2496 : 'KW2496'; +KW2497 : 'KW2497'; +KW2498 : 'KW2498'; +KW2499 : 'KW2499'; +KW2500 : 'KW2500'; +KW2501 : 'KW2501'; +KW2502 : 'KW2502'; +KW2503 : 'KW2503'; +KW2504 : 'KW2504'; +KW2505 : 'KW2505'; +KW2506 : 'KW2506'; +KW2507 : 'KW2507'; +KW2508 : 'KW2508'; +KW2509 : 'KW2509'; +KW2510 : 'KW2510'; +KW2511 : 'KW2511'; +KW2512 : 'KW2512'; +KW2513 : 'KW2513'; +KW2514 : 'KW2514'; +KW2515 : 'KW2515'; +KW2516 : 'KW2516'; +KW2517 : 'KW2517'; +KW2518 : 'KW2518'; +KW2519 : 'KW2519'; +KW2520 : 'KW2520'; +KW2521 : 'KW2521'; +KW2522 : 'KW2522'; +KW2523 : 'KW2523'; +KW2524 : 'KW2524'; +KW2525 : 'KW2525'; +KW2526 : 'KW2526'; +KW2527 : 'KW2527'; +KW2528 : 'KW2528'; +KW2529 : 'KW2529'; +KW2530 : 'KW2530'; +KW2531 : 'KW2531'; +KW2532 : 'KW2532'; +KW2533 : 'KW2533'; +KW2534 : 'KW2534'; +KW2535 : 'KW2535'; +KW2536 : 'KW2536'; +KW2537 : 'KW2537'; +KW2538 : 'KW2538'; +KW2539 : 'KW2539'; +KW2540 : 'KW2540'; +KW2541 : 'KW2541'; +KW2542 : 'KW2542'; +KW2543 : 'KW2543'; +KW2544 : 'KW2544'; +KW2545 : 'KW2545'; +KW2546 : 'KW2546'; +KW2547 : 'KW2547'; +KW2548 : 'KW2548'; +KW2549 : 'KW2549'; +KW2550 : 'KW2550'; +KW2551 : 'KW2551'; +KW2552 : 'KW2552'; +KW2553 : 'KW2553'; +KW2554 : 'KW2554'; +KW2555 : 'KW2555'; +KW2556 : 'KW2556'; +KW2557 : 'KW2557'; +KW2558 : 'KW2558'; +KW2559 : 'KW2559'; +KW2560 : 'KW2560'; +KW2561 : 'KW2561'; +KW2562 : 'KW2562'; +KW2563 : 'KW2563'; +KW2564 : 'KW2564'; +KW2565 : 'KW2565'; +KW2566 : 'KW2566'; +KW2567 : 'KW2567'; +KW2568 : 'KW2568'; +KW2569 : 'KW2569'; +KW2570 : 'KW2570'; +KW2571 : 'KW2571'; +KW2572 : 'KW2572'; +KW2573 : 'KW2573'; +KW2574 : 'KW2574'; +KW2575 : 'KW2575'; +KW2576 : 'KW2576'; +KW2577 : 'KW2577'; +KW2578 : 'KW2578'; +KW2579 : 'KW2579'; +KW2580 : 'KW2580'; +KW2581 : 'KW2581'; +KW2582 : 'KW2582'; +KW2583 : 'KW2583'; +KW2584 : 'KW2584'; +KW2585 : 'KW2585'; +KW2586 : 'KW2586'; +KW2587 : 'KW2587'; +KW2588 : 'KW2588'; +KW2589 : 'KW2589'; +KW2590 : 'KW2590'; +KW2591 : 'KW2591'; +KW2592 : 'KW2592'; +KW2593 : 'KW2593'; +KW2594 : 'KW2594'; +KW2595 : 'KW2595'; +KW2596 : 'KW2596'; +KW2597 : 'KW2597'; +KW2598 : 'KW2598'; +KW2599 : 'KW2599'; +KW2600 : 'KW2600'; +KW2601 : 'KW2601'; +KW2602 : 'KW2602'; +KW2603 : 'KW2603'; +KW2604 : 'KW2604'; +KW2605 : 'KW2605'; +KW2606 : 'KW2606'; +KW2607 : 'KW2607'; +KW2608 : 'KW2608'; +KW2609 : 'KW2609'; +KW2610 : 'KW2610'; +KW2611 : 'KW2611'; +KW2612 : 'KW2612'; +KW2613 : 'KW2613'; +KW2614 : 'KW2614'; +KW2615 : 'KW2615'; +KW2616 : 'KW2616'; +KW2617 : 'KW2617'; +KW2618 : 'KW2618'; +KW2619 : 'KW2619'; +KW2620 : 'KW2620'; +KW2621 : 'KW2621'; +KW2622 : 'KW2622'; +KW2623 : 'KW2623'; +KW2624 : 'KW2624'; +KW2625 : 'KW2625'; +KW2626 : 'KW2626'; +KW2627 : 'KW2627'; +KW2628 : 'KW2628'; +KW2629 : 'KW2629'; +KW2630 : 'KW2630'; +KW2631 : 'KW2631'; +KW2632 : 'KW2632'; +KW2633 : 'KW2633'; +KW2634 : 'KW2634'; +KW2635 : 'KW2635'; +KW2636 : 'KW2636'; +KW2637 : 'KW2637'; +KW2638 : 'KW2638'; +KW2639 : 'KW2639'; +KW2640 : 'KW2640'; +KW2641 : 'KW2641'; +KW2642 : 'KW2642'; +KW2643 : 'KW2643'; +KW2644 : 'KW2644'; +KW2645 : 'KW2645'; +KW2646 : 'KW2646'; +KW2647 : 'KW2647'; +KW2648 : 'KW2648'; +KW2649 : 'KW2649'; +KW2650 : 'KW2650'; +KW2651 : 'KW2651'; +KW2652 : 
'KW2652'; +KW2653 : 'KW2653'; +KW2654 : 'KW2654'; +KW2655 : 'KW2655'; +KW2656 : 'KW2656'; +KW2657 : 'KW2657'; +KW2658 : 'KW2658'; +KW2659 : 'KW2659'; +KW2660 : 'KW2660'; +KW2661 : 'KW2661'; +KW2662 : 'KW2662'; +KW2663 : 'KW2663'; +KW2664 : 'KW2664'; +KW2665 : 'KW2665'; +KW2666 : 'KW2666'; +KW2667 : 'KW2667'; +KW2668 : 'KW2668'; +KW2669 : 'KW2669'; +KW2670 : 'KW2670'; +KW2671 : 'KW2671'; +KW2672 : 'KW2672'; +KW2673 : 'KW2673'; +KW2674 : 'KW2674'; +KW2675 : 'KW2675'; +KW2676 : 'KW2676'; +KW2677 : 'KW2677'; +KW2678 : 'KW2678'; +KW2679 : 'KW2679'; +KW2680 : 'KW2680'; +KW2681 : 'KW2681'; +KW2682 : 'KW2682'; +KW2683 : 'KW2683'; +KW2684 : 'KW2684'; +KW2685 : 'KW2685'; +KW2686 : 'KW2686'; +KW2687 : 'KW2687'; +KW2688 : 'KW2688'; +KW2689 : 'KW2689'; +KW2690 : 'KW2690'; +KW2691 : 'KW2691'; +KW2692 : 'KW2692'; +KW2693 : 'KW2693'; +KW2694 : 'KW2694'; +KW2695 : 'KW2695'; +KW2696 : 'KW2696'; +KW2697 : 'KW2697'; +KW2698 : 'KW2698'; +KW2699 : 'KW2699'; +KW2700 : 'KW2700'; +KW2701 : 'KW2701'; +KW2702 : 'KW2702'; +KW2703 : 'KW2703'; +KW2704 : 'KW2704'; +KW2705 : 'KW2705'; +KW2706 : 'KW2706'; +KW2707 : 'KW2707'; +KW2708 : 'KW2708'; +KW2709 : 'KW2709'; +KW2710 : 'KW2710'; +KW2711 : 'KW2711'; +KW2712 : 'KW2712'; +KW2713 : 'KW2713'; +KW2714 : 'KW2714'; +KW2715 : 'KW2715'; +KW2716 : 'KW2716'; +KW2717 : 'KW2717'; +KW2718 : 'KW2718'; +KW2719 : 'KW2719'; +KW2720 : 'KW2720'; +KW2721 : 'KW2721'; +KW2722 : 'KW2722'; +KW2723 : 'KW2723'; +KW2724 : 'KW2724'; +KW2725 : 'KW2725'; +KW2726 : 'KW2726'; +KW2727 : 'KW2727'; +KW2728 : 'KW2728'; +KW2729 : 'KW2729'; +KW2730 : 'KW2730'; +KW2731 : 'KW2731'; +KW2732 : 'KW2732'; +KW2733 : 'KW2733'; +KW2734 : 'KW2734'; +KW2735 : 'KW2735'; +KW2736 : 'KW2736'; +KW2737 : 'KW2737'; +KW2738 : 'KW2738'; +KW2739 : 'KW2739'; +KW2740 : 'KW2740'; +KW2741 : 'KW2741'; +KW2742 : 'KW2742'; +KW2743 : 'KW2743'; +KW2744 : 'KW2744'; +KW2745 : 'KW2745'; +KW2746 : 'KW2746'; +KW2747 : 'KW2747'; +KW2748 : 'KW2748'; +KW2749 : 'KW2749'; +KW2750 : 'KW2750'; +KW2751 : 'KW2751'; +KW2752 : 'KW2752'; +KW2753 : 'KW2753'; +KW2754 : 'KW2754'; +KW2755 : 'KW2755'; +KW2756 : 'KW2756'; +KW2757 : 'KW2757'; +KW2758 : 'KW2758'; +KW2759 : 'KW2759'; +KW2760 : 'KW2760'; +KW2761 : 'KW2761'; +KW2762 : 'KW2762'; +KW2763 : 'KW2763'; +KW2764 : 'KW2764'; +KW2765 : 'KW2765'; +KW2766 : 'KW2766'; +KW2767 : 'KW2767'; +KW2768 : 'KW2768'; +KW2769 : 'KW2769'; +KW2770 : 'KW2770'; +KW2771 : 'KW2771'; +KW2772 : 'KW2772'; +KW2773 : 'KW2773'; +KW2774 : 'KW2774'; +KW2775 : 'KW2775'; +KW2776 : 'KW2776'; +KW2777 : 'KW2777'; +KW2778 : 'KW2778'; +KW2779 : 'KW2779'; +KW2780 : 'KW2780'; +KW2781 : 'KW2781'; +KW2782 : 'KW2782'; +KW2783 : 'KW2783'; +KW2784 : 'KW2784'; +KW2785 : 'KW2785'; +KW2786 : 'KW2786'; +KW2787 : 'KW2787'; +KW2788 : 'KW2788'; +KW2789 : 'KW2789'; +KW2790 : 'KW2790'; +KW2791 : 'KW2791'; +KW2792 : 'KW2792'; +KW2793 : 'KW2793'; +KW2794 : 'KW2794'; +KW2795 : 'KW2795'; +KW2796 : 'KW2796'; +KW2797 : 'KW2797'; +KW2798 : 'KW2798'; +KW2799 : 'KW2799'; +KW2800 : 'KW2800'; +KW2801 : 'KW2801'; +KW2802 : 'KW2802'; +KW2803 : 'KW2803'; +KW2804 : 'KW2804'; +KW2805 : 'KW2805'; +KW2806 : 'KW2806'; +KW2807 : 'KW2807'; +KW2808 : 'KW2808'; +KW2809 : 'KW2809'; +KW2810 : 'KW2810'; +KW2811 : 'KW2811'; +KW2812 : 'KW2812'; +KW2813 : 'KW2813'; +KW2814 : 'KW2814'; +KW2815 : 'KW2815'; +KW2816 : 'KW2816'; +KW2817 : 'KW2817'; +KW2818 : 'KW2818'; +KW2819 : 'KW2819'; +KW2820 : 'KW2820'; +KW2821 : 'KW2821'; +KW2822 : 'KW2822'; +KW2823 : 'KW2823'; +KW2824 : 'KW2824'; +KW2825 : 'KW2825'; +KW2826 : 'KW2826'; +KW2827 : 'KW2827'; +KW2828 : 'KW2828'; +KW2829 : 'KW2829'; 
+KW2830 : 'KW2830'; +KW2831 : 'KW2831'; +KW2832 : 'KW2832'; +KW2833 : 'KW2833'; +KW2834 : 'KW2834'; +KW2835 : 'KW2835'; +KW2836 : 'KW2836'; +KW2837 : 'KW2837'; +KW2838 : 'KW2838'; +KW2839 : 'KW2839'; +KW2840 : 'KW2840'; +KW2841 : 'KW2841'; +KW2842 : 'KW2842'; +KW2843 : 'KW2843'; +KW2844 : 'KW2844'; +KW2845 : 'KW2845'; +KW2846 : 'KW2846'; +KW2847 : 'KW2847'; +KW2848 : 'KW2848'; +KW2849 : 'KW2849'; +KW2850 : 'KW2850'; +KW2851 : 'KW2851'; +KW2852 : 'KW2852'; +KW2853 : 'KW2853'; +KW2854 : 'KW2854'; +KW2855 : 'KW2855'; +KW2856 : 'KW2856'; +KW2857 : 'KW2857'; +KW2858 : 'KW2858'; +KW2859 : 'KW2859'; +KW2860 : 'KW2860'; +KW2861 : 'KW2861'; +KW2862 : 'KW2862'; +KW2863 : 'KW2863'; +KW2864 : 'KW2864'; +KW2865 : 'KW2865'; +KW2866 : 'KW2866'; +KW2867 : 'KW2867'; +KW2868 : 'KW2868'; +KW2869 : 'KW2869'; +KW2870 : 'KW2870'; +KW2871 : 'KW2871'; +KW2872 : 'KW2872'; +KW2873 : 'KW2873'; +KW2874 : 'KW2874'; +KW2875 : 'KW2875'; +KW2876 : 'KW2876'; +KW2877 : 'KW2877'; +KW2878 : 'KW2878'; +KW2879 : 'KW2879'; +KW2880 : 'KW2880'; +KW2881 : 'KW2881'; +KW2882 : 'KW2882'; +KW2883 : 'KW2883'; +KW2884 : 'KW2884'; +KW2885 : 'KW2885'; +KW2886 : 'KW2886'; +KW2887 : 'KW2887'; +KW2888 : 'KW2888'; +KW2889 : 'KW2889'; +KW2890 : 'KW2890'; +KW2891 : 'KW2891'; +KW2892 : 'KW2892'; +KW2893 : 'KW2893'; +KW2894 : 'KW2894'; +KW2895 : 'KW2895'; +KW2896 : 'KW2896'; +KW2897 : 'KW2897'; +KW2898 : 'KW2898'; +KW2899 : 'KW2899'; +KW2900 : 'KW2900'; +KW2901 : 'KW2901'; +KW2902 : 'KW2902'; +KW2903 : 'KW2903'; +KW2904 : 'KW2904'; +KW2905 : 'KW2905'; +KW2906 : 'KW2906'; +KW2907 : 'KW2907'; +KW2908 : 'KW2908'; +KW2909 : 'KW2909'; +KW2910 : 'KW2910'; +KW2911 : 'KW2911'; +KW2912 : 'KW2912'; +KW2913 : 'KW2913'; +KW2914 : 'KW2914'; +KW2915 : 'KW2915'; +KW2916 : 'KW2916'; +KW2917 : 'KW2917'; +KW2918 : 'KW2918'; +KW2919 : 'KW2919'; +KW2920 : 'KW2920'; +KW2921 : 'KW2921'; +KW2922 : 'KW2922'; +KW2923 : 'KW2923'; +KW2924 : 'KW2924'; +KW2925 : 'KW2925'; +KW2926 : 'KW2926'; +KW2927 : 'KW2927'; +KW2928 : 'KW2928'; +KW2929 : 'KW2929'; +KW2930 : 'KW2930'; +KW2931 : 'KW2931'; +KW2932 : 'KW2932'; +KW2933 : 'KW2933'; +KW2934 : 'KW2934'; +KW2935 : 'KW2935'; +KW2936 : 'KW2936'; +KW2937 : 'KW2937'; +KW2938 : 'KW2938'; +KW2939 : 'KW2939'; +KW2940 : 'KW2940'; +KW2941 : 'KW2941'; +KW2942 : 'KW2942'; +KW2943 : 'KW2943'; +KW2944 : 'KW2944'; +KW2945 : 'KW2945'; +KW2946 : 'KW2946'; +KW2947 : 'KW2947'; +KW2948 : 'KW2948'; +KW2949 : 'KW2949'; +KW2950 : 'KW2950'; +KW2951 : 'KW2951'; +KW2952 : 'KW2952'; +KW2953 : 'KW2953'; +KW2954 : 'KW2954'; +KW2955 : 'KW2955'; +KW2956 : 'KW2956'; +KW2957 : 'KW2957'; +KW2958 : 'KW2958'; +KW2959 : 'KW2959'; +KW2960 : 'KW2960'; +KW2961 : 'KW2961'; +KW2962 : 'KW2962'; +KW2963 : 'KW2963'; +KW2964 : 'KW2964'; +KW2965 : 'KW2965'; +KW2966 : 'KW2966'; +KW2967 : 'KW2967'; +KW2968 : 'KW2968'; +KW2969 : 'KW2969'; +KW2970 : 'KW2970'; +KW2971 : 'KW2971'; +KW2972 : 'KW2972'; +KW2973 : 'KW2973'; +KW2974 : 'KW2974'; +KW2975 : 'KW2975'; +KW2976 : 'KW2976'; +KW2977 : 'KW2977'; +KW2978 : 'KW2978'; +KW2979 : 'KW2979'; +KW2980 : 'KW2980'; +KW2981 : 'KW2981'; +KW2982 : 'KW2982'; +KW2983 : 'KW2983'; +KW2984 : 'KW2984'; +KW2985 : 'KW2985'; +KW2986 : 'KW2986'; +KW2987 : 'KW2987'; +KW2988 : 'KW2988'; +KW2989 : 'KW2989'; +KW2990 : 'KW2990'; +KW2991 : 'KW2991'; +KW2992 : 'KW2992'; +KW2993 : 'KW2993'; +KW2994 : 'KW2994'; +KW2995 : 'KW2995'; +KW2996 : 'KW2996'; +KW2997 : 'KW2997'; +KW2998 : 'KW2998'; +KW2999 : 'KW2999'; +KW3000 : 'KW3000'; +KW3001 : 'KW3001'; +KW3002 : 'KW3002'; +KW3003 : 'KW3003'; +KW3004 : 'KW3004'; +KW3005 : 'KW3005'; +KW3006 : 'KW3006'; +KW3007 : 
'KW3007'; +KW3008 : 'KW3008'; +KW3009 : 'KW3009'; +KW3010 : 'KW3010'; +KW3011 : 'KW3011'; +KW3012 : 'KW3012'; +KW3013 : 'KW3013'; +KW3014 : 'KW3014'; +KW3015 : 'KW3015'; +KW3016 : 'KW3016'; +KW3017 : 'KW3017'; +KW3018 : 'KW3018'; +KW3019 : 'KW3019'; +KW3020 : 'KW3020'; +KW3021 : 'KW3021'; +KW3022 : 'KW3022'; +KW3023 : 'KW3023'; +KW3024 : 'KW3024'; +KW3025 : 'KW3025'; +KW3026 : 'KW3026'; +KW3027 : 'KW3027'; +KW3028 : 'KW3028'; +KW3029 : 'KW3029'; +KW3030 : 'KW3030'; +KW3031 : 'KW3031'; +KW3032 : 'KW3032'; +KW3033 : 'KW3033'; +KW3034 : 'KW3034'; +KW3035 : 'KW3035'; +KW3036 : 'KW3036'; +KW3037 : 'KW3037'; +KW3038 : 'KW3038'; +KW3039 : 'KW3039'; +KW3040 : 'KW3040'; +KW3041 : 'KW3041'; +KW3042 : 'KW3042'; +KW3043 : 'KW3043'; +KW3044 : 'KW3044'; +KW3045 : 'KW3045'; +KW3046 : 'KW3046'; +KW3047 : 'KW3047'; +KW3048 : 'KW3048'; +KW3049 : 'KW3049'; +KW3050 : 'KW3050'; +KW3051 : 'KW3051'; +KW3052 : 'KW3052'; +KW3053 : 'KW3053'; +KW3054 : 'KW3054'; +KW3055 : 'KW3055'; +KW3056 : 'KW3056'; +KW3057 : 'KW3057'; +KW3058 : 'KW3058'; +KW3059 : 'KW3059'; +KW3060 : 'KW3060'; +KW3061 : 'KW3061'; +KW3062 : 'KW3062'; +KW3063 : 'KW3063'; +KW3064 : 'KW3064'; +KW3065 : 'KW3065'; +KW3066 : 'KW3066'; +KW3067 : 'KW3067'; +KW3068 : 'KW3068'; +KW3069 : 'KW3069'; +KW3070 : 'KW3070'; +KW3071 : 'KW3071'; +KW3072 : 'KW3072'; +KW3073 : 'KW3073'; +KW3074 : 'KW3074'; +KW3075 : 'KW3075'; +KW3076 : 'KW3076'; +KW3077 : 'KW3077'; +KW3078 : 'KW3078'; +KW3079 : 'KW3079'; +KW3080 : 'KW3080'; +KW3081 : 'KW3081'; +KW3082 : 'KW3082'; +KW3083 : 'KW3083'; +KW3084 : 'KW3084'; +KW3085 : 'KW3085'; +KW3086 : 'KW3086'; +KW3087 : 'KW3087'; +KW3088 : 'KW3088'; +KW3089 : 'KW3089'; +KW3090 : 'KW3090'; +KW3091 : 'KW3091'; +KW3092 : 'KW3092'; +KW3093 : 'KW3093'; +KW3094 : 'KW3094'; +KW3095 : 'KW3095'; +KW3096 : 'KW3096'; +KW3097 : 'KW3097'; +KW3098 : 'KW3098'; +KW3099 : 'KW3099'; +KW3100 : 'KW3100'; +KW3101 : 'KW3101'; +KW3102 : 'KW3102'; +KW3103 : 'KW3103'; +KW3104 : 'KW3104'; +KW3105 : 'KW3105'; +KW3106 : 'KW3106'; +KW3107 : 'KW3107'; +KW3108 : 'KW3108'; +KW3109 : 'KW3109'; +KW3110 : 'KW3110'; +KW3111 : 'KW3111'; +KW3112 : 'KW3112'; +KW3113 : 'KW3113'; +KW3114 : 'KW3114'; +KW3115 : 'KW3115'; +KW3116 : 'KW3116'; +KW3117 : 'KW3117'; +KW3118 : 'KW3118'; +KW3119 : 'KW3119'; +KW3120 : 'KW3120'; +KW3121 : 'KW3121'; +KW3122 : 'KW3122'; +KW3123 : 'KW3123'; +KW3124 : 'KW3124'; +KW3125 : 'KW3125'; +KW3126 : 'KW3126'; +KW3127 : 'KW3127'; +KW3128 : 'KW3128'; +KW3129 : 'KW3129'; +KW3130 : 'KW3130'; +KW3131 : 'KW3131'; +KW3132 : 'KW3132'; +KW3133 : 'KW3133'; +KW3134 : 'KW3134'; +KW3135 : 'KW3135'; +KW3136 : 'KW3136'; +KW3137 : 'KW3137'; +KW3138 : 'KW3138'; +KW3139 : 'KW3139'; +KW3140 : 'KW3140'; +KW3141 : 'KW3141'; +KW3142 : 'KW3142'; +KW3143 : 'KW3143'; +KW3144 : 'KW3144'; +KW3145 : 'KW3145'; +KW3146 : 'KW3146'; +KW3147 : 'KW3147'; +KW3148 : 'KW3148'; +KW3149 : 'KW3149'; +KW3150 : 'KW3150'; +KW3151 : 'KW3151'; +KW3152 : 'KW3152'; +KW3153 : 'KW3153'; +KW3154 : 'KW3154'; +KW3155 : 'KW3155'; +KW3156 : 'KW3156'; +KW3157 : 'KW3157'; +KW3158 : 'KW3158'; +KW3159 : 'KW3159'; +KW3160 : 'KW3160'; +KW3161 : 'KW3161'; +KW3162 : 'KW3162'; +KW3163 : 'KW3163'; +KW3164 : 'KW3164'; +KW3165 : 'KW3165'; +KW3166 : 'KW3166'; +KW3167 : 'KW3167'; +KW3168 : 'KW3168'; +KW3169 : 'KW3169'; +KW3170 : 'KW3170'; +KW3171 : 'KW3171'; +KW3172 : 'KW3172'; +KW3173 : 'KW3173'; +KW3174 : 'KW3174'; +KW3175 : 'KW3175'; +KW3176 : 'KW3176'; +KW3177 : 'KW3177'; +KW3178 : 'KW3178'; +KW3179 : 'KW3179'; +KW3180 : 'KW3180'; +KW3181 : 'KW3181'; +KW3182 : 'KW3182'; +KW3183 : 'KW3183'; +KW3184 : 'KW3184'; 
+KW3185 : 'KW3185'; +KW3186 : 'KW3186'; +KW3187 : 'KW3187'; +KW3188 : 'KW3188'; +KW3189 : 'KW3189'; +KW3190 : 'KW3190'; +KW3191 : 'KW3191'; +KW3192 : 'KW3192'; +KW3193 : 'KW3193'; +KW3194 : 'KW3194'; +KW3195 : 'KW3195'; +KW3196 : 'KW3196'; +KW3197 : 'KW3197'; +KW3198 : 'KW3198'; +KW3199 : 'KW3199'; +KW3200 : 'KW3200'; +KW3201 : 'KW3201'; +KW3202 : 'KW3202'; +KW3203 : 'KW3203'; +KW3204 : 'KW3204'; +KW3205 : 'KW3205'; +KW3206 : 'KW3206'; +KW3207 : 'KW3207'; +KW3208 : 'KW3208'; +KW3209 : 'KW3209'; +KW3210 : 'KW3210'; +KW3211 : 'KW3211'; +KW3212 : 'KW3212'; +KW3213 : 'KW3213'; +KW3214 : 'KW3214'; +KW3215 : 'KW3215'; +KW3216 : 'KW3216'; +KW3217 : 'KW3217'; +KW3218 : 'KW3218'; +KW3219 : 'KW3219'; +KW3220 : 'KW3220'; +KW3221 : 'KW3221'; +KW3222 : 'KW3222'; +KW3223 : 'KW3223'; +KW3224 : 'KW3224'; +KW3225 : 'KW3225'; +KW3226 : 'KW3226'; +KW3227 : 'KW3227'; +KW3228 : 'KW3228'; +KW3229 : 'KW3229'; +KW3230 : 'KW3230'; +KW3231 : 'KW3231'; +KW3232 : 'KW3232'; +KW3233 : 'KW3233'; +KW3234 : 'KW3234'; +KW3235 : 'KW3235'; +KW3236 : 'KW3236'; +KW3237 : 'KW3237'; +KW3238 : 'KW3238'; +KW3239 : 'KW3239'; +KW3240 : 'KW3240'; +KW3241 : 'KW3241'; +KW3242 : 'KW3242'; +KW3243 : 'KW3243'; +KW3244 : 'KW3244'; +KW3245 : 'KW3245'; +KW3246 : 'KW3246'; +KW3247 : 'KW3247'; +KW3248 : 'KW3248'; +KW3249 : 'KW3249'; +KW3250 : 'KW3250'; +KW3251 : 'KW3251'; +KW3252 : 'KW3252'; +KW3253 : 'KW3253'; +KW3254 : 'KW3254'; +KW3255 : 'KW3255'; +KW3256 : 'KW3256'; +KW3257 : 'KW3257'; +KW3258 : 'KW3258'; +KW3259 : 'KW3259'; +KW3260 : 'KW3260'; +KW3261 : 'KW3261'; +KW3262 : 'KW3262'; +KW3263 : 'KW3263'; +KW3264 : 'KW3264'; +KW3265 : 'KW3265'; +KW3266 : 'KW3266'; +KW3267 : 'KW3267'; +KW3268 : 'KW3268'; +KW3269 : 'KW3269'; +KW3270 : 'KW3270'; +KW3271 : 'KW3271'; +KW3272 : 'KW3272'; +KW3273 : 'KW3273'; +KW3274 : 'KW3274'; +KW3275 : 'KW3275'; +KW3276 : 'KW3276'; +KW3277 : 'KW3277'; +KW3278 : 'KW3278'; +KW3279 : 'KW3279'; +KW3280 : 'KW3280'; +KW3281 : 'KW3281'; +KW3282 : 'KW3282'; +KW3283 : 'KW3283'; +KW3284 : 'KW3284'; +KW3285 : 'KW3285'; +KW3286 : 'KW3286'; +KW3287 : 'KW3287'; +KW3288 : 'KW3288'; +KW3289 : 'KW3289'; +KW3290 : 'KW3290'; +KW3291 : 'KW3291'; +KW3292 : 'KW3292'; +KW3293 : 'KW3293'; +KW3294 : 'KW3294'; +KW3295 : 'KW3295'; +KW3296 : 'KW3296'; +KW3297 : 'KW3297'; +KW3298 : 'KW3298'; +KW3299 : 'KW3299'; +KW3300 : 'KW3300'; +KW3301 : 'KW3301'; +KW3302 : 'KW3302'; +KW3303 : 'KW3303'; +KW3304 : 'KW3304'; +KW3305 : 'KW3305'; +KW3306 : 'KW3306'; +KW3307 : 'KW3307'; +KW3308 : 'KW3308'; +KW3309 : 'KW3309'; +KW3310 : 'KW3310'; +KW3311 : 'KW3311'; +KW3312 : 'KW3312'; +KW3313 : 'KW3313'; +KW3314 : 'KW3314'; +KW3315 : 'KW3315'; +KW3316 : 'KW3316'; +KW3317 : 'KW3317'; +KW3318 : 'KW3318'; +KW3319 : 'KW3319'; +KW3320 : 'KW3320'; +KW3321 : 'KW3321'; +KW3322 : 'KW3322'; +KW3323 : 'KW3323'; +KW3324 : 'KW3324'; +KW3325 : 'KW3325'; +KW3326 : 'KW3326'; +KW3327 : 'KW3327'; +KW3328 : 'KW3328'; +KW3329 : 'KW3329'; +KW3330 : 'KW3330'; +KW3331 : 'KW3331'; +KW3332 : 'KW3332'; +KW3333 : 'KW3333'; +KW3334 : 'KW3334'; +KW3335 : 'KW3335'; +KW3336 : 'KW3336'; +KW3337 : 'KW3337'; +KW3338 : 'KW3338'; +KW3339 : 'KW3339'; +KW3340 : 'KW3340'; +KW3341 : 'KW3341'; +KW3342 : 'KW3342'; +KW3343 : 'KW3343'; +KW3344 : 'KW3344'; +KW3345 : 'KW3345'; +KW3346 : 'KW3346'; +KW3347 : 'KW3347'; +KW3348 : 'KW3348'; +KW3349 : 'KW3349'; +KW3350 : 'KW3350'; +KW3351 : 'KW3351'; +KW3352 : 'KW3352'; +KW3353 : 'KW3353'; +KW3354 : 'KW3354'; +KW3355 : 'KW3355'; +KW3356 : 'KW3356'; +KW3357 : 'KW3357'; +KW3358 : 'KW3358'; +KW3359 : 'KW3359'; +KW3360 : 'KW3360'; +KW3361 : 'KW3361'; +KW3362 : 
'KW3362'; +KW3363 : 'KW3363'; +KW3364 : 'KW3364'; +KW3365 : 'KW3365'; +KW3366 : 'KW3366'; +KW3367 : 'KW3367'; +KW3368 : 'KW3368'; +KW3369 : 'KW3369'; +KW3370 : 'KW3370'; +KW3371 : 'KW3371'; +KW3372 : 'KW3372'; +KW3373 : 'KW3373'; +KW3374 : 'KW3374'; +KW3375 : 'KW3375'; +KW3376 : 'KW3376'; +KW3377 : 'KW3377'; +KW3378 : 'KW3378'; +KW3379 : 'KW3379'; +KW3380 : 'KW3380'; +KW3381 : 'KW3381'; +KW3382 : 'KW3382'; +KW3383 : 'KW3383'; +KW3384 : 'KW3384'; +KW3385 : 'KW3385'; +KW3386 : 'KW3386'; +KW3387 : 'KW3387'; +KW3388 : 'KW3388'; +KW3389 : 'KW3389'; +KW3390 : 'KW3390'; +KW3391 : 'KW3391'; +KW3392 : 'KW3392'; +KW3393 : 'KW3393'; +KW3394 : 'KW3394'; +KW3395 : 'KW3395'; +KW3396 : 'KW3396'; +KW3397 : 'KW3397'; +KW3398 : 'KW3398'; +KW3399 : 'KW3399'; +KW3400 : 'KW3400'; +KW3401 : 'KW3401'; +KW3402 : 'KW3402'; +KW3403 : 'KW3403'; +KW3404 : 'KW3404'; +KW3405 : 'KW3405'; +KW3406 : 'KW3406'; +KW3407 : 'KW3407'; +KW3408 : 'KW3408'; +KW3409 : 'KW3409'; +KW3410 : 'KW3410'; +KW3411 : 'KW3411'; +KW3412 : 'KW3412'; +KW3413 : 'KW3413'; +KW3414 : 'KW3414'; +KW3415 : 'KW3415'; +KW3416 : 'KW3416'; +KW3417 : 'KW3417'; +KW3418 : 'KW3418'; +KW3419 : 'KW3419'; +KW3420 : 'KW3420'; +KW3421 : 'KW3421'; +KW3422 : 'KW3422'; +KW3423 : 'KW3423'; +KW3424 : 'KW3424'; +KW3425 : 'KW3425'; +KW3426 : 'KW3426'; +KW3427 : 'KW3427'; +KW3428 : 'KW3428'; +KW3429 : 'KW3429'; +KW3430 : 'KW3430'; +KW3431 : 'KW3431'; +KW3432 : 'KW3432'; +KW3433 : 'KW3433'; +KW3434 : 'KW3434'; +KW3435 : 'KW3435'; +KW3436 : 'KW3436'; +KW3437 : 'KW3437'; +KW3438 : 'KW3438'; +KW3439 : 'KW3439'; +KW3440 : 'KW3440'; +KW3441 : 'KW3441'; +KW3442 : 'KW3442'; +KW3443 : 'KW3443'; +KW3444 : 'KW3444'; +KW3445 : 'KW3445'; +KW3446 : 'KW3446'; +KW3447 : 'KW3447'; +KW3448 : 'KW3448'; +KW3449 : 'KW3449'; +KW3450 : 'KW3450'; +KW3451 : 'KW3451'; +KW3452 : 'KW3452'; +KW3453 : 'KW3453'; +KW3454 : 'KW3454'; +KW3455 : 'KW3455'; +KW3456 : 'KW3456'; +KW3457 : 'KW3457'; +KW3458 : 'KW3458'; +KW3459 : 'KW3459'; +KW3460 : 'KW3460'; +KW3461 : 'KW3461'; +KW3462 : 'KW3462'; +KW3463 : 'KW3463'; +KW3464 : 'KW3464'; +KW3465 : 'KW3465'; +KW3466 : 'KW3466'; +KW3467 : 'KW3467'; +KW3468 : 'KW3468'; +KW3469 : 'KW3469'; +KW3470 : 'KW3470'; +KW3471 : 'KW3471'; +KW3472 : 'KW3472'; +KW3473 : 'KW3473'; +KW3474 : 'KW3474'; +KW3475 : 'KW3475'; +KW3476 : 'KW3476'; +KW3477 : 'KW3477'; +KW3478 : 'KW3478'; +KW3479 : 'KW3479'; +KW3480 : 'KW3480'; +KW3481 : 'KW3481'; +KW3482 : 'KW3482'; +KW3483 : 'KW3483'; +KW3484 : 'KW3484'; +KW3485 : 'KW3485'; +KW3486 : 'KW3486'; +KW3487 : 'KW3487'; +KW3488 : 'KW3488'; +KW3489 : 'KW3489'; +KW3490 : 'KW3490'; +KW3491 : 'KW3491'; +KW3492 : 'KW3492'; +KW3493 : 'KW3493'; +KW3494 : 'KW3494'; +KW3495 : 'KW3495'; +KW3496 : 'KW3496'; +KW3497 : 'KW3497'; +KW3498 : 'KW3498'; +KW3499 : 'KW3499'; +KW3500 : 'KW3500'; +KW3501 : 'KW3501'; +KW3502 : 'KW3502'; +KW3503 : 'KW3503'; +KW3504 : 'KW3504'; +KW3505 : 'KW3505'; +KW3506 : 'KW3506'; +KW3507 : 'KW3507'; +KW3508 : 'KW3508'; +KW3509 : 'KW3509'; +KW3510 : 'KW3510'; +KW3511 : 'KW3511'; +KW3512 : 'KW3512'; +KW3513 : 'KW3513'; +KW3514 : 'KW3514'; +KW3515 : 'KW3515'; +KW3516 : 'KW3516'; +KW3517 : 'KW3517'; +KW3518 : 'KW3518'; +KW3519 : 'KW3519'; +KW3520 : 'KW3520'; +KW3521 : 'KW3521'; +KW3522 : 'KW3522'; +KW3523 : 'KW3523'; +KW3524 : 'KW3524'; +KW3525 : 'KW3525'; +KW3526 : 'KW3526'; +KW3527 : 'KW3527'; +KW3528 : 'KW3528'; +KW3529 : 'KW3529'; +KW3530 : 'KW3530'; +KW3531 : 'KW3531'; +KW3532 : 'KW3532'; +KW3533 : 'KW3533'; +KW3534 : 'KW3534'; +KW3535 : 'KW3535'; +KW3536 : 'KW3536'; +KW3537 : 'KW3537'; +KW3538 : 'KW3538'; +KW3539 : 'KW3539'; 
+KW3540 : 'KW3540'; +KW3541 : 'KW3541'; +KW3542 : 'KW3542'; +KW3543 : 'KW3543'; +KW3544 : 'KW3544'; +KW3545 : 'KW3545'; +KW3546 : 'KW3546'; +KW3547 : 'KW3547'; +KW3548 : 'KW3548'; +KW3549 : 'KW3549'; +KW3550 : 'KW3550'; +KW3551 : 'KW3551'; +KW3552 : 'KW3552'; +KW3553 : 'KW3553'; +KW3554 : 'KW3554'; +KW3555 : 'KW3555'; +KW3556 : 'KW3556'; +KW3557 : 'KW3557'; +KW3558 : 'KW3558'; +KW3559 : 'KW3559'; +KW3560 : 'KW3560'; +KW3561 : 'KW3561'; +KW3562 : 'KW3562'; +KW3563 : 'KW3563'; +KW3564 : 'KW3564'; +KW3565 : 'KW3565'; +KW3566 : 'KW3566'; +KW3567 : 'KW3567'; +KW3568 : 'KW3568'; +KW3569 : 'KW3569'; +KW3570 : 'KW3570'; +KW3571 : 'KW3571'; +KW3572 : 'KW3572'; +KW3573 : 'KW3573'; +KW3574 : 'KW3574'; +KW3575 : 'KW3575'; +KW3576 : 'KW3576'; +KW3577 : 'KW3577'; +KW3578 : 'KW3578'; +KW3579 : 'KW3579'; +KW3580 : 'KW3580'; +KW3581 : 'KW3581'; +KW3582 : 'KW3582'; +KW3583 : 'KW3583'; +KW3584 : 'KW3584'; +KW3585 : 'KW3585'; +KW3586 : 'KW3586'; +KW3587 : 'KW3587'; +KW3588 : 'KW3588'; +KW3589 : 'KW3589'; +KW3590 : 'KW3590'; +KW3591 : 'KW3591'; +KW3592 : 'KW3592'; +KW3593 : 'KW3593'; +KW3594 : 'KW3594'; +KW3595 : 'KW3595'; +KW3596 : 'KW3596'; +KW3597 : 'KW3597'; +KW3598 : 'KW3598'; +KW3599 : 'KW3599'; +KW3600 : 'KW3600'; +KW3601 : 'KW3601'; +KW3602 : 'KW3602'; +KW3603 : 'KW3603'; +KW3604 : 'KW3604'; +KW3605 : 'KW3605'; +KW3606 : 'KW3606'; +KW3607 : 'KW3607'; +KW3608 : 'KW3608'; +KW3609 : 'KW3609'; +KW3610 : 'KW3610'; +KW3611 : 'KW3611'; +KW3612 : 'KW3612'; +KW3613 : 'KW3613'; +KW3614 : 'KW3614'; +KW3615 : 'KW3615'; +KW3616 : 'KW3616'; +KW3617 : 'KW3617'; +KW3618 : 'KW3618'; +KW3619 : 'KW3619'; +KW3620 : 'KW3620'; +KW3621 : 'KW3621'; +KW3622 : 'KW3622'; +KW3623 : 'KW3623'; +KW3624 : 'KW3624'; +KW3625 : 'KW3625'; +KW3626 : 'KW3626'; +KW3627 : 'KW3627'; +KW3628 : 'KW3628'; +KW3629 : 'KW3629'; +KW3630 : 'KW3630'; +KW3631 : 'KW3631'; +KW3632 : 'KW3632'; +KW3633 : 'KW3633'; +KW3634 : 'KW3634'; +KW3635 : 'KW3635'; +KW3636 : 'KW3636'; +KW3637 : 'KW3637'; +KW3638 : 'KW3638'; +KW3639 : 'KW3639'; +KW3640 : 'KW3640'; +KW3641 : 'KW3641'; +KW3642 : 'KW3642'; +KW3643 : 'KW3643'; +KW3644 : 'KW3644'; +KW3645 : 'KW3645'; +KW3646 : 'KW3646'; +KW3647 : 'KW3647'; +KW3648 : 'KW3648'; +KW3649 : 'KW3649'; +KW3650 : 'KW3650'; +KW3651 : 'KW3651'; +KW3652 : 'KW3652'; +KW3653 : 'KW3653'; +KW3654 : 'KW3654'; +KW3655 : 'KW3655'; +KW3656 : 'KW3656'; +KW3657 : 'KW3657'; +KW3658 : 'KW3658'; +KW3659 : 'KW3659'; +KW3660 : 'KW3660'; +KW3661 : 'KW3661'; +KW3662 : 'KW3662'; +KW3663 : 'KW3663'; +KW3664 : 'KW3664'; +KW3665 : 'KW3665'; +KW3666 : 'KW3666'; +KW3667 : 'KW3667'; +KW3668 : 'KW3668'; +KW3669 : 'KW3669'; +KW3670 : 'KW3670'; +KW3671 : 'KW3671'; +KW3672 : 'KW3672'; +KW3673 : 'KW3673'; +KW3674 : 'KW3674'; +KW3675 : 'KW3675'; +KW3676 : 'KW3676'; +KW3677 : 'KW3677'; +KW3678 : 'KW3678'; +KW3679 : 'KW3679'; +KW3680 : 'KW3680'; +KW3681 : 'KW3681'; +KW3682 : 'KW3682'; +KW3683 : 'KW3683'; +KW3684 : 'KW3684'; +KW3685 : 'KW3685'; +KW3686 : 'KW3686'; +KW3687 : 'KW3687'; +KW3688 : 'KW3688'; +KW3689 : 'KW3689'; +KW3690 : 'KW3690'; +KW3691 : 'KW3691'; +KW3692 : 'KW3692'; +KW3693 : 'KW3693'; +KW3694 : 'KW3694'; +KW3695 : 'KW3695'; +KW3696 : 'KW3696'; +KW3697 : 'KW3697'; +KW3698 : 'KW3698'; +KW3699 : 'KW3699'; +KW3700 : 'KW3700'; +KW3701 : 'KW3701'; +KW3702 : 'KW3702'; +KW3703 : 'KW3703'; +KW3704 : 'KW3704'; +KW3705 : 'KW3705'; +KW3706 : 'KW3706'; +KW3707 : 'KW3707'; +KW3708 : 'KW3708'; +KW3709 : 'KW3709'; +KW3710 : 'KW3710'; +KW3711 : 'KW3711'; +KW3712 : 'KW3712'; +KW3713 : 'KW3713'; +KW3714 : 'KW3714'; +KW3715 : 'KW3715'; +KW3716 : 'KW3716'; +KW3717 : 
'KW3717'; +KW3718 : 'KW3718'; +KW3719 : 'KW3719'; +KW3720 : 'KW3720'; +KW3721 : 'KW3721'; +KW3722 : 'KW3722'; +KW3723 : 'KW3723'; +KW3724 : 'KW3724'; +KW3725 : 'KW3725'; +KW3726 : 'KW3726'; +KW3727 : 'KW3727'; +KW3728 : 'KW3728'; +KW3729 : 'KW3729'; +KW3730 : 'KW3730'; +KW3731 : 'KW3731'; +KW3732 : 'KW3732'; +KW3733 : 'KW3733'; +KW3734 : 'KW3734'; +KW3735 : 'KW3735'; +KW3736 : 'KW3736'; +KW3737 : 'KW3737'; +KW3738 : 'KW3738'; +KW3739 : 'KW3739'; +KW3740 : 'KW3740'; +KW3741 : 'KW3741'; +KW3742 : 'KW3742'; +KW3743 : 'KW3743'; +KW3744 : 'KW3744'; +KW3745 : 'KW3745'; +KW3746 : 'KW3746'; +KW3747 : 'KW3747'; +KW3748 : 'KW3748'; +KW3749 : 'KW3749'; +KW3750 : 'KW3750'; +KW3751 : 'KW3751'; +KW3752 : 'KW3752'; +KW3753 : 'KW3753'; +KW3754 : 'KW3754'; +KW3755 : 'KW3755'; +KW3756 : 'KW3756'; +KW3757 : 'KW3757'; +KW3758 : 'KW3758'; +KW3759 : 'KW3759'; +KW3760 : 'KW3760'; +KW3761 : 'KW3761'; +KW3762 : 'KW3762'; +KW3763 : 'KW3763'; +KW3764 : 'KW3764'; +KW3765 : 'KW3765'; +KW3766 : 'KW3766'; +KW3767 : 'KW3767'; +KW3768 : 'KW3768'; +KW3769 : 'KW3769'; +KW3770 : 'KW3770'; +KW3771 : 'KW3771'; +KW3772 : 'KW3772'; +KW3773 : 'KW3773'; +KW3774 : 'KW3774'; +KW3775 : 'KW3775'; +KW3776 : 'KW3776'; +KW3777 : 'KW3777'; +KW3778 : 'KW3778'; +KW3779 : 'KW3779'; +KW3780 : 'KW3780'; +KW3781 : 'KW3781'; +KW3782 : 'KW3782'; +KW3783 : 'KW3783'; +KW3784 : 'KW3784'; +KW3785 : 'KW3785'; +KW3786 : 'KW3786'; +KW3787 : 'KW3787'; +KW3788 : 'KW3788'; +KW3789 : 'KW3789'; +KW3790 : 'KW3790'; +KW3791 : 'KW3791'; +KW3792 : 'KW3792'; +KW3793 : 'KW3793'; +KW3794 : 'KW3794'; +KW3795 : 'KW3795'; +KW3796 : 'KW3796'; +KW3797 : 'KW3797'; +KW3798 : 'KW3798'; +KW3799 : 'KW3799'; +KW3800 : 'KW3800'; +KW3801 : 'KW3801'; +KW3802 : 'KW3802'; +KW3803 : 'KW3803'; +KW3804 : 'KW3804'; +KW3805 : 'KW3805'; +KW3806 : 'KW3806'; +KW3807 : 'KW3807'; +KW3808 : 'KW3808'; +KW3809 : 'KW3809'; +KW3810 : 'KW3810'; +KW3811 : 'KW3811'; +KW3812 : 'KW3812'; +KW3813 : 'KW3813'; +KW3814 : 'KW3814'; +KW3815 : 'KW3815'; +KW3816 : 'KW3816'; +KW3817 : 'KW3817'; +KW3818 : 'KW3818'; +KW3819 : 'KW3819'; +KW3820 : 'KW3820'; +KW3821 : 'KW3821'; +KW3822 : 'KW3822'; +KW3823 : 'KW3823'; +KW3824 : 'KW3824'; +KW3825 : 'KW3825'; +KW3826 : 'KW3826'; +KW3827 : 'KW3827'; +KW3828 : 'KW3828'; +KW3829 : 'KW3829'; +KW3830 : 'KW3830'; +KW3831 : 'KW3831'; +KW3832 : 'KW3832'; +KW3833 : 'KW3833'; +KW3834 : 'KW3834'; +KW3835 : 'KW3835'; +KW3836 : 'KW3836'; +KW3837 : 'KW3837'; +KW3838 : 'KW3838'; +KW3839 : 'KW3839'; +KW3840 : 'KW3840'; +KW3841 : 'KW3841'; +KW3842 : 'KW3842'; +KW3843 : 'KW3843'; +KW3844 : 'KW3844'; +KW3845 : 'KW3845'; +KW3846 : 'KW3846'; +KW3847 : 'KW3847'; +KW3848 : 'KW3848'; +KW3849 : 'KW3849'; +KW3850 : 'KW3850'; +KW3851 : 'KW3851'; +KW3852 : 'KW3852'; +KW3853 : 'KW3853'; +KW3854 : 'KW3854'; +KW3855 : 'KW3855'; +KW3856 : 'KW3856'; +KW3857 : 'KW3857'; +KW3858 : 'KW3858'; +KW3859 : 'KW3859'; +KW3860 : 'KW3860'; +KW3861 : 'KW3861'; +KW3862 : 'KW3862'; +KW3863 : 'KW3863'; +KW3864 : 'KW3864'; +KW3865 : 'KW3865'; +KW3866 : 'KW3866'; +KW3867 : 'KW3867'; +KW3868 : 'KW3868'; +KW3869 : 'KW3869'; +KW3870 : 'KW3870'; +KW3871 : 'KW3871'; +KW3872 : 'KW3872'; +KW3873 : 'KW3873'; +KW3874 : 'KW3874'; +KW3875 : 'KW3875'; +KW3876 : 'KW3876'; +KW3877 : 'KW3877'; +KW3878 : 'KW3878'; +KW3879 : 'KW3879'; +KW3880 : 'KW3880'; +KW3881 : 'KW3881'; +KW3882 : 'KW3882'; +KW3883 : 'KW3883'; +KW3884 : 'KW3884'; +KW3885 : 'KW3885'; +KW3886 : 'KW3886'; +KW3887 : 'KW3887'; +KW3888 : 'KW3888'; +KW3889 : 'KW3889'; +KW3890 : 'KW3890'; +KW3891 : 'KW3891'; +KW3892 : 'KW3892'; +KW3893 : 'KW3893'; +KW3894 : 'KW3894'; 
+KW3895 : 'KW3895'; +KW3896 : 'KW3896'; +KW3897 : 'KW3897'; +KW3898 : 'KW3898'; +KW3899 : 'KW3899'; +KW3900 : 'KW3900'; +KW3901 : 'KW3901'; +KW3902 : 'KW3902'; +KW3903 : 'KW3903'; +KW3904 : 'KW3904'; +KW3905 : 'KW3905'; +KW3906 : 'KW3906'; +KW3907 : 'KW3907'; +KW3908 : 'KW3908'; +KW3909 : 'KW3909'; +KW3910 : 'KW3910'; +KW3911 : 'KW3911'; +KW3912 : 'KW3912'; +KW3913 : 'KW3913'; +KW3914 : 'KW3914'; +KW3915 : 'KW3915'; +KW3916 : 'KW3916'; +KW3917 : 'KW3917'; +KW3918 : 'KW3918'; +KW3919 : 'KW3919'; +KW3920 : 'KW3920'; +KW3921 : 'KW3921'; +KW3922 : 'KW3922'; +KW3923 : 'KW3923'; +KW3924 : 'KW3924'; +KW3925 : 'KW3925'; +KW3926 : 'KW3926'; +KW3927 : 'KW3927'; +KW3928 : 'KW3928'; +KW3929 : 'KW3929'; +KW3930 : 'KW3930'; +KW3931 : 'KW3931'; +KW3932 : 'KW3932'; +KW3933 : 'KW3933'; +KW3934 : 'KW3934'; +KW3935 : 'KW3935'; +KW3936 : 'KW3936'; +KW3937 : 'KW3937'; +KW3938 : 'KW3938'; +KW3939 : 'KW3939'; +KW3940 : 'KW3940'; +KW3941 : 'KW3941'; +KW3942 : 'KW3942'; +KW3943 : 'KW3943'; +KW3944 : 'KW3944'; +KW3945 : 'KW3945'; +KW3946 : 'KW3946'; +KW3947 : 'KW3947'; +KW3948 : 'KW3948'; +KW3949 : 'KW3949'; +KW3950 : 'KW3950'; +KW3951 : 'KW3951'; +KW3952 : 'KW3952'; +KW3953 : 'KW3953'; +KW3954 : 'KW3954'; +KW3955 : 'KW3955'; +KW3956 : 'KW3956'; +KW3957 : 'KW3957'; +KW3958 : 'KW3958'; +KW3959 : 'KW3959'; +KW3960 : 'KW3960'; +KW3961 : 'KW3961'; +KW3962 : 'KW3962'; +KW3963 : 'KW3963'; +KW3964 : 'KW3964'; +KW3965 : 'KW3965'; +KW3966 : 'KW3966'; +KW3967 : 'KW3967'; +KW3968 : 'KW3968'; +KW3969 : 'KW3969'; +KW3970 : 'KW3970'; +KW3971 : 'KW3971'; +KW3972 : 'KW3972'; +KW3973 : 'KW3973'; +KW3974 : 'KW3974'; +KW3975 : 'KW3975'; +KW3976 : 'KW3976'; +KW3977 : 'KW3977'; +KW3978 : 'KW3978'; +KW3979 : 'KW3979'; +KW3980 : 'KW3980'; +KW3981 : 'KW3981'; +KW3982 : 'KW3982'; +KW3983 : 'KW3983'; +KW3984 : 'KW3984'; +KW3985 : 'KW3985'; +KW3986 : 'KW3986'; +KW3987 : 'KW3987'; +KW3988 : 'KW3988'; +KW3989 : 'KW3989'; +KW3990 : 'KW3990'; +KW3991 : 'KW3991'; +KW3992 : 'KW3992'; +KW3993 : 'KW3993'; +KW3994 : 'KW3994'; +KW3995 : 'KW3995'; +KW3996 : 'KW3996'; +KW3997 : 'KW3997'; +KW3998 : 'KW3998'; +KW3999 : 'KW3999'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyClosure.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyClosure.st new file mode 100644 index 000000000..52044892e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyClosure.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : '//' .*? '\n' CMT*?; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st new file mode 100644 index 000000000..2bf9aa18e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st @@ -0,0 +1,4 @@ +lexer grammar ; +I : .*? ('a' | 'ab') {} ; +WS : (' '|'\n') -> skip ; +J : . {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyOptional.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyOptional.st new file mode 100644 index 000000000..1e7ecad97 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyOptional.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : '//' .*? 
'\n' CMT??; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyPositiveClosure.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyPositiveClosure.st new file mode 100644 index 000000000..a0de047f8 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyPositiveClosure.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : ('//' .*? '\n')+?; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st new file mode 100644 index 000000000..616d0d4d1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st @@ -0,0 +1,2 @@ +lexer grammar ; +STRING : '\"' ('\"\"' | .)*? '\"'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Parentheses.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Parentheses.st new file mode 100644 index 000000000..d280a188a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Parentheses.st @@ -0,0 +1,7 @@ +lexer grammar ; +START_BLOCK: '-.-.-'; +ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+; +fragment LETTER: L_A|L_K; +fragment L_A: '.-'; +fragment L_K: '-.-'; +SEPARATOR: '!'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/PositionAdjustingLexer.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/PositionAdjustingLexer.st new file mode 100644 index 000000000..aa4f642bd --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/PositionAdjustingLexer.st @@ -0,0 +1,34 @@ +lexer grammar PositionAdjustingLexer; + +@members { + +} + +ASSIGN : '=' ; +PLUS_ASSIGN : '+=' ; +LCURLY: '{'; + +// 'tokens' followed by '{' +TOKENS : 'tokens' IGNORED '{'; + +// IDENTIFIER followed by '+=' or '=' +LABEL + : IDENTIFIER IGNORED '+'? '=' + ; + +IDENTIFIER + : [a-zA-Z_] [a-zA-Z0-9_]* + ; + +fragment +IGNORED + : [ \t\r\n]* + ; + +NEWLINE + : [\r\n]+ -> skip + ; + +WS + : [ \t]+ -> skip + ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/QuoteTranslation.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/QuoteTranslation.st new file mode 100644 index 000000000..77e4b5c1a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/QuoteTranslation.st @@ -0,0 +1,2 @@ +lexer grammar ; +QUOTE : '"' ; // make sure this compiles diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST new file mode 100644 index 000000000..c6fece229 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : '/*' (CMT | .)+? '*/' ; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st new file mode 100644 index 000000000..5f9b7c9bb --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st @@ -0,0 +1,3 @@ +lexer grammar ; +CMT : '/*' (CMT | .)*? 
'*/' ; +WS : (' '|'\t')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st new file mode 100644 index 000000000..4bb5beb33 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st @@ -0,0 +1,4 @@ +lexer grammar ; +A : '-' I ; +I : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Slashes.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Slashes.st new file mode 100644 index 000000000..457e0dbc9 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Slashes.st @@ -0,0 +1,6 @@ +lexer grammar ; +Backslash : '\\\\'; +Slash : '/'; +Vee : '\\\\/'; +Wedge : '/\\\\'; +WS : [ \t] -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/APlus.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/APlus.st new file mode 100644 index 000000000..24e7d443d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/APlus.st @@ -0,0 +1,6 @@ +grammar ; +a : ID+ { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AStar.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AStar.st new file mode 100644 index 000000000..cd360a422 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AStar.st @@ -0,0 +1,6 @@ +grammar ; +a : ID* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAPlus.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAPlus.st new file mode 100644 index 000000000..abecc96ce --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAPlus.st @@ -0,0 +1,6 @@ +grammar ; +a : (ID|ID)+ { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAStar.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAStar.st new file mode 100644 index 000000000..dca57fd81 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAStar.st @@ -0,0 +1,6 @@ +grammar ; +a : (ID|ID)* { + +}; +ID : 'a'..'z'+; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorB.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorB.st new file mode 100644 index 000000000..73291af32 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorB.st @@ -0,0 +1,9 @@ +grammar ; +a : ID { + +} | INT { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBPlus.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBPlus.st new file mode 100644 index 000000000..99636d8b1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBPlus.st @@ -0,0 +1,8 @@ +grammar ; +a : (ID|INT{ +})+ { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBStar.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBStar.st new file mode 100644 index 000000000..24376c2e4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBStar.st @@ -0,0 +1,8 @@ +grammar ; +a : (ID|INT{ +})* { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Basic.st 
b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Basic.st new file mode 100644 index 000000000..51c474cea --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Basic.st @@ -0,0 +1,7 @@ +grammar ; +a : ID INT { + +}; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding1.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding1.st new file mode 100644 index 000000000..c0cb51e80 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding1.st @@ -0,0 +1,8 @@ +grammar ; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement ('else' statement)? { + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding2.st new file mode 100644 index 000000000..fd8a7d653 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding2.st @@ -0,0 +1,8 @@ +grammar ; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement ('else' statement|) { + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st new file mode 100644 index 000000000..2953acb57 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st @@ -0,0 +1,8 @@ +grammar ; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement ('else' statement)?? { + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st new file mode 100644 index 000000000..561d52271 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st @@ -0,0 +1,8 @@ +grammar ; +start : statement+ ; +statement : 'x' | ifStatement; +ifStatement : 'if' 'y' statement (|'else' statement) { + +}; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> channel(HIDDEN); diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LL1OptionalBlock.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LL1OptionalBlock.st new file mode 100644 index 000000000..31bec9d98 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LL1OptionalBlock.st @@ -0,0 +1,7 @@ +grammar ; +a : (ID|{}INT)? 
{ + +}; +ID : 'a'..'z'+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st new file mode 100644 index 000000000..6074d12bf --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st @@ -0,0 +1,8 @@ +grammar ; +start : a* EOF; +a + : label=subrule { } #One + | label='y' { } #Two + ; +subrule : 'x'; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Labels.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Labels.st new file mode 100644 index 000000000..28925ca96 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Labels.st @@ -0,0 +1,6 @@ +grammar ; +a : b1=b b2+=b* b3+=';' ; +b : id_=ID val+=INT*; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st new file mode 100644 index 000000000..ea11ca391 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st @@ -0,0 +1,20 @@ +grammar ; +ifStatement +@after { +items = $ctx.elseIfStatement() +} + : 'if' expression + ( ( 'then' + executableStatement* + elseIfStatement* // \<--- problem is here + elseStatement? + 'end' 'if' + ) | executableStatement ) + ; + +elseIfStatement + : 'else' 'if' expression 'then' executableStatement* + ; +expression : 'a' ; +executableStatement : 'a' ; +elseStatement : 'a' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelsOnSet.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelsOnSet.st new file mode 100644 index 000000000..7f666f8f9 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelsOnSet.st @@ -0,0 +1,7 @@ +grammar ; +a : b b* ';' ; +b : ID val+=(INT | FLOAT)*; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +FLOAT : [0-9]+ '.' [0-9]+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Optional.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Optional.st new file mode 100644 index 000000000..408a53447 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Optional.st @@ -0,0 +1,4 @@ +grammar ; +stat : ifstat | 'x'; +ifstat : 'if' stat ('else' stat)?; +WS : [ \n\t]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st new file mode 100644 index 000000000..9f6910f9d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st @@ -0,0 +1,7 @@ +grammar ; +s : stmt EOF ; +stmt : ifStmt | ID; +ifStmt : 'if' ID stmt ('else' stmt | { }?); +ELSE : 'else'; +ID : [a-zA-Z]+; +WS : [ \\n\\t]+ -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredictionIssue334.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredictionIssue334.st new file mode 100644 index 000000000..56190fd61 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredictionIssue334.st @@ -0,0 +1,14 @@ +grammar ; +file_ @init{ + +} +@after { + +} + : item (SEMICOLON item)* SEMICOLON? 
EOF ; +item : A B?; +SEMICOLON: ';'; +A : 'a'|'A'; +B : 'b'|'B'; +WS : [ \r\t\n]+ -> skip; + diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st new file mode 100644 index 000000000..23b2ae4ea --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st @@ -0,0 +1,6 @@ +grammar ; +s @after { } + : ID | ID INT ID ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\t'|'\n') -> skip; From 4df40e58e2f2baec75d01b29c1632664f063e8d3 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Wed, 15 Oct 2014 22:16:24 +0800 Subject: [PATCH 02/26] progress --- .../v4/testgen/AbstractParserTestMethod.java | 12 + .../v4/testgen/CompositeParserTestMethod.java | 2 +- .../v4/testgen/ConcreteParserTestMethod.java | 25 ++ tool/test/org/antlr/v4/testgen/Generator.java | 315 ++++++++++++++---- .../antlr/v4/testgen/ParserTestMethod.java | 4 +- tool/test/org/antlr/v4/testgen/TestFile.java | 41 ++- .../testgen/grammars/LeftRecursion/AmbigLR.st | 21 ++ .../grammars/LeftRecursion/Declarations.st | 14 + .../DirectCallToLeftRecursiveRule.st | 6 + .../grammars/LeftRecursion/Expressions.st | 13 + .../grammars/LeftRecursion/JavaExpressions.st | 56 ++++ .../LeftRecursion/LabelsOnOpSubrule.st | 8 + .../MultipleAlternativesWithCommonLabel.st | 16 + .../PrecedenceFilterConsidersContext.st | 6 + .../PrefixOpWithActionAndLabel.st | 11 + .../LeftRecursion/ReturnValueAndActions.st | 11 + .../ReturnValueAndActionsAndLabels.st | 14 + .../testgen/grammars/LeftRecursion/SemPred.st | 7 + .../testgen/grammars/LeftRecursion/Simple.st | 7 + .../grammars/LeftRecursion/TernaryExpr.st | 10 + .../LeftRecursion/WhitespaceInfluence.st | 49 +++ .../LexerErrors/DFAToATNThatFailsBackToDFA.st | 3 + .../DFAToATNThatMatchesThenFailsInATN.st | 4 + .../EnforcedGreedyNestedBrances.st | 3 + .../grammars/LexerErrors/ErrorInMiddle.st | 2 + .../LexerErrors/InvalidCharAtStart.st | 2 + .../InvalidCharAtStartAfterDFACache.st | 2 + .../LexerErrors/InvalidCharInToken.st | 2 + .../InvalidCharInTokenAfterDFACache.st | 2 + .../grammars/LexerErrors/LexerExecDFA.st | 6 + .../LexerErrors/StringsEmbeddedInActions.st | 4 + 31 files changed, 601 insertions(+), 77 deletions(-) create mode 100644 tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/ConcreteParserTestMethod.java create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/AmbigLR.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Declarations.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Expressions.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/JavaExpressions.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/LabelsOnOpSubrule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActions.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st create mode 100644 
tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Simple.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/TernaryExpr.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/WhitespaceInfluence.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/ErrorInMiddle.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStart.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInToken.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/LexerExecDFA.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerErrors/StringsEmbeddedInActions.st diff --git a/tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java b/tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java new file mode 100644 index 000000000..1ad771843 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java @@ -0,0 +1,12 @@ +package org.antlr.v4.testgen; + +public class AbstractParserTestMethod extends TestMethod { + + public String startRule; + + public AbstractParserTestMethod(String name, String grammarName, String startRule) { + super(name, grammarName, null, null, null, null); + this.startRule = startRule; + } + +} diff --git a/tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java b/tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java index 2329012ae..8264aba7f 100644 --- a/tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java +++ b/tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java @@ -11,7 +11,7 @@ public class CompositeParserTestMethod extends ParserTestMethod { public CompositeParserTestMethod(String name, String grammarName, String startRule, String input, String expectedOutput, String expectedErrors, String ... 
slaves) { - super(name, grammarName, startRule, input, expectedOutput, expectedErrors, null); + super(name, grammarName, startRule, input, expectedOutput, expectedErrors); this.slaveGrammars = new Grammar[slaves.length]; for(int i=0;i,1:1]\n", + "line 1:0 token recognition error at: 'x'\n"); + file.addLexerTest(input, "StringsEmbeddedInActions", "L", + "[\"foo\"]", + "[@0,0:6='[\"foo\"]',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", + null, 1); + file.addLexerTest(input, "StringsEmbeddedInActions", "L", + "[\"foo]", + "[@0,6:5='',<-1>,1:6]\n", + "line 1:0 token recognition error at: '[\"foo]'\n", + 2); + file.addLexerTest(input, "EnforcedGreedyNestedBrances", "L", + "{ { } }", + "[@0,0:6='{ { } }',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", + null, 1); + file.addLexerTest(input, "EnforcedGreedyNestedBrances", "L", + "{ { }", + "[@0,5:4='',<-1>,1:5]\n", + "line 1:0 token recognition error at: '{ { }'\n", + 2); + file.addLexerTest(input, "InvalidCharAtStartAfterDFACache", "L", + "abx", + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", + "line 1:2 token recognition error at: 'x'\n"); + file.addLexerTest(input, "InvalidCharInToken", "L", + "ax", + "[@0,2:1='',<-1>,1:2]\n", + "line 1:0 token recognition error at: 'ax'\n"); + file.addLexerTest(input, "InvalidCharInTokenAfterDFACache", "L", + "abax", + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,4:3='',<-1>,1:4]\n", + "line 1:2 token recognition error at: 'ax'\n"); + // The first ab caches the DFA then abx goes through the DFA but + // into the ATN for the x, which fails. Must go back into DFA + // and return to previous dfa accept state + file.addLexerTest(input, "DFAToATNThatFailsBackToDFA", "L", + "ababx", + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:3='ab',<1>,1:2]\n" + + "[@2,5:4='',<-1>,1:5]\n", + "line 1:4 token recognition error at: 'x'\n"); + // The first ab caches the DFA then abx goes through the DFA but + // into the ATN for the c. It marks that hasn't except state + // and then keeps going in the ATN. It fails on the x, but + // uses the previous accepted in the ATN not DFA + file.addLexerTest(input, "DFAToATNThatMatchesThenFailsInATN", "L", + "ababcx", + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:4='abc',<2>,1:2]\n" + + "[@2,6:5='',<-1>,1:6]\n", + "line 1:5 token recognition error at: 'x'\n"); + file.addLexerTest(input, "ErrorInMiddle", "L", + "abx", + "[@0,3:2='',<-1>,1:3]\n", + "line 1:0 token recognition error at: 'abx'\n"); + file.addLexerTest(input, "LexerExecDFA", "L", + "x : x", + "[@0,0:0='x',<3>,1:0]\n" + + "[@1,2:2=':',<2>,1:2]\n" + + "[@2,4:4='x',<3>,1:4]\n" + + "[@3,5:4='',<-1>,1:5]\n", + "line 1:1 token recognition error at: ' '\n" + + "line 1:3 token recognition error at: ' '\n"); + return file; + } + + private TestFile buildLeftRecursion() throws Exception { + TestFile file = new TestFile("LeftRecursion"); + file.addParserTests(input, "Simple", "T", "s", + "x", "(s (a x))\n", + "x y", "(s (a (a x) y))\n", + "x y z", "(s (a (a (a x) y) z))\n"); + file.addParserTests(input, "DirectCallToLeftRecursiveRule", "T", "a", + "x", "(a x)\n", + "x y", "(a (a x) y)\n", + "x y z", "(a (a (a x) y) z)\n"); + file.addParserTest(input, "SemPred", "T", "s", "x y z", + "(s (a (a (a x) y) z))\n", null); + file.addParserTests(input, "TernaryExpr", "T", "s", + "a", "(s (e a) )", + "a+b", "(s (e (e a) + (e b)) )", + "a*b", "(s (e (e a) * (e b)) )", + "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", + "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", + "a?b=c:d", "(s (e (e a) ? 
(e (e b) = (e c)) : (e d)) )", + "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", + "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )"); + file.addParserTests(input, "Expressions", "T", "s", + "a", "(s (e a) )", + "1", "(s (e 1) )", + "a-1", "(s (e (e a) - (e 1)) )", + "a.b", "(s (e (e a) . b) )", + "a.this", "(s (e (e a) . this) )", + "-a", "(s (e - (e a)) )", + "-a+b", "(s (e (e - (e a)) + (e b)) )"); + file.addParserTests(input, "JavaExpressions", "T", "s", + "a|b&c", "(s (e (e a) | (e (e b) & (e c))) )", + "(a|b)&c", "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", + "a > b", "(s (e (e a) > (e b)) )", + "a >> b", "(s (e (e a) >> (e b)) )", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", + "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )", + "(T)x", "(s (e ( (type T) ) (e x)) )", + "new A().b", "(s (e (e new (type A) ( )) . b) )", + "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", + "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", + "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )", + "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )"); + file.addParserTests(input, "Declarations", "T", "s", + "a", "(s (declarator a) )", + "*a", "(s (declarator * (declarator a)) )", + "**a", "(s (declarator * (declarator * (declarator a))) )", + "a[3]", "(s (declarator (declarator a) [ (e 3) ]) )", + "b[]", "(s (declarator (declarator b) [ ]) )", + "(a)", "(s (declarator ( (declarator a) )) )", + "a[]()", "(s (declarator (declarator (declarator a) [ ]) ( )) )", + "a[][]", "(s (declarator (declarator (declarator a) [ ]) [ ]) )", + "*a[]", "(s (declarator * (declarator (declarator a) [ ])) )", + "(*a)[]", "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )"); + file.addParserTests(input, "ReturnValueAndActions", "T", "s", + "4", "4", + "1+2", "3", + "1+2*3", "7", + "(1+2)*3", "9"); + file.addParserTests(input, "LabelsOnOpSubrule", "T", "s", + "4", "(s (e 4))", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))"); + file.addParserTests(input, "ReturnValueAndActionsAndLabels", "T", "s", + "4", "4", + "1+2", "3", + "1+2*3", "7", + "i++*3", "12"); + /** + * This is a regression test for antlr/antlr4#433 "Not all context accessor + * methods are generated when an alternative rule label is used for multiple + * alternatives". + * https://github.com/antlr/antlr4/issues/433 + */ + file.addParserTests(input, "MultipleAlternativesWithCommonLabel", "T", "s", + "4", "4", + "1+2", "3", + "1+2*3", "7", + "i++*3", "12"); + file.addParserTests(input, "PrefixOpWithActionAndLabel", "T", "s", + "a", "a", + "a+b", "(a+b)", + "a=b+c", "((a=b)+c)"); + file.addParserTests(input, "AmbigLR", "Expr", "prog", + "1\n", "", + "a = 5\n", "", + "b = 6\n", "", + "a+b*2\n", "", + "(1+2)*3\n", ""); + /** + * This is a regression test for #239 "recoursive parser using implicit + * tokens ignore white space lexer rule". + * https://github.com/antlr/antlr4/issues/239 + */ + file.addParserTests(input, "WhitespaceInfluence", "Expr", "prog", + "Test(1,3)", "", + "Test(1, 3)", ""); + /** + * This is a regression test for antlr/antlr4#509 "Incorrect rule chosen in + * unambiguous grammar". 
+ * https://github.com/antlr/antlr4/issues/509 + */ + file.addParserTest(input, "PrecedenceFilterConsidersContext", "T", "prog", + "aa", + "(prog (statement (letterA a)) (statement (letterA a)) )\n", null); + return file; + } + private TestFile buildFullContextParsing() throws Exception { TestFile file = new TestFile("FullContextParsing"); file.addParserTest(input, "AmbigYieldsCtxSensitiveDFA", "T", "s", "abc", "Decision 0:\n" + "s0-ID->:s1^=>1\n", - "line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", null); - file.addParserTest(input, "CtxSensitiveDFA", "T", "s", "$ 34 abc", + "line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n"); + file.addParserTestsWithErrors(input, "CtxSensitiveDFA", "T", "s", + "$ 34 abc", "Decision 1:\n" + "s0-INT->s1\n" + "s1-ID->:s2^=>1\n", "line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + - "line 1:2 reportContextSensitivity d=1 (e), input='34'\n", 1); - file.addParserTest(input, "CtxSensitiveDFA", "T", "s", "@ 34 abc", + "line 1:2 reportContextSensitivity d=1 (e), input='34'\n", + "@ 34 abc", "Decision 1:\n" + "s0-INT->s1\n" + "s1-ID->:s2^=>1\n", "line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + - "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", 2); + "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n"); file.addParserTest(input, "CtxSensitiveDFATwoDiffInput", "T", "s", "$ 34 abc @ 34 abc", "Decision 2:\n" + @@ -140,86 +329,81 @@ public class Generator { "line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" + "line 1:2 reportContextSensitivity d=2 (e), input='34'\n" + "line 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\n" + - "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", null); + "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n"); file.addParserTest(input, "SLLSeesEOFInLLGrammar", "T", "s", "34 abc", "Decision 0:\n" + "s0-INT->s1\n" + "s1-ID->:s2^=>1\n", "line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" + - "line 1:0 reportContextSensitivity d=0 (e), input='34'\n", null); - file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", - "{ if x then return }", + "line 1:0 reportContextSensitivity d=0 (e), input='34'\n"); + file.addParserTestsWithErrors(input, "FullContextIF_THEN_ELSEParse", "T", "s", + "{ if x then return }", "Decision 1:\n" + - "s0-'}'->:s1=>2\n", null, 1); - file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", - "{ if x then return else foo }", + "s0-'}'->:s1=>2\n", + null, + "{ if x then return else foo }", "Decision 1:\n" + "s0-'else'->:s1^=>1\n", - "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", 2); - file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", - "{ if x then if y then return else foo }", + "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", + "{ if x then if y then return else foo }", "Decision 1:\n" + "s0-'else'->:s1^=>1\n" + "s0-'}'->:s2=>2\n", - "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 3); - // should not be ambiguous because the second 'else bar' clearly - // indicates that the first else should match to the innermost if. 
- // LL_EXACT_AMBIG_DETECTION makes us keep going to resolve - file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", - "{ if x then if y then return else foo else bar }", + "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", + // should not be ambiguous because the second 'else bar' clearly + // indicates that the first else should match to the innermost if. + // LL_EXACT_AMBIG_DETECTION makes us keep going to resolve + "{ if x then if y then return else foo else bar }", "Decision 1:\n" + "s0-'else'->:s1^=>1\n", - "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" + - "line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", 4); - file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", - "{ if x then return else foo\n" + + "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" + + "line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", + "{ if x then return else foo\n" + "if x then if y then return else foo }", "Decision 1:\n" + "s0-'else'->:s1^=>1\n" + "s0-'}'->:s2=>2\n", - "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + - "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 5); - file.addParserTest(input, "FullContextIF_THEN_ELSEParse", "T", "s", - "{ if x then return else foo\n" + + "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", + "{ if x then return else foo\n" + "if x then if y then return else foo }", "Decision 1:\n" + "s0-'else'->:s1^=>1\n" + "s0-'}'->:s2=>2\n", - "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + - "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", 6); + "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n"); file.addParserTest(input, "LoopsSimulateTailRecursion", "T", "prog", "a(i)<-x", "pass: a(i)<-x\n", "line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" + - "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n", null); + "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n"); file.addParserTest(input, "AmbiguityNoLoop", "T", "prog", "a@", "alt 1\n", "line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" + "line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" + "line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), 
input='a@'\n", null); - file.addParserTest(input, "ExprAmbiguity", "T", "s", - "a+b", + "line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n"); + file.addParserTestsWithErrors(input, "ExprAmbiguity", "T", "s", + "a+b", "(expr a + (expr b))\n", - "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n", 1); - file.addParserTest(input, "ExprAmbiguity", "T", "s", - "a+b*c", + "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n", + "a+b*c", "(expr a + (expr b * (expr c)))\n", - "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + - "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + - "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n", 2); + "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + + "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + + "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n"); return file; } @@ -513,19 +697,24 @@ public class Generator { "if y if y x else x", "if y x\nif y if y x else x\n", null); file.addParserTest(input, "IfIfElseNonGreedyBinding2", "T", "start", "if y if y x else x", "if y x\nif y if y x else x\n", null); - file.addParserTest(input, "AStar", "T", "a", "", "\n", null, 1); - file.addParserTest(input, "AStar", "T", "a", "a b c", "abc\n", null, 2); - file.addParserTest(input, "LL1OptionalBlock", "T", "a", "", "\n", null, 1); - file.addParserTest(input, "LL1OptionalBlock", "T", "a", "a", "a\n", null, 2); - file.addParserTest(input, "AorAStar", "T", "a", "", "\n", null, 1); - file.addParserTest(input, "AorAStar", "T", "a", "a b c", "abc\n", null, 2); + file.addParserTests(input, "AStar", "T", "a", + "", "\n", + "a b c", "abc\n"); + file.addParserTests(input, "LL1OptionalBlock", "T", "a", + "", "\n", + "a", "a\n"); + file.addParserTests(input, "AorAStar", "T", "a", + "", "\n", + "a b c", "abc\n"); file.addParserTest(input, "AorBPlus", "T", "a", "a 34 c", "a34c\n", null); - file.addParserTest(input, "AorBStar", "T", "a", "", "\n", null, 1); - file.addParserTest(input, "AorBStar", "T", "a", "a 34 c", "a34c\n", null, 2); - file.addParserTest(input, "Optional", "T", "stat", "x", "", null, 1); - file.addParserTest(input, "Optional", "T", "stat", "if x", "", null, 2); - file.addParserTest(input, "Optional", "T", "stat", "if x else x", "", null, 3); - file.addParserTest(input, "Optional", "T", "stat", "if if x else x", "", null, 4); + file.addParserTests(input, "AorBStar", "T", "a", + "", "\n", + "a 34 c", "a34c\n"); + file.addParserTests(input, "Optional", "T", "stat", + "x", "", + "if x", "", + "if x else x", "", + "if if x else x", ""); file.addParserTest(input, "PredicatedIfIfElse", "T", "s", "if x if x a else b", "", null); /* file.addTest(input, "StartRuleWithoutEOF", "T", "s", "abc 34", "Decision 0:\n" + "s0-ID->s1\n" + "s1-INT->s2\n" + "s2-EOF->:s3=>1\n", null); */ diff --git a/tool/test/org/antlr/v4/testgen/ParserTestMethod.java b/tool/test/org/antlr/v4/testgen/ParserTestMethod.java index 261bd8230..d3ee5fd07 100644 --- a/tool/test/org/antlr/v4/testgen/ParserTestMethod.java +++ b/tool/test/org/antlr/v4/testgen/ParserTestMethod.java @@ -5,8 +5,8 @@ public class ParserTestMethod extends TestMethod { public String startRule; public ParserTestMethod(String name, String grammarName, String 
startRule, - String input, String expectedOutput, String expectedErrors, Integer index) { - super(name, grammarName, input, expectedOutput, expectedErrors, index); + String input, String expectedOutput, String expectedErrors) { + super(name, grammarName, input, expectedOutput, expectedErrors, null); this.startRule = startRule; } diff --git a/tool/test/org/antlr/v4/testgen/TestFile.java b/tool/test/org/antlr/v4/testgen/TestFile.java index 552127a9d..2143a753c 100644 --- a/tool/test/org/antlr/v4/testgen/TestFile.java +++ b/tool/test/org/antlr/v4/testgen/TestFile.java @@ -23,18 +23,41 @@ public class TestFile { return name; } - public void addParserTest(File grammarDir, String name, String grammarName, String methodName, - String input, String expectedOutput, String expectedErrors) throws Exception { - addParserTest( grammarDir, name, grammarName, methodName, input, expectedOutput, expectedErrors, null); - } - public ParserTestMethod addParserTest(File grammarDir, String name, String grammarName, String methodName, - String input, String expectedOutput, String expectedErrors, Integer index) throws Exception { - ParserTestMethod tm = new ParserTestMethod(name, grammarName, methodName, input, expectedOutput, expectedErrors, index); + String input, String expectedOutput, String expectedErrors) throws Exception { + ParserTestMethod tm = new ParserTestMethod(name, grammarName, methodName, input, expectedOutput, expectedErrors); tm.loadGrammars(grammarDir, this.name); unitTests.add(tm); return tm; } + + public AbstractParserTestMethod addParserTests(File grammarDir, String name, String grammarName, String methodName, + String ... inputsAndOuputs) throws Exception { + AbstractParserTestMethod tm = new AbstractParserTestMethod(name, grammarName, methodName); + tm.loadGrammars(grammarDir, this.name); + unitTests.add(tm); + for(int i=0; i; +prog: stat ; +stat: expr NEWLINE # printExpr + | ID '=' expr NEWLINE # assign + | NEWLINE # blank + ; +expr: expr ('*'|'/') expr # MulDiv + | expr ('+'|'-') expr # AddSub + | INT # int + | ID # id + | '(' expr ')' # parens + ; + +MUL : '*' ; // assigns token name to '*' used above in grammar +DIV : '/' ; +ADD : '+' ; +SUB : '-' ; +ID : [a-zA-Z]+ ; // match identifiers +INT : [0-9]+ ; // match integers +NEWLINE:'\r'? 
'\n' ; // return newlines to parser (is end-statement signal) +WS : [ \t]+ -> skip ; // toss out whitespace diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Declarations.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Declarations.st new file mode 100644 index 000000000..41db93b3b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Declarations.st @@ -0,0 +1,14 @@ +grammar ; +s @after {} : declarator EOF ; // must indicate EOF can follow +declarator + : declarator '[' e ']' + | declarator '[' ']' + | declarator '(' ')' + | '*' declarator // binds less tight than suffixes + | '(' declarator ')' + | ID + ; +e : INT ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st new file mode 100644 index 000000000..b41efcc64 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st @@ -0,0 +1,6 @@ +grammar ; +a @after {} : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Expressions.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Expressions.st new file mode 100644 index 000000000..6767d6dc4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Expressions.st @@ -0,0 +1,13 @@ +grammar ; +s @after {} : e EOF ; // must indicate EOF can follow +e : e '.' ID + | e '.' 'this' + | '-' e + | e '*' e + | e ('+'|'-') e + | INT + | ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/JavaExpressions.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/JavaExpressions.st new file mode 100644 index 000000000..c23d612af --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/JavaExpressions.st @@ -0,0 +1,56 @@ +grammar ; +s @after {} : e EOF ; // must indicate EOF can follow +expressionList + : e (',' e)* + ; +e : '(' e ')' + | 'this' + | 'super' + | INT + | ID + | type '.' 'class' + | e '.' ID + | e '.' 'this' + | e '.' 'super' '(' expressionList? ')' + | e '.' 'new' ID '(' expressionList? ')' + | 'new' type ( '(' expressionList? ')' | ('[' e ']')+) + | e '[' e ']' + | '(' type ')' e + | e ('++' | '--') + | e '(' expressionList? ')' + | ('+'|'-'|'++'|'--') e + | ('~'|'!') e + | e ('*'|'/'|'%') e + | e ('+'|'-') e + | e ('\<\<' | '>>>' | '>>') e + | e ('\<=' | '>=' | '>' | '\<') e + | e 'instanceof' e + | e ('==' | '!=') e + | e '&' e + |\ e '^' e + | e '|' e + | e '&&' e + | e '||' e + | e '?' 
e ':' e + |\ + e ('=' + |'+=' + |'-=' + |'*=' + |'/=' + |'&=' + |'|=' + |'^=' + |'>>=' + |'>>>=' + |'\<\<=' + |'%=') e + ; +type: ID + | ID '[' ']' + | 'int' + | 'int' '[' ']' + ; +ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/LabelsOnOpSubrule.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/LabelsOnOpSubrule.st new file mode 100644 index 000000000..567f539d6 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/LabelsOnOpSubrule.st @@ -0,0 +1,8 @@ +grammar ; +s @after {} : e; +e : a=e op=('*'|'/') b=e {} + | INT {} + | '(' x=e ')' {} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st new file mode 100644 index 000000000..e4cf19d9d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st @@ -0,0 +1,16 @@ +grammar ; +s : e {}; +e returns [int v] + : e '*' e {$v = $ctx.e(0).v * $ctx.e(1).v;} # binary + | e '+' e {$v = $ctx.e(0).v + $ctx.e(1).v;} # binary + | INT {$v = $INT.int;} # anInt + | '(' e ')' {$v = $e.v;} # parens + | left=e INC {;$v = $left.v + 1;} # unary + | left=e DEC {;$v = $left.v - 1;} # unary + | ID {$v = 3} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +INC : '++' ; +DEC : '--' ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st new file mode 100644 index 000000000..4bbada997 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st @@ -0,0 +1,6 @@ +grammar ; +prog +@after {} +: statement* EOF {}; +statement: letterA | statement letterA 'b' ; +letterA: 'a'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st new file mode 100644 index 000000000..73d7ea48b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st @@ -0,0 +1,11 @@ +grammar ; +s : e {} ; +e returns [String result] + : ID '=' e1=e {$result = \"(\" + $ID.text + \"=\" + $e1.result + \")\";} + | ID {$result = $ID.text;} + | e1=e '+' e2=e {$result = \"(\" + $e1.result + \"+\" + $e2.result + \")\";} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActions.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActions.st new file mode 100644 index 000000000..37aca6e6a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActions.st @@ -0,0 +1,11 @@ +grammar ; +s : e {}; +e returns [int v, list ignored] + : a=e '*' b=e {$v = $a.v * $b.v;} + | a=e '+' b=e {$v = $a.v + $b.v;} + | INT {$v = $INT.int;} + | '(' x=e ')' {$v = $x.v;} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; + diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st new file mode 100644 index 000000000..739b664a0 --- /dev/null +++ 
b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st @@ -0,0 +1,14 @@ +grammar ; +s : q=e {}; +e returns [int v] + : a=e op='*' b=e {$v = $a.v * $b.v;} # mult + | a=e '+' b=e {$v = $a.v + $b.v;} # add + | INT {$v = $INT.int;} # anInt + | '(' x=e ')' {$v = $x.v;} # parens + | x=e '++' {$v = $x.v+1;} # inc + | e '--' # dec + | ID {$v = 3;} # anID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st new file mode 100644 index 000000000..b95a58025 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st @@ -0,0 +1,7 @@ +grammar ; +s @after {} : a ; +a : a {}? ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Simple.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Simple.st new file mode 100644 index 000000000..5f5ea05d1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Simple.st @@ -0,0 +1,7 @@ +grammar ; +s @after {} : a ; +a : a ID + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/TernaryExpr.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/TernaryExpr.st new file mode 100644 index 000000000..5c6721a58 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/TernaryExpr.st @@ -0,0 +1,10 @@ +grammar ; +s @after {} : e EOF ; // must indicate EOF can follow or 'a\' won't match +e : e '*' e + | e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/WhitespaceInfluence.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/WhitespaceInfluence.st new file mode 100644 index 000000000..6551c1f7d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/WhitespaceInfluence.st @@ -0,0 +1,49 @@ +grammar ; +prog : expression EOF; +expression + : ID '(' expression (',' expression)* ')' # doFunction + | '(' expression ')' # doParenthesis + | '!' expression # doNot + | '-' expression # doNegate + | '+' expression # doPositiv + | expression '^' expression # doPower + | expression '*' expression # doMultipy + | expression '/' expression # doDivide + | expression '%' expression # doModulo + | expression '-' expression # doMinus + | expression '+' expression # doPlus + | expression '=' expression # doEqual + | expression '!=' expression # doNotEqual + | expression '>' expression # doGreather + | expression '>=' expression # doGreatherEqual + | expression '\<' expression # doLesser + | expression '\<=' expression # doLesserEqual + | expression K_IN '(' expression (',' expression)* ')' # doIn + | expression ( '&' | K_AND) expression # doAnd + | expression ( '|' | K_OR) expression # doOr + | '[' expression (',' expression)* ']' # newArray + | K_TRUE # newTrueBoolean + | K_FALSE # newFalseBoolean + | NUMBER # newNumber + | DATE # newDateTime + | ID # newIdentifier + | SQ_STRING # newString + | K_NULL # newNull + ; + +// Fragments +fragment DIGIT : '0' .. '9'; +fragment UPPER : 'A' .. 'Z'; +fragment LOWER : 'a' .. 'z'; +fragment LETTER : LOWER | UPPER; +fragment WORD : LETTER | '_' | '$' | '#' | '.'; +fragment ALPHANUM : WORD | DIGIT; + +// Tokens +ID : LETTER ALPHANUM*; +NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? 
DIGIT+)?; +DATE : '\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\''; +SQ_STRING : '\'' ('\'\'' | ~'\'')* '\''; +DQ_STRING : '\"' ('\\\"' | ~'\"')* '\"'; +WS : [ \t\n\r]+ -> skip ; +COMMENTS : ('/*' .*? '*/' | '//' ~'\n'* '\n' ) -> skip;\n"; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st new file mode 100644 index 000000000..7eae0c410 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st @@ -0,0 +1,3 @@ +lexer grammar ; +A : 'ab' ; +B : 'abc' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st new file mode 100644 index 000000000..528a248fe --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st @@ -0,0 +1,4 @@ +lexer grammar ; +A : 'ab' ; +B : 'abc' ; +C : 'abcd' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st new file mode 100644 index 000000000..87131b66a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st @@ -0,0 +1,3 @@ +lexer grammar ; +ACTION : '{' (ACTION | ~[{}])* '}'; +WS : [ \r\n\t]+ -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/ErrorInMiddle.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/ErrorInMiddle.st new file mode 100644 index 000000000..a0dd4d2c6 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/ErrorInMiddle.st @@ -0,0 +1,2 @@ +lexer grammar ; +A : 'abc' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStart.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStart.st new file mode 100644 index 000000000..c294d7efc --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStart.st @@ -0,0 +1,2 @@ +lexer grammar ; +A : 'a' 'b' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st new file mode 100644 index 000000000..c294d7efc --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st @@ -0,0 +1,2 @@ +lexer grammar ; +A : 'a' 'b' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInToken.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInToken.st new file mode 100644 index 000000000..c294d7efc --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInToken.st @@ -0,0 +1,2 @@ +lexer grammar ; +A : 'a' 'b' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st new file mode 100644 index 000000000..c294d7efc --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st @@ -0,0 +1,2 @@ +lexer grammar ; +A : 'a' 'b' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/LexerExecDFA.st 
b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/LexerExecDFA.st new file mode 100644 index 000000000..eeb152fc4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/LexerExecDFA.st @@ -0,0 +1,6 @@ +lexer grammar ; +start : ID ':' expr; +expr : primary expr? {} | expr '->' ID; +primary : ID; +ID : [a-z]+; +; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/StringsEmbeddedInActions.st b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/StringsEmbeddedInActions.st new file mode 100644 index 000000000..4acffdb58 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/StringsEmbeddedInActions.st @@ -0,0 +1,4 @@ +lexer grammar ; +ACTION2 : '[' (STRING | ~'"')*? ']'; +STRING : '"' ('\\"' | .)*? '"'; +WS : [ \t\r\n]+ -> skip; \ No newline at end of file From 50466f61dd8e8c814101b3c9f6da7a5bfb8708ab Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Wed, 15 Oct 2014 23:15:13 +0800 Subject: [PATCH 03/26] progress --- tool/test/org/antlr/v4/testgen/Generator.java | 42 +++++++++++++++++-- .../v4/testgen/grammars/Listeners/Basic.st | 23 ++++++++++ .../antlr/v4/testgen/grammars/Listeners/LR.st | 24 +++++++++++ .../grammars/Listeners/LRWithLabels.st | 24 +++++++++++ .../testgen/grammars/Listeners/RuleGetters.st | 24 +++++++++++ .../grammars/Listeners/TokenGetters.st | 23 ++++++++++ 6 files changed, 156 insertions(+), 4 deletions(-) create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/testgen/Generator.java index cec647414..f369b55e6 100644 --- a/tool/test/org/antlr/v4/testgen/Generator.java +++ b/tool/test/org/antlr/v4/testgen/Generator.java @@ -1,8 +1,5 @@ package org.antlr.v4.testgen; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; @@ -14,7 +11,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import org.junit.Test; import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.STGroupFile; @@ -116,10 +112,48 @@ public class Generator { list.add(buildFullContextParsing()); list.add(buildLeftRecursion()); list.add(buildLexerErrors()); + list.add(buildListeners()); return list; } + private TestFile buildListeners() throws Exception { + TestFile file = new TestFile("Listeners"); + file.addParserTest(input, "Basic", "T", "s", + "1 2", + "(a 1 2)\n" + "1\n" + "2\n", + null); + file.addParserTests(input, "TokenGetters", "T", "s", + "1 2", + "(a 1 2)\n" + + "1 2 [1, 2]\n", + "abc", + "(a abc)\n" + + "[@0,0:2='abc',<4>,1:0]\n"); + file.addParserTests(input, "RuleGetters", "T", "s", + "1 2", + "(a (b 1) (b 2))\n" + + "1 2 1\n", + "abc", + "(a (b abc))\n" + + "abc\n"); + file.addParserTest(input, "LR", "T", "s", + "1+2*3", + "(e (e 1) + (e (e 2) * (e 3)))\n" + + "1\n" + + "2\n" + + "3\n" + + "2 3 2\n" + + "1 2 1\n", + null); + file.addParserTest(input, "LRWithLabels", "T", "s", + "1(2,3)", + "(e (e 1) ( (eList (e 2) , (e 3)) ))\n" + + "1\n" + "2\n" + "3\n" + "1 [13 6]\n", + null); + return file; + } + private TestFile 
buildLexerErrors() throws Exception { TestFile file = new TestFile("LexerErrors"); file.addLexerTest(input, "InvalidCharAtStart", "L", diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st new file mode 100644 index 000000000..d67e84e01 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st @@ -0,0 +1,23 @@ +grammar ; +@parser::header { + +} + +@parser::members { + +} + +s +@after { + + +} + : r=a ; +a : INT INT + | ID + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st new file mode 100644 index 000000000..101094606 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st @@ -0,0 +1,24 @@ +grammar ; +@parser::header { + +} + +@parser::members { + +} + +s +@after { + + +} + : r=e ; +e : e op='*' e + | e op='+' e + | INT + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st new file mode 100644 index 000000000..77a91663e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st @@ -0,0 +1,24 @@ +grammar ; +@parser::header { + +} + +@parser::members { + +} + +s +@after { + + +} + : r=e ; +e : e '(' eList ')' # Call + | INT # Int + ; +eList : e (',' e)* ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st new file mode 100644 index 000000000..2f7b90a2e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st @@ -0,0 +1,24 @@ +grammar ; +@parser::header { + +} + +@parser::members { + +} + +s +@after { + + +} + : r=a ; +a : b b // forces list + | b // a list still + ; +b : ID | INT; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st new file mode 100644 index 000000000..144a93763 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st @@ -0,0 +1,23 @@ +grammar ; +@parser::header { + +} + +@parser::members { + +} + +s +@after { + + +} + : r=a ; +a : INT INT + | ID + ; +MULT: '*' ; +ADD : '+' ; +INT : [0-9]+ ; +ID : [a-z]+ ; +WS : [ \t\n]+ -> skip ; From f22acf701eac16e51fa7d2f03c027a49a3c942d3 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Thu, 16 Oct 2014 22:57:29 +0800 Subject: [PATCH 04/26] progress --- tool/test/org/antlr/v4/testgen/Generator.java | 117 +++++++++++++++++- .../grammars/ParserErrors/ConjuringUpToken.st | 2 + .../ParserErrors/ConjuringUpTokenFromSet.st | 2 + .../ParserErrors/ContextListGetters.st | 8 ++ .../DuplicatedLeftRecursiveCall.st | 6 + .../ParserErrors/InvalidATNStateRemoval.st | 6 + .../ParserErrors/InvalidEmptyInput.st | 4 + .../grammars/ParserErrors/LL1ErrorInfo.st | 14 +++ .../v4/testgen/grammars/ParserErrors/LL2.st | 5 + .../v4/testgen/grammars/ParserErrors/LL3.st | 5 + .../testgen/grammars/ParserErrors/LLStar.st | 5 + .../MultiTokenDeletionBeforeLoop.st | 2 + .../MultiTokenDeletionBeforeLoop2.st | 2 + .../MultiTokenDeletionDuringLoop.st | 2 + 
.../MultiTokenDeletionDuringLoop2.st | 2 + .../ParserErrors/NoViableAltAvoidance.st | 7 ++ .../ParserErrors/SingleSetInsertion.st | 2 + .../ParserErrors/SingleTokenDeletion.st | 2 + .../SingleTokenDeletionBeforeLoop.st | 2 + .../SingleTokenDeletionBeforeLoop2.st | 2 + .../SingleTokenDeletionDuringLoop.st | 2 + .../SingleTokenDeletionDuringLoop2.st | 2 + .../SingleTokenDeletionExpectingSet.st | 2 + .../ParserErrors/SingleTokenInsertion.st | 2 + .../grammars/ParserErrors/TokenMismatch.st | 2 + 25 files changed, 203 insertions(+), 4 deletions(-) create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpToken.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpTokenFromSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidEmptyInput.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL3.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LLStar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/NoViableAltAvoidance.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleSetInsertion.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletion.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenInsertion.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParserErrors/TokenMismatch.st diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/testgen/Generator.java index f369b55e6..47c4274b3 100644 --- a/tool/test/org/antlr/v4/testgen/Generator.java +++ b/tool/test/org/antlr/v4/testgen/Generator.java @@ -105,16 +105,125 @@ public class Generator { private Collection buildTests() throws Exception { List list = new ArrayList(); - list.add(buildLexerExec()); - list.add(buildParserExec()); list.add(buildCompositeLexers()); list.add(buildCompositeParsers()); list.add(buildFullContextParsing()); list.add(buildLeftRecursion()); list.add(buildLexerErrors()); + list.add(buildLexerExec()); list.add(buildListeners()); - return list; - + 
list.add(buildParserErrors()); + list.add(buildParserExec()); + return list; + } + + private TestFile buildParserErrors() throws Exception { + TestFile file = new TestFile("ParserErrors"); + file.addParserTest(input, "TokenMismatch", "T", "a", + "aa", + "", + "line 1:1 mismatched input 'a' expecting 'b'\n"); + file.addParserTest(input, "SingleTokenDeletion", "T", "a", + "aab", + "", + "line 1:1 extraneous input 'a' expecting 'b'\n"); + file.addParserTest(input, "SingleTokenDeletionExpectingSet", "T", "a", + "aab", + "", + "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"); + file.addParserTest(input, "SingleTokenInsertion", "T", "a", + "ac", + "", + "line 1:1 missing 'b' at 'c'\n"); + file.addParserTest(input, "ConjuringUpToken", "T", "a", + "ac", + "conjured=[@-1,-1:-1='',<1>,1:1]\n", + null); + file.addParserTest(input, "SingleSetInsertion", "T", "a", + "ad", + "", + "line 1:1 missing {'b', 'c'} at 'd'\n"); + file.addParserTest(input, "ConjuringUpTokenFromSet", "T", "a", + "ad", + "conjured=[@-1,-1:-1='',<1>,1:1]\n", + null); + file.addParserTest(input, "LL2", "T", "a", + "ae", + "", + "line 1:1 no viable alternative at input 'ae'\n"); + file.addParserTest(input, "LL3", "T", "a", + "abe", + "", + "line 1:2 no viable alternative at input 'abe'\n"); + file.addParserTest(input, "LLStar", "T", "a", + "aaae", + "", + "line 1:3 no viable alternative at input 'aaae'\n"); + file.addParserTest(input, "SingleTokenDeletionBeforeLoop", "T", "a", + "aabc", + "", + "line 1:1 extraneous input 'a' expecting {, 'b'}\n" + + "line 1:3 token recognition error at: 'c'\n"); + file.addParserTest(input, "MultiTokenDeletionBeforeLoop", "T", "a", + "aacabc", + "", + "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"); + file.addParserTest(input, "SingleTokenDeletionDuringLoop", "T", "a", + "ababbc", + "", + "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n"); + file.addParserTest(input, "MultiTokenDeletionDuringLoop", "T", "a", + "abaaababc", + "", + "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n" + + "line 1:6 extraneous input 'a' expecting {'b', 'c'}\n"); + file.addParserTest(input, "SingleTokenDeletionBeforeLoop2", "T", "a", + "aabc", + "", + "line 1:1 extraneous input 'a' expecting {, 'b', 'z'}\n" + + "line 1:3 token recognition error at: 'c'\n"); + file.addParserTest(input, "MultiTokenDeletionBeforeLoop2", "T", "a", + "aacabc", + "", + "line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n"); + file.addParserTest(input, "SingleTokenDeletionDuringLoop2", "T", "a", + "ababbc", + "", + "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n"); + file.addParserTest(input, "MultiTokenDeletionDuringLoop2", "T", "a", + "abaaababc", + "", + "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n" + + "line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n"); + file.addParserTest(input, "LL1ErrorInfo", "T", "start", + "dog and software", + "{'hardware', 'software'}\n", + null); + file.addParserTest(input, "InvalidEmptyInput", "T", "start", + "", + "", + "line 1:0 missing ID at ''\n"); + file.addParserTest(input, "ContextListGetters", "T", "s", + "abab", + "abab\n", + null); + file.addParserTestsWithErrors(input, "DuplicatedLeftRecursiveCall", "T", "start", + "xx", "", null, + "xxx", "", null, + "xxxx", "", null); + file.addParserTest(input, "InvalidATNStateRemoval", "T", "start", + "x:x", + "", + null); + // "a." matches 'a' to rule e but then realizes '.' won't match. + // previously would cause noviablealt. now prediction pretends to + // have "a' predict 2nd alt of e. 
Will get syntax error later so + // let it get farther. + file.addParserTest(input, "NoViableAltAvoidance", "T", "s", + "a.", + "", + "line 1:1 mismatched input '.' expecting '!'\n"); + return file; } private TestFile buildListeners() throws Exception { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpToken.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpToken.st new file mode 100644 index 000000000..d7259fee4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpToken.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' x='b' {} 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpTokenFromSet.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpTokenFromSet.st new file mode 100644 index 000000000..e8f27a064 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpTokenFromSet.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' x=('b'|'c') {} 'd' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st new file mode 100644 index 000000000..9d3d816aa --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st @@ -0,0 +1,8 @@ +grammar ; +@parser::members{ + +} +s : (a | b)+; +a : 'a' {}; +b : 'b' {}; +; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st new file mode 100644 index 000000000..04f71f413 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st @@ -0,0 +1,6 @@ +grammar ; +start : expr EOF; +expr : 'x' + | expr expr + ; +; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st new file mode 100644 index 000000000..646d8162c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st @@ -0,0 +1,6 @@ +grammar ; +start : ID ':' expr; +expr : primary expr? {} | expr '->' ID; +primary : ID; +ID : [a-z]+; +; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidEmptyInput.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidEmptyInput.st new file mode 100644 index 000000000..65551a8c6 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidEmptyInput.st @@ -0,0 +1,4 @@ +grammar ; +start : ID+; +ID : [a-z]+; +; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st new file mode 100644 index 000000000..aec08f27d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st @@ -0,0 +1,14 @@ +grammar ; +start : animal (AND acClass)? 
service EOF; +animal : (DOG | CAT ); +service : (HARDWARE | SOFTWARE) ; +AND : 'and'; +DOG : 'dog'; +CAT : 'cat'; +HARDWARE: 'hardware'; +SOFTWARE: 'software'; +WS : ' ' -> skip ; +acClass +@init +{} + : ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL2.st new file mode 100644 index 000000000..a6ea5030b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL2.st @@ -0,0 +1,5 @@ +grammar ; +a : 'a' 'b' + | 'a' 'c' +; +q : 'e' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL3.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL3.st new file mode 100644 index 000000000..412d44c02 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL3.st @@ -0,0 +1,5 @@ +grammar ; +a : 'a' 'b'* 'c' + | 'a' 'b' 'd' +; +q : 'e' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LLStar.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LLStar.st new file mode 100644 index 000000000..bc2e46412 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LLStar.st @@ -0,0 +1,5 @@ +grammar ; +a : 'a'+ 'b' + | 'a'+ 'c' +; +q : 'e' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st new file mode 100644 index 000000000..d1ee35ca9 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b'* 'c'; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st new file mode 100644 index 000000000..7f322fede --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' ('b'|'z'{})* 'c'; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st new file mode 100644 index 000000000..b7417c923 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b'* 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st new file mode 100644 index 000000000..133e24091 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' ('b'|'z'{})* 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/NoViableAltAvoidance.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/NoViableAltAvoidance.st new file mode 100644 index 000000000..139e3ded2 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/NoViableAltAvoidance.st @@ -0,0 +1,7 @@ +grammar ; +s : e '!' ; +e : 'a' 'b' + | 'a' + ; +DOT : '.' 
; +WS : [ \t\r\n]+ -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleSetInsertion.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleSetInsertion.st new file mode 100644 index 000000000..d5f724b4a --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleSetInsertion.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' ('b'|'c') 'd' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletion.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletion.st new file mode 100644 index 000000000..daebfb144 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletion.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st new file mode 100644 index 000000000..c2b5d9037 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b'* ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st new file mode 100644 index 000000000..ef412fd55 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' ('b'|'z'{})*; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st new file mode 100644 index 000000000..b7417c923 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b'* 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st new file mode 100644 index 000000000..133e24091 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' ('b'|'z'{})* 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st new file mode 100644 index 000000000..6280f7599 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' ('b'|'c') ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenInsertion.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenInsertion.st new file mode 100644 index 000000000..013403fce --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenInsertion.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b' 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/TokenMismatch.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/TokenMismatch.st new file mode 100644 index 000000000..daebfb144 --- /dev/null +++ 
b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/TokenMismatch.st @@ -0,0 +1,2 @@ +grammar ; +a : 'a' 'b' ; \ No newline at end of file From 4f657e3e52da70ec39dc2909b0097f48fea65494 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Thu, 16 Oct 2014 23:19:59 +0800 Subject: [PATCH 05/26] progress --- tool/test/org/antlr/v4/testgen/Generator.java | 39 +++++++++++++++++++ .../testgen/grammars/ParseTrees/2AltLoop.st | 11 ++++++ .../testgen/grammars/ParseTrees/ExtraToken.st | 14 +++++++ .../grammars/ParseTrees/NoViableAlt.st | 14 +++++++ .../v4/testgen/grammars/ParseTrees/RuleRef.st | 13 +++++++ .../v4/testgen/grammars/ParseTrees/Sync.st | 13 +++++++ .../v4/testgen/grammars/ParseTrees/Token2.st | 11 ++++++ .../ParseTrees/TokenAndRuleContextString.st | 12 ++++++ .../testgen/grammars/ParseTrees/test2Alts.st | 11 ++++++ 9 files changed, 138 insertions(+) create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/testgen/Generator.java index 47c4274b3..571df6854 100644 --- a/tool/test/org/antlr/v4/testgen/Generator.java +++ b/tool/test/org/antlr/v4/testgen/Generator.java @@ -114,9 +114,48 @@ public class Generator { list.add(buildListeners()); list.add(buildParserErrors()); list.add(buildParserExec()); + list.add(buildParseTrees()); return list; } + private TestFile buildParseTrees() throws Exception { + TestFile file = new TestFile("ParseTrees"); + file.addParserTest(input, "TokenAndRuleContextString", "T", "s", + "x", + "[a, s]\n(a x)\n", + null); + file.addParserTest(input, "Token2", "T", "s", + "xy", + "(a x y)\n", + null); + file.addParserTest(input, "test2Alts", "T", "s", + "y", + "(a y)\n", + null); + file.addParserTest(input, "2AltLoop", "T", "s", + "xyyxyxz", + "(a x y y x y x z)\n", + null); + file.addParserTest(input, "RuleRef", "T", "s", + "yx", + "(a (b y) x)\n", + null); + // ERRORs not shown. 
z is colored red in tree view + file.addParserTest(input, "ExtraToken", "T", "s", + "xzy", + "(a x z y)\n", + null); + file.addParserTest(input, "NoViableAlt", "T", "s", + "z", + "(a z)\n", + null); + file.addParserTest(input, "Sync", "T", "s", + "xzyy!", + "(a x z y y !)\n", + null); + return file; + } + private TestFile buildParserErrors() throws Exception { TestFile file = new TestFile("ParserErrors"); file.addParserTest(input, "TokenMismatch", "T", "a", diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st new file mode 100644 index 000000000..0b5bbd977 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st @@ -0,0 +1,11 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : ('x' | 'y')* 'z' + ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st new file mode 100644 index 000000000..c5520f0b4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st @@ -0,0 +1,14 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' 'y' + ; +Z : 'z' + ; + diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st new file mode 100644 index 000000000..3461fe3a4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st @@ -0,0 +1,14 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' | 'y' + ; +Z : 'z' + ; + \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st new file mode 100644 index 000000000..2a048fd34 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st @@ -0,0 +1,13 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : b 'x' + ; +b : 'y' + ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st new file mode 100644 index 000000000..183d1629c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st @@ -0,0 +1,13 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' 'y'* '!' 
+ ; +Z : 'z' + ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st new file mode 100644 index 000000000..8261e8503 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st @@ -0,0 +1,11 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' 'y' + ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st new file mode 100644 index 000000000..b418522e3 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st @@ -0,0 +1,12 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' { + +} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st new file mode 100644 index 000000000..67eb15bea --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st @@ -0,0 +1,11 @@ +grammar ; +s +@init { + +} +@after { + +} + : r=a ; +a : 'x' | 'y' + ; From 3d04a72d5217860dabcff34a692696a26122369e Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Thu, 16 Oct 2014 23:53:54 +0800 Subject: [PATCH 06/26] progress --- tool/test/org/antlr/v4/testgen/Generator.java | 90 +++++++++++++++++++ .../grammars/SemPredEvalLexer/DisableRule.st | 5 ++ .../grammars/SemPredEvalLexer/EnumNotID.st | 4 + .../grammars/SemPredEvalLexer/IDnotEnum.st | 4 + .../grammars/SemPredEvalLexer/IDvsEnum.st | 4 + .../grammars/SemPredEvalLexer/Indent.st | 6 ++ .../LexerInputPositionSensitivePredicates.st | 6 ++ .../SemPredEvalLexer/PredicatedKeywords.st | 4 + 8 files changed, 123 insertions(+) create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/EnumNotID.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/Indent.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/PredicatedKeywords.st diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/testgen/Generator.java index 571df6854..784bd05a7 100644 --- a/tool/test/org/antlr/v4/testgen/Generator.java +++ b/tool/test/org/antlr/v4/testgen/Generator.java @@ -115,9 +115,99 @@ public class Generator { list.add(buildParserErrors()); list.add(buildParserExec()); list.add(buildParseTrees()); + list.add(buildSemPredEvalLexer()); return list; } + private TestFile buildSemPredEvalLexer() throws Exception { + TestFile file = new TestFile("SemPredEvalLexer"); + file.addLexerTest(input, "DisableRule", "L", + "enum abc", + "[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<3>,1:5]\n" + + "[@2,8:7='',<-1>,1:8]\n" + + "s0-' '->:s5=>4\n" + + "s0-'a'->:s6=>3\n" + + "s0-'e'->:s1=>3\n" + + ":s1=>3-'n'->:s2=>3\n" + + ":s2=>3-'u'->:s3=>3\n" + + ":s6=>3-'b'->:s6=>3\n" + + ":s6=>3-'c'->:s6=>3\n", + null); + file.addLexerTest(input, "IDvsEnum", "L", + "enum abc enum", + "[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<2>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s5=>3\n" + + "s0-'a'->:s4=>2\n" + + 
"s0-'e'->:s1=>2\n" + + ":s1=>2-'n'->:s2=>2\n" + + ":s2=>2-'u'->:s3=>2\n" + + ":s4=>2-'b'->:s4=>2\n" + + ":s4=>2-'c'->:s4=>2\n", // no 'm'-> transition...conflicts with pred + null); + file.addLexerTest(input, "IDnotEnum", "L", + "enum abc enum", + "[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<2>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s2=>3\n", // no edges in DFA for enum/id. all paths lead to pred. + null); + file.addLexerTest(input, "EnumNotID", "L", + "enum abc enum", + "[@0,0:3='enum',<1>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<1>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s3=>3\n", // no edges in DFA for enum/id. all paths lead to pred. + null); + file.addLexerTest(input, "Indent", "L", + "abc\n def \n", + "INDENT\n" + // action output + "[@0,0:2='abc',<1>,1:0]\n" + // ID + "[@1,3:3='\\n',<3>,1:3]\n" + // NL + "[@2,4:5=' ',<2>,2:0]\n" + // INDENT + "[@3,6:8='def',<1>,2:2]\n" + // ID + "[@4,9:10=' ',<4>,2:5]\n" + // WS + "[@5,11:11='\\n',<3>,2:7]\n" + + "[@6,12:11='',<-1>,3:8]\n" + + "s0-'\n" + + "'->:s2=>3\n" + + "s0-'a'->:s1=>1\n" + + "s0-'d'->:s1=>1\n" + + ":s1=>1-'b'->:s1=>1\n" + + ":s1=>1-'c'->:s1=>1\n" + + ":s1=>1-'e'->:s1=>1\n" + + ":s1=>1-'f'->:s1=>1\n", + null); + file.addLexerTest(input, "LexerInputPositionSensitivePredicates", "L", + "a cde\nabcde\n", + "a\n" + + "cde\n" + + "ab\n" + + "cde\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,2:4='cde',<2>,1:2]\n" + + "[@2,6:7='ab',<1>,2:0]\n" + + "[@3,8:10='cde',<2>,2:2]\n" + + "[@4,12:11='',<-1>,3:0]\n", + null); + file.addLexerTest(input, "PredicatedKeywords", "L", + "enum enu a", + "enum!\n" + + "ID enu\n" + + "ID a\n" + + "[@0,0:3='enum',<1>,1:0]\n" + + "[@1,5:7='enu',<2>,1:5]\n" + + "[@2,9:9='a',<2>,1:9]\n" + + "[@3,10:9='',<-1>,1:10]\n", + null); + return file; + } + private TestFile buildParseTrees() throws Exception { TestFile file = new TestFile("ParseTrees"); file.addParserTest(input, "TokenAndRuleContextString", "T", "s", diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st new file mode 100644 index 000000000..ec1263a71 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st @@ -0,0 +1,5 @@ +lexer grammar ; +E1 : 'enum' { }? ; +E2 : 'enum' { }? ; // winner not E1 or ID +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/EnumNotID.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/EnumNotID.st new file mode 100644 index 000000000..e32eb23e7 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/EnumNotID.st @@ -0,0 +1,4 @@ +lexer grammar ; +ENUM : [a-z]+ { }? ; +ID : [a-z]+ ; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st new file mode 100644 index 000000000..c055aa9db --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st @@ -0,0 +1,4 @@ +lexer grammar ; +ENUM : [a-z]+ { }? 
; +ID : [a-z]+ ; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st new file mode 100644 index 000000000..c5180a1ed --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st @@ -0,0 +1,4 @@ +lexer grammar ; +ENUM : 'enum' { }? ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/Indent.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/Indent.st new file mode 100644 index 000000000..36c452401 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/Indent.st @@ -0,0 +1,6 @@ +lexer grammar ; +ID : [a-z]+ ; +INDENT : [ \t]+ { }? \n" + + { } ;"+ +NL : '\n'; +WS : [ \t]+ ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st new file mode 100644 index 000000000..70890ceaf --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st @@ -0,0 +1,6 @@ +lexer grammar ; +WORD1 : ID1+ { } ; +WORD2 : ID2+ { } ; +fragment ID1 : { \< 2 }? [a-zA-Z]; +fragment ID2 : { >= 2 }? [a-zA-Z]; +WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/PredicatedKeywords.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/PredicatedKeywords.st new file mode 100644 index 000000000..529601cb6 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/PredicatedKeywords.st @@ -0,0 +1,4 @@ +lexer grammar ; +ENUM : [a-z]+ { }? { } ; +ID : [a-z]+ { } ; +WS : [ \n] -> skip ; From fe0d1e43e23ea54275508906b91eab1c8600bfeb Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sun, 19 Oct 2014 10:59:34 +0800 Subject: [PATCH 07/26] progress --- tool/test/org/antlr/v4/testgen/Generator.java | 159 ++++++++++++++++++ .../DelegatorAccessesDelegateMembers.st | 2 +- .../DelegatorAccessesDelegateMembers_S.st | 2 +- .../AmbigYieldsCtxSensitiveDFA.st | 2 +- .../FullContextParsing/CtxSensitiveDFA.st | 2 +- .../CtxSensitiveDFATwoDiffInput.st | 2 +- .../FullContextIF_THEN_ELSEParse.st | 2 +- .../SLLSeesEOFInLLGrammar.st | 2 +- .../testgen/grammars/LeftRecursion/SemPred.st | 2 +- .../v4/testgen/grammars/Listeners/Basic.st | 2 +- .../antlr/v4/testgen/grammars/Listeners/LR.st | 2 +- .../grammars/Listeners/LRWithLabels.st | 2 +- .../testgen/grammars/Listeners/RuleGetters.st | 2 +- .../grammars/Listeners/TokenGetters.st | 2 +- .../testgen/grammars/ParseTrees/2AltLoop.st | 2 +- .../testgen/grammars/ParseTrees/ExtraToken.st | 2 +- .../grammars/ParseTrees/NoViableAlt.st | 2 +- .../v4/testgen/grammars/ParseTrees/RuleRef.st | 2 +- .../v4/testgen/grammars/ParseTrees/Sync.st | 2 +- .../v4/testgen/grammars/ParseTrees/Token2.st | 2 +- .../ParseTrees/TokenAndRuleContextString.st | 4 +- .../testgen/grammars/ParseTrees/test2Alts.st | 2 +- .../ParserErrors/ContextListGetters.st | 2 +- .../ParserErrors/InvalidATNStateRemoval.st | 2 +- .../grammars/ParserErrors/LL1ErrorInfo.st | 2 +- .../MultiTokenDeletionBeforeLoop2.st | 2 +- .../MultiTokenDeletionDuringLoop2.st | 2 +- .../SingleTokenDeletionBeforeLoop2.st | 2 +- .../SingleTokenDeletionDuringLoop2.st | 2 +- .../ParserExec/ListLabelForClosureContext.st | 2 +- .../grammars/ParserExec/PredicatedIfIfElse.st | 2 +- .../ParserExec/StartRuleWithoutEOF.st | 2 +- 
.../grammars/SemPredEvalLexer/DisableRule.st | 4 +- .../grammars/SemPredEvalLexer/IDnotEnum.st | 2 +- .../grammars/SemPredEvalLexer/IDvsEnum.st | 2 +- .../SemPredEvalParser/2UnpredicatedAlts.st | 9 + .../2UnpredicatedAltsAndOneOrthogonalAlt.st | 10 ++ .../SemPredEvalParser/ActionHidesPreds.st | 9 + .../ActionsHidePredsInGlobalFOLLOW.st | 11 ++ .../AtomWithClosureInTranslatedLRRule.st | 6 + .../DepedentPredsInGlobalFOLLOW.st | 11 ++ ...pendentPredNotInOuterCtxShouldBeIgnored.st | 11 ++ .../SemPredEvalParser/DisabledAlternative.st | 5 + ...edNotPassedOuterCtxToAvoidCastException.st | 10 ++ .../NoTruePredsThrowsNoViableAlt.st | 8 + .../grammars/SemPredEvalParser/Order.st | 10 ++ .../PredFromAltTestedInLoopBack.st | 9 + .../PredTestedEvenWhenUnAmbig.st | 8 + .../PredicateDependentOnArg.st | 10 ++ .../PredicateDependentOnArg2.st | 10 ++ .../SemPredEvalParser/PredsInGlobalFOLLOW.st | 10 ++ .../SemPredEvalParser/RewindBeforePredEval.st | 8 + .../grammars/SemPredEvalParser/Simple.st | 9 + .../SemPredEvalParser/SimpleValidate.st | 8 + .../SemPredEvalParser/SimpleValidate2.st | 8 + .../grammars/SemPredEvalParser/ToLeft.st | 8 + .../ToLeftWithVaryingPredicate.st | 9 + .../UnpredicatedPathsInAlt.st | 12 ++ .../SemPredEvalParser/ValidateInDFA.st | 11 ++ 59 files changed, 415 insertions(+), 36 deletions(-) create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAlts.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionHidesPreds.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DisabledAlternative.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Order.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/RewindBeforePredEval.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Simple.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate2.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeft.st create mode 100644 
tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ValidateInDFA.st diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/testgen/Generator.java index 784bd05a7..01fed8dec 100644 --- a/tool/test/org/antlr/v4/testgen/Generator.java +++ b/tool/test/org/antlr/v4/testgen/Generator.java @@ -116,9 +116,168 @@ public class Generator { list.add(buildParserExec()); list.add(buildParseTrees()); list.add(buildSemPredEvalLexer()); + list.add(buildSemPredEvalParser()); return list; } + private TestFile buildSemPredEvalParser() throws Exception { + TestFile file = new TestFile("SemPredEvalParser"); + file.addParserTest(input, "SimpleValidate", "T", "s", + "x", + "", + "line 1:0 no viable alternative at input 'x'\n"); + file.addParserTest(input, "SimpleValidate2", "T", "s", + "3 4 x", + "alt 2\n" + "alt 2\n", + "line 1:4 no viable alternative at input 'x'\n"); + file.addParserTest(input, "AtomWithClosureInTranslatedLRRule", "T", "start", + "a+b+a", + "", + null); + file.addParserTest(input, "ValidateInDFA", "T", "s", + "x ; y", + "", + "line 1:0 no viable alternative at input 'x'\n" + + "line 1:4 no viable alternative at input 'y'\n"); + file.addParserTest(input, "Simple", "T", "s", + "x y 3", + "alt 2\n" + "alt 2\n" + "alt 3\n", + null); + // Under new predicate ordering rules (see antlr/antlr4#29), the first + // alt with an acceptable config (unpredicated, or predicated and evaluates + // to true) is chosen. + file.addParserTest(input, "Order", "T", "s", + "x y", + "alt 1\n" + "alt 1\n", + null); + // We have n-2 predicates for n alternatives. pick first alt + file.addParserTest(input, "2UnpredicatedAlts", "T", "s", + "x; y", + "alt 1\n" + + "alt 1\n", + "line 1:0 reportAttemptingFullContext d=0 (a), input='x'\n" + + "line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\n" + + "line 1:3 reportAttemptingFullContext d=0 (a), input='y'\n" + + "line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n"); + file.addParserTest(input, "2UnpredicatedAltsAndOneOrthogonalAlt", "T", "s", + "34; x; y", + "alt 1\n" + "alt 2\n" + "alt 2\n", + "line 1:4 reportAttemptingFullContext d=0 (a), input='x'\n" + + "line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\n" + + "line 1:7 reportAttemptingFullContext d=0 (a), input='y'\n" + + "line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n"); + // The parser consumes ID and moves to the 2nd token INT. + // To properly evaluate the predicates after matching ID INT, + // we must correctly see come back to starting index so LT(1) works + file.addParserTest(input, "RewindBeforePredEval", "T", "s", + "y 3 x 4", + "alt 2\n" + "alt 1\n", + null); + // checks that we throw exception if all alts + // are covered with a predicate and none succeeds + file.addParserTest(input, "NoTruePredsThrowsNoViableAlt", "T", "s", + "y 3 x 4", + "", + "line 1:0 no viable alternative at input 'y'\n"); + file.addParserTest(input, "ToLeft", "T", "s", + "x x y", + "alt 2\n" + "alt 2\n" + "alt 2\n", + null); + file.addParserTest(input, "UnpredicatedPathsInAlt", "T", "s", + "x 4", + "alt 1\n", + null); + file.addParserTest(input, "ActionHidesPreds", "T", "s", + "x x y", + "alt 1\n" + "alt 1\n" + "alt 1\n", + null); + /** In this case, we use predicates that depend on global information + * like we would do for a symbol table. 
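+	 * For a Java-target rendering, the empty predicate/action holes in the
+	 * ToLeftWithVaryingPredicate template would presumably be filled roughly as
+	 * follows (assuming a counter i declared in @members; the bodies shown are an
+	 * illustration, not the template text):
+	 *
+	 *   s : ({i++; System.out.println("i=" + i);} a)+ ;
+	 *   a : {i % 2 == 0}? ID {System.out.println("alt 1");}
+	 *     | {i % 2 != 0}? ID {System.out.println("alt 2");}
+	 *     ;
+	 *
+	 * which is consistent with the expected "i=1 / alt 2 / i=2 / alt 1 / i=3 / alt 2"
+	 * output asserted below.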
We simply execute + * the predicates assuming that all necessary information is available. + * The i++ action is done outside of the prediction and so it is executed. + */ + file.addParserTest(input, "ToLeftWithVaryingPredicate", "T", "s", + "x x y", + "i=1\n" + "alt 2\n" + "i=2\n" + "alt 1\n" + "i=3\n" + "alt 2\n", + null); + /** + * In this case, we're passing a parameter into a rule that uses that + * information to predict the alternatives. This is the special case + * where we know exactly which context we are in. The context stack + * is empty and we have not dipped into the outer context to make a decision. + */ + file.addParserTest(input, "PredicateDependentOnArg", "T", "s", + "a b", + "alt 2\n" + "alt 1\n", + null); + /** In this case, we have to ensure that the predicates are not + tested during the closure after recognizing the 1st ID. The + closure will fall off the end of 'a' 1st time and reach into the + a[1] rule invocation. It should not execute predicates because it + does not know what the parameter is. The context stack will not + be empty and so they should be ignored. It will not affect + recognition, however. We are really making sure the ATN + simulation doesn't crash with context object issues when it + encounters preds during FOLLOW. + */ + file.addParserTest(input, "PredicateDependentOnArg2", "T", "s", + "a b", + "", + null); + // uses ID ';' or ID '.' lookahead to solve s. preds not tested. + file.addParserTest(input, "DependentPredNotInOuterCtxShouldBeIgnored", "T", "s", + "a;", + "alt 2\n", + null); + file.addParserTest(input, "IndependentPredNotPassedOuterCtxToAvoidCastException", "T", "s", + "a;", + "alt 2\n", + null); + /** During a global follow operation, we still collect semantic + * predicates as long as they are not dependent on local context + */ + file.addParserTest(input, "PredsInGlobalFOLLOW", "T", "s", + "a!", + "eval=true\n" + /* now we are parsing */ "parse\n", + null); + /** We cannot collect predicates that are dependent on local context if + * we are doing a global follow. They appear as if they were not there at all. + */ + file.addParserTest(input, "DepedentPredsInGlobalFOLLOW","T", "s", + "a!", + "eval=true\n" + "parse\n", + null); + /** Regular non-forced actions can create side effects used by semantic + * predicates and so we cannot evaluate any semantic predicate + * encountered after having seen a regular action. This includes + * during global follow operations. + */ + file.addParserTest(input, "ActionsHidePredsInGlobalFOLLOW", "T", "s", + "a!", + "eval=true\n" + "parse\n", + null); + file.addParserTestsWithErrors(input, "PredTestedEvenWhenUnAmbig", "T", "primary", + "abc", "ID abc\n", null, + "enum", "", "line 1:0 no viable alternative at input 'enum'\n"); + /** + * This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". 
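+	 * In the DisabledAlternative template, the second alternative of the
+	 * 'content' rule is guarded by an empty predicate hole; each target
+	 * presumably fills it with its false literal, e.g. for Java:
+	 *
+	 *   content : anything | {false}? . ;   // hypothetical Java-target fill
+	 *
+	 * so the wildcard alternative is never viable and parsing "hello" as
+	 * cppCompilationUnit should complete with no output and no errors, which is
+	 * what the empty expected strings assert. See also: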
+ * https://github.com/antlr/antlr4/issues/218 + */ + file.addParserTest(input, "DisabledAlternative", "T", "cppCompilationUnit", + "hello", + "", + null); + /** Loopback doesn't eval predicate at start of alt */ + file.addParserTestsWithErrors(input, "PredFromAltTestedInLoopBack", "T", "file_", + "s\n\n\nx\n", + "(file_ (para (paraContent s) \n \n) (para (paraContent \n x \n)) )\n", + "line 5:2 mismatched input '' expecting '\n'\n", + "s\n\n\nx\n\n", + "(file_ (para (paraContent s) \n \n) (para (paraContent \n x) \n \n) )\n", + null); + return file; + } + private TestFile buildSemPredEvalLexer() throws Exception { TestFile file = new TestFile("SemPredEvalLexer"); file.addLexerTest(input, "DisableRule", "L", diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st index 7b2eacb7a..f91311b18 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st @@ -1,4 +1,4 @@ grammar M; // uses no rules from the import import S; -s : 'b'{}; // gS is import pointer +s : 'b'{}; // gS is import pointer WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st index 3450c4f8b..8b8bdca29 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st +++ b/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st @@ -1,5 +1,5 @@ parser grammar S; @members { - + } a : B; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st index d404d7422..b442c09c6 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st @@ -1,5 +1,5 @@ grammar ; -s @after {} +s @after {} : ID | ID {} ; ID : 'a'..'z'+; WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st index 2e8c5ed26..4c69eb9fa 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st @@ -1,5 +1,5 @@ grammar ; -s @after {} +s @after {} : '$' a | '@' b ; a : e ID ; b : e INT ID ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st index 52378f8dd..3d021b445 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st @@ -1,5 +1,5 @@ grammar ; -s @after {} +s @after {} : ('$' a | '@' b)+ ; a : e ID ; b : e INT ID ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st index 
b331d468d..a7b8047fa 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st @@ -1,7 +1,7 @@ grammar ; s @init {} -@after {} +@after {} : '{' stat* '}' ; stat: 'if' ID 'then' stat ('else' ID)? | 'return diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st index ccacc60c5..c74f4def8 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st +++ b/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st @@ -1,5 +1,5 @@ grammar ; -s @after {} +s @after {} : a; a : e ID ; b : e INT ID ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st index b95a58025..e568a8944 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st +++ b/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st @@ -1,6 +1,6 @@ grammar ; s @after {} : a ; -a : a {}? ID +a : a {}? ID | ID ; ID : 'a'..'z'+ ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st index d67e84e01..9446ce77e 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st @@ -1,6 +1,6 @@ grammar ; @parser::header { - + } @parser::members { diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st index 101094606..0bf8e3148 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st @@ -1,6 +1,6 @@ grammar ; @parser::header { - + } @parser::members { diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st index 77a91663e..c548f899c 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st @@ -1,6 +1,6 @@ grammar ; @parser::header { - + } @parser::members { diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st index 2f7b90a2e..663a9e1d5 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st @@ -1,6 +1,6 @@ grammar ; @parser::header { - + } @parser::members { diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st b/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st index 144a93763..fd653073a 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st +++ b/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st @@ -1,6 +1,6 @@ grammar ; @parser::header { - + } @parser::members { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st index 0b5bbd977..332b4b8a0 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git 
a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st index c5520f0b4..a7620ea07 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st index 3461fe3a4..be903effe 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st index 2a048fd34..e35e3a801 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st index 183d1629c..9584bedb0 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st index 8261e8503..979f30694 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st index b418522e3..020e0bb92 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st @@ -1,12 +1,12 @@ grammar ; s @init { - + } @after { } : r=a ; a : 'x' { - + } ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st index 67eb15bea..aa5e44672 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st @@ -1,7 +1,7 @@ grammar ; s @init { - + } @after { diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st index 9d3d816aa..6114db743 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st @@ -1,6 +1,6 @@ grammar ; @parser::members{ - + } s : (a | b)+; a : 'a' {}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st index 646d8162c..75fd25550 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st @@ -1,6 +1,6 @@ grammar ; start : ID ':' expr; -expr : primary expr? {} | expr '->' ID; +expr : primary expr? 
{} | expr '->' ID; primary : ID; ID : [a-z]+; ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st index aec08f27d..ebd21c8b3 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st @@ -10,5 +10,5 @@ SOFTWARE: 'software'; WS : ' ' -> skip ; acClass @init -{} +{} : ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st index 7f322fede..1c4c62b21 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st @@ -1,2 +1,2 @@ grammar ; -a : 'a' ('b'|'z'{})* 'c'; \ No newline at end of file +a : 'a' ('b'|'z'{})* 'c'; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st index 133e24091..d0404f1c6 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st @@ -1,2 +1,2 @@ grammar ; -a : 'a' ('b'|'z'{})* 'c' ; \ No newline at end of file +a : 'a' ('b'|'z'{})* 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st index ef412fd55..9169f8f00 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st @@ -1,2 +1,2 @@ grammar ; -a : 'a' ('b'|'z'{})*; \ No newline at end of file +a : 'a' ('b'|'z'{})*; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st index 133e24091..d0404f1c6 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st @@ -1,2 +1,2 @@ grammar ; -a : 'a' ('b'|'z'{})* 'c' ; \ No newline at end of file +a : 'a' ('b'|'z'{})* 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st index ea11ca391..8ea3bfdb6 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st @@ -1,7 +1,7 @@ grammar ; ifStatement @after { -items = $ctx.elseIfStatement() +items = $ctx.elseIfStatement() } : 'if' expression ( ( 'then' diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st index 9f6910f9d..5c12afa69 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st @@ -1,7 +1,7 @@ grammar ; s : stmt 
EOF ; stmt : ifStmt | ID; -ifStmt : 'if' ID stmt ('else' stmt | { }?); +ifStmt : 'if' ID stmt ('else' stmt | { }?); ELSE : 'else'; ID : [a-zA-Z]+; WS : [ \\n\\t]+ -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st index 23b2ae4ea..0d142c518 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st +++ b/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st @@ -1,5 +1,5 @@ grammar ; -s @after { } +s @after { } : ID | ID INT ID ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st index ec1263a71..90248db25 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st @@ -1,5 +1,5 @@ lexer grammar ; -E1 : 'enum' { }? ; -E2 : 'enum' { }? ; // winner not E1 or ID +E1 : 'enum' { }? ; +E2 : 'enum' { }? ; // winner not E1 or ID ID : 'a'..'z'+ ; WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st index c055aa9db..cac3df8d1 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st @@ -1,4 +1,4 @@ lexer grammar ; -ENUM : [a-z]+ { }? ; +ENUM : [a-z]+ { }? ; ID : [a-z]+ ; WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st index c5180a1ed..fc0b0696e 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st @@ -1,4 +1,4 @@ lexer grammar ; -ENUM : 'enum' { }? ; +ENUM : 'enum' { }? ; ID : 'a'..'z'+ ; WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAlts.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAlts.st new file mode 100644 index 000000000..3219067ba --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAlts.st @@ -0,0 +1,9 @@ +grammar ; +s : {} a ';' a; // do 2x: once in ATN, next in DFA +a : ID {} + | ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st new file mode 100644 index 000000000..2eb5d7a60 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st @@ -0,0 +1,10 @@ +grammar ; +s : {} a ';' a ';' a; +a : INT {} + | ID {} // must pick this one for ID since pred is false + | ID {} + | {}? 
ID {console.log(\"alt 4\");} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionHidesPreds.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionHidesPreds.st new file mode 100644 index 000000000..6a8bc7495 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionHidesPreds.st @@ -0,0 +1,9 @@ +grammar ; +@members {} +s : a+ ; +a : {} ID {}? {} + | {} ID {}? {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st new file mode 100644 index 000000000..503397837 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st @@ -0,0 +1,11 @@ +grammar ; +@members { +this.p = function(v) { + +} +s : e {} {}? {} '!' ; +t : e {} {}? ID ; +e : ID | ; // non-LL(1) so we use ATN +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st new file mode 100644 index 000000000..a782d480b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st @@ -0,0 +1,6 @@ +grammar ; +start : e[0] EOF; +e[int _p] + : ( 'a' | 'b'+ ) ( {3 >= $_p}? '+' e[4] )* + ; + diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st new file mode 100644 index 000000000..06b5a3735 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st @@ -0,0 +1,11 @@ +grammar ; +@members { + +} +s : a[99] ; +a[int i] : e {}? {} '!' ; +b[int i] : e {}? ID ; +e : ID | ; // non-LL(1) so we use ATN +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st new file mode 100644 index 000000000..e17248d61 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st @@ -0,0 +1,11 @@ +grammar ; +s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a; +b[int i] : a[i] ; +a[int i]" + + : {}? ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; + diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DisabledAlternative.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DisabledAlternative.st new file mode 100644 index 000000000..869d94b27 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DisabledAlternative.st @@ -0,0 +1,5 @@ +grammar ; +cppCompilationUnit : content+ EOF; +content: anything | {}? 
.; +anything: ANY_CHAR; +ANY_CHAR: [_a-zA-Z0-9]; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st new file mode 100644 index 000000000..1b5baf097 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st @@ -0,0 +1,10 @@ +grammar ; +s : b ';' | b '.' ; +b : a ; +a + : {}? ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st new file mode 100644 index 000000000..ae5714552 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st @@ -0,0 +1,8 @@ +grammar ; +s : a a; +a : {}? ID INT {} + | {}? ID INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Order.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Order.st new file mode 100644 index 000000000..2b6d59d4f --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Order.st @@ -0,0 +1,10 @@ +grammar ; +s : a {} a; // do 2x: once in ATN, next in DFA; +// action blocks lookahead from falling off of 'a' +// and looking into 2nd 'a' ref. !ctx dependent pred +a : ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st new file mode 100644 index 000000000..697cd9a66 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st @@ -0,0 +1,9 @@ +grammar ; +file_ +@after {} + : para para EOF ; +para: paraContent NL NL ; +paraContent : ('s'|'x'|{}? NL)+ ; +NL : '\n' ; +s : 's' ; +X : 'x' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st new file mode 100644 index 000000000..dea5c35b0 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st @@ -0,0 +1,8 @@ +grammar ; +@members {} +primary + : ID {} + | {!}? 'enum' {} + ; +ID : [a-z]+ ; +WS : [ \t\n\r]+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg.st new file mode 100644 index 000000000..3a05644d7 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg.st @@ -0,0 +1,10 @@ +grammar ; +@members {i=0} +s : a[2] a[1]; +"a[int i]" + +" : {}? ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st new file mode 100644 index 000000000..355d0f458 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st @@ -0,0 +1,10 @@ +grammar ; +@members {i=0} +s : a[2] a[1]; +a[int i]" + + : {}? ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st new file mode 100644 index 000000000..3a63138a3 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st @@ -0,0 +1,10 @@ +grammar ; +@members { + +} +s : e {}? {} '!' ; +t : e {}? ID ; +e : ID | ; // non-LL(1) so we use ATN +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/RewindBeforePredEval.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/RewindBeforePredEval.st new file mode 100644 index 000000000..a13cd7220 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/RewindBeforePredEval.st @@ -0,0 +1,8 @@ +grammar ; +s : a a; +a : {}? ID INT {} + | {}? ID INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Simple.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Simple.st new file mode 100644 index 000000000..8570ed205 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Simple.st @@ -0,0 +1,9 @@ +grammar ; +s : a a a; // do 3x: once in ATN, next in DFA then INT in ATN +a : {}? ID {} + | {}? ID {} + | INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate.st new file mode 100644 index 000000000..3b0577eeb --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate.st @@ -0,0 +1,8 @@ +grammar ; +s : a ; +a : {}? ID {} + | {}? INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate2.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate2.st new file mode 100644 index 000000000..6a84abcca --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate2.st @@ -0,0 +1,8 @@ +grammar ; +s : a a a; +a : {}? ID {} + | {}? INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeft.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeft.st new file mode 100644 index 000000000..bf6e1f899 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeft.st @@ -0,0 +1,8 @@ +grammar ; + s : a+ ; +a : {}? ID {} + | {}? 
ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st new file mode 100644 index 000000000..2e9e6d10d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st @@ -0,0 +1,9 @@ +grammar ; +@members {this.i=0} +s : ({\n} a)+ ; +a : {}? ID {} + | {}? ID {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st new file mode 100644 index 000000000..a53c09554 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st @@ -0,0 +1,12 @@ +grammar ; +s : a {} + | b {} + ; +a : {}? ID INT + | ID INT + ; +b : ID ID + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ValidateInDFA.st b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ValidateInDFA.st new file mode 100644 index 000000000..7b153b7c4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ValidateInDFA.st @@ -0,0 +1,11 @@ +grammar ; +s : a ';' a; +// ';' helps us to resynchronize without consuming +// 2nd 'a' reference. We our testing that the DFA also +// throws an exception if the validating predicate fails +a : {}? ID {} + | {}? INT {} + ; +ID : 'a'..'z'+ ; +INT : '0'..'9'+; +WS : (' '|'\n') -> skip ; From 85388803c6e18163a98a13a20143f06e41d01967 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sun, 19 Oct 2014 11:53:00 +0800 Subject: [PATCH 08/26] test templates complete --- tool/test/org/antlr/v4/testgen/Generator.java | 91 +++++++++++++++++++ .../testgen/grammars/Sets/CharSetLiteral.st | 4 + .../testgen/grammars/Sets/LexerOptionalSet.st | 3 + .../v4/testgen/grammars/Sets/LexerPlusSet.st | 3 + .../v4/testgen/grammars/Sets/LexerStarSet.st | 3 + .../antlr/v4/testgen/grammars/Sets/NotChar.st | 3 + .../v4/testgen/grammars/Sets/NotCharSet.st | 3 + .../grammars/Sets/NotCharSetWithLabel.st | 3 + .../grammars/Sets/NotCharSetWithRuleRef3.st | 5 + .../Sets/OptionalLexerSingleElement.st | 3 + .../v4/testgen/grammars/Sets/OptionalSet.st | 2 + .../grammars/Sets/OptionalSingleElement.st | 3 + .../v4/testgen/grammars/Sets/ParserNotSet.st | 2 + .../testgen/grammars/Sets/ParserNotToken.st | 2 + .../grammars/Sets/ParserNotTokenWithLabel.st | 2 + .../v4/testgen/grammars/Sets/ParserSet.st | 2 + .../grammars/Sets/PlusLexerSingleElement.st | 3 + .../antlr/v4/testgen/grammars/Sets/PlusSet.st | 2 + .../v4/testgen/grammars/Sets/RuleAsSet.st | 2 + .../grammars/Sets/SeqDoesNotBecomeSet.st | 5 + .../grammars/Sets/StarLexerSingleElement.st | 3 + .../antlr/v4/testgen/grammars/Sets/StarSet.st | 2 + 22 files changed, 151 insertions(+) create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/CharSetLiteral.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/LexerOptionalSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/LexerPlusSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/LexerStarSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/NotChar.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSet.st create mode 100644 
tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithLabel.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithRuleRef3.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalLexerSingleElement.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSingleElement.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotToken.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotTokenWithLabel.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/ParserSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/PlusLexerSingleElement.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/PlusSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/RuleAsSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/SeqDoesNotBecomeSet.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/StarLexerSingleElement.st create mode 100644 tool/test/org/antlr/v4/testgen/grammars/Sets/StarSet.st diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/testgen/Generator.java index 01fed8dec..cf90e0ae7 100644 --- a/tool/test/org/antlr/v4/testgen/Generator.java +++ b/tool/test/org/antlr/v4/testgen/Generator.java @@ -117,9 +117,100 @@ public class Generator { list.add(buildParseTrees()); list.add(buildSemPredEvalLexer()); list.add(buildSemPredEvalParser()); + list.add(buildSets()); return list; } + private TestFile buildSets() throws Exception { + TestFile file = new TestFile("Sets"); + // this must return A not I to the parser; calling a nonfragment rule + // from a nonfragment rule does not set the overall token. 
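+		// In the SeqDoesNotBecomeSet template, the input "34" is matched through
+		// fragment B inside the nonfragment rule C, so the token handed to the
+		// parser has type C and text "34"; the action hole in rule a presumably
+		// prints that text, giving the expected "34\n".
+		// Each of these specs is expected to expand into a JUnit method roughly of
+		// the following shape (modelled on the existing hand-written runtime tests;
+		// treat the helper name and signature as an assumption):
+		//   @Test public void testSeqDoesNotBecomeSet() throws Exception {
+		//       String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "34", false);
+		//       assertEquals("34\n", found);
+		//       assertNull(this.stderrDuringParse);
+		//   }
+		//   (where 'grammar' holds the grammar text rendered from the template)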
+ file.addParserTest(input, "SeqDoesNotBecomeSet", "T", "a", + "34", + "34\n", + null); + file.addParserTest(input, "ParserSet", "T", "a", + "x", + "x\n", + null); + file.addParserTest(input, "ParserNotSet", "T", "a", + "zz", + "z\n", + null); + file.addParserTest(input, "ParserNotToken", "T", "a", + "zz", + "zz\n", + null); + file.addParserTest(input, "ParserNotTokenWithLabel", "T", "a", + "zz", + "z\n", + null); + file.addParserTest(input, "RuleAsSet", "T", "a", + "b", + "b\n", + null); + file.addParserTest(input, "NotChar", "T", "a", + "x", + "x\n", + null); + file.addParserTest(input, "OptionalSingleElement", "T", "a", + "bc", + "bc\n", + null); + file.addParserTest(input, "OptionalLexerSingleElement", "T", "a", + "bc", + "bc\n", + null); + file.addParserTests(input, "StarLexerSingleElement", "T", "a", + "bbbbc", "bbbbc\n", + "c", "c\n"); + file.addParserTest(input, "PlusLexerSingleElement", "T", "a", + "bbbbc", + "bbbbc\n", + null); + file.addParserTest(input, "OptionalSet", "T", "a", + "ac", + "ac\n", + null); + file.addParserTest(input, "StarSet", "T", "a", + "abaac", + "abaac\n", + null); + file.addParserTest(input, "PlusSet", "T", "a", + "abaac", + "abaac\n", + null); + file.addParserTest(input, "LexerOptionalSet", "T", "a", + "ac", + "ac\n", + null); + file.addParserTest(input, "LexerStarSet", "T", "a", + "abaac", + "abaac\n", + null); + file.addParserTest(input, "LexerPlusSet", "T", "a", + "abaac", + "abaac\n", + null); + file.addParserTest(input, "NotCharSet", "T", "a", + "x", + "x\n", + null); + file.addParserTest(input, "NotCharSetWithLabel", "T", "a", + "x", + "x\n", + null); + file.addParserTest(input, "NotCharSetWithRuleRef3", "T", "a", + "x", + "x\n", + null); + file.addParserTest(input, "CharSetLiteral", "T", "a", + "A a B b", + "A\n" + "a\n" + "B\n" + "b\n", + null); + return file; + } + private TestFile buildSemPredEvalParser() throws Exception { TestFile file = new TestFile("SemPredEvalParser"); file.addParserTest(input, "SimpleValidate", "T", "s", diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/CharSetLiteral.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/CharSetLiteral.st new file mode 100644 index 000000000..b52c6dfc7 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/CharSetLiteral.st @@ -0,0 +1,4 @@ +grammar ; +a : (A {})+ ; +a : [AaBb] ; +WS : (' '|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerOptionalSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerOptionalSet.st new file mode 100644 index 000000000..f8f0db822 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerOptionalSet.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : ('a'|'b')? 
'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerPlusSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerPlusSet.st new file mode 100644 index 000000000..48357285f --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerPlusSet.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : ('a'|'b')+ 'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerStarSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerStarSet.st new file mode 100644 index 000000000..17d4d2cd1 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerStarSet.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : ('a'|'b')* 'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotChar.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotChar.st new file mode 100644 index 000000000..09644818b --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotChar.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : ~'b' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSet.st new file mode 100644 index 000000000..a1e835e8c --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSet.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : ~('b'|'c') ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithLabel.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithLabel.st new file mode 100644 index 000000000..a7b229bbe --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithLabel.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : h=~('b'|'c') ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithRuleRef3.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithRuleRef3.st new file mode 100644 index 000000000..83aae7d73 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithRuleRef3.st @@ -0,0 +1,5 @@ +grammar ; +a : A {} ; +a : ('a'|B) ; // this doesn't collapse to set but works +fragment +B : ~('a'|'c') ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalLexerSingleElement.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalLexerSingleElement.st new file mode 100644 index 000000000..059b86b92 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalLexerSingleElement.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : 'b'? 'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSet.st new file mode 100644 index 000000000..128433b14 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSet.st @@ -0,0 +1,2 @@ +grammar ; +a : ('a'|'b')? 'c' {} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSingleElement.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSingleElement.st new file mode 100644 index 000000000..4a7ee5072 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSingleElement.st @@ -0,0 +1,3 @@ +grammar ; +a : A? 
'c' {} ; +a : 'b' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotSet.st new file mode 100644 index 000000000..eca2f1b3d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotSet.st @@ -0,0 +1,2 @@ +grammar ; +a : t=~('x'|'y') 'z' {} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotToken.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotToken.st new file mode 100644 index 000000000..f5b1a914d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotToken.st @@ -0,0 +1,2 @@ +grammar ; +a : ~'x' 'z' {} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotTokenWithLabel.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotTokenWithLabel.st new file mode 100644 index 000000000..6dc068aab --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotTokenWithLabel.st @@ -0,0 +1,2 @@ +grammar ; +a : t=~'x' 'z' {} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserSet.st new file mode 100644 index 000000000..528eca4b4 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserSet.st @@ -0,0 +1,2 @@ +grammar ; +a : t=('x'|'y') {} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusLexerSingleElement.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusLexerSingleElement.st new file mode 100644 index 000000000..51a16fa1d --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusLexerSingleElement.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : 'b'+ 'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusSet.st new file mode 100644 index 000000000..5027cd639 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusSet.st @@ -0,0 +1,2 @@ +grammar ; +a : ('a'|'b')+ 'c' {} ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/RuleAsSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/RuleAsSet.st new file mode 100644 index 000000000..feced0155 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/RuleAsSet.st @@ -0,0 +1,2 @@ +grammar ; +a @after {} : 'a' | 'b' |'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/SeqDoesNotBecomeSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/SeqDoesNotBecomeSet.st new file mode 100644 index 000000000..1bab7fe17 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/SeqDoesNotBecomeSet.st @@ -0,0 +1,5 @@ +grammar ; +a : C {} ; +fragment A : '1' | '2'; +fragment B : '3' '4'; +C : A | B; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/StarLexerSingleElement.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/StarLexerSingleElement.st new file mode 100644 index 000000000..69d12c81e --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/StarLexerSingleElement.st @@ -0,0 +1,3 @@ +grammar ; +a : A {} ; +a : 'b'* 'c' ; diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/StarSet.st b/tool/test/org/antlr/v4/testgen/grammars/Sets/StarSet.st new file mode 100644 index 000000000..0f0b2a0f9 --- /dev/null +++ b/tool/test/org/antlr/v4/testgen/grammars/Sets/StarSet.st @@ -0,0 +1,2 @@ +grammar ; +a : ('a'|'b')* 'c' {} ; From 809bac259100b7020e0bff23b0ee8b3372624d42 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Fri, 24 Oct 2014 01:12:36 +0800 Subject: [PATCH 09/26] generation of TestLexerExec for Java --- .../rt/gen}/AbstractParserTestMethod.java 
| 2 +- .../rt/gen}/CompositeLexerTestMethod.java | 2 +- .../rt/gen}/CompositeParserTestMethod.java | 2 +- .../rt/gen}/ConcreteParserTestMethod.java | 2 +- .../{testgen => test/rt/gen}/Generator.java | 37 +- .../v4/{testgen => test/rt/gen}/Grammar.java | 2 +- .../rt/gen}/LexerTestMethod.java | 2 +- .../rt/gen}/ParserTestMethod.java | 2 +- .../v4/{testgen => test/rt/gen}/TestFile.java | 2 +- .../{testgen => test/rt/gen}/TestMethod.java | 2 +- .../LexerDelegatorInvokesDelegateRule.st | 0 .../LexerDelegatorInvokesDelegateRule_S.st | 0 .../LexerDelegatorRuleOverridesDelegate.st | 0 .../LexerDelegatorRuleOverridesDelegate_S.st | 0 .../BringInLiteralsFromDelegate.st | 0 .../BringInLiteralsFromDelegate_S.st | 0 .../CombinedImportsCombined.st | 0 .../CombinedImportsCombined_S.st | 0 .../DelegatesSeeSameTokenType.st | 0 .../DelegatesSeeSameTokenType_S.st | 0 .../DelegatesSeeSameTokenType_T.st | 0 .../DelegatorAccessesDelegateMembers.st | 0 .../DelegatorAccessesDelegateMembers_S.st | 0 .../DelegatorInvokesDelegateRule.st | 0 .../DelegatorInvokesDelegateRuleWithArgs.st | 0 .../DelegatorInvokesDelegateRuleWithArgs_S.st | 0 ...atorInvokesDelegateRuleWithReturnStruct.st | 0 ...orInvokesDelegateRuleWithReturnStruct_S.st | 0 .../DelegatorInvokesDelegateRule_S.st | 0 ...egatorInvokesFirstVersionOfDelegateRule.st | 0 ...atorInvokesFirstVersionOfDelegateRule_S.st | 0 ...atorInvokesFirstVersionOfDelegateRule_T.st | 0 .../DelegatorRuleOverridesDelegate.st | 0 .../DelegatorRuleOverridesDelegate_S.st | 0 .../DelegatorRuleOverridesDelegates.st | 0 .../DelegatorRuleOverridesDelegates_S.st | 0 .../DelegatorRuleOverridesDelegates_T.st | 0 ...legatorRuleOverridesLookaheadInDelegate.st | 0 ...gatorRuleOverridesLookaheadInDelegate_S.st | 0 .../ImportLexerWithOnlyFragmentRules.st | 0 .../ImportLexerWithOnlyFragmentRules_S.st | 0 .../ImportedGrammarWithEmptyOptions.st | 0 .../ImportedGrammarWithEmptyOptions_S.st | 0 .../ImportedRuleWithAction.st | 0 .../ImportedRuleWithAction_S.st | 0 .../CompositeParsers/KeywordVSIDOrder.st | 0 .../CompositeParsers/KeywordVSIDOrder_S.st | 0 .../AmbigYieldsCtxSensitiveDFA.st | 0 .../FullContextParsing/AmbiguityNoLoop.st | 0 .../FullContextParsing/CtxSensitiveDFA.st | 0 .../CtxSensitiveDFATwoDiffInput.st | 0 .../FullContextParsing/ExprAmbiguity.st | 0 .../FullContextIF_THEN_ELSEParse.st | 0 .../LoopsSimulateTailRecursion.st | 0 .../SLLSeesEOFInLLGrammar.st | 0 .../rt/gen}/grammars/LeftRecursion/AmbigLR.st | 0 .../grammars/LeftRecursion/Declarations.st | 0 .../DirectCallToLeftRecursiveRule.st | 0 .../grammars/LeftRecursion/Expressions.st | 0 .../grammars/LeftRecursion/JavaExpressions.st | 0 .../LeftRecursion/LabelsOnOpSubrule.st | 0 .../MultipleAlternativesWithCommonLabel.st | 0 .../PrecedenceFilterConsidersContext.st | 0 .../PrefixOpWithActionAndLabel.st | 0 .../LeftRecursion/ReturnValueAndActions.st | 0 .../ReturnValueAndActionsAndLabels.st | 0 .../rt/gen}/grammars/LeftRecursion/SemPred.st | 0 .../rt/gen}/grammars/LeftRecursion/Simple.st | 0 .../grammars/LeftRecursion/TernaryExpr.st | 0 .../LeftRecursion/WhitespaceInfluence.st | 0 .../LexerErrors/DFAToATNThatFailsBackToDFA.st | 0 .../DFAToATNThatMatchesThenFailsInATN.st | 0 .../EnforcedGreedyNestedBrances.st | 0 .../grammars/LexerErrors/ErrorInMiddle.st | 0 .../LexerErrors/InvalidCharAtStart.st | 0 .../InvalidCharAtStartAfterDFACache.st | 0 .../LexerErrors/InvalidCharInToken.st | 0 .../InvalidCharInTokenAfterDFACache.st | 0 .../gen}/grammars/LexerErrors/LexerExecDFA.st | 0 .../LexerErrors/StringsEmbeddedInActions.st | 0 
.../gen/grammars/LexerExec/ActionPlacement.st | 8 + .../rt/gen}/grammars/LexerExec/CharSet.st | 0 .../gen}/grammars/LexerExec/CharSetInSet.st | 0 .../rt/gen}/grammars/LexerExec/CharSetNot.st | 0 .../rt/gen}/grammars/LexerExec/CharSetPlus.st | 0 .../gen}/grammars/LexerExec/CharSetRange.st | 0 .../LexerExec/CharSetWithEscapedChar.st | 0 .../LexerExec/CharSetWithMissingEndRange.st | 0 .../LexerExec/CharSetWithMissingEscapeChar.st | 0 .../grammars/LexerExec/CharSetWithQuote1.st} | 0 .../grammars/LexerExec/CharSetWithQuote2.st | 3 + .../LexerExec/CharSetWithReversedRange.st | 0 .../rt/gen}/grammars/LexerExec/EOFByItself.st | 0 .../LexerExec/EOFSuffixInFirstRule.st | 0 .../gen}/grammars/LexerExec/GreedyClosure.st | 0 .../gen}/grammars/LexerExec/GreedyConfigs.st | 2 +- .../gen}/grammars/LexerExec/GreedyOptional.st | 0 .../LexerExec/GreedyPositiveClosure.st | 0 .../rt/gen}/grammars/LexerExec/HexVsID.st | 2 +- .../rt/gen}/grammars/LexerExec/KeywordID.st | 2 +- .../rt/gen/grammars/LexerExec/LargeLexer.st | 4002 ++++++++++++++ .../grammars/LexerExec/NonGreedyClosure.st | 0 .../grammars/LexerExec/NonGreedyConfigs.st | 4 + .../grammars/LexerExec/NonGreedyOptional.st | 0 .../LexerExec/NonGreedyPositiveClosure.st | 0 .../LexerExec/NonGreedyTermination1.st | 2 + .../LexerExec/NonGreedyTermination2.st | 2 + .../rt/gen}/grammars/LexerExec/Parentheses.st | 0 .../LexerExec/PositionAdjustingLexer.st | 0 .../grammars/LexerExec/QuoteTranslation.st | 0 .../RecursiveLexerRuleRefWithWildcardPlus.st} | 2 +- .../RecursiveLexerRuleRefWithWildcardStar.st | 2 +- .../RefToRuleDoesNotSetTokenNorEmitAnother.st | 0 .../rt/gen}/grammars/LexerExec/Slashes.st | 0 .../rt/gen}/grammars/Listeners/Basic.st | 0 .../rt/gen}/grammars/Listeners/LR.st | 0 .../gen}/grammars/Listeners/LRWithLabels.st | 0 .../rt/gen}/grammars/Listeners/RuleGetters.st | 0 .../gen}/grammars/Listeners/TokenGetters.st | 0 .../rt/gen}/grammars/ParseTrees/2AltLoop.st | 0 .../rt/gen}/grammars/ParseTrees/ExtraToken.st | 0 .../gen}/grammars/ParseTrees/NoViableAlt.st | 0 .../rt/gen}/grammars/ParseTrees/RuleRef.st | 0 .../rt/gen}/grammars/ParseTrees/Sync.st | 0 .../rt/gen}/grammars/ParseTrees/Token2.st | 0 .../ParseTrees/TokenAndRuleContextString.st | 0 .../rt/gen}/grammars/ParseTrees/test2Alts.st | 0 .../grammars/ParserErrors/ConjuringUpToken.st | 0 .../ParserErrors/ConjuringUpTokenFromSet.st | 0 .../ParserErrors/ContextListGetters.st | 0 .../DuplicatedLeftRecursiveCall.st | 0 .../ParserErrors/InvalidATNStateRemoval.st | 0 .../ParserErrors/InvalidEmptyInput.st | 0 .../grammars/ParserErrors/LL1ErrorInfo.st | 0 .../rt/gen}/grammars/ParserErrors/LL2.st | 0 .../rt/gen}/grammars/ParserErrors/LL3.st | 0 .../rt/gen}/grammars/ParserErrors/LLStar.st | 0 .../MultiTokenDeletionBeforeLoop.st | 0 .../MultiTokenDeletionBeforeLoop2.st | 0 .../MultiTokenDeletionDuringLoop.st | 0 .../MultiTokenDeletionDuringLoop2.st | 0 .../ParserErrors/NoViableAltAvoidance.st | 0 .../ParserErrors/SingleSetInsertion.st | 0 .../ParserErrors/SingleTokenDeletion.st | 0 .../SingleTokenDeletionBeforeLoop.st | 0 .../SingleTokenDeletionBeforeLoop2.st | 0 .../SingleTokenDeletionDuringLoop.st | 0 .../SingleTokenDeletionDuringLoop2.st | 0 .../SingleTokenDeletionExpectingSet.st | 0 .../ParserErrors/SingleTokenInsertion.st | 0 .../grammars/ParserErrors/TokenMismatch.st | 0 .../rt/gen}/grammars/ParserExec/APlus.st | 0 .../rt/gen}/grammars/ParserExec/AStar.st | 0 .../rt/gen}/grammars/ParserExec/AorAPlus.st | 0 .../rt/gen}/grammars/ParserExec/AorAStar.st | 0 .../rt/gen}/grammars/ParserExec/AorB.st | 0 
.../rt/gen}/grammars/ParserExec/AorBPlus.st | 0 .../rt/gen}/grammars/ParserExec/AorBStar.st | 0 .../rt/gen}/grammars/ParserExec/Basic.st | 0 .../ParserExec/IfIfElseGreedyBinding1.st | 0 .../ParserExec/IfIfElseGreedyBinding2.st | 0 .../ParserExec/IfIfElseNonGreedyBinding1.st | 0 .../ParserExec/IfIfElseNonGreedyBinding2.st | 0 .../grammars/ParserExec/LL1OptionalBlock.st | 0 .../LabelAliasingAcrossLabeledAlternatives.st | 0 .../rt/gen}/grammars/ParserExec/Labels.st | 0 .../ParserExec/ListLabelForClosureContext.st | 0 .../grammars/ParserExec/ListLabelsOnSet.st | 0 .../rt/gen}/grammars/ParserExec/Optional.st | 0 .../grammars/ParserExec/PredicatedIfIfElse.st | 0 .../grammars/ParserExec/PredictionIssue334.st | 0 .../ParserExec/StartRuleWithoutEOF.st | 0 .../grammars/SemPredEvalLexer/DisableRule.st | 0 .../grammars/SemPredEvalLexer/EnumNotID.st | 0 .../grammars/SemPredEvalLexer/IDnotEnum.st | 0 .../grammars/SemPredEvalLexer/IDvsEnum.st | 0 .../gen}/grammars/SemPredEvalLexer/Indent.st | 0 .../LexerInputPositionSensitivePredicates.st | 0 .../SemPredEvalLexer/PredicatedKeywords.st | 0 .../SemPredEvalParser/2UnpredicatedAlts.st | 0 .../2UnpredicatedAltsAndOneOrthogonalAlt.st | 0 .../SemPredEvalParser/ActionHidesPreds.st | 0 .../ActionsHidePredsInGlobalFOLLOW.st | 0 .../AtomWithClosureInTranslatedLRRule.st | 0 .../DepedentPredsInGlobalFOLLOW.st | 0 ...pendentPredNotInOuterCtxShouldBeIgnored.st | 0 .../SemPredEvalParser/DisabledAlternative.st | 0 ...edNotPassedOuterCtxToAvoidCastException.st | 0 .../NoTruePredsThrowsNoViableAlt.st | 0 .../gen}/grammars/SemPredEvalParser/Order.st | 0 .../PredFromAltTestedInLoopBack.st | 0 .../PredTestedEvenWhenUnAmbig.st | 0 .../PredicateDependentOnArg.st | 0 .../PredicateDependentOnArg2.st | 0 .../SemPredEvalParser/PredsInGlobalFOLLOW.st | 0 .../SemPredEvalParser/RewindBeforePredEval.st | 0 .../gen}/grammars/SemPredEvalParser/Simple.st | 0 .../SemPredEvalParser/SimpleValidate.st | 0 .../SemPredEvalParser/SimpleValidate2.st | 0 .../gen}/grammars/SemPredEvalParser/ToLeft.st | 0 .../ToLeftWithVaryingPredicate.st | 0 .../UnpredicatedPathsInAlt.st | 0 .../SemPredEvalParser/ValidateInDFA.st | 0 .../rt/gen}/grammars/Sets/CharSetLiteral.st | 0 .../rt/gen}/grammars/Sets/LexerOptionalSet.st | 0 .../rt/gen}/grammars/Sets/LexerPlusSet.st | 0 .../rt/gen}/grammars/Sets/LexerStarSet.st | 0 .../rt/gen}/grammars/Sets/NotChar.st | 0 .../rt/gen}/grammars/Sets/NotCharSet.st | 0 .../gen}/grammars/Sets/NotCharSetWithLabel.st | 0 .../grammars/Sets/NotCharSetWithRuleRef3.st | 0 .../Sets/OptionalLexerSingleElement.st | 0 .../rt/gen}/grammars/Sets/OptionalSet.st | 0 .../grammars/Sets/OptionalSingleElement.st | 0 .../rt/gen}/grammars/Sets/ParserNotSet.st | 0 .../rt/gen}/grammars/Sets/ParserNotToken.st | 0 .../grammars/Sets/ParserNotTokenWithLabel.st | 0 .../rt/gen}/grammars/Sets/ParserSet.st | 0 .../grammars/Sets/PlusLexerSingleElement.st | 0 .../rt/gen}/grammars/Sets/PlusSet.st | 0 .../rt/gen}/grammars/Sets/RuleAsSet.st | 0 .../gen}/grammars/Sets/SeqDoesNotBecomeSet.st | 0 .../grammars/Sets/StarLexerSingleElement.st | 0 .../rt/gen}/grammars/Sets/StarSet.st | 0 .../org/antlr/v4/test/rt/java/BaseTest.java | 1416 +++++ .../org/antlr/v4/test/rt/java/Java.test.stg | 349 ++ .../v4/test/rt/java/TestCompositeLexers.java | 49 + .../v4/test/rt/java/TestCompositeParsers.java | 328 ++ .../test/rt/java/TestFullContextParsing.java | 208 + .../v4/test/rt/java/TestLeftRecursion.java | 805 +++ .../v4/test/rt/java/TestLexerErrors.java | 141 + .../antlr/v4/test/rt/java/TestLexerExec.java | 4627 +++++++++++++++++ 
.../antlr/v4/test/rt/java/TestListeners.java | 257 + .../antlr/v4/test/rt/java/TestParseTrees.java | 163 + .../v4/test/rt/java/TestParserErrors.java | 289 + .../antlr/v4/test/rt/java/TestParserExec.java | 378 ++ .../v4/test/rt/java/TestSemPredEvalLexer.java | 147 + .../test/rt/java/TestSemPredEvalParser.java | 426 ++ .../org/antlr/v4/test/rt/java/TestSets.java | 227 + .../test/org/antlr/v4/test/tool/BaseTest.java | 1414 +++++ .../org/antlr/v4/test/tool/ErrorQueue.java | 108 + .../org/antlr/v4/test/{ => tool}/Java-LR.g4 | 0 .../test/org/antlr/v4/test/{ => tool}/Java.g4 | 0 .../v4/test/tool/JavaUnicodeInputStream.java | 267 + .../tool/ParserInterpreterForTesting.java | 132 + .../test/{ => tool}/PositionAdjustingLexer.g4 | 0 tool/test/org/antlr/v4/test/{ => tool}/Psl.g4 | 0 .../v4/test/{ => tool}/TestASTStructure.gunit | 0 .../antlr/v4/test/tool/TestASTStructure.java | 406 ++ .../v4/test/tool/TestATNConstruction.java | 981 ++++ .../v4/test/tool/TestATNDeserialization.java | 189 + .../v4/test/tool/TestATNInterpreter.java | 409 ++ .../v4/test/tool/TestATNLexerInterpreter.java | 325 ++ .../v4/test/tool/TestATNParserPrediction.java | 531 ++ .../v4/test/tool/TestATNSerialization.java | 737 +++ .../v4/test/tool/TestActionSplitter.java | 82 + .../v4/test/tool/TestActionTranslation.java | 424 ++ .../v4/test/tool/TestAttributeChecks.java | 273 + .../v4/test/tool/TestBasicSemanticErrors.java | 117 + .../v4/test/tool/TestBufferedTokenStream.java | 180 + .../v4/test/tool/TestCodeGeneration.java | 162 + .../v4/test/tool/TestCommonTokenStream.java | 309 ++ .../v4/test/tool/TestCompositeGrammars.java | 820 +++ .../org/antlr/v4/test/tool/TestFastQueue.java | 134 + .../v4/test/tool/TestFullContextParsing.java | 356 ++ .../antlr/v4/test/tool/TestGraphNodes.java | 906 ++++ .../antlr/v4/test/tool/TestIntervalSet.java | 453 ++ .../antlr/v4/test/tool/TestLeftRecursion.java | 732 +++ .../antlr/v4/test/tool/TestLexerActions.java | 283 + .../antlr/v4/test/tool/TestLexerErrors.java | 213 + .../org/antlr/v4/test/tool/TestLexerExec.java | 690 +++ .../org/antlr/v4/test/tool/TestListeners.java | 226 + .../antlr/v4/test/tool/TestParseErrors.java | 376 ++ .../v4/test/tool/TestParseTreeMatcher.java | 464 ++ .../antlr/v4/test/tool/TestParseTrees.java | 154 + .../antlr/v4/test/tool/TestParserExec.java | 597 +++ .../v4/test/tool/TestParserInterpreter.java | 235 + .../v4/test/tool/TestParserProfiler.java | 280 + .../antlr/v4/test/tool/TestPerformance.java | 2031 ++++++++ .../antlr/v4/test/tool/TestScopeParsing.java | 68 + .../v4/test/tool/TestSemPredEvalLexer.java | 183 + .../v4/test/tool/TestSemPredEvalParser.java | 626 +++ .../test/org/antlr/v4/test/tool/TestSets.java | 283 + .../antlr/v4/test/tool/TestSymbolIssues.java | 171 + .../test/tool/TestTokenPositionOptions.java | 179 + .../v4/test/tool/TestTokenStreamRewriter.java | 884 ++++ .../v4/test/tool/TestTokenTypeAssignment.java | 214 + .../v4/test/tool/TestToolSyntaxErrors.java | 656 +++ .../v4/test/tool/TestTopologicalSort.java | 117 + .../test/tool/TestUnbufferedCharStream.java | 367 ++ .../test/tool/TestUnbufferedTokenStream.java | 223 + .../antlr/v4/test/tool/TestVocabulary.java | 79 + .../org/antlr/v4/test/tool/TestXPath.java | 228 + .../grammars/LexerExec/ActionPlacement.st | 8 - .../testgen/grammars/LexerExec/LargeLexer.st | 4002 -------------- .../grammars/LexerExec/NonGreedyConfigs.st | 4 - .../LexerExec/NonGreedyTermination.st | 2 - .../antlr/v4/{test => xtest}/BaseTest.java | 2 +- .../antlr/v4/{test => xtest}/ErrorQueue.java | 2 +- 
tool/test/org/antlr/v4/xtest/Java-LR.g4 | 1248 +++++ tool/test/org/antlr/v4/xtest/Java.g4 | 1332 +++++ .../JavaUnicodeInputStream.java | 2 +- .../ParserInterpreterForTesting.java | 2 +- .../antlr/v4/xtest/PositionAdjustingLexer.g4 | 141 + tool/test/org/antlr/v4/xtest/Psl.g4 | 348 ++ .../org/antlr/v4/xtest/TestASTStructure.gunit | 155 + .../v4/{test => xtest}/TestASTStructure.java | 2 +- .../{test => xtest}/TestATNConstruction.java | 2 +- .../TestATNDeserialization.java | 2 +- .../{test => xtest}/TestATNInterpreter.java | 2 +- .../TestATNLexerInterpreter.java | 2 +- .../TestATNParserPrediction.java | 2 +- .../{test => xtest}/TestATNSerialization.java | 2 +- .../{test => xtest}/TestActionSplitter.java | 2 +- .../TestActionTranslation.java | 2 +- .../{test => xtest}/TestAttributeChecks.java | 2 +- .../TestBasicSemanticErrors.java | 2 +- .../TestBufferedTokenStream.java | 2 +- .../{test => xtest}/TestCodeGeneration.java | 2 +- .../TestCommonTokenStream.java | 2 +- .../TestCompositeGrammars.java | 2 +- .../v4/{test => xtest}/TestFastQueue.java | 2 +- .../TestFullContextParsing.java | 2 +- .../v4/{test => xtest}/TestGraphNodes.java | 2 +- .../v4/{test => xtest}/TestIntervalSet.java | 2 +- .../v4/{test => xtest}/TestLeftRecursion.java | 2 +- .../v4/{test => xtest}/TestLexerActions.java | 2 +- .../v4/{test => xtest}/TestLexerErrors.java | 2 +- .../v4/{test => xtest}/TestLexerExec.java | 2 +- .../v4/{test => xtest}/TestListeners.java | 2 +- .../v4/{test => xtest}/TestParseErrors.java | 2 +- .../{test => xtest}/TestParseTreeMatcher.java | 2 +- .../v4/{test => xtest}/TestParseTrees.java | 2 +- .../v4/{test => xtest}/TestParserExec.java | 2 +- .../TestParserInterpreter.java | 2 +- .../{test => xtest}/TestParserProfiler.java | 2 +- .../v4/{test => xtest}/TestPerformance.java | 2 +- .../v4/{test => xtest}/TestScopeParsing.java | 2 +- .../{test => xtest}/TestSemPredEvalLexer.java | 2 +- .../TestSemPredEvalParser.java | 2 +- .../antlr/v4/{test => xtest}/TestSets.java | 2 +- .../v4/{test => xtest}/TestSymbolIssues.java | 2 +- .../TestTokenPositionOptions.java | 2 +- .../TestTokenStreamRewriter.java | 2 +- .../TestTokenTypeAssignment.java | 2 +- .../{test => xtest}/TestToolSyntaxErrors.java | 2 +- .../{test => xtest}/TestTopologicalSort.java | 2 +- .../TestUnbufferedCharStream.java | 2 +- .../TestUnbufferedTokenStream.java | 2 +- .../v4/{test => xtest}/TestVocabulary.java | 2 +- .../antlr/v4/{test => xtest}/TestXPath.java | 2 +- 351 files changed, 37416 insertions(+), 4092 deletions(-) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/AbstractParserTestMethod.java (88%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/CompositeLexerTestMethod.java (96%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/CompositeParserTestMethod.java (96%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/ConcreteParserTestMethod.java (94%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/Generator.java (97%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/Grammar.java (97%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/LexerTestMethod.java (92%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/ParserTestMethod.java (90%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/TestFile.java (99%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/TestMethod.java (96%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st (100%) rename tool/test/org/antlr/v4/{testgen => 
test/rt/gen}/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/BringInLiteralsFromDelegate.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/CombinedImportsCombined.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/CombinedImportsCombined_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatesSeeSameTokenType.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st (100%) rename tool/test/org/antlr/v4/{testgen => 
test/rt/gen}/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/ImportedRuleWithAction.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/ImportedRuleWithAction_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/KeywordVSIDOrder.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/CompositeParsers/KeywordVSIDOrder_S.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/AmbiguityNoLoop.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/CtxSensitiveDFA.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/ExprAmbiguity.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/LoopsSimulateTailRecursion.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/AmbigLR.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/Declarations.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/Expressions.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/JavaExpressions.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/LabelsOnOpSubrule.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/ReturnValueAndActions.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/SemPred.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/Simple.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/TernaryExpr.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LeftRecursion/WhitespaceInfluence.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st (100%) 
rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/EnforcedGreedyNestedBrances.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/ErrorInMiddle.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/InvalidCharAtStart.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/InvalidCharInToken.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/LexerExecDFA.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerErrors/StringsEmbeddedInActions.st (100%) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ActionPlacement.st rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetInSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetNot.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetPlus.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetRange.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetWithEscapedChar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetWithMissingEndRange.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetWithMissingEscapeChar.st (100%) rename tool/test/org/antlr/v4/{testgen/grammars/LexerExec/CharSetWithQuote.st => test/rt/gen/grammars/LexerExec/CharSetWithQuote1.st} (100%) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithQuote2.st rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/CharSetWithReversedRange.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/EOFByItself.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/EOFSuffixInFirstRule.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/GreedyClosure.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/GreedyConfigs.st (58%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/GreedyOptional.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/GreedyPositiveClosure.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/HexVsID.st (90%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/KeywordID.st (74%) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/LargeLexer.st rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/NonGreedyClosure.st (100%) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyConfigs.st rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/NonGreedyOptional.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/NonGreedyPositiveClosure.st (100%) create mode 100644 
tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination1.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination2.st rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/Parentheses.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/PositionAdjustingLexer.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/QuoteTranslation.st (100%) rename tool/test/org/antlr/v4/{testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST => test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.st} (76%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st (76%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/LexerExec/Slashes.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Listeners/Basic.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Listeners/LR.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Listeners/LRWithLabels.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Listeners/RuleGetters.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Listeners/TokenGetters.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/2AltLoop.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/ExtraToken.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/NoViableAlt.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/RuleRef.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/Sync.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/Token2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/TokenAndRuleContextString.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParseTrees/test2Alts.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/ConjuringUpToken.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/ConjuringUpTokenFromSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/ContextListGetters.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/InvalidATNStateRemoval.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/InvalidEmptyInput.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/LL1ErrorInfo.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/LL2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/LL3.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/LLStar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st (100%) rename tool/test/org/antlr/v4/{testgen => 
test/rt/gen}/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/NoViableAltAvoidance.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleSetInsertion.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenDeletion.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/SingleTokenInsertion.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserErrors/TokenMismatch.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/APlus.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/AStar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/AorAPlus.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/AorAStar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/AorB.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/AorBPlus.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/AorBStar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/Basic.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/IfIfElseGreedyBinding1.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/IfIfElseGreedyBinding2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/IfIfElseNonGreedyBinding1.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/IfIfElseNonGreedyBinding2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/LL1OptionalBlock.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/Labels.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/ListLabelForClosureContext.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/ListLabelsOnSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/Optional.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/PredicatedIfIfElse.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/PredictionIssue334.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/ParserExec/StartRuleWithoutEOF.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/DisableRule.st (100%) rename 
tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/EnumNotID.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/IDnotEnum.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/IDvsEnum.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/Indent.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalLexer/PredicatedKeywords.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/2UnpredicatedAlts.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/ActionHidesPreds.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/DisabledAlternative.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/Order.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/PredicateDependentOnArg.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/PredicateDependentOnArg2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/RewindBeforePredEval.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/Simple.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/SimpleValidate.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/SimpleValidate2.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/ToLeft.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/SemPredEvalParser/ValidateInDFA.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/CharSetLiteral.st (100%) rename tool/test/org/antlr/v4/{testgen => 
test/rt/gen}/grammars/Sets/LexerOptionalSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/LexerPlusSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/LexerStarSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/NotChar.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/NotCharSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/NotCharSetWithLabel.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/NotCharSetWithRuleRef3.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/OptionalLexerSingleElement.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/OptionalSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/OptionalSingleElement.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/ParserNotSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/ParserNotToken.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/ParserNotTokenWithLabel.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/ParserSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/PlusLexerSingleElement.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/PlusSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/RuleAsSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/SeqDoesNotBecomeSet.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/StarLexerSingleElement.st (100%) rename tool/test/org/antlr/v4/{testgen => test/rt/gen}/grammars/Sets/StarSet.st (100%) create mode 100644 tool/test/org/antlr/v4/test/rt/java/BaseTest.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/Java.test.stg create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestListeners.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestParserExec.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java create mode 100644 tool/test/org/antlr/v4/test/rt/java/TestSets.java create mode 100644 tool/test/org/antlr/v4/test/tool/BaseTest.java create mode 100644 tool/test/org/antlr/v4/test/tool/ErrorQueue.java rename tool/test/org/antlr/v4/test/{ => tool}/Java-LR.g4 (100%) rename tool/test/org/antlr/v4/test/{ => tool}/Java.g4 (100%) create mode 100644 tool/test/org/antlr/v4/test/tool/JavaUnicodeInputStream.java create mode 100644 tool/test/org/antlr/v4/test/tool/ParserInterpreterForTesting.java rename tool/test/org/antlr/v4/test/{ => tool}/PositionAdjustingLexer.g4 (100%) rename tool/test/org/antlr/v4/test/{ => 
tool}/Psl.g4 (100%) rename tool/test/org/antlr/v4/test/{ => tool}/TestASTStructure.gunit (100%) create mode 100644 tool/test/org/antlr/v4/test/tool/TestASTStructure.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestATNConstruction.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestATNDeserialization.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestATNInterpreter.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestATNParserPrediction.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestATNSerialization.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestActionSplitter.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestActionTranslation.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestAttributeChecks.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestFastQueue.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestGraphNodes.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestIntervalSet.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestLexerActions.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestLexerErrors.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestLexerExec.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestListeners.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestParseErrors.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestParseTrees.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestParserExec.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestParserInterpreter.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestParserProfiler.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestPerformance.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestScopeParsing.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestSets.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestSymbolIssues.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestTokenStreamRewriter.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestTopologicalSort.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestVocabulary.java create mode 100644 tool/test/org/antlr/v4/test/tool/TestXPath.java delete mode 100644 
tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st delete mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st delete mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st delete mode 100644 tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st rename tool/test/org/antlr/v4/{test => xtest}/BaseTest.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/ErrorQueue.java (99%) create mode 100644 tool/test/org/antlr/v4/xtest/Java-LR.g4 create mode 100644 tool/test/org/antlr/v4/xtest/Java.g4 rename tool/test/org/antlr/v4/{test => xtest}/JavaUnicodeInputStream.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/ParserInterpreterForTesting.java (99%) create mode 100644 tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 create mode 100644 tool/test/org/antlr/v4/xtest/Psl.g4 create mode 100644 tool/test/org/antlr/v4/xtest/TestASTStructure.gunit rename tool/test/org/antlr/v4/{test => xtest}/TestASTStructure.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestATNConstruction.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestATNDeserialization.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestATNInterpreter.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestATNLexerInterpreter.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestATNParserPrediction.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestATNSerialization.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestActionSplitter.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestActionTranslation.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestAttributeChecks.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestBasicSemanticErrors.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestBufferedTokenStream.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestCodeGeneration.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestCommonTokenStream.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestCompositeGrammars.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestFastQueue.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestFullContextParsing.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestGraphNodes.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestIntervalSet.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestLeftRecursion.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestLexerActions.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestLexerErrors.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestLexerExec.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestListeners.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestParseErrors.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestParseTreeMatcher.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestParseTrees.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestParserExec.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestParserInterpreter.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestParserProfiler.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestPerformance.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestScopeParsing.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestSemPredEvalLexer.java (99%) rename tool/test/org/antlr/v4/{test => xtest}/TestSemPredEvalParser.java (99%) rename 
tool/test/org/antlr/v4/{test => xtest}/TestSets.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestSymbolIssues.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestTokenPositionOptions.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestTokenStreamRewriter.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestTokenTypeAssignment.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestToolSyntaxErrors.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestTopologicalSort.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestUnbufferedCharStream.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestUnbufferedTokenStream.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestVocabulary.java (99%)
 rename tool/test/org/antlr/v4/{test => xtest}/TestXPath.java (99%)
diff --git a/tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/AbstractParserTestMethod.java
similarity index 88%
rename from tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/AbstractParserTestMethod.java
index 1ad771843..f9318a62d 100644
--- a/tool/test/org/antlr/v4/testgen/AbstractParserTestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/AbstractParserTestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 public class AbstractParserTestMethod extends TestMethod {
diff --git a/tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/CompositeLexerTestMethod.java
similarity index 96%
rename from tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/CompositeLexerTestMethod.java
index 2c978c9b8..9073c4874 100644
--- a/tool/test/org/antlr/v4/testgen/CompositeLexerTestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/CompositeLexerTestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
diff --git a/tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java
similarity index 96%
rename from tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java
index 8264aba7f..139c95221 100644
--- a/tool/test/org/antlr/v4/testgen/CompositeParserTestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
diff --git a/tool/test/org/antlr/v4/testgen/ConcreteParserTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/ConcreteParserTestMethod.java
similarity index 94%
rename from tool/test/org/antlr/v4/testgen/ConcreteParserTestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/ConcreteParserTestMethod.java
index 6d04c3385..c246e6c67 100644
--- a/tool/test/org/antlr/v4/testgen/ConcreteParserTestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/ConcreteParserTestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
diff --git a/tool/test/org/antlr/v4/testgen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java
similarity index 97%
rename from tool/test/org/antlr/v4/testgen/Generator.java
rename to tool/test/org/antlr/v4/test/rt/gen/Generator.java
index cf90e0ae7..72daec410 100644
--- a/tool/test/org/antlr/v4/testgen/Generator.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java
@@ -1,9 +1,10 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.OutputStream;
+import java.net.URI;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -11,6 +12,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.antlr.v4.test.rt.java.BaseTest;
 import org.stringtemplate.v4.ST;
 import org.stringtemplate.v4.STGroup;
 import org.stringtemplate.v4.STGroupFile;
@@ -31,13 +33,22 @@ public class Generator {
 	private static Map readConfigs() throws Exception {
 		Map configs = new HashMap();
 		configs.put("Source", readGrammarDir()); // source of test templates
+		configs.put("Java", readJavaDir()); // generated Java tests
 		configs.put("Firefox", readFirefoxDir()); // generated Firefox tests
 		return configs;
 	}
+	private static File readJavaDir() throws Exception {
+		String className = BaseTest.class.getName().replace(".", "/");
+		className = className.substring(0, className.lastIndexOf("/") + 1);
+		URL url = ClassLoader.getSystemResource(className);
+		String uri = url.toURI().toString().replace("target/test-classes", "test");
+		return new File(new URI(uri));
+	}
+
 	private static File readFirefoxDir() {
 		// TODO read from env variable
-		return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/js/test/firefox");
+		return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/test/rt/js/firefox");
 	}
 	private static File readGrammarDir() throws Exception {
@@ -53,7 +64,7 @@ public class Generator {
 	}
 	public static String escape(String s) {
-		return s==null ? null : s.replace("\\","\\\\").replace("\n", "\\n").replace("\"","\\\"");
+		return s==null ? null : s.replace("\\","\\\\").replace("\r", "\\r").replace("\n", "\\n").replace("\"","\\\"");
 	}
 	String target;
@@ -964,13 +975,13 @@ public class Generator {
 		"[@0,0:4='-.-.-',<1>,1:0]\n" +
 		"[@1,5:5='!',<3>,1:5]\n" +
 		"[@2,6:5='',<-1>,1:6]\n", null);
-	file.addLexerTest(input, "NonGreedyTermination", "L", "\"hi\"\"mom\"",
+	file.addLexerTest(input, "NonGreedyTermination1", "L", "\"hi\"\"mom\"",
 		"[@0,0:3='\"hi\"',<1>,1:0]\n" +
 		"[@1,4:8='\"mom\"',<1>,1:4]\n" +
-		"[@2,9:8='',<-1>,1:9]\n", null, 1);
-	file.addLexerTest(input, "NonGreedyTermination", "L", "\"\"\"mom\"",
+		"[@2,9:8='',<-1>,1:9]\n", null);
+	file.addLexerTest(input, "NonGreedyTermination2", "L", "\"\"\"mom\"",
 		"[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" +
-		"[@1,7:6='',<-1>,1:7]\n", null, 2);
+		"[@1,7:6='',<-1>,1:7]\n", null);
 	file.addLexerTest(input, "GreedyOptional", "L", "//blah\n//blah\n",
 		"[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
 		"[@1,14:13='',<-1>,3:14]\n", null);
@@ -1043,7 +1054,7 @@ public class Generator {
 		"ab\n" +
 		"[@0,0:1='ab',<1>,1:0]\n" +
 		"[@1,2:1='',<-1>,1:2]\n", null);
-	file.addLexerTest(input, "NonGreedyConfigs", "L", "qb",
+	file.addLexerTest(input, "NonGreedyConfigs", "L", "ab",
 		"a\n" +
 		"b\n" +
 		"[@0,0:0='a',<1>,1:0]\n" +
@@ -1131,18 +1142,18 @@ public class Generator {
 		"A\n" +
 		"[@0,0:0='9',<1>,1:0]\n" +
 		"[@1,1:0='',<-1>,1:1]\n", null);
-	file.addLexerTest(input, "CharSetWithQuote", "L", "b\"a",
+	file.addLexerTest(input, "CharSetWithQuote1", "L", "b\"a",
 		"A\n" +
 		"[@0,0:2='b\"a',<1>,1:0]\n" +
-		"[@1,3:2='',<-1>,1:3]\n", null, 1);
-	file.addLexerTest(input, "CharSetWithQuote", "L", "b\"\\a",
+		"[@1,3:2='',<-1>,1:3]\n", null);
+	file.addLexerTest(input, "CharSetWithQuote2", "L", "b\"\\a",
 		"A\n" +
 		"[@0,0:3='b\"\\a',<1>,1:0]\n" +
-		"[@1,4:3='',<-1>,1:4]\n", null, 2);
+		"[@1,4:3='',<-1>,1:4]\n", null);
 	final int TOKENS = 4;
 	final int LABEL = 5;
 	final int IDENTIFIER = 6;
-	file.addLexerTest(input, "PositionAdjustingLexer", "L",
+	file.addLexerTest(input, "PositionAdjustingLexer", "PositionAdjustingLexer",
 		"tokens\n" +
 		"tokens {\n" +
 		"notLabel\n" +
diff --git a/tool/test/org/antlr/v4/testgen/Grammar.java b/tool/test/org/antlr/v4/test/rt/gen/Grammar.java
similarity index 97%
rename from tool/test/org/antlr/v4/testgen/Grammar.java
rename to tool/test/org/antlr/v4/test/rt/gen/Grammar.java
index 4b159cf78..433dfd630 100644
--- a/tool/test/org/antlr/v4/testgen/Grammar.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/Grammar.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
 import java.io.FileInputStream;
diff --git a/tool/test/org/antlr/v4/testgen/LexerTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java
similarity index 92%
rename from tool/test/org/antlr/v4/testgen/LexerTestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java
index 42918df7e..f4b027ca7 100644
--- a/tool/test/org/antlr/v4/testgen/LexerTestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 public class LexerTestMethod extends TestMethod {
diff --git a/tool/test/org/antlr/v4/testgen/ParserTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/ParserTestMethod.java
similarity index 90%
rename from tool/test/org/antlr/v4/testgen/ParserTestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/ParserTestMethod.java
index d3ee5fd07..6592b471c 100644
--- a/tool/test/org/antlr/v4/testgen/ParserTestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/ParserTestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 public class ParserTestMethod extends TestMethod {
diff --git a/tool/test/org/antlr/v4/testgen/TestFile.java b/tool/test/org/antlr/v4/test/rt/gen/TestFile.java
similarity index 99%
rename from tool/test/org/antlr/v4/testgen/TestFile.java
rename to tool/test/org/antlr/v4/test/rt/gen/TestFile.java
index 2143a753c..d61ddc365 100644
--- a/tool/test/org/antlr/v4/testgen/TestFile.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/TestFile.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
 import java.util.ArrayList;
diff --git a/tool/test/org/antlr/v4/testgen/TestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/TestMethod.java
similarity index 96%
rename from tool/test/org/antlr/v4/testgen/TestMethod.java
rename to tool/test/org/antlr/v4/test/rt/gen/TestMethod.java
index ef764fe2c..7fdac4075 100644
--- a/tool/test/org/antlr/v4/testgen/TestMethod.java
+++ b/tool/test/org/antlr/v4/test/rt/gen/TestMethod.java
@@ -1,4 +1,4 @@
-package org.antlr.v4.testgen;
+package org.antlr.v4.test.rt.gen;
 import java.io.File;
diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/BringInLiteralsFromDelegate.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st
b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/BringInLiteralsFromDelegate_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/CombinedImportsCombined.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/CombinedImportsCombined.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/CombinedImportsCombined_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/CombinedImportsCombined_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/CombinedImportsCombined_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorAccessesDelegateMembers_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st similarity index 100% rename from 
tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRule.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithArgs_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRule_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_S.st diff --git 
a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesFirstVersionOfDelegateRule_T.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegate_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st rename to 
tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportLexerWithOnlyFragmentRules_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedGrammarWithEmptyOptions_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/ImportedRuleWithAction_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder_S.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/CompositeParsers/KeywordVSIDOrder_S.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder_S.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/AmbigYieldsCtxSensitiveDFA.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbiguityNoLoop.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/AmbiguityNoLoop.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/AmbiguityNoLoop.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/AmbiguityNoLoop.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/CtxSensitiveDFA.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFA.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/CtxSensitiveDFA.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/CtxSensitiveDFATwoDiffInput.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/ExprAmbiguity.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/ExprAmbiguity.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/ExprAmbiguity.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/ExprAmbiguity.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/LoopsSimulateTailRecursion.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/SLLSeesEOFInLLGrammar.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/AmbigLR.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/AmbigLR.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/AmbigLR.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/AmbigLR.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Declarations.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/Declarations.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Declarations.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/Declarations.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/DirectCallToLeftRecursiveRule.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Expressions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/Expressions.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Expressions.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/Expressions.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/JavaExpressions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/JavaExpressions.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/JavaExpressions.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/JavaExpressions.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/LabelsOnOpSubrule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/LabelsOnOpSubrule.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/LabelsOnOpSubrule.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/LabelsOnOpSubrule.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrecedenceFilterConsidersContext.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActions.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActions.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActions.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsAndLabels.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPred.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/SemPred.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPred.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Simple.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/Simple.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/Simple.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/Simple.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/TernaryExpr.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/TernaryExpr.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/TernaryExpr.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/TernaryExpr.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/WhitespaceInfluence.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/WhitespaceInfluence.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LeftRecursion/WhitespaceInfluence.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/WhitespaceInfluence.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/DFAToATNThatFailsBackToDFA.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/DFAToATNThatMatchesThenFailsInATN.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/EnforcedGreedyNestedBrances.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/ErrorInMiddle.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/ErrorInMiddle.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/ErrorInMiddle.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/ErrorInMiddle.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStart.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharAtStart.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStart.st rename to 
tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharAtStart.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharAtStartAfterDFACache.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInToken.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharInToken.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInToken.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharInToken.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/InvalidCharInTokenAfterDFACache.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/LexerExecDFA.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/LexerExecDFA.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/LexerExecDFA.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/LexerExecDFA.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerErrors/StringsEmbeddedInActions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/StringsEmbeddedInActions.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerErrors/StringsEmbeddedInActions.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/StringsEmbeddedInActions.st
diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ActionPlacement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ActionPlacement.st
new file mode 100644
index 000000000..18bffeafb
--- /dev/null
+++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ActionPlacement.st
@@ -0,0 +1,8 @@
+lexer grammar ;
+I : ({} 'a'
+| {}
+ 'a' {}
+ 'b' {})
+ {} ;
+WS : (' '|'\n') -> skip ;
+J : .;
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSet.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSet.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSet.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetInSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetInSet.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetInSet.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetInSet.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetNot.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetNot.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetNot.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetNot.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetPlus.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetPlus.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetPlus.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetPlus.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetRange.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetRange.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetRange.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetRange.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithEscapedChar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithEscapedChar.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithEscapedChar.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithEscapedChar.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEndRange.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithMissingEndRange.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEndRange.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithMissingEndRange.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEscapeChar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithMissingEscapeChar.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithMissingEscapeChar.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithMissingEscapeChar.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithQuote.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithQuote1.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithQuote.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithQuote1.st
diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithQuote2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithQuote2.st
new file mode 100644
index 000000000..01df4d68f
--- /dev/null
+++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithQuote2.st
@@ -0,0 +1,3 @@
+lexer grammar ;
+A : ["\\ab]+ {} ;
+WS : [ \n\t]+ -> skip ;
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithReversedRange.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithReversedRange.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/CharSetWithReversedRange.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/CharSetWithReversedRange.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFByItself.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/EOFByItself.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFByItself.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/EOFByItself.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFSuffixInFirstRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/EOFSuffixInFirstRule.st
similarity index 100%
rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/EOFSuffixInFirstRule.st
rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/EOFSuffixInFirstRule.st
diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyClosure.st
b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyClosure.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyClosure.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyClosure.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyConfigs.st similarity index 58% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyConfigs.st index 1cc91df27..0c009d2ec 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyConfigs.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyConfigs.st @@ -1,4 +1,4 @@ lexer grammar ; -I : ('a' | 'ab') {} ; +I : ('a' | 'ab') {} ; WS : (' '|'\n') -> skip ; J : .; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyOptional.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyOptional.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyOptional.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyOptional.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyPositiveClosure.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyPositiveClosure.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/GreedyPositiveClosure.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/GreedyPositiveClosure.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/HexVsID.st similarity index 90% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/HexVsID.st index 028ebabb4..1e0f9ec0e 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/HexVsID.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/HexVsID.st @@ -5,4 +5,4 @@ FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ; DOT : '.' 
; ID : 'a'..'z'+ ; fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ; -WS : (' '|'\n') -> skip ; +WS : (' '|'\n')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/KeywordID.st similarity index 74% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/KeywordID.st index 6c4987470..f98be3d02 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/KeywordID.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/KeywordID.st @@ -1,4 +1,4 @@ lexer grammar ; KEND : 'end' ; // has priority ID : 'a'..'z'+ ; -WS : (' '|'\n') -> skip ; +WS : (' '|'\n')+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/LargeLexer.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/LargeLexer.st new file mode 100644 index 000000000..6ff2a9a58 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/LargeLexer.st @@ -0,0 +1,4002 @@ +lexer grammar L; +WS : [ \t\r\n]+ -> skip; +KW0 : 'KW' '0'; +KW1 : 'KW' '1'; +KW2 : 'KW' '2'; +KW3 : 'KW' '3'; +KW4 : 'KW' '4'; +KW5 : 'KW' '5'; +KW6 : 'KW' '6'; +KW7 : 'KW' '7'; +KW8 : 'KW' '8'; +KW9 : 'KW' '9'; +KW10 : 'KW' '10'; +KW11 : 'KW' '11'; +KW12 : 'KW' '12'; +KW13 : 'KW' '13'; +KW14 : 'KW' '14'; +KW15 : 'KW' '15'; +KW16 : 'KW' '16'; +KW17 : 'KW' '17'; +KW18 : 'KW' '18'; +KW19 : 'KW' '19'; +KW20 : 'KW' '20'; +KW21 : 'KW' '21'; +KW22 : 'KW' '22'; +KW23 : 'KW' '23'; +KW24 : 'KW' '24'; +KW25 : 'KW' '25'; +KW26 : 'KW' '26'; +KW27 : 'KW' '27'; +KW28 : 'KW' '28'; +KW29 : 'KW' '29'; +KW30 : 'KW' '30'; +KW31 : 'KW' '31'; +KW32 : 'KW' '32'; +KW33 : 'KW' '33'; +KW34 : 'KW' '34'; +KW35 : 'KW' '35'; +KW36 : 'KW' '36'; +KW37 : 'KW' '37'; +KW38 : 'KW' '38'; +KW39 : 'KW' '39'; +KW40 : 'KW' '40'; +KW41 : 'KW' '41'; +KW42 : 'KW' '42'; +KW43 : 'KW' '43'; +KW44 : 'KW' '44'; +KW45 : 'KW' '45'; +KW46 : 'KW' '46'; +KW47 : 'KW' '47'; +KW48 : 'KW' '48'; +KW49 : 'KW' '49'; +KW50 : 'KW' '50'; +KW51 : 'KW' '51'; +KW52 : 'KW' '52'; +KW53 : 'KW' '53'; +KW54 : 'KW' '54'; +KW55 : 'KW' '55'; +KW56 : 'KW' '56'; +KW57 : 'KW' '57'; +KW58 : 'KW' '58'; +KW59 : 'KW' '59'; +KW60 : 'KW' '60'; +KW61 : 'KW' '61'; +KW62 : 'KW' '62'; +KW63 : 'KW' '63'; +KW64 : 'KW' '64'; +KW65 : 'KW' '65'; +KW66 : 'KW' '66'; +KW67 : 'KW' '67'; +KW68 : 'KW' '68'; +KW69 : 'KW' '69'; +KW70 : 'KW' '70'; +KW71 : 'KW' '71'; +KW72 : 'KW' '72'; +KW73 : 'KW' '73'; +KW74 : 'KW' '74'; +KW75 : 'KW' '75'; +KW76 : 'KW' '76'; +KW77 : 'KW' '77'; +KW78 : 'KW' '78'; +KW79 : 'KW' '79'; +KW80 : 'KW' '80'; +KW81 : 'KW' '81'; +KW82 : 'KW' '82'; +KW83 : 'KW' '83'; +KW84 : 'KW' '84'; +KW85 : 'KW' '85'; +KW86 : 'KW' '86'; +KW87 : 'KW' '87'; +KW88 : 'KW' '88'; +KW89 : 'KW' '89'; +KW90 : 'KW' '90'; +KW91 : 'KW' '91'; +KW92 : 'KW' '92'; +KW93 : 'KW' '93'; +KW94 : 'KW' '94'; +KW95 : 'KW' '95'; +KW96 : 'KW' '96'; +KW97 : 'KW' '97'; +KW98 : 'KW' '98'; +KW99 : 'KW' '99'; +KW100 : 'KW' '100'; +KW101 : 'KW' '101'; +KW102 : 'KW' '102'; +KW103 : 'KW' '103'; +KW104 : 'KW' '104'; +KW105 : 'KW' '105'; +KW106 : 'KW' '106'; +KW107 : 'KW' '107'; +KW108 : 'KW' '108'; +KW109 : 'KW' '109'; +KW110 : 'KW' '110'; +KW111 : 'KW' '111'; +KW112 : 'KW' '112'; +KW113 : 'KW' '113'; +KW114 : 'KW' '114'; +KW115 : 'KW' '115'; +KW116 : 'KW' '116'; +KW117 : 'KW' '117'; +KW118 : 'KW' '118'; +KW119 : 'KW' '119'; +KW120 : 'KW' '120'; +KW121 : 'KW' '121'; +KW122 : 'KW' '122'; +KW123 : 'KW' '123'; +KW124 : 'KW' '124'; +KW125 : 'KW' '125'; +KW126 : 'KW' '126'; +KW127 : 'KW' '127'; 
+KW128 : 'KW' '128'; +KW129 : 'KW' '129'; +KW130 : 'KW' '130'; +KW131 : 'KW' '131'; +KW132 : 'KW' '132'; +KW133 : 'KW' '133'; +KW134 : 'KW' '134'; +KW135 : 'KW' '135'; +KW136 : 'KW' '136'; +KW137 : 'KW' '137'; +KW138 : 'KW' '138'; +KW139 : 'KW' '139'; +KW140 : 'KW' '140'; +KW141 : 'KW' '141'; +KW142 : 'KW' '142'; +KW143 : 'KW' '143'; +KW144 : 'KW' '144'; +KW145 : 'KW' '145'; +KW146 : 'KW' '146'; +KW147 : 'KW' '147'; +KW148 : 'KW' '148'; +KW149 : 'KW' '149'; +KW150 : 'KW' '150'; +KW151 : 'KW' '151'; +KW152 : 'KW' '152'; +KW153 : 'KW' '153'; +KW154 : 'KW' '154'; +KW155 : 'KW' '155'; +KW156 : 'KW' '156'; +KW157 : 'KW' '157'; +KW158 : 'KW' '158'; +KW159 : 'KW' '159'; +KW160 : 'KW' '160'; +KW161 : 'KW' '161'; +KW162 : 'KW' '162'; +KW163 : 'KW' '163'; +KW164 : 'KW' '164'; +KW165 : 'KW' '165'; +KW166 : 'KW' '166'; +KW167 : 'KW' '167'; +KW168 : 'KW' '168'; +KW169 : 'KW' '169'; +KW170 : 'KW' '170'; +KW171 : 'KW' '171'; +KW172 : 'KW' '172'; +KW173 : 'KW' '173'; +KW174 : 'KW' '174'; +KW175 : 'KW' '175'; +KW176 : 'KW' '176'; +KW177 : 'KW' '177'; +KW178 : 'KW' '178'; +KW179 : 'KW' '179'; +KW180 : 'KW' '180'; +KW181 : 'KW' '181'; +KW182 : 'KW' '182'; +KW183 : 'KW' '183'; +KW184 : 'KW' '184'; +KW185 : 'KW' '185'; +KW186 : 'KW' '186'; +KW187 : 'KW' '187'; +KW188 : 'KW' '188'; +KW189 : 'KW' '189'; +KW190 : 'KW' '190'; +KW191 : 'KW' '191'; +KW192 : 'KW' '192'; +KW193 : 'KW' '193'; +KW194 : 'KW' '194'; +KW195 : 'KW' '195'; +KW196 : 'KW' '196'; +KW197 : 'KW' '197'; +KW198 : 'KW' '198'; +KW199 : 'KW' '199'; +KW200 : 'KW' '200'; +KW201 : 'KW' '201'; +KW202 : 'KW' '202'; +KW203 : 'KW' '203'; +KW204 : 'KW' '204'; +KW205 : 'KW' '205'; +KW206 : 'KW' '206'; +KW207 : 'KW' '207'; +KW208 : 'KW' '208'; +KW209 : 'KW' '209'; +KW210 : 'KW' '210'; +KW211 : 'KW' '211'; +KW212 : 'KW' '212'; +KW213 : 'KW' '213'; +KW214 : 'KW' '214'; +KW215 : 'KW' '215'; +KW216 : 'KW' '216'; +KW217 : 'KW' '217'; +KW218 : 'KW' '218'; +KW219 : 'KW' '219'; +KW220 : 'KW' '220'; +KW221 : 'KW' '221'; +KW222 : 'KW' '222'; +KW223 : 'KW' '223'; +KW224 : 'KW' '224'; +KW225 : 'KW' '225'; +KW226 : 'KW' '226'; +KW227 : 'KW' '227'; +KW228 : 'KW' '228'; +KW229 : 'KW' '229'; +KW230 : 'KW' '230'; +KW231 : 'KW' '231'; +KW232 : 'KW' '232'; +KW233 : 'KW' '233'; +KW234 : 'KW' '234'; +KW235 : 'KW' '235'; +KW236 : 'KW' '236'; +KW237 : 'KW' '237'; +KW238 : 'KW' '238'; +KW239 : 'KW' '239'; +KW240 : 'KW' '240'; +KW241 : 'KW' '241'; +KW242 : 'KW' '242'; +KW243 : 'KW' '243'; +KW244 : 'KW' '244'; +KW245 : 'KW' '245'; +KW246 : 'KW' '246'; +KW247 : 'KW' '247'; +KW248 : 'KW' '248'; +KW249 : 'KW' '249'; +KW250 : 'KW' '250'; +KW251 : 'KW' '251'; +KW252 : 'KW' '252'; +KW253 : 'KW' '253'; +KW254 : 'KW' '254'; +KW255 : 'KW' '255'; +KW256 : 'KW' '256'; +KW257 : 'KW' '257'; +KW258 : 'KW' '258'; +KW259 : 'KW' '259'; +KW260 : 'KW' '260'; +KW261 : 'KW' '261'; +KW262 : 'KW' '262'; +KW263 : 'KW' '263'; +KW264 : 'KW' '264'; +KW265 : 'KW' '265'; +KW266 : 'KW' '266'; +KW267 : 'KW' '267'; +KW268 : 'KW' '268'; +KW269 : 'KW' '269'; +KW270 : 'KW' '270'; +KW271 : 'KW' '271'; +KW272 : 'KW' '272'; +KW273 : 'KW' '273'; +KW274 : 'KW' '274'; +KW275 : 'KW' '275'; +KW276 : 'KW' '276'; +KW277 : 'KW' '277'; +KW278 : 'KW' '278'; +KW279 : 'KW' '279'; +KW280 : 'KW' '280'; +KW281 : 'KW' '281'; +KW282 : 'KW' '282'; +KW283 : 'KW' '283'; +KW284 : 'KW' '284'; +KW285 : 'KW' '285'; +KW286 : 'KW' '286'; +KW287 : 'KW' '287'; +KW288 : 'KW' '288'; +KW289 : 'KW' '289'; +KW290 : 'KW' '290'; +KW291 : 'KW' '291'; +KW292 : 'KW' '292'; +KW293 : 'KW' '293'; +KW294 : 'KW' '294'; +KW295 : 'KW' '295'; +KW296 : 'KW' '296'; 
+KW297 : 'KW' '297'; +KW298 : 'KW' '298'; +KW299 : 'KW' '299'; +KW300 : 'KW' '300'; +KW301 : 'KW' '301'; +KW302 : 'KW' '302'; +KW303 : 'KW' '303'; +KW304 : 'KW' '304'; +KW305 : 'KW' '305'; +KW306 : 'KW' '306'; +KW307 : 'KW' '307'; +KW308 : 'KW' '308'; +KW309 : 'KW' '309'; +KW310 : 'KW' '310'; +KW311 : 'KW' '311'; +KW312 : 'KW' '312'; +KW313 : 'KW' '313'; +KW314 : 'KW' '314'; +KW315 : 'KW' '315'; +KW316 : 'KW' '316'; +KW317 : 'KW' '317'; +KW318 : 'KW' '318'; +KW319 : 'KW' '319'; +KW320 : 'KW' '320'; +KW321 : 'KW' '321'; +KW322 : 'KW' '322'; +KW323 : 'KW' '323'; +KW324 : 'KW' '324'; +KW325 : 'KW' '325'; +KW326 : 'KW' '326'; +KW327 : 'KW' '327'; +KW328 : 'KW' '328'; +KW329 : 'KW' '329'; +KW330 : 'KW' '330'; +KW331 : 'KW' '331'; +KW332 : 'KW' '332'; +KW333 : 'KW' '333'; +KW334 : 'KW' '334'; +KW335 : 'KW' '335'; +KW336 : 'KW' '336'; +KW337 : 'KW' '337'; +KW338 : 'KW' '338'; +KW339 : 'KW' '339'; +KW340 : 'KW' '340'; +KW341 : 'KW' '341'; +KW342 : 'KW' '342'; +KW343 : 'KW' '343'; +KW344 : 'KW' '344'; +KW345 : 'KW' '345'; +KW346 : 'KW' '346'; +KW347 : 'KW' '347'; +KW348 : 'KW' '348'; +KW349 : 'KW' '349'; +KW350 : 'KW' '350'; +KW351 : 'KW' '351'; +KW352 : 'KW' '352'; +KW353 : 'KW' '353'; +KW354 : 'KW' '354'; +KW355 : 'KW' '355'; +KW356 : 'KW' '356'; +KW357 : 'KW' '357'; +KW358 : 'KW' '358'; +KW359 : 'KW' '359'; +KW360 : 'KW' '360'; +KW361 : 'KW' '361'; +KW362 : 'KW' '362'; +KW363 : 'KW' '363'; +KW364 : 'KW' '364'; +KW365 : 'KW' '365'; +KW366 : 'KW' '366'; +KW367 : 'KW' '367'; +KW368 : 'KW' '368'; +KW369 : 'KW' '369'; +KW370 : 'KW' '370'; +KW371 : 'KW' '371'; +KW372 : 'KW' '372'; +KW373 : 'KW' '373'; +KW374 : 'KW' '374'; +KW375 : 'KW' '375'; +KW376 : 'KW' '376'; +KW377 : 'KW' '377'; +KW378 : 'KW' '378'; +KW379 : 'KW' '379'; +KW380 : 'KW' '380'; +KW381 : 'KW' '381'; +KW382 : 'KW' '382'; +KW383 : 'KW' '383'; +KW384 : 'KW' '384'; +KW385 : 'KW' '385'; +KW386 : 'KW' '386'; +KW387 : 'KW' '387'; +KW388 : 'KW' '388'; +KW389 : 'KW' '389'; +KW390 : 'KW' '390'; +KW391 : 'KW' '391'; +KW392 : 'KW' '392'; +KW393 : 'KW' '393'; +KW394 : 'KW' '394'; +KW395 : 'KW' '395'; +KW396 : 'KW' '396'; +KW397 : 'KW' '397'; +KW398 : 'KW' '398'; +KW399 : 'KW' '399'; +KW400 : 'KW' '400'; +KW401 : 'KW' '401'; +KW402 : 'KW' '402'; +KW403 : 'KW' '403'; +KW404 : 'KW' '404'; +KW405 : 'KW' '405'; +KW406 : 'KW' '406'; +KW407 : 'KW' '407'; +KW408 : 'KW' '408'; +KW409 : 'KW' '409'; +KW410 : 'KW' '410'; +KW411 : 'KW' '411'; +KW412 : 'KW' '412'; +KW413 : 'KW' '413'; +KW414 : 'KW' '414'; +KW415 : 'KW' '415'; +KW416 : 'KW' '416'; +KW417 : 'KW' '417'; +KW418 : 'KW' '418'; +KW419 : 'KW' '419'; +KW420 : 'KW' '420'; +KW421 : 'KW' '421'; +KW422 : 'KW' '422'; +KW423 : 'KW' '423'; +KW424 : 'KW' '424'; +KW425 : 'KW' '425'; +KW426 : 'KW' '426'; +KW427 : 'KW' '427'; +KW428 : 'KW' '428'; +KW429 : 'KW' '429'; +KW430 : 'KW' '430'; +KW431 : 'KW' '431'; +KW432 : 'KW' '432'; +KW433 : 'KW' '433'; +KW434 : 'KW' '434'; +KW435 : 'KW' '435'; +KW436 : 'KW' '436'; +KW437 : 'KW' '437'; +KW438 : 'KW' '438'; +KW439 : 'KW' '439'; +KW440 : 'KW' '440'; +KW441 : 'KW' '441'; +KW442 : 'KW' '442'; +KW443 : 'KW' '443'; +KW444 : 'KW' '444'; +KW445 : 'KW' '445'; +KW446 : 'KW' '446'; +KW447 : 'KW' '447'; +KW448 : 'KW' '448'; +KW449 : 'KW' '449'; +KW450 : 'KW' '450'; +KW451 : 'KW' '451'; +KW452 : 'KW' '452'; +KW453 : 'KW' '453'; +KW454 : 'KW' '454'; +KW455 : 'KW' '455'; +KW456 : 'KW' '456'; +KW457 : 'KW' '457'; +KW458 : 'KW' '458'; +KW459 : 'KW' '459'; +KW460 : 'KW' '460'; +KW461 : 'KW' '461'; +KW462 : 'KW' '462'; +KW463 : 'KW' '463'; +KW464 : 'KW' '464'; +KW465 : 'KW' '465'; 
+KW466 : 'KW' '466'; +KW467 : 'KW' '467'; +KW468 : 'KW' '468'; +KW469 : 'KW' '469'; +KW470 : 'KW' '470'; +KW471 : 'KW' '471'; +KW472 : 'KW' '472'; +KW473 : 'KW' '473'; +KW474 : 'KW' '474'; +KW475 : 'KW' '475'; +KW476 : 'KW' '476'; +KW477 : 'KW' '477'; +KW478 : 'KW' '478'; +KW479 : 'KW' '479'; +KW480 : 'KW' '480'; +KW481 : 'KW' '481'; +KW482 : 'KW' '482'; +KW483 : 'KW' '483'; +KW484 : 'KW' '484'; +KW485 : 'KW' '485'; +KW486 : 'KW' '486'; +KW487 : 'KW' '487'; +KW488 : 'KW' '488'; +KW489 : 'KW' '489'; +KW490 : 'KW' '490'; +KW491 : 'KW' '491'; +KW492 : 'KW' '492'; +KW493 : 'KW' '493'; +KW494 : 'KW' '494'; +KW495 : 'KW' '495'; +KW496 : 'KW' '496'; +KW497 : 'KW' '497'; +KW498 : 'KW' '498'; +KW499 : 'KW' '499'; +KW500 : 'KW' '500'; +KW501 : 'KW' '501'; +KW502 : 'KW' '502'; +KW503 : 'KW' '503'; +KW504 : 'KW' '504'; +KW505 : 'KW' '505'; +KW506 : 'KW' '506'; +KW507 : 'KW' '507'; +KW508 : 'KW' '508'; +KW509 : 'KW' '509'; +KW510 : 'KW' '510'; +KW511 : 'KW' '511'; +KW512 : 'KW' '512'; +KW513 : 'KW' '513'; +KW514 : 'KW' '514'; +KW515 : 'KW' '515'; +KW516 : 'KW' '516'; +KW517 : 'KW' '517'; +KW518 : 'KW' '518'; +KW519 : 'KW' '519'; +KW520 : 'KW' '520'; +KW521 : 'KW' '521'; +KW522 : 'KW' '522'; +KW523 : 'KW' '523'; +KW524 : 'KW' '524'; +KW525 : 'KW' '525'; +KW526 : 'KW' '526'; +KW527 : 'KW' '527'; +KW528 : 'KW' '528'; +KW529 : 'KW' '529'; +KW530 : 'KW' '530'; +KW531 : 'KW' '531'; +KW532 : 'KW' '532'; +KW533 : 'KW' '533'; +KW534 : 'KW' '534'; +KW535 : 'KW' '535'; +KW536 : 'KW' '536'; +KW537 : 'KW' '537'; +KW538 : 'KW' '538'; +KW539 : 'KW' '539'; +KW540 : 'KW' '540'; +KW541 : 'KW' '541'; +KW542 : 'KW' '542'; +KW543 : 'KW' '543'; +KW544 : 'KW' '544'; +KW545 : 'KW' '545'; +KW546 : 'KW' '546'; +KW547 : 'KW' '547'; +KW548 : 'KW' '548'; +KW549 : 'KW' '549'; +KW550 : 'KW' '550'; +KW551 : 'KW' '551'; +KW552 : 'KW' '552'; +KW553 : 'KW' '553'; +KW554 : 'KW' '554'; +KW555 : 'KW' '555'; +KW556 : 'KW' '556'; +KW557 : 'KW' '557'; +KW558 : 'KW' '558'; +KW559 : 'KW' '559'; +KW560 : 'KW' '560'; +KW561 : 'KW' '561'; +KW562 : 'KW' '562'; +KW563 : 'KW' '563'; +KW564 : 'KW' '564'; +KW565 : 'KW' '565'; +KW566 : 'KW' '566'; +KW567 : 'KW' '567'; +KW568 : 'KW' '568'; +KW569 : 'KW' '569'; +KW570 : 'KW' '570'; +KW571 : 'KW' '571'; +KW572 : 'KW' '572'; +KW573 : 'KW' '573'; +KW574 : 'KW' '574'; +KW575 : 'KW' '575'; +KW576 : 'KW' '576'; +KW577 : 'KW' '577'; +KW578 : 'KW' '578'; +KW579 : 'KW' '579'; +KW580 : 'KW' '580'; +KW581 : 'KW' '581'; +KW582 : 'KW' '582'; +KW583 : 'KW' '583'; +KW584 : 'KW' '584'; +KW585 : 'KW' '585'; +KW586 : 'KW' '586'; +KW587 : 'KW' '587'; +KW588 : 'KW' '588'; +KW589 : 'KW' '589'; +KW590 : 'KW' '590'; +KW591 : 'KW' '591'; +KW592 : 'KW' '592'; +KW593 : 'KW' '593'; +KW594 : 'KW' '594'; +KW595 : 'KW' '595'; +KW596 : 'KW' '596'; +KW597 : 'KW' '597'; +KW598 : 'KW' '598'; +KW599 : 'KW' '599'; +KW600 : 'KW' '600'; +KW601 : 'KW' '601'; +KW602 : 'KW' '602'; +KW603 : 'KW' '603'; +KW604 : 'KW' '604'; +KW605 : 'KW' '605'; +KW606 : 'KW' '606'; +KW607 : 'KW' '607'; +KW608 : 'KW' '608'; +KW609 : 'KW' '609'; +KW610 : 'KW' '610'; +KW611 : 'KW' '611'; +KW612 : 'KW' '612'; +KW613 : 'KW' '613'; +KW614 : 'KW' '614'; +KW615 : 'KW' '615'; +KW616 : 'KW' '616'; +KW617 : 'KW' '617'; +KW618 : 'KW' '618'; +KW619 : 'KW' '619'; +KW620 : 'KW' '620'; +KW621 : 'KW' '621'; +KW622 : 'KW' '622'; +KW623 : 'KW' '623'; +KW624 : 'KW' '624'; +KW625 : 'KW' '625'; +KW626 : 'KW' '626'; +KW627 : 'KW' '627'; +KW628 : 'KW' '628'; +KW629 : 'KW' '629'; +KW630 : 'KW' '630'; +KW631 : 'KW' '631'; +KW632 : 'KW' '632'; +KW633 : 'KW' '633'; +KW634 : 'KW' '634'; 
+KW635 : 'KW' '635'; +KW636 : 'KW' '636'; +KW637 : 'KW' '637'; +KW638 : 'KW' '638'; +KW639 : 'KW' '639'; +KW640 : 'KW' '640'; +KW641 : 'KW' '641'; +KW642 : 'KW' '642'; +KW643 : 'KW' '643'; +KW644 : 'KW' '644'; +KW645 : 'KW' '645'; +KW646 : 'KW' '646'; +KW647 : 'KW' '647'; +KW648 : 'KW' '648'; +KW649 : 'KW' '649'; +KW650 : 'KW' '650'; +KW651 : 'KW' '651'; +KW652 : 'KW' '652'; +KW653 : 'KW' '653'; +KW654 : 'KW' '654'; +KW655 : 'KW' '655'; +KW656 : 'KW' '656'; +KW657 : 'KW' '657'; +KW658 : 'KW' '658'; +KW659 : 'KW' '659'; +KW660 : 'KW' '660'; +KW661 : 'KW' '661'; +KW662 : 'KW' '662'; +KW663 : 'KW' '663'; +KW664 : 'KW' '664'; +KW665 : 'KW' '665'; +KW666 : 'KW' '666'; +KW667 : 'KW' '667'; +KW668 : 'KW' '668'; +KW669 : 'KW' '669'; +KW670 : 'KW' '670'; +KW671 : 'KW' '671'; +KW672 : 'KW' '672'; +KW673 : 'KW' '673'; +KW674 : 'KW' '674'; +KW675 : 'KW' '675'; +KW676 : 'KW' '676'; +KW677 : 'KW' '677'; +KW678 : 'KW' '678'; +KW679 : 'KW' '679'; +KW680 : 'KW' '680'; +KW681 : 'KW' '681'; +KW682 : 'KW' '682'; +KW683 : 'KW' '683'; +KW684 : 'KW' '684'; +KW685 : 'KW' '685'; +KW686 : 'KW' '686'; +KW687 : 'KW' '687'; +KW688 : 'KW' '688'; +KW689 : 'KW' '689'; +KW690 : 'KW' '690'; +KW691 : 'KW' '691'; +KW692 : 'KW' '692'; +KW693 : 'KW' '693'; +KW694 : 'KW' '694'; +KW695 : 'KW' '695'; +KW696 : 'KW' '696'; +KW697 : 'KW' '697'; +KW698 : 'KW' '698'; +KW699 : 'KW' '699'; +KW700 : 'KW' '700'; +KW701 : 'KW' '701'; +KW702 : 'KW' '702'; +KW703 : 'KW' '703'; +KW704 : 'KW' '704'; +KW705 : 'KW' '705'; +KW706 : 'KW' '706'; +KW707 : 'KW' '707'; +KW708 : 'KW' '708'; +KW709 : 'KW' '709'; +KW710 : 'KW' '710'; +KW711 : 'KW' '711'; +KW712 : 'KW' '712'; +KW713 : 'KW' '713'; +KW714 : 'KW' '714'; +KW715 : 'KW' '715'; +KW716 : 'KW' '716'; +KW717 : 'KW' '717'; +KW718 : 'KW' '718'; +KW719 : 'KW' '719'; +KW720 : 'KW' '720'; +KW721 : 'KW' '721'; +KW722 : 'KW' '722'; +KW723 : 'KW' '723'; +KW724 : 'KW' '724'; +KW725 : 'KW' '725'; +KW726 : 'KW' '726'; +KW727 : 'KW' '727'; +KW728 : 'KW' '728'; +KW729 : 'KW' '729'; +KW730 : 'KW' '730'; +KW731 : 'KW' '731'; +KW732 : 'KW' '732'; +KW733 : 'KW' '733'; +KW734 : 'KW' '734'; +KW735 : 'KW' '735'; +KW736 : 'KW' '736'; +KW737 : 'KW' '737'; +KW738 : 'KW' '738'; +KW739 : 'KW' '739'; +KW740 : 'KW' '740'; +KW741 : 'KW' '741'; +KW742 : 'KW' '742'; +KW743 : 'KW' '743'; +KW744 : 'KW' '744'; +KW745 : 'KW' '745'; +KW746 : 'KW' '746'; +KW747 : 'KW' '747'; +KW748 : 'KW' '748'; +KW749 : 'KW' '749'; +KW750 : 'KW' '750'; +KW751 : 'KW' '751'; +KW752 : 'KW' '752'; +KW753 : 'KW' '753'; +KW754 : 'KW' '754'; +KW755 : 'KW' '755'; +KW756 : 'KW' '756'; +KW757 : 'KW' '757'; +KW758 : 'KW' '758'; +KW759 : 'KW' '759'; +KW760 : 'KW' '760'; +KW761 : 'KW' '761'; +KW762 : 'KW' '762'; +KW763 : 'KW' '763'; +KW764 : 'KW' '764'; +KW765 : 'KW' '765'; +KW766 : 'KW' '766'; +KW767 : 'KW' '767'; +KW768 : 'KW' '768'; +KW769 : 'KW' '769'; +KW770 : 'KW' '770'; +KW771 : 'KW' '771'; +KW772 : 'KW' '772'; +KW773 : 'KW' '773'; +KW774 : 'KW' '774'; +KW775 : 'KW' '775'; +KW776 : 'KW' '776'; +KW777 : 'KW' '777'; +KW778 : 'KW' '778'; +KW779 : 'KW' '779'; +KW780 : 'KW' '780'; +KW781 : 'KW' '781'; +KW782 : 'KW' '782'; +KW783 : 'KW' '783'; +KW784 : 'KW' '784'; +KW785 : 'KW' '785'; +KW786 : 'KW' '786'; +KW787 : 'KW' '787'; +KW788 : 'KW' '788'; +KW789 : 'KW' '789'; +KW790 : 'KW' '790'; +KW791 : 'KW' '791'; +KW792 : 'KW' '792'; +KW793 : 'KW' '793'; +KW794 : 'KW' '794'; +KW795 : 'KW' '795'; +KW796 : 'KW' '796'; +KW797 : 'KW' '797'; +KW798 : 'KW' '798'; +KW799 : 'KW' '799'; +KW800 : 'KW' '800'; +KW801 : 'KW' '801'; +KW802 : 'KW' '802'; +KW803 : 'KW' '803'; 
+KW804 : 'KW' '804'; +KW805 : 'KW' '805'; +KW806 : 'KW' '806'; +KW807 : 'KW' '807'; +KW808 : 'KW' '808'; +KW809 : 'KW' '809'; +KW810 : 'KW' '810'; +KW811 : 'KW' '811'; +KW812 : 'KW' '812'; +KW813 : 'KW' '813'; +KW814 : 'KW' '814'; +KW815 : 'KW' '815'; +KW816 : 'KW' '816'; +KW817 : 'KW' '817'; +KW818 : 'KW' '818'; +KW819 : 'KW' '819'; +KW820 : 'KW' '820'; +KW821 : 'KW' '821'; +KW822 : 'KW' '822'; +KW823 : 'KW' '823'; +KW824 : 'KW' '824'; +KW825 : 'KW' '825'; +KW826 : 'KW' '826'; +KW827 : 'KW' '827'; +KW828 : 'KW' '828'; +KW829 : 'KW' '829'; +KW830 : 'KW' '830'; +KW831 : 'KW' '831'; +KW832 : 'KW' '832'; +KW833 : 'KW' '833'; +KW834 : 'KW' '834'; +KW835 : 'KW' '835'; +KW836 : 'KW' '836'; +KW837 : 'KW' '837'; +KW838 : 'KW' '838'; +KW839 : 'KW' '839'; +KW840 : 'KW' '840'; +KW841 : 'KW' '841'; +KW842 : 'KW' '842'; +KW843 : 'KW' '843'; +KW844 : 'KW' '844'; +KW845 : 'KW' '845'; +KW846 : 'KW' '846'; +KW847 : 'KW' '847'; +KW848 : 'KW' '848'; +KW849 : 'KW' '849'; +KW850 : 'KW' '850'; +KW851 : 'KW' '851'; +KW852 : 'KW' '852'; +KW853 : 'KW' '853'; +KW854 : 'KW' '854'; +KW855 : 'KW' '855'; +KW856 : 'KW' '856'; +KW857 : 'KW' '857'; +KW858 : 'KW' '858'; +KW859 : 'KW' '859'; +KW860 : 'KW' '860'; +KW861 : 'KW' '861'; +KW862 : 'KW' '862'; +KW863 : 'KW' '863'; +KW864 : 'KW' '864'; +KW865 : 'KW' '865'; +KW866 : 'KW' '866'; +KW867 : 'KW' '867'; +KW868 : 'KW' '868'; +KW869 : 'KW' '869'; +KW870 : 'KW' '870'; +KW871 : 'KW' '871'; +KW872 : 'KW' '872'; +KW873 : 'KW' '873'; +KW874 : 'KW' '874'; +KW875 : 'KW' '875'; +KW876 : 'KW' '876'; +KW877 : 'KW' '877'; +KW878 : 'KW' '878'; +KW879 : 'KW' '879'; +KW880 : 'KW' '880'; +KW881 : 'KW' '881'; +KW882 : 'KW' '882'; +KW883 : 'KW' '883'; +KW884 : 'KW' '884'; +KW885 : 'KW' '885'; +KW886 : 'KW' '886'; +KW887 : 'KW' '887'; +KW888 : 'KW' '888'; +KW889 : 'KW' '889'; +KW890 : 'KW' '890'; +KW891 : 'KW' '891'; +KW892 : 'KW' '892'; +KW893 : 'KW' '893'; +KW894 : 'KW' '894'; +KW895 : 'KW' '895'; +KW896 : 'KW' '896'; +KW897 : 'KW' '897'; +KW898 : 'KW' '898'; +KW899 : 'KW' '899'; +KW900 : 'KW' '900'; +KW901 : 'KW' '901'; +KW902 : 'KW' '902'; +KW903 : 'KW' '903'; +KW904 : 'KW' '904'; +KW905 : 'KW' '905'; +KW906 : 'KW' '906'; +KW907 : 'KW' '907'; +KW908 : 'KW' '908'; +KW909 : 'KW' '909'; +KW910 : 'KW' '910'; +KW911 : 'KW' '911'; +KW912 : 'KW' '912'; +KW913 : 'KW' '913'; +KW914 : 'KW' '914'; +KW915 : 'KW' '915'; +KW916 : 'KW' '916'; +KW917 : 'KW' '917'; +KW918 : 'KW' '918'; +KW919 : 'KW' '919'; +KW920 : 'KW' '920'; +KW921 : 'KW' '921'; +KW922 : 'KW' '922'; +KW923 : 'KW' '923'; +KW924 : 'KW' '924'; +KW925 : 'KW' '925'; +KW926 : 'KW' '926'; +KW927 : 'KW' '927'; +KW928 : 'KW' '928'; +KW929 : 'KW' '929'; +KW930 : 'KW' '930'; +KW931 : 'KW' '931'; +KW932 : 'KW' '932'; +KW933 : 'KW' '933'; +KW934 : 'KW' '934'; +KW935 : 'KW' '935'; +KW936 : 'KW' '936'; +KW937 : 'KW' '937'; +KW938 : 'KW' '938'; +KW939 : 'KW' '939'; +KW940 : 'KW' '940'; +KW941 : 'KW' '941'; +KW942 : 'KW' '942'; +KW943 : 'KW' '943'; +KW944 : 'KW' '944'; +KW945 : 'KW' '945'; +KW946 : 'KW' '946'; +KW947 : 'KW' '947'; +KW948 : 'KW' '948'; +KW949 : 'KW' '949'; +KW950 : 'KW' '950'; +KW951 : 'KW' '951'; +KW952 : 'KW' '952'; +KW953 : 'KW' '953'; +KW954 : 'KW' '954'; +KW955 : 'KW' '955'; +KW956 : 'KW' '956'; +KW957 : 'KW' '957'; +KW958 : 'KW' '958'; +KW959 : 'KW' '959'; +KW960 : 'KW' '960'; +KW961 : 'KW' '961'; +KW962 : 'KW' '962'; +KW963 : 'KW' '963'; +KW964 : 'KW' '964'; +KW965 : 'KW' '965'; +KW966 : 'KW' '966'; +KW967 : 'KW' '967'; +KW968 : 'KW' '968'; +KW969 : 'KW' '969'; +KW970 : 'KW' '970'; +KW971 : 'KW' '971'; +KW972 : 'KW' '972'; 
+KW973 : 'KW' '973'; +KW974 : 'KW' '974'; +KW975 : 'KW' '975'; +KW976 : 'KW' '976'; +KW977 : 'KW' '977'; +KW978 : 'KW' '978'; +KW979 : 'KW' '979'; +KW980 : 'KW' '980'; +KW981 : 'KW' '981'; +KW982 : 'KW' '982'; +KW983 : 'KW' '983'; +KW984 : 'KW' '984'; +KW985 : 'KW' '985'; +KW986 : 'KW' '986'; +KW987 : 'KW' '987'; +KW988 : 'KW' '988'; +KW989 : 'KW' '989'; +KW990 : 'KW' '990'; +KW991 : 'KW' '991'; +KW992 : 'KW' '992'; +KW993 : 'KW' '993'; +KW994 : 'KW' '994'; +KW995 : 'KW' '995'; +KW996 : 'KW' '996'; +KW997 : 'KW' '997'; +KW998 : 'KW' '998'; +KW999 : 'KW' '999'; +KW1000 : 'KW' '1000'; +KW1001 : 'KW' '1001'; +KW1002 : 'KW' '1002'; +KW1003 : 'KW' '1003'; +KW1004 : 'KW' '1004'; +KW1005 : 'KW' '1005'; +KW1006 : 'KW' '1006'; +KW1007 : 'KW' '1007'; +KW1008 : 'KW' '1008'; +KW1009 : 'KW' '1009'; +KW1010 : 'KW' '1010'; +KW1011 : 'KW' '1011'; +KW1012 : 'KW' '1012'; +KW1013 : 'KW' '1013'; +KW1014 : 'KW' '1014'; +KW1015 : 'KW' '1015'; +KW1016 : 'KW' '1016'; +KW1017 : 'KW' '1017'; +KW1018 : 'KW' '1018'; +KW1019 : 'KW' '1019'; +KW1020 : 'KW' '1020'; +KW1021 : 'KW' '1021'; +KW1022 : 'KW' '1022'; +KW1023 : 'KW' '1023'; +KW1024 : 'KW' '1024'; +KW1025 : 'KW' '1025'; +KW1026 : 'KW' '1026'; +KW1027 : 'KW' '1027'; +KW1028 : 'KW' '1028'; +KW1029 : 'KW' '1029'; +KW1030 : 'KW' '1030'; +KW1031 : 'KW' '1031'; +KW1032 : 'KW' '1032'; +KW1033 : 'KW' '1033'; +KW1034 : 'KW' '1034'; +KW1035 : 'KW' '1035'; +KW1036 : 'KW' '1036'; +KW1037 : 'KW' '1037'; +KW1038 : 'KW' '1038'; +KW1039 : 'KW' '1039'; +KW1040 : 'KW' '1040'; +KW1041 : 'KW' '1041'; +KW1042 : 'KW' '1042'; +KW1043 : 'KW' '1043'; +KW1044 : 'KW' '1044'; +KW1045 : 'KW' '1045'; +KW1046 : 'KW' '1046'; +KW1047 : 'KW' '1047'; +KW1048 : 'KW' '1048'; +KW1049 : 'KW' '1049'; +KW1050 : 'KW' '1050'; +KW1051 : 'KW' '1051'; +KW1052 : 'KW' '1052'; +KW1053 : 'KW' '1053'; +KW1054 : 'KW' '1054'; +KW1055 : 'KW' '1055'; +KW1056 : 'KW' '1056'; +KW1057 : 'KW' '1057'; +KW1058 : 'KW' '1058'; +KW1059 : 'KW' '1059'; +KW1060 : 'KW' '1060'; +KW1061 : 'KW' '1061'; +KW1062 : 'KW' '1062'; +KW1063 : 'KW' '1063'; +KW1064 : 'KW' '1064'; +KW1065 : 'KW' '1065'; +KW1066 : 'KW' '1066'; +KW1067 : 'KW' '1067'; +KW1068 : 'KW' '1068'; +KW1069 : 'KW' '1069'; +KW1070 : 'KW' '1070'; +KW1071 : 'KW' '1071'; +KW1072 : 'KW' '1072'; +KW1073 : 'KW' '1073'; +KW1074 : 'KW' '1074'; +KW1075 : 'KW' '1075'; +KW1076 : 'KW' '1076'; +KW1077 : 'KW' '1077'; +KW1078 : 'KW' '1078'; +KW1079 : 'KW' '1079'; +KW1080 : 'KW' '1080'; +KW1081 : 'KW' '1081'; +KW1082 : 'KW' '1082'; +KW1083 : 'KW' '1083'; +KW1084 : 'KW' '1084'; +KW1085 : 'KW' '1085'; +KW1086 : 'KW' '1086'; +KW1087 : 'KW' '1087'; +KW1088 : 'KW' '1088'; +KW1089 : 'KW' '1089'; +KW1090 : 'KW' '1090'; +KW1091 : 'KW' '1091'; +KW1092 : 'KW' '1092'; +KW1093 : 'KW' '1093'; +KW1094 : 'KW' '1094'; +KW1095 : 'KW' '1095'; +KW1096 : 'KW' '1096'; +KW1097 : 'KW' '1097'; +KW1098 : 'KW' '1098'; +KW1099 : 'KW' '1099'; +KW1100 : 'KW' '1100'; +KW1101 : 'KW' '1101'; +KW1102 : 'KW' '1102'; +KW1103 : 'KW' '1103'; +KW1104 : 'KW' '1104'; +KW1105 : 'KW' '1105'; +KW1106 : 'KW' '1106'; +KW1107 : 'KW' '1107'; +KW1108 : 'KW' '1108'; +KW1109 : 'KW' '1109'; +KW1110 : 'KW' '1110'; +KW1111 : 'KW' '1111'; +KW1112 : 'KW' '1112'; +KW1113 : 'KW' '1113'; +KW1114 : 'KW' '1114'; +KW1115 : 'KW' '1115'; +KW1116 : 'KW' '1116'; +KW1117 : 'KW' '1117'; +KW1118 : 'KW' '1118'; +KW1119 : 'KW' '1119'; +KW1120 : 'KW' '1120'; +KW1121 : 'KW' '1121'; +KW1122 : 'KW' '1122'; +KW1123 : 'KW' '1123'; +KW1124 : 'KW' '1124'; +KW1125 : 'KW' '1125'; +KW1126 : 'KW' '1126'; +KW1127 : 'KW' '1127'; +KW1128 : 'KW' '1128'; +KW1129 : 'KW' 
'1129'; +KW1130 : 'KW' '1130'; +KW1131 : 'KW' '1131'; +KW1132 : 'KW' '1132'; +KW1133 : 'KW' '1133'; +KW1134 : 'KW' '1134'; +KW1135 : 'KW' '1135'; +KW1136 : 'KW' '1136'; +KW1137 : 'KW' '1137'; +KW1138 : 'KW' '1138'; +KW1139 : 'KW' '1139'; +KW1140 : 'KW' '1140'; +KW1141 : 'KW' '1141'; +KW1142 : 'KW' '1142'; +KW1143 : 'KW' '1143'; +KW1144 : 'KW' '1144'; +KW1145 : 'KW' '1145'; +KW1146 : 'KW' '1146'; +KW1147 : 'KW' '1147'; +KW1148 : 'KW' '1148'; +KW1149 : 'KW' '1149'; +KW1150 : 'KW' '1150'; +KW1151 : 'KW' '1151'; +KW1152 : 'KW' '1152'; +KW1153 : 'KW' '1153'; +KW1154 : 'KW' '1154'; +KW1155 : 'KW' '1155'; +KW1156 : 'KW' '1156'; +KW1157 : 'KW' '1157'; +KW1158 : 'KW' '1158'; +KW1159 : 'KW' '1159'; +KW1160 : 'KW' '1160'; +KW1161 : 'KW' '1161'; +KW1162 : 'KW' '1162'; +KW1163 : 'KW' '1163'; +KW1164 : 'KW' '1164'; +KW1165 : 'KW' '1165'; +KW1166 : 'KW' '1166'; +KW1167 : 'KW' '1167'; +KW1168 : 'KW' '1168'; +KW1169 : 'KW' '1169'; +KW1170 : 'KW' '1170'; +KW1171 : 'KW' '1171'; +KW1172 : 'KW' '1172'; +KW1173 : 'KW' '1173'; +KW1174 : 'KW' '1174'; +KW1175 : 'KW' '1175'; +KW1176 : 'KW' '1176'; +KW1177 : 'KW' '1177'; +KW1178 : 'KW' '1178'; +KW1179 : 'KW' '1179'; +KW1180 : 'KW' '1180'; +KW1181 : 'KW' '1181'; +KW1182 : 'KW' '1182'; +KW1183 : 'KW' '1183'; +KW1184 : 'KW' '1184'; +KW1185 : 'KW' '1185'; +KW1186 : 'KW' '1186'; +KW1187 : 'KW' '1187'; +KW1188 : 'KW' '1188'; +KW1189 : 'KW' '1189'; +KW1190 : 'KW' '1190'; +KW1191 : 'KW' '1191'; +KW1192 : 'KW' '1192'; +KW1193 : 'KW' '1193'; +KW1194 : 'KW' '1194'; +KW1195 : 'KW' '1195'; +KW1196 : 'KW' '1196'; +KW1197 : 'KW' '1197'; +KW1198 : 'KW' '1198'; +KW1199 : 'KW' '1199'; +KW1200 : 'KW' '1200'; +KW1201 : 'KW' '1201'; +KW1202 : 'KW' '1202'; +KW1203 : 'KW' '1203'; +KW1204 : 'KW' '1204'; +KW1205 : 'KW' '1205'; +KW1206 : 'KW' '1206'; +KW1207 : 'KW' '1207'; +KW1208 : 'KW' '1208'; +KW1209 : 'KW' '1209'; +KW1210 : 'KW' '1210'; +KW1211 : 'KW' '1211'; +KW1212 : 'KW' '1212'; +KW1213 : 'KW' '1213'; +KW1214 : 'KW' '1214'; +KW1215 : 'KW' '1215'; +KW1216 : 'KW' '1216'; +KW1217 : 'KW' '1217'; +KW1218 : 'KW' '1218'; +KW1219 : 'KW' '1219'; +KW1220 : 'KW' '1220'; +KW1221 : 'KW' '1221'; +KW1222 : 'KW' '1222'; +KW1223 : 'KW' '1223'; +KW1224 : 'KW' '1224'; +KW1225 : 'KW' '1225'; +KW1226 : 'KW' '1226'; +KW1227 : 'KW' '1227'; +KW1228 : 'KW' '1228'; +KW1229 : 'KW' '1229'; +KW1230 : 'KW' '1230'; +KW1231 : 'KW' '1231'; +KW1232 : 'KW' '1232'; +KW1233 : 'KW' '1233'; +KW1234 : 'KW' '1234'; +KW1235 : 'KW' '1235'; +KW1236 : 'KW' '1236'; +KW1237 : 'KW' '1237'; +KW1238 : 'KW' '1238'; +KW1239 : 'KW' '1239'; +KW1240 : 'KW' '1240'; +KW1241 : 'KW' '1241'; +KW1242 : 'KW' '1242'; +KW1243 : 'KW' '1243'; +KW1244 : 'KW' '1244'; +KW1245 : 'KW' '1245'; +KW1246 : 'KW' '1246'; +KW1247 : 'KW' '1247'; +KW1248 : 'KW' '1248'; +KW1249 : 'KW' '1249'; +KW1250 : 'KW' '1250'; +KW1251 : 'KW' '1251'; +KW1252 : 'KW' '1252'; +KW1253 : 'KW' '1253'; +KW1254 : 'KW' '1254'; +KW1255 : 'KW' '1255'; +KW1256 : 'KW' '1256'; +KW1257 : 'KW' '1257'; +KW1258 : 'KW' '1258'; +KW1259 : 'KW' '1259'; +KW1260 : 'KW' '1260'; +KW1261 : 'KW' '1261'; +KW1262 : 'KW' '1262'; +KW1263 : 'KW' '1263'; +KW1264 : 'KW' '1264'; +KW1265 : 'KW' '1265'; +KW1266 : 'KW' '1266'; +KW1267 : 'KW' '1267'; +KW1268 : 'KW' '1268'; +KW1269 : 'KW' '1269'; +KW1270 : 'KW' '1270'; +KW1271 : 'KW' '1271'; +KW1272 : 'KW' '1272'; +KW1273 : 'KW' '1273'; +KW1274 : 'KW' '1274'; +KW1275 : 'KW' '1275'; +KW1276 : 'KW' '1276'; +KW1277 : 'KW' '1277'; +KW1278 : 'KW' '1278'; +KW1279 : 'KW' '1279'; +KW1280 : 'KW' '1280'; +KW1281 : 'KW' '1281'; +KW1282 : 'KW' '1282'; +KW1283 : 'KW' '1283'; 
+KW1284 : 'KW' '1284'; +KW1285 : 'KW' '1285'; +KW1286 : 'KW' '1286'; +KW1287 : 'KW' '1287'; +KW1288 : 'KW' '1288'; +KW1289 : 'KW' '1289'; +KW1290 : 'KW' '1290'; +KW1291 : 'KW' '1291'; +KW1292 : 'KW' '1292'; +KW1293 : 'KW' '1293'; +KW1294 : 'KW' '1294'; +KW1295 : 'KW' '1295'; +KW1296 : 'KW' '1296'; +KW1297 : 'KW' '1297'; +KW1298 : 'KW' '1298'; +KW1299 : 'KW' '1299'; +KW1300 : 'KW' '1300'; +KW1301 : 'KW' '1301'; +KW1302 : 'KW' '1302'; +KW1303 : 'KW' '1303'; +KW1304 : 'KW' '1304'; +KW1305 : 'KW' '1305'; +KW1306 : 'KW' '1306'; +KW1307 : 'KW' '1307'; +KW1308 : 'KW' '1308'; +KW1309 : 'KW' '1309'; +KW1310 : 'KW' '1310'; +KW1311 : 'KW' '1311'; +KW1312 : 'KW' '1312'; +KW1313 : 'KW' '1313'; +KW1314 : 'KW' '1314'; +KW1315 : 'KW' '1315'; +KW1316 : 'KW' '1316'; +KW1317 : 'KW' '1317'; +KW1318 : 'KW' '1318'; +KW1319 : 'KW' '1319'; +KW1320 : 'KW' '1320'; +KW1321 : 'KW' '1321'; +KW1322 : 'KW' '1322'; +KW1323 : 'KW' '1323'; +KW1324 : 'KW' '1324'; +KW1325 : 'KW' '1325'; +KW1326 : 'KW' '1326'; +KW1327 : 'KW' '1327'; +KW1328 : 'KW' '1328'; +KW1329 : 'KW' '1329'; +KW1330 : 'KW' '1330'; +KW1331 : 'KW' '1331'; +KW1332 : 'KW' '1332'; +KW1333 : 'KW' '1333'; +KW1334 : 'KW' '1334'; +KW1335 : 'KW' '1335'; +KW1336 : 'KW' '1336'; +KW1337 : 'KW' '1337'; +KW1338 : 'KW' '1338'; +KW1339 : 'KW' '1339'; +KW1340 : 'KW' '1340'; +KW1341 : 'KW' '1341'; +KW1342 : 'KW' '1342'; +KW1343 : 'KW' '1343'; +KW1344 : 'KW' '1344'; +KW1345 : 'KW' '1345'; +KW1346 : 'KW' '1346'; +KW1347 : 'KW' '1347'; +KW1348 : 'KW' '1348'; +KW1349 : 'KW' '1349'; +KW1350 : 'KW' '1350'; +KW1351 : 'KW' '1351'; +KW1352 : 'KW' '1352'; +KW1353 : 'KW' '1353'; +KW1354 : 'KW' '1354'; +KW1355 : 'KW' '1355'; +KW1356 : 'KW' '1356'; +KW1357 : 'KW' '1357'; +KW1358 : 'KW' '1358'; +KW1359 : 'KW' '1359'; +KW1360 : 'KW' '1360'; +KW1361 : 'KW' '1361'; +KW1362 : 'KW' '1362'; +KW1363 : 'KW' '1363'; +KW1364 : 'KW' '1364'; +KW1365 : 'KW' '1365'; +KW1366 : 'KW' '1366'; +KW1367 : 'KW' '1367'; +KW1368 : 'KW' '1368'; +KW1369 : 'KW' '1369'; +KW1370 : 'KW' '1370'; +KW1371 : 'KW' '1371'; +KW1372 : 'KW' '1372'; +KW1373 : 'KW' '1373'; +KW1374 : 'KW' '1374'; +KW1375 : 'KW' '1375'; +KW1376 : 'KW' '1376'; +KW1377 : 'KW' '1377'; +KW1378 : 'KW' '1378'; +KW1379 : 'KW' '1379'; +KW1380 : 'KW' '1380'; +KW1381 : 'KW' '1381'; +KW1382 : 'KW' '1382'; +KW1383 : 'KW' '1383'; +KW1384 : 'KW' '1384'; +KW1385 : 'KW' '1385'; +KW1386 : 'KW' '1386'; +KW1387 : 'KW' '1387'; +KW1388 : 'KW' '1388'; +KW1389 : 'KW' '1389'; +KW1390 : 'KW' '1390'; +KW1391 : 'KW' '1391'; +KW1392 : 'KW' '1392'; +KW1393 : 'KW' '1393'; +KW1394 : 'KW' '1394'; +KW1395 : 'KW' '1395'; +KW1396 : 'KW' '1396'; +KW1397 : 'KW' '1397'; +KW1398 : 'KW' '1398'; +KW1399 : 'KW' '1399'; +KW1400 : 'KW' '1400'; +KW1401 : 'KW' '1401'; +KW1402 : 'KW' '1402'; +KW1403 : 'KW' '1403'; +KW1404 : 'KW' '1404'; +KW1405 : 'KW' '1405'; +KW1406 : 'KW' '1406'; +KW1407 : 'KW' '1407'; +KW1408 : 'KW' '1408'; +KW1409 : 'KW' '1409'; +KW1410 : 'KW' '1410'; +KW1411 : 'KW' '1411'; +KW1412 : 'KW' '1412'; +KW1413 : 'KW' '1413'; +KW1414 : 'KW' '1414'; +KW1415 : 'KW' '1415'; +KW1416 : 'KW' '1416'; +KW1417 : 'KW' '1417'; +KW1418 : 'KW' '1418'; +KW1419 : 'KW' '1419'; +KW1420 : 'KW' '1420'; +KW1421 : 'KW' '1421'; +KW1422 : 'KW' '1422'; +KW1423 : 'KW' '1423'; +KW1424 : 'KW' '1424'; +KW1425 : 'KW' '1425'; +KW1426 : 'KW' '1426'; +KW1427 : 'KW' '1427'; +KW1428 : 'KW' '1428'; +KW1429 : 'KW' '1429'; +KW1430 : 'KW' '1430'; +KW1431 : 'KW' '1431'; +KW1432 : 'KW' '1432'; +KW1433 : 'KW' '1433'; +KW1434 : 'KW' '1434'; +KW1435 : 'KW' '1435'; +KW1436 : 'KW' '1436'; +KW1437 : 'KW' '1437'; +KW1438 : 
'KW' '1438'; +KW1439 : 'KW' '1439'; +KW1440 : 'KW' '1440'; +KW1441 : 'KW' '1441'; +KW1442 : 'KW' '1442'; +KW1443 : 'KW' '1443'; +KW1444 : 'KW' '1444'; +KW1445 : 'KW' '1445'; +KW1446 : 'KW' '1446'; +KW1447 : 'KW' '1447'; +KW1448 : 'KW' '1448'; +KW1449 : 'KW' '1449'; +KW1450 : 'KW' '1450'; +KW1451 : 'KW' '1451'; +KW1452 : 'KW' '1452'; +KW1453 : 'KW' '1453'; +KW1454 : 'KW' '1454'; +KW1455 : 'KW' '1455'; +KW1456 : 'KW' '1456'; +KW1457 : 'KW' '1457'; +KW1458 : 'KW' '1458'; +KW1459 : 'KW' '1459'; +KW1460 : 'KW' '1460'; +KW1461 : 'KW' '1461'; +KW1462 : 'KW' '1462'; +KW1463 : 'KW' '1463'; +KW1464 : 'KW' '1464'; +KW1465 : 'KW' '1465'; +KW1466 : 'KW' '1466'; +KW1467 : 'KW' '1467'; +KW1468 : 'KW' '1468'; +KW1469 : 'KW' '1469'; +KW1470 : 'KW' '1470'; +KW1471 : 'KW' '1471'; +KW1472 : 'KW' '1472'; +KW1473 : 'KW' '1473'; +KW1474 : 'KW' '1474'; +KW1475 : 'KW' '1475'; +KW1476 : 'KW' '1476'; +KW1477 : 'KW' '1477'; +KW1478 : 'KW' '1478'; +KW1479 : 'KW' '1479'; +KW1480 : 'KW' '1480'; +KW1481 : 'KW' '1481'; +KW1482 : 'KW' '1482'; +KW1483 : 'KW' '1483'; +KW1484 : 'KW' '1484'; +KW1485 : 'KW' '1485'; +KW1486 : 'KW' '1486'; +KW1487 : 'KW' '1487'; +KW1488 : 'KW' '1488'; +KW1489 : 'KW' '1489'; +KW1490 : 'KW' '1490'; +KW1491 : 'KW' '1491'; +KW1492 : 'KW' '1492'; +KW1493 : 'KW' '1493'; +KW1494 : 'KW' '1494'; +KW1495 : 'KW' '1495'; +KW1496 : 'KW' '1496'; +KW1497 : 'KW' '1497'; +KW1498 : 'KW' '1498'; +KW1499 : 'KW' '1499'; +KW1500 : 'KW' '1500'; +KW1501 : 'KW' '1501'; +KW1502 : 'KW' '1502'; +KW1503 : 'KW' '1503'; +KW1504 : 'KW' '1504'; +KW1505 : 'KW' '1505'; +KW1506 : 'KW' '1506'; +KW1507 : 'KW' '1507'; +KW1508 : 'KW' '1508'; +KW1509 : 'KW' '1509'; +KW1510 : 'KW' '1510'; +KW1511 : 'KW' '1511'; +KW1512 : 'KW' '1512'; +KW1513 : 'KW' '1513'; +KW1514 : 'KW' '1514'; +KW1515 : 'KW' '1515'; +KW1516 : 'KW' '1516'; +KW1517 : 'KW' '1517'; +KW1518 : 'KW' '1518'; +KW1519 : 'KW' '1519'; +KW1520 : 'KW' '1520'; +KW1521 : 'KW' '1521'; +KW1522 : 'KW' '1522'; +KW1523 : 'KW' '1523'; +KW1524 : 'KW' '1524'; +KW1525 : 'KW' '1525'; +KW1526 : 'KW' '1526'; +KW1527 : 'KW' '1527'; +KW1528 : 'KW' '1528'; +KW1529 : 'KW' '1529'; +KW1530 : 'KW' '1530'; +KW1531 : 'KW' '1531'; +KW1532 : 'KW' '1532'; +KW1533 : 'KW' '1533'; +KW1534 : 'KW' '1534'; +KW1535 : 'KW' '1535'; +KW1536 : 'KW' '1536'; +KW1537 : 'KW' '1537'; +KW1538 : 'KW' '1538'; +KW1539 : 'KW' '1539'; +KW1540 : 'KW' '1540'; +KW1541 : 'KW' '1541'; +KW1542 : 'KW' '1542'; +KW1543 : 'KW' '1543'; +KW1544 : 'KW' '1544'; +KW1545 : 'KW' '1545'; +KW1546 : 'KW' '1546'; +KW1547 : 'KW' '1547'; +KW1548 : 'KW' '1548'; +KW1549 : 'KW' '1549'; +KW1550 : 'KW' '1550'; +KW1551 : 'KW' '1551'; +KW1552 : 'KW' '1552'; +KW1553 : 'KW' '1553'; +KW1554 : 'KW' '1554'; +KW1555 : 'KW' '1555'; +KW1556 : 'KW' '1556'; +KW1557 : 'KW' '1557'; +KW1558 : 'KW' '1558'; +KW1559 : 'KW' '1559'; +KW1560 : 'KW' '1560'; +KW1561 : 'KW' '1561'; +KW1562 : 'KW' '1562'; +KW1563 : 'KW' '1563'; +KW1564 : 'KW' '1564'; +KW1565 : 'KW' '1565'; +KW1566 : 'KW' '1566'; +KW1567 : 'KW' '1567'; +KW1568 : 'KW' '1568'; +KW1569 : 'KW' '1569'; +KW1570 : 'KW' '1570'; +KW1571 : 'KW' '1571'; +KW1572 : 'KW' '1572'; +KW1573 : 'KW' '1573'; +KW1574 : 'KW' '1574'; +KW1575 : 'KW' '1575'; +KW1576 : 'KW' '1576'; +KW1577 : 'KW' '1577'; +KW1578 : 'KW' '1578'; +KW1579 : 'KW' '1579'; +KW1580 : 'KW' '1580'; +KW1581 : 'KW' '1581'; +KW1582 : 'KW' '1582'; +KW1583 : 'KW' '1583'; +KW1584 : 'KW' '1584'; +KW1585 : 'KW' '1585'; +KW1586 : 'KW' '1586'; +KW1587 : 'KW' '1587'; +KW1588 : 'KW' '1588'; +KW1589 : 'KW' '1589'; +KW1590 : 'KW' '1590'; +KW1591 : 'KW' '1591'; +KW1592 : 'KW' '1592'; 
+KW1593 : 'KW' '1593'; +KW1594 : 'KW' '1594'; +KW1595 : 'KW' '1595'; +KW1596 : 'KW' '1596'; +KW1597 : 'KW' '1597'; +KW1598 : 'KW' '1598'; +KW1599 : 'KW' '1599'; +KW1600 : 'KW' '1600'; +KW1601 : 'KW' '1601'; +KW1602 : 'KW' '1602'; +KW1603 : 'KW' '1603'; +KW1604 : 'KW' '1604'; +KW1605 : 'KW' '1605'; +KW1606 : 'KW' '1606'; +KW1607 : 'KW' '1607'; +KW1608 : 'KW' '1608'; +KW1609 : 'KW' '1609'; +KW1610 : 'KW' '1610'; +KW1611 : 'KW' '1611'; +KW1612 : 'KW' '1612'; +KW1613 : 'KW' '1613'; +KW1614 : 'KW' '1614'; +KW1615 : 'KW' '1615'; +KW1616 : 'KW' '1616'; +KW1617 : 'KW' '1617'; +KW1618 : 'KW' '1618'; +KW1619 : 'KW' '1619'; +KW1620 : 'KW' '1620'; +KW1621 : 'KW' '1621'; +KW1622 : 'KW' '1622'; +KW1623 : 'KW' '1623'; +KW1624 : 'KW' '1624'; +KW1625 : 'KW' '1625'; +KW1626 : 'KW' '1626'; +KW1627 : 'KW' '1627'; +KW1628 : 'KW' '1628'; +KW1629 : 'KW' '1629'; +KW1630 : 'KW' '1630'; +KW1631 : 'KW' '1631'; +KW1632 : 'KW' '1632'; +KW1633 : 'KW' '1633'; +KW1634 : 'KW' '1634'; +KW1635 : 'KW' '1635'; +KW1636 : 'KW' '1636'; +KW1637 : 'KW' '1637'; +KW1638 : 'KW' '1638'; +KW1639 : 'KW' '1639'; +KW1640 : 'KW' '1640'; +KW1641 : 'KW' '1641'; +KW1642 : 'KW' '1642'; +KW1643 : 'KW' '1643'; +KW1644 : 'KW' '1644'; +KW1645 : 'KW' '1645'; +KW1646 : 'KW' '1646'; +KW1647 : 'KW' '1647'; +KW1648 : 'KW' '1648'; +KW1649 : 'KW' '1649'; +KW1650 : 'KW' '1650'; +KW1651 : 'KW' '1651'; +KW1652 : 'KW' '1652'; +KW1653 : 'KW' '1653'; +KW1654 : 'KW' '1654'; +KW1655 : 'KW' '1655'; +KW1656 : 'KW' '1656'; +KW1657 : 'KW' '1657'; +KW1658 : 'KW' '1658'; +KW1659 : 'KW' '1659'; +KW1660 : 'KW' '1660'; +KW1661 : 'KW' '1661'; +KW1662 : 'KW' '1662'; +KW1663 : 'KW' '1663'; +KW1664 : 'KW' '1664'; +KW1665 : 'KW' '1665'; +KW1666 : 'KW' '1666'; +KW1667 : 'KW' '1667'; +KW1668 : 'KW' '1668'; +KW1669 : 'KW' '1669'; +KW1670 : 'KW' '1670'; +KW1671 : 'KW' '1671'; +KW1672 : 'KW' '1672'; +KW1673 : 'KW' '1673'; +KW1674 : 'KW' '1674'; +KW1675 : 'KW' '1675'; +KW1676 : 'KW' '1676'; +KW1677 : 'KW' '1677'; +KW1678 : 'KW' '1678'; +KW1679 : 'KW' '1679'; +KW1680 : 'KW' '1680'; +KW1681 : 'KW' '1681'; +KW1682 : 'KW' '1682'; +KW1683 : 'KW' '1683'; +KW1684 : 'KW' '1684'; +KW1685 : 'KW' '1685'; +KW1686 : 'KW' '1686'; +KW1687 : 'KW' '1687'; +KW1688 : 'KW' '1688'; +KW1689 : 'KW' '1689'; +KW1690 : 'KW' '1690'; +KW1691 : 'KW' '1691'; +KW1692 : 'KW' '1692'; +KW1693 : 'KW' '1693'; +KW1694 : 'KW' '1694'; +KW1695 : 'KW' '1695'; +KW1696 : 'KW' '1696'; +KW1697 : 'KW' '1697'; +KW1698 : 'KW' '1698'; +KW1699 : 'KW' '1699'; +KW1700 : 'KW' '1700'; +KW1701 : 'KW' '1701'; +KW1702 : 'KW' '1702'; +KW1703 : 'KW' '1703'; +KW1704 : 'KW' '1704'; +KW1705 : 'KW' '1705'; +KW1706 : 'KW' '1706'; +KW1707 : 'KW' '1707'; +KW1708 : 'KW' '1708'; +KW1709 : 'KW' '1709'; +KW1710 : 'KW' '1710'; +KW1711 : 'KW' '1711'; +KW1712 : 'KW' '1712'; +KW1713 : 'KW' '1713'; +KW1714 : 'KW' '1714'; +KW1715 : 'KW' '1715'; +KW1716 : 'KW' '1716'; +KW1717 : 'KW' '1717'; +KW1718 : 'KW' '1718'; +KW1719 : 'KW' '1719'; +KW1720 : 'KW' '1720'; +KW1721 : 'KW' '1721'; +KW1722 : 'KW' '1722'; +KW1723 : 'KW' '1723'; +KW1724 : 'KW' '1724'; +KW1725 : 'KW' '1725'; +KW1726 : 'KW' '1726'; +KW1727 : 'KW' '1727'; +KW1728 : 'KW' '1728'; +KW1729 : 'KW' '1729'; +KW1730 : 'KW' '1730'; +KW1731 : 'KW' '1731'; +KW1732 : 'KW' '1732'; +KW1733 : 'KW' '1733'; +KW1734 : 'KW' '1734'; +KW1735 : 'KW' '1735'; +KW1736 : 'KW' '1736'; +KW1737 : 'KW' '1737'; +KW1738 : 'KW' '1738'; +KW1739 : 'KW' '1739'; +KW1740 : 'KW' '1740'; +KW1741 : 'KW' '1741'; +KW1742 : 'KW' '1742'; +KW1743 : 'KW' '1743'; +KW1744 : 'KW' '1744'; +KW1745 : 'KW' '1745'; +KW1746 : 'KW' '1746'; +KW1747 : 
'KW' '1747'; +KW1748 : 'KW' '1748'; +KW1749 : 'KW' '1749'; +KW1750 : 'KW' '1750'; +KW1751 : 'KW' '1751'; +KW1752 : 'KW' '1752'; +KW1753 : 'KW' '1753'; +KW1754 : 'KW' '1754'; +KW1755 : 'KW' '1755'; +KW1756 : 'KW' '1756'; +KW1757 : 'KW' '1757'; +KW1758 : 'KW' '1758'; +KW1759 : 'KW' '1759'; +KW1760 : 'KW' '1760'; +KW1761 : 'KW' '1761'; +KW1762 : 'KW' '1762'; +KW1763 : 'KW' '1763'; +KW1764 : 'KW' '1764'; +KW1765 : 'KW' '1765'; +KW1766 : 'KW' '1766'; +KW1767 : 'KW' '1767'; +KW1768 : 'KW' '1768'; +KW1769 : 'KW' '1769'; +KW1770 : 'KW' '1770'; +KW1771 : 'KW' '1771'; +KW1772 : 'KW' '1772'; +KW1773 : 'KW' '1773'; +KW1774 : 'KW' '1774'; +KW1775 : 'KW' '1775'; +KW1776 : 'KW' '1776'; +KW1777 : 'KW' '1777'; +KW1778 : 'KW' '1778'; +KW1779 : 'KW' '1779'; +KW1780 : 'KW' '1780'; +KW1781 : 'KW' '1781'; +KW1782 : 'KW' '1782'; +KW1783 : 'KW' '1783'; +KW1784 : 'KW' '1784'; +KW1785 : 'KW' '1785'; +KW1786 : 'KW' '1786'; +KW1787 : 'KW' '1787'; +KW1788 : 'KW' '1788'; +KW1789 : 'KW' '1789'; +KW1790 : 'KW' '1790'; +KW1791 : 'KW' '1791'; +KW1792 : 'KW' '1792'; +KW1793 : 'KW' '1793'; +KW1794 : 'KW' '1794'; +KW1795 : 'KW' '1795'; +KW1796 : 'KW' '1796'; +KW1797 : 'KW' '1797'; +KW1798 : 'KW' '1798'; +KW1799 : 'KW' '1799'; +KW1800 : 'KW' '1800'; +KW1801 : 'KW' '1801'; +KW1802 : 'KW' '1802'; +KW1803 : 'KW' '1803'; +KW1804 : 'KW' '1804'; +KW1805 : 'KW' '1805'; +KW1806 : 'KW' '1806'; +KW1807 : 'KW' '1807'; +KW1808 : 'KW' '1808'; +KW1809 : 'KW' '1809'; +KW1810 : 'KW' '1810'; +KW1811 : 'KW' '1811'; +KW1812 : 'KW' '1812'; +KW1813 : 'KW' '1813'; +KW1814 : 'KW' '1814'; +KW1815 : 'KW' '1815'; +KW1816 : 'KW' '1816'; +KW1817 : 'KW' '1817'; +KW1818 : 'KW' '1818'; +KW1819 : 'KW' '1819'; +KW1820 : 'KW' '1820'; +KW1821 : 'KW' '1821'; +KW1822 : 'KW' '1822'; +KW1823 : 'KW' '1823'; +KW1824 : 'KW' '1824'; +KW1825 : 'KW' '1825'; +KW1826 : 'KW' '1826'; +KW1827 : 'KW' '1827'; +KW1828 : 'KW' '1828'; +KW1829 : 'KW' '1829'; +KW1830 : 'KW' '1830'; +KW1831 : 'KW' '1831'; +KW1832 : 'KW' '1832'; +KW1833 : 'KW' '1833'; +KW1834 : 'KW' '1834'; +KW1835 : 'KW' '1835'; +KW1836 : 'KW' '1836'; +KW1837 : 'KW' '1837'; +KW1838 : 'KW' '1838'; +KW1839 : 'KW' '1839'; +KW1840 : 'KW' '1840'; +KW1841 : 'KW' '1841'; +KW1842 : 'KW' '1842'; +KW1843 : 'KW' '1843'; +KW1844 : 'KW' '1844'; +KW1845 : 'KW' '1845'; +KW1846 : 'KW' '1846'; +KW1847 : 'KW' '1847'; +KW1848 : 'KW' '1848'; +KW1849 : 'KW' '1849'; +KW1850 : 'KW' '1850'; +KW1851 : 'KW' '1851'; +KW1852 : 'KW' '1852'; +KW1853 : 'KW' '1853'; +KW1854 : 'KW' '1854'; +KW1855 : 'KW' '1855'; +KW1856 : 'KW' '1856'; +KW1857 : 'KW' '1857'; +KW1858 : 'KW' '1858'; +KW1859 : 'KW' '1859'; +KW1860 : 'KW' '1860'; +KW1861 : 'KW' '1861'; +KW1862 : 'KW' '1862'; +KW1863 : 'KW' '1863'; +KW1864 : 'KW' '1864'; +KW1865 : 'KW' '1865'; +KW1866 : 'KW' '1866'; +KW1867 : 'KW' '1867'; +KW1868 : 'KW' '1868'; +KW1869 : 'KW' '1869'; +KW1870 : 'KW' '1870'; +KW1871 : 'KW' '1871'; +KW1872 : 'KW' '1872'; +KW1873 : 'KW' '1873'; +KW1874 : 'KW' '1874'; +KW1875 : 'KW' '1875'; +KW1876 : 'KW' '1876'; +KW1877 : 'KW' '1877'; +KW1878 : 'KW' '1878'; +KW1879 : 'KW' '1879'; +KW1880 : 'KW' '1880'; +KW1881 : 'KW' '1881'; +KW1882 : 'KW' '1882'; +KW1883 : 'KW' '1883'; +KW1884 : 'KW' '1884'; +KW1885 : 'KW' '1885'; +KW1886 : 'KW' '1886'; +KW1887 : 'KW' '1887'; +KW1888 : 'KW' '1888'; +KW1889 : 'KW' '1889'; +KW1890 : 'KW' '1890'; +KW1891 : 'KW' '1891'; +KW1892 : 'KW' '1892'; +KW1893 : 'KW' '1893'; +KW1894 : 'KW' '1894'; +KW1895 : 'KW' '1895'; +KW1896 : 'KW' '1896'; +KW1897 : 'KW' '1897'; +KW1898 : 'KW' '1898'; +KW1899 : 'KW' '1899'; +KW1900 : 'KW' '1900'; +KW1901 : 'KW' '1901'; 
+KW1902 : 'KW' '1902'; +KW1903 : 'KW' '1903'; +KW1904 : 'KW' '1904'; +KW1905 : 'KW' '1905'; +KW1906 : 'KW' '1906'; +KW1907 : 'KW' '1907'; +KW1908 : 'KW' '1908'; +KW1909 : 'KW' '1909'; +KW1910 : 'KW' '1910'; +KW1911 : 'KW' '1911'; +KW1912 : 'KW' '1912'; +KW1913 : 'KW' '1913'; +KW1914 : 'KW' '1914'; +KW1915 : 'KW' '1915'; +KW1916 : 'KW' '1916'; +KW1917 : 'KW' '1917'; +KW1918 : 'KW' '1918'; +KW1919 : 'KW' '1919'; +KW1920 : 'KW' '1920'; +KW1921 : 'KW' '1921'; +KW1922 : 'KW' '1922'; +KW1923 : 'KW' '1923'; +KW1924 : 'KW' '1924'; +KW1925 : 'KW' '1925'; +KW1926 : 'KW' '1926'; +KW1927 : 'KW' '1927'; +KW1928 : 'KW' '1928'; +KW1929 : 'KW' '1929'; +KW1930 : 'KW' '1930'; +KW1931 : 'KW' '1931'; +KW1932 : 'KW' '1932'; +KW1933 : 'KW' '1933'; +KW1934 : 'KW' '1934'; +KW1935 : 'KW' '1935'; +KW1936 : 'KW' '1936'; +KW1937 : 'KW' '1937'; +KW1938 : 'KW' '1938'; +KW1939 : 'KW' '1939'; +KW1940 : 'KW' '1940'; +KW1941 : 'KW' '1941'; +KW1942 : 'KW' '1942'; +KW1943 : 'KW' '1943'; +KW1944 : 'KW' '1944'; +KW1945 : 'KW' '1945'; +KW1946 : 'KW' '1946'; +KW1947 : 'KW' '1947'; +KW1948 : 'KW' '1948'; +KW1949 : 'KW' '1949'; +KW1950 : 'KW' '1950'; +KW1951 : 'KW' '1951'; +KW1952 : 'KW' '1952'; +KW1953 : 'KW' '1953'; +KW1954 : 'KW' '1954'; +KW1955 : 'KW' '1955'; +KW1956 : 'KW' '1956'; +KW1957 : 'KW' '1957'; +KW1958 : 'KW' '1958'; +KW1959 : 'KW' '1959'; +KW1960 : 'KW' '1960'; +KW1961 : 'KW' '1961'; +KW1962 : 'KW' '1962'; +KW1963 : 'KW' '1963'; +KW1964 : 'KW' '1964'; +KW1965 : 'KW' '1965'; +KW1966 : 'KW' '1966'; +KW1967 : 'KW' '1967'; +KW1968 : 'KW' '1968'; +KW1969 : 'KW' '1969'; +KW1970 : 'KW' '1970'; +KW1971 : 'KW' '1971'; +KW1972 : 'KW' '1972'; +KW1973 : 'KW' '1973'; +KW1974 : 'KW' '1974'; +KW1975 : 'KW' '1975'; +KW1976 : 'KW' '1976'; +KW1977 : 'KW' '1977'; +KW1978 : 'KW' '1978'; +KW1979 : 'KW' '1979'; +KW1980 : 'KW' '1980'; +KW1981 : 'KW' '1981'; +KW1982 : 'KW' '1982'; +KW1983 : 'KW' '1983'; +KW1984 : 'KW' '1984'; +KW1985 : 'KW' '1985'; +KW1986 : 'KW' '1986'; +KW1987 : 'KW' '1987'; +KW1988 : 'KW' '1988'; +KW1989 : 'KW' '1989'; +KW1990 : 'KW' '1990'; +KW1991 : 'KW' '1991'; +KW1992 : 'KW' '1992'; +KW1993 : 'KW' '1993'; +KW1994 : 'KW' '1994'; +KW1995 : 'KW' '1995'; +KW1996 : 'KW' '1996'; +KW1997 : 'KW' '1997'; +KW1998 : 'KW' '1998'; +KW1999 : 'KW' '1999'; +KW2000 : 'KW' '2000'; +KW2001 : 'KW' '2001'; +KW2002 : 'KW' '2002'; +KW2003 : 'KW' '2003'; +KW2004 : 'KW' '2004'; +KW2005 : 'KW' '2005'; +KW2006 : 'KW' '2006'; +KW2007 : 'KW' '2007'; +KW2008 : 'KW' '2008'; +KW2009 : 'KW' '2009'; +KW2010 : 'KW' '2010'; +KW2011 : 'KW' '2011'; +KW2012 : 'KW' '2012'; +KW2013 : 'KW' '2013'; +KW2014 : 'KW' '2014'; +KW2015 : 'KW' '2015'; +KW2016 : 'KW' '2016'; +KW2017 : 'KW' '2017'; +KW2018 : 'KW' '2018'; +KW2019 : 'KW' '2019'; +KW2020 : 'KW' '2020'; +KW2021 : 'KW' '2021'; +KW2022 : 'KW' '2022'; +KW2023 : 'KW' '2023'; +KW2024 : 'KW' '2024'; +KW2025 : 'KW' '2025'; +KW2026 : 'KW' '2026'; +KW2027 : 'KW' '2027'; +KW2028 : 'KW' '2028'; +KW2029 : 'KW' '2029'; +KW2030 : 'KW' '2030'; +KW2031 : 'KW' '2031'; +KW2032 : 'KW' '2032'; +KW2033 : 'KW' '2033'; +KW2034 : 'KW' '2034'; +KW2035 : 'KW' '2035'; +KW2036 : 'KW' '2036'; +KW2037 : 'KW' '2037'; +KW2038 : 'KW' '2038'; +KW2039 : 'KW' '2039'; +KW2040 : 'KW' '2040'; +KW2041 : 'KW' '2041'; +KW2042 : 'KW' '2042'; +KW2043 : 'KW' '2043'; +KW2044 : 'KW' '2044'; +KW2045 : 'KW' '2045'; +KW2046 : 'KW' '2046'; +KW2047 : 'KW' '2047'; +KW2048 : 'KW' '2048'; +KW2049 : 'KW' '2049'; +KW2050 : 'KW' '2050'; +KW2051 : 'KW' '2051'; +KW2052 : 'KW' '2052'; +KW2053 : 'KW' '2053'; +KW2054 : 'KW' '2054'; +KW2055 : 'KW' '2055'; +KW2056 : 
'KW' '2056'; +KW2057 : 'KW' '2057'; +KW2058 : 'KW' '2058'; +KW2059 : 'KW' '2059'; +KW2060 : 'KW' '2060'; +KW2061 : 'KW' '2061'; +KW2062 : 'KW' '2062'; +KW2063 : 'KW' '2063'; +KW2064 : 'KW' '2064'; +KW2065 : 'KW' '2065'; +KW2066 : 'KW' '2066'; +KW2067 : 'KW' '2067'; +KW2068 : 'KW' '2068'; +KW2069 : 'KW' '2069'; +KW2070 : 'KW' '2070'; +KW2071 : 'KW' '2071'; +KW2072 : 'KW' '2072'; +KW2073 : 'KW' '2073'; +KW2074 : 'KW' '2074'; +KW2075 : 'KW' '2075'; +KW2076 : 'KW' '2076'; +KW2077 : 'KW' '2077'; +KW2078 : 'KW' '2078'; +KW2079 : 'KW' '2079'; +KW2080 : 'KW' '2080'; +KW2081 : 'KW' '2081'; +KW2082 : 'KW' '2082'; +KW2083 : 'KW' '2083'; +KW2084 : 'KW' '2084'; +KW2085 : 'KW' '2085'; +KW2086 : 'KW' '2086'; +KW2087 : 'KW' '2087'; +KW2088 : 'KW' '2088'; +KW2089 : 'KW' '2089'; +KW2090 : 'KW' '2090'; +KW2091 : 'KW' '2091'; +KW2092 : 'KW' '2092'; +KW2093 : 'KW' '2093'; +KW2094 : 'KW' '2094'; +KW2095 : 'KW' '2095'; +KW2096 : 'KW' '2096'; +KW2097 : 'KW' '2097'; +KW2098 : 'KW' '2098'; +KW2099 : 'KW' '2099'; +KW2100 : 'KW' '2100'; +KW2101 : 'KW' '2101'; +KW2102 : 'KW' '2102'; +KW2103 : 'KW' '2103'; +KW2104 : 'KW' '2104'; +KW2105 : 'KW' '2105'; +KW2106 : 'KW' '2106'; +KW2107 : 'KW' '2107'; +KW2108 : 'KW' '2108'; +KW2109 : 'KW' '2109'; +KW2110 : 'KW' '2110'; +KW2111 : 'KW' '2111'; +KW2112 : 'KW' '2112'; +KW2113 : 'KW' '2113'; +KW2114 : 'KW' '2114'; +KW2115 : 'KW' '2115'; +KW2116 : 'KW' '2116'; +KW2117 : 'KW' '2117'; +KW2118 : 'KW' '2118'; +KW2119 : 'KW' '2119'; +KW2120 : 'KW' '2120'; +KW2121 : 'KW' '2121'; +KW2122 : 'KW' '2122'; +KW2123 : 'KW' '2123'; +KW2124 : 'KW' '2124'; +KW2125 : 'KW' '2125'; +KW2126 : 'KW' '2126'; +KW2127 : 'KW' '2127'; +KW2128 : 'KW' '2128'; +KW2129 : 'KW' '2129'; +KW2130 : 'KW' '2130'; +KW2131 : 'KW' '2131'; +KW2132 : 'KW' '2132'; +KW2133 : 'KW' '2133'; +KW2134 : 'KW' '2134'; +KW2135 : 'KW' '2135'; +KW2136 : 'KW' '2136'; +KW2137 : 'KW' '2137'; +KW2138 : 'KW' '2138'; +KW2139 : 'KW' '2139'; +KW2140 : 'KW' '2140'; +KW2141 : 'KW' '2141'; +KW2142 : 'KW' '2142'; +KW2143 : 'KW' '2143'; +KW2144 : 'KW' '2144'; +KW2145 : 'KW' '2145'; +KW2146 : 'KW' '2146'; +KW2147 : 'KW' '2147'; +KW2148 : 'KW' '2148'; +KW2149 : 'KW' '2149'; +KW2150 : 'KW' '2150'; +KW2151 : 'KW' '2151'; +KW2152 : 'KW' '2152'; +KW2153 : 'KW' '2153'; +KW2154 : 'KW' '2154'; +KW2155 : 'KW' '2155'; +KW2156 : 'KW' '2156'; +KW2157 : 'KW' '2157'; +KW2158 : 'KW' '2158'; +KW2159 : 'KW' '2159'; +KW2160 : 'KW' '2160'; +KW2161 : 'KW' '2161'; +KW2162 : 'KW' '2162'; +KW2163 : 'KW' '2163'; +KW2164 : 'KW' '2164'; +KW2165 : 'KW' '2165'; +KW2166 : 'KW' '2166'; +KW2167 : 'KW' '2167'; +KW2168 : 'KW' '2168'; +KW2169 : 'KW' '2169'; +KW2170 : 'KW' '2170'; +KW2171 : 'KW' '2171'; +KW2172 : 'KW' '2172'; +KW2173 : 'KW' '2173'; +KW2174 : 'KW' '2174'; +KW2175 : 'KW' '2175'; +KW2176 : 'KW' '2176'; +KW2177 : 'KW' '2177'; +KW2178 : 'KW' '2178'; +KW2179 : 'KW' '2179'; +KW2180 : 'KW' '2180'; +KW2181 : 'KW' '2181'; +KW2182 : 'KW' '2182'; +KW2183 : 'KW' '2183'; +KW2184 : 'KW' '2184'; +KW2185 : 'KW' '2185'; +KW2186 : 'KW' '2186'; +KW2187 : 'KW' '2187'; +KW2188 : 'KW' '2188'; +KW2189 : 'KW' '2189'; +KW2190 : 'KW' '2190'; +KW2191 : 'KW' '2191'; +KW2192 : 'KW' '2192'; +KW2193 : 'KW' '2193'; +KW2194 : 'KW' '2194'; +KW2195 : 'KW' '2195'; +KW2196 : 'KW' '2196'; +KW2197 : 'KW' '2197'; +KW2198 : 'KW' '2198'; +KW2199 : 'KW' '2199'; +KW2200 : 'KW' '2200'; +KW2201 : 'KW' '2201'; +KW2202 : 'KW' '2202'; +KW2203 : 'KW' '2203'; +KW2204 : 'KW' '2204'; +KW2205 : 'KW' '2205'; +KW2206 : 'KW' '2206'; +KW2207 : 'KW' '2207'; +KW2208 : 'KW' '2208'; +KW2209 : 'KW' '2209'; +KW2210 : 'KW' '2210'; 
+KW2211 : 'KW' '2211'; +KW2212 : 'KW' '2212'; +KW2213 : 'KW' '2213'; +KW2214 : 'KW' '2214'; +KW2215 : 'KW' '2215'; +KW2216 : 'KW' '2216'; +KW2217 : 'KW' '2217'; +KW2218 : 'KW' '2218'; +KW2219 : 'KW' '2219'; +KW2220 : 'KW' '2220'; +KW2221 : 'KW' '2221'; +KW2222 : 'KW' '2222'; +KW2223 : 'KW' '2223'; +KW2224 : 'KW' '2224'; +KW2225 : 'KW' '2225'; +KW2226 : 'KW' '2226'; +KW2227 : 'KW' '2227'; +KW2228 : 'KW' '2228'; +KW2229 : 'KW' '2229'; +KW2230 : 'KW' '2230'; +KW2231 : 'KW' '2231'; +KW2232 : 'KW' '2232'; +KW2233 : 'KW' '2233'; +KW2234 : 'KW' '2234'; +KW2235 : 'KW' '2235'; +KW2236 : 'KW' '2236'; +KW2237 : 'KW' '2237'; +KW2238 : 'KW' '2238'; +KW2239 : 'KW' '2239'; +KW2240 : 'KW' '2240'; +KW2241 : 'KW' '2241'; +KW2242 : 'KW' '2242'; +KW2243 : 'KW' '2243'; +KW2244 : 'KW' '2244'; +KW2245 : 'KW' '2245'; +KW2246 : 'KW' '2246'; +KW2247 : 'KW' '2247'; +KW2248 : 'KW' '2248'; +KW2249 : 'KW' '2249'; +KW2250 : 'KW' '2250'; +KW2251 : 'KW' '2251'; +KW2252 : 'KW' '2252'; +KW2253 : 'KW' '2253'; +KW2254 : 'KW' '2254'; +KW2255 : 'KW' '2255'; +KW2256 : 'KW' '2256'; +KW2257 : 'KW' '2257'; +KW2258 : 'KW' '2258'; +KW2259 : 'KW' '2259'; +KW2260 : 'KW' '2260'; +KW2261 : 'KW' '2261'; +KW2262 : 'KW' '2262'; +KW2263 : 'KW' '2263'; +KW2264 : 'KW' '2264'; +KW2265 : 'KW' '2265'; +KW2266 : 'KW' '2266'; +KW2267 : 'KW' '2267'; +KW2268 : 'KW' '2268'; +KW2269 : 'KW' '2269'; +KW2270 : 'KW' '2270'; +KW2271 : 'KW' '2271'; +KW2272 : 'KW' '2272'; +KW2273 : 'KW' '2273'; +KW2274 : 'KW' '2274'; +KW2275 : 'KW' '2275'; +KW2276 : 'KW' '2276'; +KW2277 : 'KW' '2277'; +KW2278 : 'KW' '2278'; +KW2279 : 'KW' '2279'; +KW2280 : 'KW' '2280'; +KW2281 : 'KW' '2281'; +KW2282 : 'KW' '2282'; +KW2283 : 'KW' '2283'; +KW2284 : 'KW' '2284'; +KW2285 : 'KW' '2285'; +KW2286 : 'KW' '2286'; +KW2287 : 'KW' '2287'; +KW2288 : 'KW' '2288'; +KW2289 : 'KW' '2289'; +KW2290 : 'KW' '2290'; +KW2291 : 'KW' '2291'; +KW2292 : 'KW' '2292'; +KW2293 : 'KW' '2293'; +KW2294 : 'KW' '2294'; +KW2295 : 'KW' '2295'; +KW2296 : 'KW' '2296'; +KW2297 : 'KW' '2297'; +KW2298 : 'KW' '2298'; +KW2299 : 'KW' '2299'; +KW2300 : 'KW' '2300'; +KW2301 : 'KW' '2301'; +KW2302 : 'KW' '2302'; +KW2303 : 'KW' '2303'; +KW2304 : 'KW' '2304'; +KW2305 : 'KW' '2305'; +KW2306 : 'KW' '2306'; +KW2307 : 'KW' '2307'; +KW2308 : 'KW' '2308'; +KW2309 : 'KW' '2309'; +KW2310 : 'KW' '2310'; +KW2311 : 'KW' '2311'; +KW2312 : 'KW' '2312'; +KW2313 : 'KW' '2313'; +KW2314 : 'KW' '2314'; +KW2315 : 'KW' '2315'; +KW2316 : 'KW' '2316'; +KW2317 : 'KW' '2317'; +KW2318 : 'KW' '2318'; +KW2319 : 'KW' '2319'; +KW2320 : 'KW' '2320'; +KW2321 : 'KW' '2321'; +KW2322 : 'KW' '2322'; +KW2323 : 'KW' '2323'; +KW2324 : 'KW' '2324'; +KW2325 : 'KW' '2325'; +KW2326 : 'KW' '2326'; +KW2327 : 'KW' '2327'; +KW2328 : 'KW' '2328'; +KW2329 : 'KW' '2329'; +KW2330 : 'KW' '2330'; +KW2331 : 'KW' '2331'; +KW2332 : 'KW' '2332'; +KW2333 : 'KW' '2333'; +KW2334 : 'KW' '2334'; +KW2335 : 'KW' '2335'; +KW2336 : 'KW' '2336'; +KW2337 : 'KW' '2337'; +KW2338 : 'KW' '2338'; +KW2339 : 'KW' '2339'; +KW2340 : 'KW' '2340'; +KW2341 : 'KW' '2341'; +KW2342 : 'KW' '2342'; +KW2343 : 'KW' '2343'; +KW2344 : 'KW' '2344'; +KW2345 : 'KW' '2345'; +KW2346 : 'KW' '2346'; +KW2347 : 'KW' '2347'; +KW2348 : 'KW' '2348'; +KW2349 : 'KW' '2349'; +KW2350 : 'KW' '2350'; +KW2351 : 'KW' '2351'; +KW2352 : 'KW' '2352'; +KW2353 : 'KW' '2353'; +KW2354 : 'KW' '2354'; +KW2355 : 'KW' '2355'; +KW2356 : 'KW' '2356'; +KW2357 : 'KW' '2357'; +KW2358 : 'KW' '2358'; +KW2359 : 'KW' '2359'; +KW2360 : 'KW' '2360'; +KW2361 : 'KW' '2361'; +KW2362 : 'KW' '2362'; +KW2363 : 'KW' '2363'; +KW2364 : 'KW' '2364'; +KW2365 : 
'KW' '2365'; +KW2366 : 'KW' '2366'; +KW2367 : 'KW' '2367'; +KW2368 : 'KW' '2368'; +KW2369 : 'KW' '2369'; +KW2370 : 'KW' '2370'; +KW2371 : 'KW' '2371'; +KW2372 : 'KW' '2372'; +KW2373 : 'KW' '2373'; +KW2374 : 'KW' '2374'; +KW2375 : 'KW' '2375'; +KW2376 : 'KW' '2376'; +KW2377 : 'KW' '2377'; +KW2378 : 'KW' '2378'; +KW2379 : 'KW' '2379'; +KW2380 : 'KW' '2380'; +KW2381 : 'KW' '2381'; +KW2382 : 'KW' '2382'; +KW2383 : 'KW' '2383'; +KW2384 : 'KW' '2384'; +KW2385 : 'KW' '2385'; +KW2386 : 'KW' '2386'; +KW2387 : 'KW' '2387'; +KW2388 : 'KW' '2388'; +KW2389 : 'KW' '2389'; +KW2390 : 'KW' '2390'; +KW2391 : 'KW' '2391'; +KW2392 : 'KW' '2392'; +KW2393 : 'KW' '2393'; +KW2394 : 'KW' '2394'; +KW2395 : 'KW' '2395'; +KW2396 : 'KW' '2396'; +KW2397 : 'KW' '2397'; +KW2398 : 'KW' '2398'; +KW2399 : 'KW' '2399'; +KW2400 : 'KW' '2400'; +KW2401 : 'KW' '2401'; +KW2402 : 'KW' '2402'; +KW2403 : 'KW' '2403'; +KW2404 : 'KW' '2404'; +KW2405 : 'KW' '2405'; +KW2406 : 'KW' '2406'; +KW2407 : 'KW' '2407'; +KW2408 : 'KW' '2408'; +KW2409 : 'KW' '2409'; +KW2410 : 'KW' '2410'; +KW2411 : 'KW' '2411'; +KW2412 : 'KW' '2412'; +KW2413 : 'KW' '2413'; +KW2414 : 'KW' '2414'; +KW2415 : 'KW' '2415'; +KW2416 : 'KW' '2416'; +KW2417 : 'KW' '2417'; +KW2418 : 'KW' '2418'; +KW2419 : 'KW' '2419'; +KW2420 : 'KW' '2420'; +KW2421 : 'KW' '2421'; +KW2422 : 'KW' '2422'; +KW2423 : 'KW' '2423'; +KW2424 : 'KW' '2424'; +KW2425 : 'KW' '2425'; +KW2426 : 'KW' '2426'; +KW2427 : 'KW' '2427'; +KW2428 : 'KW' '2428'; +KW2429 : 'KW' '2429'; +KW2430 : 'KW' '2430'; +KW2431 : 'KW' '2431'; +KW2432 : 'KW' '2432'; +KW2433 : 'KW' '2433'; +KW2434 : 'KW' '2434'; +KW2435 : 'KW' '2435'; +KW2436 : 'KW' '2436'; +KW2437 : 'KW' '2437'; +KW2438 : 'KW' '2438'; +KW2439 : 'KW' '2439'; +KW2440 : 'KW' '2440'; +KW2441 : 'KW' '2441'; +KW2442 : 'KW' '2442'; +KW2443 : 'KW' '2443'; +KW2444 : 'KW' '2444'; +KW2445 : 'KW' '2445'; +KW2446 : 'KW' '2446'; +KW2447 : 'KW' '2447'; +KW2448 : 'KW' '2448'; +KW2449 : 'KW' '2449'; +KW2450 : 'KW' '2450'; +KW2451 : 'KW' '2451'; +KW2452 : 'KW' '2452'; +KW2453 : 'KW' '2453'; +KW2454 : 'KW' '2454'; +KW2455 : 'KW' '2455'; +KW2456 : 'KW' '2456'; +KW2457 : 'KW' '2457'; +KW2458 : 'KW' '2458'; +KW2459 : 'KW' '2459'; +KW2460 : 'KW' '2460'; +KW2461 : 'KW' '2461'; +KW2462 : 'KW' '2462'; +KW2463 : 'KW' '2463'; +KW2464 : 'KW' '2464'; +KW2465 : 'KW' '2465'; +KW2466 : 'KW' '2466'; +KW2467 : 'KW' '2467'; +KW2468 : 'KW' '2468'; +KW2469 : 'KW' '2469'; +KW2470 : 'KW' '2470'; +KW2471 : 'KW' '2471'; +KW2472 : 'KW' '2472'; +KW2473 : 'KW' '2473'; +KW2474 : 'KW' '2474'; +KW2475 : 'KW' '2475'; +KW2476 : 'KW' '2476'; +KW2477 : 'KW' '2477'; +KW2478 : 'KW' '2478'; +KW2479 : 'KW' '2479'; +KW2480 : 'KW' '2480'; +KW2481 : 'KW' '2481'; +KW2482 : 'KW' '2482'; +KW2483 : 'KW' '2483'; +KW2484 : 'KW' '2484'; +KW2485 : 'KW' '2485'; +KW2486 : 'KW' '2486'; +KW2487 : 'KW' '2487'; +KW2488 : 'KW' '2488'; +KW2489 : 'KW' '2489'; +KW2490 : 'KW' '2490'; +KW2491 : 'KW' '2491'; +KW2492 : 'KW' '2492'; +KW2493 : 'KW' '2493'; +KW2494 : 'KW' '2494'; +KW2495 : 'KW' '2495'; +KW2496 : 'KW' '2496'; +KW2497 : 'KW' '2497'; +KW2498 : 'KW' '2498'; +KW2499 : 'KW' '2499'; +KW2500 : 'KW' '2500'; +KW2501 : 'KW' '2501'; +KW2502 : 'KW' '2502'; +KW2503 : 'KW' '2503'; +KW2504 : 'KW' '2504'; +KW2505 : 'KW' '2505'; +KW2506 : 'KW' '2506'; +KW2507 : 'KW' '2507'; +KW2508 : 'KW' '2508'; +KW2509 : 'KW' '2509'; +KW2510 : 'KW' '2510'; +KW2511 : 'KW' '2511'; +KW2512 : 'KW' '2512'; +KW2513 : 'KW' '2513'; +KW2514 : 'KW' '2514'; +KW2515 : 'KW' '2515'; +KW2516 : 'KW' '2516'; +KW2517 : 'KW' '2517'; +KW2518 : 'KW' '2518'; +KW2519 : 'KW' '2519'; 
+KW2520 : 'KW' '2520'; +KW2521 : 'KW' '2521'; +KW2522 : 'KW' '2522'; +KW2523 : 'KW' '2523'; +KW2524 : 'KW' '2524'; +KW2525 : 'KW' '2525'; +KW2526 : 'KW' '2526'; +KW2527 : 'KW' '2527'; +KW2528 : 'KW' '2528'; +KW2529 : 'KW' '2529'; +KW2530 : 'KW' '2530'; +KW2531 : 'KW' '2531'; +KW2532 : 'KW' '2532'; +KW2533 : 'KW' '2533'; +KW2534 : 'KW' '2534'; +KW2535 : 'KW' '2535'; +KW2536 : 'KW' '2536'; +KW2537 : 'KW' '2537'; +KW2538 : 'KW' '2538'; +KW2539 : 'KW' '2539'; +KW2540 : 'KW' '2540'; +KW2541 : 'KW' '2541'; +KW2542 : 'KW' '2542'; +KW2543 : 'KW' '2543'; +KW2544 : 'KW' '2544'; +KW2545 : 'KW' '2545'; +KW2546 : 'KW' '2546'; +KW2547 : 'KW' '2547'; +KW2548 : 'KW' '2548'; +KW2549 : 'KW' '2549'; +KW2550 : 'KW' '2550'; +KW2551 : 'KW' '2551'; +KW2552 : 'KW' '2552'; +KW2553 : 'KW' '2553'; +KW2554 : 'KW' '2554'; +KW2555 : 'KW' '2555'; +KW2556 : 'KW' '2556'; +KW2557 : 'KW' '2557'; +KW2558 : 'KW' '2558'; +KW2559 : 'KW' '2559'; +KW2560 : 'KW' '2560'; +KW2561 : 'KW' '2561'; +KW2562 : 'KW' '2562'; +KW2563 : 'KW' '2563'; +KW2564 : 'KW' '2564'; +KW2565 : 'KW' '2565'; +KW2566 : 'KW' '2566'; +KW2567 : 'KW' '2567'; +KW2568 : 'KW' '2568'; +KW2569 : 'KW' '2569'; +KW2570 : 'KW' '2570'; +KW2571 : 'KW' '2571'; +KW2572 : 'KW' '2572'; +KW2573 : 'KW' '2573'; +KW2574 : 'KW' '2574'; +KW2575 : 'KW' '2575'; +KW2576 : 'KW' '2576'; +KW2577 : 'KW' '2577'; +KW2578 : 'KW' '2578'; +KW2579 : 'KW' '2579'; +KW2580 : 'KW' '2580'; +KW2581 : 'KW' '2581'; +KW2582 : 'KW' '2582'; +KW2583 : 'KW' '2583'; +KW2584 : 'KW' '2584'; +KW2585 : 'KW' '2585'; +KW2586 : 'KW' '2586'; +KW2587 : 'KW' '2587'; +KW2588 : 'KW' '2588'; +KW2589 : 'KW' '2589'; +KW2590 : 'KW' '2590'; +KW2591 : 'KW' '2591'; +KW2592 : 'KW' '2592'; +KW2593 : 'KW' '2593'; +KW2594 : 'KW' '2594'; +KW2595 : 'KW' '2595'; +KW2596 : 'KW' '2596'; +KW2597 : 'KW' '2597'; +KW2598 : 'KW' '2598'; +KW2599 : 'KW' '2599'; +KW2600 : 'KW' '2600'; +KW2601 : 'KW' '2601'; +KW2602 : 'KW' '2602'; +KW2603 : 'KW' '2603'; +KW2604 : 'KW' '2604'; +KW2605 : 'KW' '2605'; +KW2606 : 'KW' '2606'; +KW2607 : 'KW' '2607'; +KW2608 : 'KW' '2608'; +KW2609 : 'KW' '2609'; +KW2610 : 'KW' '2610'; +KW2611 : 'KW' '2611'; +KW2612 : 'KW' '2612'; +KW2613 : 'KW' '2613'; +KW2614 : 'KW' '2614'; +KW2615 : 'KW' '2615'; +KW2616 : 'KW' '2616'; +KW2617 : 'KW' '2617'; +KW2618 : 'KW' '2618'; +KW2619 : 'KW' '2619'; +KW2620 : 'KW' '2620'; +KW2621 : 'KW' '2621'; +KW2622 : 'KW' '2622'; +KW2623 : 'KW' '2623'; +KW2624 : 'KW' '2624'; +KW2625 : 'KW' '2625'; +KW2626 : 'KW' '2626'; +KW2627 : 'KW' '2627'; +KW2628 : 'KW' '2628'; +KW2629 : 'KW' '2629'; +KW2630 : 'KW' '2630'; +KW2631 : 'KW' '2631'; +KW2632 : 'KW' '2632'; +KW2633 : 'KW' '2633'; +KW2634 : 'KW' '2634'; +KW2635 : 'KW' '2635'; +KW2636 : 'KW' '2636'; +KW2637 : 'KW' '2637'; +KW2638 : 'KW' '2638'; +KW2639 : 'KW' '2639'; +KW2640 : 'KW' '2640'; +KW2641 : 'KW' '2641'; +KW2642 : 'KW' '2642'; +KW2643 : 'KW' '2643'; +KW2644 : 'KW' '2644'; +KW2645 : 'KW' '2645'; +KW2646 : 'KW' '2646'; +KW2647 : 'KW' '2647'; +KW2648 : 'KW' '2648'; +KW2649 : 'KW' '2649'; +KW2650 : 'KW' '2650'; +KW2651 : 'KW' '2651'; +KW2652 : 'KW' '2652'; +KW2653 : 'KW' '2653'; +KW2654 : 'KW' '2654'; +KW2655 : 'KW' '2655'; +KW2656 : 'KW' '2656'; +KW2657 : 'KW' '2657'; +KW2658 : 'KW' '2658'; +KW2659 : 'KW' '2659'; +KW2660 : 'KW' '2660'; +KW2661 : 'KW' '2661'; +KW2662 : 'KW' '2662'; +KW2663 : 'KW' '2663'; +KW2664 : 'KW' '2664'; +KW2665 : 'KW' '2665'; +KW2666 : 'KW' '2666'; +KW2667 : 'KW' '2667'; +KW2668 : 'KW' '2668'; +KW2669 : 'KW' '2669'; +KW2670 : 'KW' '2670'; +KW2671 : 'KW' '2671'; +KW2672 : 'KW' '2672'; +KW2673 : 'KW' '2673'; +KW2674 : 
'KW' '2674'; +KW2675 : 'KW' '2675'; +KW2676 : 'KW' '2676'; +KW2677 : 'KW' '2677'; +KW2678 : 'KW' '2678'; +KW2679 : 'KW' '2679'; +KW2680 : 'KW' '2680'; +KW2681 : 'KW' '2681'; +KW2682 : 'KW' '2682'; +KW2683 : 'KW' '2683'; +KW2684 : 'KW' '2684'; +KW2685 : 'KW' '2685'; +KW2686 : 'KW' '2686'; +KW2687 : 'KW' '2687'; +KW2688 : 'KW' '2688'; +KW2689 : 'KW' '2689'; +KW2690 : 'KW' '2690'; +KW2691 : 'KW' '2691'; +KW2692 : 'KW' '2692'; +KW2693 : 'KW' '2693'; +KW2694 : 'KW' '2694'; +KW2695 : 'KW' '2695'; +KW2696 : 'KW' '2696'; +KW2697 : 'KW' '2697'; +KW2698 : 'KW' '2698'; +KW2699 : 'KW' '2699'; +KW2700 : 'KW' '2700'; +KW2701 : 'KW' '2701'; +KW2702 : 'KW' '2702'; +KW2703 : 'KW' '2703'; +KW2704 : 'KW' '2704'; +KW2705 : 'KW' '2705'; +KW2706 : 'KW' '2706'; +KW2707 : 'KW' '2707'; +KW2708 : 'KW' '2708'; +KW2709 : 'KW' '2709'; +KW2710 : 'KW' '2710'; +KW2711 : 'KW' '2711'; +KW2712 : 'KW' '2712'; +KW2713 : 'KW' '2713'; +KW2714 : 'KW' '2714'; +KW2715 : 'KW' '2715'; +KW2716 : 'KW' '2716'; +KW2717 : 'KW' '2717'; +KW2718 : 'KW' '2718'; +KW2719 : 'KW' '2719'; +KW2720 : 'KW' '2720'; +KW2721 : 'KW' '2721'; +KW2722 : 'KW' '2722'; +KW2723 : 'KW' '2723'; +KW2724 : 'KW' '2724'; +KW2725 : 'KW' '2725'; +KW2726 : 'KW' '2726'; +KW2727 : 'KW' '2727'; +KW2728 : 'KW' '2728'; +KW2729 : 'KW' '2729'; +KW2730 : 'KW' '2730'; +KW2731 : 'KW' '2731'; +KW2732 : 'KW' '2732'; +KW2733 : 'KW' '2733'; +KW2734 : 'KW' '2734'; +KW2735 : 'KW' '2735'; +KW2736 : 'KW' '2736'; +KW2737 : 'KW' '2737'; +KW2738 : 'KW' '2738'; +KW2739 : 'KW' '2739'; +KW2740 : 'KW' '2740'; +KW2741 : 'KW' '2741'; +KW2742 : 'KW' '2742'; +KW2743 : 'KW' '2743'; +KW2744 : 'KW' '2744'; +KW2745 : 'KW' '2745'; +KW2746 : 'KW' '2746'; +KW2747 : 'KW' '2747'; +KW2748 : 'KW' '2748'; +KW2749 : 'KW' '2749'; +KW2750 : 'KW' '2750'; +KW2751 : 'KW' '2751'; +KW2752 : 'KW' '2752'; +KW2753 : 'KW' '2753'; +KW2754 : 'KW' '2754'; +KW2755 : 'KW' '2755'; +KW2756 : 'KW' '2756'; +KW2757 : 'KW' '2757'; +KW2758 : 'KW' '2758'; +KW2759 : 'KW' '2759'; +KW2760 : 'KW' '2760'; +KW2761 : 'KW' '2761'; +KW2762 : 'KW' '2762'; +KW2763 : 'KW' '2763'; +KW2764 : 'KW' '2764'; +KW2765 : 'KW' '2765'; +KW2766 : 'KW' '2766'; +KW2767 : 'KW' '2767'; +KW2768 : 'KW' '2768'; +KW2769 : 'KW' '2769'; +KW2770 : 'KW' '2770'; +KW2771 : 'KW' '2771'; +KW2772 : 'KW' '2772'; +KW2773 : 'KW' '2773'; +KW2774 : 'KW' '2774'; +KW2775 : 'KW' '2775'; +KW2776 : 'KW' '2776'; +KW2777 : 'KW' '2777'; +KW2778 : 'KW' '2778'; +KW2779 : 'KW' '2779'; +KW2780 : 'KW' '2780'; +KW2781 : 'KW' '2781'; +KW2782 : 'KW' '2782'; +KW2783 : 'KW' '2783'; +KW2784 : 'KW' '2784'; +KW2785 : 'KW' '2785'; +KW2786 : 'KW' '2786'; +KW2787 : 'KW' '2787'; +KW2788 : 'KW' '2788'; +KW2789 : 'KW' '2789'; +KW2790 : 'KW' '2790'; +KW2791 : 'KW' '2791'; +KW2792 : 'KW' '2792'; +KW2793 : 'KW' '2793'; +KW2794 : 'KW' '2794'; +KW2795 : 'KW' '2795'; +KW2796 : 'KW' '2796'; +KW2797 : 'KW' '2797'; +KW2798 : 'KW' '2798'; +KW2799 : 'KW' '2799'; +KW2800 : 'KW' '2800'; +KW2801 : 'KW' '2801'; +KW2802 : 'KW' '2802'; +KW2803 : 'KW' '2803'; +KW2804 : 'KW' '2804'; +KW2805 : 'KW' '2805'; +KW2806 : 'KW' '2806'; +KW2807 : 'KW' '2807'; +KW2808 : 'KW' '2808'; +KW2809 : 'KW' '2809'; +KW2810 : 'KW' '2810'; +KW2811 : 'KW' '2811'; +KW2812 : 'KW' '2812'; +KW2813 : 'KW' '2813'; +KW2814 : 'KW' '2814'; +KW2815 : 'KW' '2815'; +KW2816 : 'KW' '2816'; +KW2817 : 'KW' '2817'; +KW2818 : 'KW' '2818'; +KW2819 : 'KW' '2819'; +KW2820 : 'KW' '2820'; +KW2821 : 'KW' '2821'; +KW2822 : 'KW' '2822'; +KW2823 : 'KW' '2823'; +KW2824 : 'KW' '2824'; +KW2825 : 'KW' '2825'; +KW2826 : 'KW' '2826'; +KW2827 : 'KW' '2827'; +KW2828 : 'KW' '2828'; 
+KW2829 : 'KW' '2829'; +KW2830 : 'KW' '2830'; +KW2831 : 'KW' '2831'; +KW2832 : 'KW' '2832'; +KW2833 : 'KW' '2833'; +KW2834 : 'KW' '2834'; +KW2835 : 'KW' '2835'; +KW2836 : 'KW' '2836'; +KW2837 : 'KW' '2837'; +KW2838 : 'KW' '2838'; +KW2839 : 'KW' '2839'; +KW2840 : 'KW' '2840'; +KW2841 : 'KW' '2841'; +KW2842 : 'KW' '2842'; +KW2843 : 'KW' '2843'; +KW2844 : 'KW' '2844'; +KW2845 : 'KW' '2845'; +KW2846 : 'KW' '2846'; +KW2847 : 'KW' '2847'; +KW2848 : 'KW' '2848'; +KW2849 : 'KW' '2849'; +KW2850 : 'KW' '2850'; +KW2851 : 'KW' '2851'; +KW2852 : 'KW' '2852'; +KW2853 : 'KW' '2853'; +KW2854 : 'KW' '2854'; +KW2855 : 'KW' '2855'; +KW2856 : 'KW' '2856'; +KW2857 : 'KW' '2857'; +KW2858 : 'KW' '2858'; +KW2859 : 'KW' '2859'; +KW2860 : 'KW' '2860'; +KW2861 : 'KW' '2861'; +KW2862 : 'KW' '2862'; +KW2863 : 'KW' '2863'; +KW2864 : 'KW' '2864'; +KW2865 : 'KW' '2865'; +KW2866 : 'KW' '2866'; +KW2867 : 'KW' '2867'; +KW2868 : 'KW' '2868'; +KW2869 : 'KW' '2869'; +KW2870 : 'KW' '2870'; +KW2871 : 'KW' '2871'; +KW2872 : 'KW' '2872'; +KW2873 : 'KW' '2873'; +KW2874 : 'KW' '2874'; +KW2875 : 'KW' '2875'; +KW2876 : 'KW' '2876'; +KW2877 : 'KW' '2877'; +KW2878 : 'KW' '2878'; +KW2879 : 'KW' '2879'; +KW2880 : 'KW' '2880'; +KW2881 : 'KW' '2881'; +KW2882 : 'KW' '2882'; +KW2883 : 'KW' '2883'; +KW2884 : 'KW' '2884'; +KW2885 : 'KW' '2885'; +KW2886 : 'KW' '2886'; +KW2887 : 'KW' '2887'; +KW2888 : 'KW' '2888'; +KW2889 : 'KW' '2889'; +KW2890 : 'KW' '2890'; +KW2891 : 'KW' '2891'; +KW2892 : 'KW' '2892'; +KW2893 : 'KW' '2893'; +KW2894 : 'KW' '2894'; +KW2895 : 'KW' '2895'; +KW2896 : 'KW' '2896'; +KW2897 : 'KW' '2897'; +KW2898 : 'KW' '2898'; +KW2899 : 'KW' '2899'; +KW2900 : 'KW' '2900'; +KW2901 : 'KW' '2901'; +KW2902 : 'KW' '2902'; +KW2903 : 'KW' '2903'; +KW2904 : 'KW' '2904'; +KW2905 : 'KW' '2905'; +KW2906 : 'KW' '2906'; +KW2907 : 'KW' '2907'; +KW2908 : 'KW' '2908'; +KW2909 : 'KW' '2909'; +KW2910 : 'KW' '2910'; +KW2911 : 'KW' '2911'; +KW2912 : 'KW' '2912'; +KW2913 : 'KW' '2913'; +KW2914 : 'KW' '2914'; +KW2915 : 'KW' '2915'; +KW2916 : 'KW' '2916'; +KW2917 : 'KW' '2917'; +KW2918 : 'KW' '2918'; +KW2919 : 'KW' '2919'; +KW2920 : 'KW' '2920'; +KW2921 : 'KW' '2921'; +KW2922 : 'KW' '2922'; +KW2923 : 'KW' '2923'; +KW2924 : 'KW' '2924'; +KW2925 : 'KW' '2925'; +KW2926 : 'KW' '2926'; +KW2927 : 'KW' '2927'; +KW2928 : 'KW' '2928'; +KW2929 : 'KW' '2929'; +KW2930 : 'KW' '2930'; +KW2931 : 'KW' '2931'; +KW2932 : 'KW' '2932'; +KW2933 : 'KW' '2933'; +KW2934 : 'KW' '2934'; +KW2935 : 'KW' '2935'; +KW2936 : 'KW' '2936'; +KW2937 : 'KW' '2937'; +KW2938 : 'KW' '2938'; +KW2939 : 'KW' '2939'; +KW2940 : 'KW' '2940'; +KW2941 : 'KW' '2941'; +KW2942 : 'KW' '2942'; +KW2943 : 'KW' '2943'; +KW2944 : 'KW' '2944'; +KW2945 : 'KW' '2945'; +KW2946 : 'KW' '2946'; +KW2947 : 'KW' '2947'; +KW2948 : 'KW' '2948'; +KW2949 : 'KW' '2949'; +KW2950 : 'KW' '2950'; +KW2951 : 'KW' '2951'; +KW2952 : 'KW' '2952'; +KW2953 : 'KW' '2953'; +KW2954 : 'KW' '2954'; +KW2955 : 'KW' '2955'; +KW2956 : 'KW' '2956'; +KW2957 : 'KW' '2957'; +KW2958 : 'KW' '2958'; +KW2959 : 'KW' '2959'; +KW2960 : 'KW' '2960'; +KW2961 : 'KW' '2961'; +KW2962 : 'KW' '2962'; +KW2963 : 'KW' '2963'; +KW2964 : 'KW' '2964'; +KW2965 : 'KW' '2965'; +KW2966 : 'KW' '2966'; +KW2967 : 'KW' '2967'; +KW2968 : 'KW' '2968'; +KW2969 : 'KW' '2969'; +KW2970 : 'KW' '2970'; +KW2971 : 'KW' '2971'; +KW2972 : 'KW' '2972'; +KW2973 : 'KW' '2973'; +KW2974 : 'KW' '2974'; +KW2975 : 'KW' '2975'; +KW2976 : 'KW' '2976'; +KW2977 : 'KW' '2977'; +KW2978 : 'KW' '2978'; +KW2979 : 'KW' '2979'; +KW2980 : 'KW' '2980'; +KW2981 : 'KW' '2981'; +KW2982 : 'KW' '2982'; +KW2983 : 
'KW' '2983'; +KW2984 : 'KW' '2984'; +KW2985 : 'KW' '2985'; +KW2986 : 'KW' '2986'; +KW2987 : 'KW' '2987'; +KW2988 : 'KW' '2988'; +KW2989 : 'KW' '2989'; +KW2990 : 'KW' '2990'; +KW2991 : 'KW' '2991'; +KW2992 : 'KW' '2992'; +KW2993 : 'KW' '2993'; +KW2994 : 'KW' '2994'; +KW2995 : 'KW' '2995'; +KW2996 : 'KW' '2996'; +KW2997 : 'KW' '2997'; +KW2998 : 'KW' '2998'; +KW2999 : 'KW' '2999'; +KW3000 : 'KW' '3000'; +KW3001 : 'KW' '3001'; +KW3002 : 'KW' '3002'; +KW3003 : 'KW' '3003'; +KW3004 : 'KW' '3004'; +KW3005 : 'KW' '3005'; +KW3006 : 'KW' '3006'; +KW3007 : 'KW' '3007'; +KW3008 : 'KW' '3008'; +KW3009 : 'KW' '3009'; +KW3010 : 'KW' '3010'; +KW3011 : 'KW' '3011'; +KW3012 : 'KW' '3012'; +KW3013 : 'KW' '3013'; +KW3014 : 'KW' '3014'; +KW3015 : 'KW' '3015'; +KW3016 : 'KW' '3016'; +KW3017 : 'KW' '3017'; +KW3018 : 'KW' '3018'; +KW3019 : 'KW' '3019'; +KW3020 : 'KW' '3020'; +KW3021 : 'KW' '3021'; +KW3022 : 'KW' '3022'; +KW3023 : 'KW' '3023'; +KW3024 : 'KW' '3024'; +KW3025 : 'KW' '3025'; +KW3026 : 'KW' '3026'; +KW3027 : 'KW' '3027'; +KW3028 : 'KW' '3028'; +KW3029 : 'KW' '3029'; +KW3030 : 'KW' '3030'; +KW3031 : 'KW' '3031'; +KW3032 : 'KW' '3032'; +KW3033 : 'KW' '3033'; +KW3034 : 'KW' '3034'; +KW3035 : 'KW' '3035'; +KW3036 : 'KW' '3036'; +KW3037 : 'KW' '3037'; +KW3038 : 'KW' '3038'; +KW3039 : 'KW' '3039'; +KW3040 : 'KW' '3040'; +KW3041 : 'KW' '3041'; +KW3042 : 'KW' '3042'; +KW3043 : 'KW' '3043'; +KW3044 : 'KW' '3044'; +KW3045 : 'KW' '3045'; +KW3046 : 'KW' '3046'; +KW3047 : 'KW' '3047'; +KW3048 : 'KW' '3048'; +KW3049 : 'KW' '3049'; +KW3050 : 'KW' '3050'; +KW3051 : 'KW' '3051'; +KW3052 : 'KW' '3052'; +KW3053 : 'KW' '3053'; +KW3054 : 'KW' '3054'; +KW3055 : 'KW' '3055'; +KW3056 : 'KW' '3056'; +KW3057 : 'KW' '3057'; +KW3058 : 'KW' '3058'; +KW3059 : 'KW' '3059'; +KW3060 : 'KW' '3060'; +KW3061 : 'KW' '3061'; +KW3062 : 'KW' '3062'; +KW3063 : 'KW' '3063'; +KW3064 : 'KW' '3064'; +KW3065 : 'KW' '3065'; +KW3066 : 'KW' '3066'; +KW3067 : 'KW' '3067'; +KW3068 : 'KW' '3068'; +KW3069 : 'KW' '3069'; +KW3070 : 'KW' '3070'; +KW3071 : 'KW' '3071'; +KW3072 : 'KW' '3072'; +KW3073 : 'KW' '3073'; +KW3074 : 'KW' '3074'; +KW3075 : 'KW' '3075'; +KW3076 : 'KW' '3076'; +KW3077 : 'KW' '3077'; +KW3078 : 'KW' '3078'; +KW3079 : 'KW' '3079'; +KW3080 : 'KW' '3080'; +KW3081 : 'KW' '3081'; +KW3082 : 'KW' '3082'; +KW3083 : 'KW' '3083'; +KW3084 : 'KW' '3084'; +KW3085 : 'KW' '3085'; +KW3086 : 'KW' '3086'; +KW3087 : 'KW' '3087'; +KW3088 : 'KW' '3088'; +KW3089 : 'KW' '3089'; +KW3090 : 'KW' '3090'; +KW3091 : 'KW' '3091'; +KW3092 : 'KW' '3092'; +KW3093 : 'KW' '3093'; +KW3094 : 'KW' '3094'; +KW3095 : 'KW' '3095'; +KW3096 : 'KW' '3096'; +KW3097 : 'KW' '3097'; +KW3098 : 'KW' '3098'; +KW3099 : 'KW' '3099'; +KW3100 : 'KW' '3100'; +KW3101 : 'KW' '3101'; +KW3102 : 'KW' '3102'; +KW3103 : 'KW' '3103'; +KW3104 : 'KW' '3104'; +KW3105 : 'KW' '3105'; +KW3106 : 'KW' '3106'; +KW3107 : 'KW' '3107'; +KW3108 : 'KW' '3108'; +KW3109 : 'KW' '3109'; +KW3110 : 'KW' '3110'; +KW3111 : 'KW' '3111'; +KW3112 : 'KW' '3112'; +KW3113 : 'KW' '3113'; +KW3114 : 'KW' '3114'; +KW3115 : 'KW' '3115'; +KW3116 : 'KW' '3116'; +KW3117 : 'KW' '3117'; +KW3118 : 'KW' '3118'; +KW3119 : 'KW' '3119'; +KW3120 : 'KW' '3120'; +KW3121 : 'KW' '3121'; +KW3122 : 'KW' '3122'; +KW3123 : 'KW' '3123'; +KW3124 : 'KW' '3124'; +KW3125 : 'KW' '3125'; +KW3126 : 'KW' '3126'; +KW3127 : 'KW' '3127'; +KW3128 : 'KW' '3128'; +KW3129 : 'KW' '3129'; +KW3130 : 'KW' '3130'; +KW3131 : 'KW' '3131'; +KW3132 : 'KW' '3132'; +KW3133 : 'KW' '3133'; +KW3134 : 'KW' '3134'; +KW3135 : 'KW' '3135'; +KW3136 : 'KW' '3136'; +KW3137 : 'KW' '3137'; 
+KW3138 : 'KW' '3138'; +KW3139 : 'KW' '3139'; +KW3140 : 'KW' '3140'; +KW3141 : 'KW' '3141'; +KW3142 : 'KW' '3142'; +KW3143 : 'KW' '3143'; +KW3144 : 'KW' '3144'; +KW3145 : 'KW' '3145'; +KW3146 : 'KW' '3146'; +KW3147 : 'KW' '3147'; +KW3148 : 'KW' '3148'; +KW3149 : 'KW' '3149'; +KW3150 : 'KW' '3150'; +KW3151 : 'KW' '3151'; +KW3152 : 'KW' '3152'; +KW3153 : 'KW' '3153'; +KW3154 : 'KW' '3154'; +KW3155 : 'KW' '3155'; +KW3156 : 'KW' '3156'; +KW3157 : 'KW' '3157'; +KW3158 : 'KW' '3158'; +KW3159 : 'KW' '3159'; +KW3160 : 'KW' '3160'; +KW3161 : 'KW' '3161'; +KW3162 : 'KW' '3162'; +KW3163 : 'KW' '3163'; +KW3164 : 'KW' '3164'; +KW3165 : 'KW' '3165'; +KW3166 : 'KW' '3166'; +KW3167 : 'KW' '3167'; +KW3168 : 'KW' '3168'; +KW3169 : 'KW' '3169'; +KW3170 : 'KW' '3170'; +KW3171 : 'KW' '3171'; +KW3172 : 'KW' '3172'; +KW3173 : 'KW' '3173'; +KW3174 : 'KW' '3174'; +KW3175 : 'KW' '3175'; +KW3176 : 'KW' '3176'; +KW3177 : 'KW' '3177'; +KW3178 : 'KW' '3178'; +KW3179 : 'KW' '3179'; +KW3180 : 'KW' '3180'; +KW3181 : 'KW' '3181'; +KW3182 : 'KW' '3182'; +KW3183 : 'KW' '3183'; +KW3184 : 'KW' '3184'; +KW3185 : 'KW' '3185'; +KW3186 : 'KW' '3186'; +KW3187 : 'KW' '3187'; +KW3188 : 'KW' '3188'; +KW3189 : 'KW' '3189'; +KW3190 : 'KW' '3190'; +KW3191 : 'KW' '3191'; +KW3192 : 'KW' '3192'; +KW3193 : 'KW' '3193'; +KW3194 : 'KW' '3194'; +KW3195 : 'KW' '3195'; +KW3196 : 'KW' '3196'; +KW3197 : 'KW' '3197'; +KW3198 : 'KW' '3198'; +KW3199 : 'KW' '3199'; +KW3200 : 'KW' '3200'; +KW3201 : 'KW' '3201'; +KW3202 : 'KW' '3202'; +KW3203 : 'KW' '3203'; +KW3204 : 'KW' '3204'; +KW3205 : 'KW' '3205'; +KW3206 : 'KW' '3206'; +KW3207 : 'KW' '3207'; +KW3208 : 'KW' '3208'; +KW3209 : 'KW' '3209'; +KW3210 : 'KW' '3210'; +KW3211 : 'KW' '3211'; +KW3212 : 'KW' '3212'; +KW3213 : 'KW' '3213'; +KW3214 : 'KW' '3214'; +KW3215 : 'KW' '3215'; +KW3216 : 'KW' '3216'; +KW3217 : 'KW' '3217'; +KW3218 : 'KW' '3218'; +KW3219 : 'KW' '3219'; +KW3220 : 'KW' '3220'; +KW3221 : 'KW' '3221'; +KW3222 : 'KW' '3222'; +KW3223 : 'KW' '3223'; +KW3224 : 'KW' '3224'; +KW3225 : 'KW' '3225'; +KW3226 : 'KW' '3226'; +KW3227 : 'KW' '3227'; +KW3228 : 'KW' '3228'; +KW3229 : 'KW' '3229'; +KW3230 : 'KW' '3230'; +KW3231 : 'KW' '3231'; +KW3232 : 'KW' '3232'; +KW3233 : 'KW' '3233'; +KW3234 : 'KW' '3234'; +KW3235 : 'KW' '3235'; +KW3236 : 'KW' '3236'; +KW3237 : 'KW' '3237'; +KW3238 : 'KW' '3238'; +KW3239 : 'KW' '3239'; +KW3240 : 'KW' '3240'; +KW3241 : 'KW' '3241'; +KW3242 : 'KW' '3242'; +KW3243 : 'KW' '3243'; +KW3244 : 'KW' '3244'; +KW3245 : 'KW' '3245'; +KW3246 : 'KW' '3246'; +KW3247 : 'KW' '3247'; +KW3248 : 'KW' '3248'; +KW3249 : 'KW' '3249'; +KW3250 : 'KW' '3250'; +KW3251 : 'KW' '3251'; +KW3252 : 'KW' '3252'; +KW3253 : 'KW' '3253'; +KW3254 : 'KW' '3254'; +KW3255 : 'KW' '3255'; +KW3256 : 'KW' '3256'; +KW3257 : 'KW' '3257'; +KW3258 : 'KW' '3258'; +KW3259 : 'KW' '3259'; +KW3260 : 'KW' '3260'; +KW3261 : 'KW' '3261'; +KW3262 : 'KW' '3262'; +KW3263 : 'KW' '3263'; +KW3264 : 'KW' '3264'; +KW3265 : 'KW' '3265'; +KW3266 : 'KW' '3266'; +KW3267 : 'KW' '3267'; +KW3268 : 'KW' '3268'; +KW3269 : 'KW' '3269'; +KW3270 : 'KW' '3270'; +KW3271 : 'KW' '3271'; +KW3272 : 'KW' '3272'; +KW3273 : 'KW' '3273'; +KW3274 : 'KW' '3274'; +KW3275 : 'KW' '3275'; +KW3276 : 'KW' '3276'; +KW3277 : 'KW' '3277'; +KW3278 : 'KW' '3278'; +KW3279 : 'KW' '3279'; +KW3280 : 'KW' '3280'; +KW3281 : 'KW' '3281'; +KW3282 : 'KW' '3282'; +KW3283 : 'KW' '3283'; +KW3284 : 'KW' '3284'; +KW3285 : 'KW' '3285'; +KW3286 : 'KW' '3286'; +KW3287 : 'KW' '3287'; +KW3288 : 'KW' '3288'; +KW3289 : 'KW' '3289'; +KW3290 : 'KW' '3290'; +KW3291 : 'KW' '3291'; +KW3292 : 
'KW' '3292'; +KW3293 : 'KW' '3293'; +KW3294 : 'KW' '3294'; +KW3295 : 'KW' '3295'; +KW3296 : 'KW' '3296'; +KW3297 : 'KW' '3297'; +KW3298 : 'KW' '3298'; +KW3299 : 'KW' '3299'; +KW3300 : 'KW' '3300'; +KW3301 : 'KW' '3301'; +KW3302 : 'KW' '3302'; +KW3303 : 'KW' '3303'; +KW3304 : 'KW' '3304'; +KW3305 : 'KW' '3305'; +KW3306 : 'KW' '3306'; +KW3307 : 'KW' '3307'; +KW3308 : 'KW' '3308'; +KW3309 : 'KW' '3309'; +KW3310 : 'KW' '3310'; +KW3311 : 'KW' '3311'; +KW3312 : 'KW' '3312'; +KW3313 : 'KW' '3313'; +KW3314 : 'KW' '3314'; +KW3315 : 'KW' '3315'; +KW3316 : 'KW' '3316'; +KW3317 : 'KW' '3317'; +KW3318 : 'KW' '3318'; +KW3319 : 'KW' '3319'; +KW3320 : 'KW' '3320'; +KW3321 : 'KW' '3321'; +KW3322 : 'KW' '3322'; +KW3323 : 'KW' '3323'; +KW3324 : 'KW' '3324'; +KW3325 : 'KW' '3325'; +KW3326 : 'KW' '3326'; +KW3327 : 'KW' '3327'; +KW3328 : 'KW' '3328'; +KW3329 : 'KW' '3329'; +KW3330 : 'KW' '3330'; +KW3331 : 'KW' '3331'; +KW3332 : 'KW' '3332'; +KW3333 : 'KW' '3333'; +KW3334 : 'KW' '3334'; +KW3335 : 'KW' '3335'; +KW3336 : 'KW' '3336'; +KW3337 : 'KW' '3337'; +KW3338 : 'KW' '3338'; +KW3339 : 'KW' '3339'; +KW3340 : 'KW' '3340'; +KW3341 : 'KW' '3341'; +KW3342 : 'KW' '3342'; +KW3343 : 'KW' '3343'; +KW3344 : 'KW' '3344'; +KW3345 : 'KW' '3345'; +KW3346 : 'KW' '3346'; +KW3347 : 'KW' '3347'; +KW3348 : 'KW' '3348'; +KW3349 : 'KW' '3349'; +KW3350 : 'KW' '3350'; +KW3351 : 'KW' '3351'; +KW3352 : 'KW' '3352'; +KW3353 : 'KW' '3353'; +KW3354 : 'KW' '3354'; +KW3355 : 'KW' '3355'; +KW3356 : 'KW' '3356'; +KW3357 : 'KW' '3357'; +KW3358 : 'KW' '3358'; +KW3359 : 'KW' '3359'; +KW3360 : 'KW' '3360'; +KW3361 : 'KW' '3361'; +KW3362 : 'KW' '3362'; +KW3363 : 'KW' '3363'; +KW3364 : 'KW' '3364'; +KW3365 : 'KW' '3365'; +KW3366 : 'KW' '3366'; +KW3367 : 'KW' '3367'; +KW3368 : 'KW' '3368'; +KW3369 : 'KW' '3369'; +KW3370 : 'KW' '3370'; +KW3371 : 'KW' '3371'; +KW3372 : 'KW' '3372'; +KW3373 : 'KW' '3373'; +KW3374 : 'KW' '3374'; +KW3375 : 'KW' '3375'; +KW3376 : 'KW' '3376'; +KW3377 : 'KW' '3377'; +KW3378 : 'KW' '3378'; +KW3379 : 'KW' '3379'; +KW3380 : 'KW' '3380'; +KW3381 : 'KW' '3381'; +KW3382 : 'KW' '3382'; +KW3383 : 'KW' '3383'; +KW3384 : 'KW' '3384'; +KW3385 : 'KW' '3385'; +KW3386 : 'KW' '3386'; +KW3387 : 'KW' '3387'; +KW3388 : 'KW' '3388'; +KW3389 : 'KW' '3389'; +KW3390 : 'KW' '3390'; +KW3391 : 'KW' '3391'; +KW3392 : 'KW' '3392'; +KW3393 : 'KW' '3393'; +KW3394 : 'KW' '3394'; +KW3395 : 'KW' '3395'; +KW3396 : 'KW' '3396'; +KW3397 : 'KW' '3397'; +KW3398 : 'KW' '3398'; +KW3399 : 'KW' '3399'; +KW3400 : 'KW' '3400'; +KW3401 : 'KW' '3401'; +KW3402 : 'KW' '3402'; +KW3403 : 'KW' '3403'; +KW3404 : 'KW' '3404'; +KW3405 : 'KW' '3405'; +KW3406 : 'KW' '3406'; +KW3407 : 'KW' '3407'; +KW3408 : 'KW' '3408'; +KW3409 : 'KW' '3409'; +KW3410 : 'KW' '3410'; +KW3411 : 'KW' '3411'; +KW3412 : 'KW' '3412'; +KW3413 : 'KW' '3413'; +KW3414 : 'KW' '3414'; +KW3415 : 'KW' '3415'; +KW3416 : 'KW' '3416'; +KW3417 : 'KW' '3417'; +KW3418 : 'KW' '3418'; +KW3419 : 'KW' '3419'; +KW3420 : 'KW' '3420'; +KW3421 : 'KW' '3421'; +KW3422 : 'KW' '3422'; +KW3423 : 'KW' '3423'; +KW3424 : 'KW' '3424'; +KW3425 : 'KW' '3425'; +KW3426 : 'KW' '3426'; +KW3427 : 'KW' '3427'; +KW3428 : 'KW' '3428'; +KW3429 : 'KW' '3429'; +KW3430 : 'KW' '3430'; +KW3431 : 'KW' '3431'; +KW3432 : 'KW' '3432'; +KW3433 : 'KW' '3433'; +KW3434 : 'KW' '3434'; +KW3435 : 'KW' '3435'; +KW3436 : 'KW' '3436'; +KW3437 : 'KW' '3437'; +KW3438 : 'KW' '3438'; +KW3439 : 'KW' '3439'; +KW3440 : 'KW' '3440'; +KW3441 : 'KW' '3441'; +KW3442 : 'KW' '3442'; +KW3443 : 'KW' '3443'; +KW3444 : 'KW' '3444'; +KW3445 : 'KW' '3445'; +KW3446 : 'KW' '3446'; 
+KW3447 : 'KW' '3447'; +KW3448 : 'KW' '3448'; +KW3449 : 'KW' '3449'; +KW3450 : 'KW' '3450'; +KW3451 : 'KW' '3451'; +KW3452 : 'KW' '3452'; +KW3453 : 'KW' '3453'; +KW3454 : 'KW' '3454'; +KW3455 : 'KW' '3455'; +KW3456 : 'KW' '3456'; +KW3457 : 'KW' '3457'; +KW3458 : 'KW' '3458'; +KW3459 : 'KW' '3459'; +KW3460 : 'KW' '3460'; +KW3461 : 'KW' '3461'; +KW3462 : 'KW' '3462'; +KW3463 : 'KW' '3463'; +KW3464 : 'KW' '3464'; +KW3465 : 'KW' '3465'; +KW3466 : 'KW' '3466'; +KW3467 : 'KW' '3467'; +KW3468 : 'KW' '3468'; +KW3469 : 'KW' '3469'; +KW3470 : 'KW' '3470'; +KW3471 : 'KW' '3471'; +KW3472 : 'KW' '3472'; +KW3473 : 'KW' '3473'; +KW3474 : 'KW' '3474'; +KW3475 : 'KW' '3475'; +KW3476 : 'KW' '3476'; +KW3477 : 'KW' '3477'; +KW3478 : 'KW' '3478'; +KW3479 : 'KW' '3479'; +KW3480 : 'KW' '3480'; +KW3481 : 'KW' '3481'; +KW3482 : 'KW' '3482'; +KW3483 : 'KW' '3483'; +KW3484 : 'KW' '3484'; +KW3485 : 'KW' '3485'; +KW3486 : 'KW' '3486'; +KW3487 : 'KW' '3487'; +KW3488 : 'KW' '3488'; +KW3489 : 'KW' '3489'; +KW3490 : 'KW' '3490'; +KW3491 : 'KW' '3491'; +KW3492 : 'KW' '3492'; +KW3493 : 'KW' '3493'; +KW3494 : 'KW' '3494'; +KW3495 : 'KW' '3495'; +KW3496 : 'KW' '3496'; +KW3497 : 'KW' '3497'; +KW3498 : 'KW' '3498'; +KW3499 : 'KW' '3499'; +KW3500 : 'KW' '3500'; +KW3501 : 'KW' '3501'; +KW3502 : 'KW' '3502'; +KW3503 : 'KW' '3503'; +KW3504 : 'KW' '3504'; +KW3505 : 'KW' '3505'; +KW3506 : 'KW' '3506'; +KW3507 : 'KW' '3507'; +KW3508 : 'KW' '3508'; +KW3509 : 'KW' '3509'; +KW3510 : 'KW' '3510'; +KW3511 : 'KW' '3511'; +KW3512 : 'KW' '3512'; +KW3513 : 'KW' '3513'; +KW3514 : 'KW' '3514'; +KW3515 : 'KW' '3515'; +KW3516 : 'KW' '3516'; +KW3517 : 'KW' '3517'; +KW3518 : 'KW' '3518'; +KW3519 : 'KW' '3519'; +KW3520 : 'KW' '3520'; +KW3521 : 'KW' '3521'; +KW3522 : 'KW' '3522'; +KW3523 : 'KW' '3523'; +KW3524 : 'KW' '3524'; +KW3525 : 'KW' '3525'; +KW3526 : 'KW' '3526'; +KW3527 : 'KW' '3527'; +KW3528 : 'KW' '3528'; +KW3529 : 'KW' '3529'; +KW3530 : 'KW' '3530'; +KW3531 : 'KW' '3531'; +KW3532 : 'KW' '3532'; +KW3533 : 'KW' '3533'; +KW3534 : 'KW' '3534'; +KW3535 : 'KW' '3535'; +KW3536 : 'KW' '3536'; +KW3537 : 'KW' '3537'; +KW3538 : 'KW' '3538'; +KW3539 : 'KW' '3539'; +KW3540 : 'KW' '3540'; +KW3541 : 'KW' '3541'; +KW3542 : 'KW' '3542'; +KW3543 : 'KW' '3543'; +KW3544 : 'KW' '3544'; +KW3545 : 'KW' '3545'; +KW3546 : 'KW' '3546'; +KW3547 : 'KW' '3547'; +KW3548 : 'KW' '3548'; +KW3549 : 'KW' '3549'; +KW3550 : 'KW' '3550'; +KW3551 : 'KW' '3551'; +KW3552 : 'KW' '3552'; +KW3553 : 'KW' '3553'; +KW3554 : 'KW' '3554'; +KW3555 : 'KW' '3555'; +KW3556 : 'KW' '3556'; +KW3557 : 'KW' '3557'; +KW3558 : 'KW' '3558'; +KW3559 : 'KW' '3559'; +KW3560 : 'KW' '3560'; +KW3561 : 'KW' '3561'; +KW3562 : 'KW' '3562'; +KW3563 : 'KW' '3563'; +KW3564 : 'KW' '3564'; +KW3565 : 'KW' '3565'; +KW3566 : 'KW' '3566'; +KW3567 : 'KW' '3567'; +KW3568 : 'KW' '3568'; +KW3569 : 'KW' '3569'; +KW3570 : 'KW' '3570'; +KW3571 : 'KW' '3571'; +KW3572 : 'KW' '3572'; +KW3573 : 'KW' '3573'; +KW3574 : 'KW' '3574'; +KW3575 : 'KW' '3575'; +KW3576 : 'KW' '3576'; +KW3577 : 'KW' '3577'; +KW3578 : 'KW' '3578'; +KW3579 : 'KW' '3579'; +KW3580 : 'KW' '3580'; +KW3581 : 'KW' '3581'; +KW3582 : 'KW' '3582'; +KW3583 : 'KW' '3583'; +KW3584 : 'KW' '3584'; +KW3585 : 'KW' '3585'; +KW3586 : 'KW' '3586'; +KW3587 : 'KW' '3587'; +KW3588 : 'KW' '3588'; +KW3589 : 'KW' '3589'; +KW3590 : 'KW' '3590'; +KW3591 : 'KW' '3591'; +KW3592 : 'KW' '3592'; +KW3593 : 'KW' '3593'; +KW3594 : 'KW' '3594'; +KW3595 : 'KW' '3595'; +KW3596 : 'KW' '3596'; +KW3597 : 'KW' '3597'; +KW3598 : 'KW' '3598'; +KW3599 : 'KW' '3599'; +KW3600 : 'KW' '3600'; +KW3601 : 
'KW' '3601'; +KW3602 : 'KW' '3602'; +KW3603 : 'KW' '3603'; +KW3604 : 'KW' '3604'; +KW3605 : 'KW' '3605'; +KW3606 : 'KW' '3606'; +KW3607 : 'KW' '3607'; +KW3608 : 'KW' '3608'; +KW3609 : 'KW' '3609'; +KW3610 : 'KW' '3610'; +KW3611 : 'KW' '3611'; +KW3612 : 'KW' '3612'; +KW3613 : 'KW' '3613'; +KW3614 : 'KW' '3614'; +KW3615 : 'KW' '3615'; +KW3616 : 'KW' '3616'; +KW3617 : 'KW' '3617'; +KW3618 : 'KW' '3618'; +KW3619 : 'KW' '3619'; +KW3620 : 'KW' '3620'; +KW3621 : 'KW' '3621'; +KW3622 : 'KW' '3622'; +KW3623 : 'KW' '3623'; +KW3624 : 'KW' '3624'; +KW3625 : 'KW' '3625'; +KW3626 : 'KW' '3626'; +KW3627 : 'KW' '3627'; +KW3628 : 'KW' '3628'; +KW3629 : 'KW' '3629'; +KW3630 : 'KW' '3630'; +KW3631 : 'KW' '3631'; +KW3632 : 'KW' '3632'; +KW3633 : 'KW' '3633'; +KW3634 : 'KW' '3634'; +KW3635 : 'KW' '3635'; +KW3636 : 'KW' '3636'; +KW3637 : 'KW' '3637'; +KW3638 : 'KW' '3638'; +KW3639 : 'KW' '3639'; +KW3640 : 'KW' '3640'; +KW3641 : 'KW' '3641'; +KW3642 : 'KW' '3642'; +KW3643 : 'KW' '3643'; +KW3644 : 'KW' '3644'; +KW3645 : 'KW' '3645'; +KW3646 : 'KW' '3646'; +KW3647 : 'KW' '3647'; +KW3648 : 'KW' '3648'; +KW3649 : 'KW' '3649'; +KW3650 : 'KW' '3650'; +KW3651 : 'KW' '3651'; +KW3652 : 'KW' '3652'; +KW3653 : 'KW' '3653'; +KW3654 : 'KW' '3654'; +KW3655 : 'KW' '3655'; +KW3656 : 'KW' '3656'; +KW3657 : 'KW' '3657'; +KW3658 : 'KW' '3658'; +KW3659 : 'KW' '3659'; +KW3660 : 'KW' '3660'; +KW3661 : 'KW' '3661'; +KW3662 : 'KW' '3662'; +KW3663 : 'KW' '3663'; +KW3664 : 'KW' '3664'; +KW3665 : 'KW' '3665'; +KW3666 : 'KW' '3666'; +KW3667 : 'KW' '3667'; +KW3668 : 'KW' '3668'; +KW3669 : 'KW' '3669'; +KW3670 : 'KW' '3670'; +KW3671 : 'KW' '3671'; +KW3672 : 'KW' '3672'; +KW3673 : 'KW' '3673'; +KW3674 : 'KW' '3674'; +KW3675 : 'KW' '3675'; +KW3676 : 'KW' '3676'; +KW3677 : 'KW' '3677'; +KW3678 : 'KW' '3678'; +KW3679 : 'KW' '3679'; +KW3680 : 'KW' '3680'; +KW3681 : 'KW' '3681'; +KW3682 : 'KW' '3682'; +KW3683 : 'KW' '3683'; +KW3684 : 'KW' '3684'; +KW3685 : 'KW' '3685'; +KW3686 : 'KW' '3686'; +KW3687 : 'KW' '3687'; +KW3688 : 'KW' '3688'; +KW3689 : 'KW' '3689'; +KW3690 : 'KW' '3690'; +KW3691 : 'KW' '3691'; +KW3692 : 'KW' '3692'; +KW3693 : 'KW' '3693'; +KW3694 : 'KW' '3694'; +KW3695 : 'KW' '3695'; +KW3696 : 'KW' '3696'; +KW3697 : 'KW' '3697'; +KW3698 : 'KW' '3698'; +KW3699 : 'KW' '3699'; +KW3700 : 'KW' '3700'; +KW3701 : 'KW' '3701'; +KW3702 : 'KW' '3702'; +KW3703 : 'KW' '3703'; +KW3704 : 'KW' '3704'; +KW3705 : 'KW' '3705'; +KW3706 : 'KW' '3706'; +KW3707 : 'KW' '3707'; +KW3708 : 'KW' '3708'; +KW3709 : 'KW' '3709'; +KW3710 : 'KW' '3710'; +KW3711 : 'KW' '3711'; +KW3712 : 'KW' '3712'; +KW3713 : 'KW' '3713'; +KW3714 : 'KW' '3714'; +KW3715 : 'KW' '3715'; +KW3716 : 'KW' '3716'; +KW3717 : 'KW' '3717'; +KW3718 : 'KW' '3718'; +KW3719 : 'KW' '3719'; +KW3720 : 'KW' '3720'; +KW3721 : 'KW' '3721'; +KW3722 : 'KW' '3722'; +KW3723 : 'KW' '3723'; +KW3724 : 'KW' '3724'; +KW3725 : 'KW' '3725'; +KW3726 : 'KW' '3726'; +KW3727 : 'KW' '3727'; +KW3728 : 'KW' '3728'; +KW3729 : 'KW' '3729'; +KW3730 : 'KW' '3730'; +KW3731 : 'KW' '3731'; +KW3732 : 'KW' '3732'; +KW3733 : 'KW' '3733'; +KW3734 : 'KW' '3734'; +KW3735 : 'KW' '3735'; +KW3736 : 'KW' '3736'; +KW3737 : 'KW' '3737'; +KW3738 : 'KW' '3738'; +KW3739 : 'KW' '3739'; +KW3740 : 'KW' '3740'; +KW3741 : 'KW' '3741'; +KW3742 : 'KW' '3742'; +KW3743 : 'KW' '3743'; +KW3744 : 'KW' '3744'; +KW3745 : 'KW' '3745'; +KW3746 : 'KW' '3746'; +KW3747 : 'KW' '3747'; +KW3748 : 'KW' '3748'; +KW3749 : 'KW' '3749'; +KW3750 : 'KW' '3750'; +KW3751 : 'KW' '3751'; +KW3752 : 'KW' '3752'; +KW3753 : 'KW' '3753'; +KW3754 : 'KW' '3754'; +KW3755 : 'KW' '3755'; 
+KW3756 : 'KW' '3756'; +KW3757 : 'KW' '3757'; +KW3758 : 'KW' '3758'; +KW3759 : 'KW' '3759'; +KW3760 : 'KW' '3760'; +KW3761 : 'KW' '3761'; +KW3762 : 'KW' '3762'; +KW3763 : 'KW' '3763'; +KW3764 : 'KW' '3764'; +KW3765 : 'KW' '3765'; +KW3766 : 'KW' '3766'; +KW3767 : 'KW' '3767'; +KW3768 : 'KW' '3768'; +KW3769 : 'KW' '3769'; +KW3770 : 'KW' '3770'; +KW3771 : 'KW' '3771'; +KW3772 : 'KW' '3772'; +KW3773 : 'KW' '3773'; +KW3774 : 'KW' '3774'; +KW3775 : 'KW' '3775'; +KW3776 : 'KW' '3776'; +KW3777 : 'KW' '3777'; +KW3778 : 'KW' '3778'; +KW3779 : 'KW' '3779'; +KW3780 : 'KW' '3780'; +KW3781 : 'KW' '3781'; +KW3782 : 'KW' '3782'; +KW3783 : 'KW' '3783'; +KW3784 : 'KW' '3784'; +KW3785 : 'KW' '3785'; +KW3786 : 'KW' '3786'; +KW3787 : 'KW' '3787'; +KW3788 : 'KW' '3788'; +KW3789 : 'KW' '3789'; +KW3790 : 'KW' '3790'; +KW3791 : 'KW' '3791'; +KW3792 : 'KW' '3792'; +KW3793 : 'KW' '3793'; +KW3794 : 'KW' '3794'; +KW3795 : 'KW' '3795'; +KW3796 : 'KW' '3796'; +KW3797 : 'KW' '3797'; +KW3798 : 'KW' '3798'; +KW3799 : 'KW' '3799'; +KW3800 : 'KW' '3800'; +KW3801 : 'KW' '3801'; +KW3802 : 'KW' '3802'; +KW3803 : 'KW' '3803'; +KW3804 : 'KW' '3804'; +KW3805 : 'KW' '3805'; +KW3806 : 'KW' '3806'; +KW3807 : 'KW' '3807'; +KW3808 : 'KW' '3808'; +KW3809 : 'KW' '3809'; +KW3810 : 'KW' '3810'; +KW3811 : 'KW' '3811'; +KW3812 : 'KW' '3812'; +KW3813 : 'KW' '3813'; +KW3814 : 'KW' '3814'; +KW3815 : 'KW' '3815'; +KW3816 : 'KW' '3816'; +KW3817 : 'KW' '3817'; +KW3818 : 'KW' '3818'; +KW3819 : 'KW' '3819'; +KW3820 : 'KW' '3820'; +KW3821 : 'KW' '3821'; +KW3822 : 'KW' '3822'; +KW3823 : 'KW' '3823'; +KW3824 : 'KW' '3824'; +KW3825 : 'KW' '3825'; +KW3826 : 'KW' '3826'; +KW3827 : 'KW' '3827'; +KW3828 : 'KW' '3828'; +KW3829 : 'KW' '3829'; +KW3830 : 'KW' '3830'; +KW3831 : 'KW' '3831'; +KW3832 : 'KW' '3832'; +KW3833 : 'KW' '3833'; +KW3834 : 'KW' '3834'; +KW3835 : 'KW' '3835'; +KW3836 : 'KW' '3836'; +KW3837 : 'KW' '3837'; +KW3838 : 'KW' '3838'; +KW3839 : 'KW' '3839'; +KW3840 : 'KW' '3840'; +KW3841 : 'KW' '3841'; +KW3842 : 'KW' '3842'; +KW3843 : 'KW' '3843'; +KW3844 : 'KW' '3844'; +KW3845 : 'KW' '3845'; +KW3846 : 'KW' '3846'; +KW3847 : 'KW' '3847'; +KW3848 : 'KW' '3848'; +KW3849 : 'KW' '3849'; +KW3850 : 'KW' '3850'; +KW3851 : 'KW' '3851'; +KW3852 : 'KW' '3852'; +KW3853 : 'KW' '3853'; +KW3854 : 'KW' '3854'; +KW3855 : 'KW' '3855'; +KW3856 : 'KW' '3856'; +KW3857 : 'KW' '3857'; +KW3858 : 'KW' '3858'; +KW3859 : 'KW' '3859'; +KW3860 : 'KW' '3860'; +KW3861 : 'KW' '3861'; +KW3862 : 'KW' '3862'; +KW3863 : 'KW' '3863'; +KW3864 : 'KW' '3864'; +KW3865 : 'KW' '3865'; +KW3866 : 'KW' '3866'; +KW3867 : 'KW' '3867'; +KW3868 : 'KW' '3868'; +KW3869 : 'KW' '3869'; +KW3870 : 'KW' '3870'; +KW3871 : 'KW' '3871'; +KW3872 : 'KW' '3872'; +KW3873 : 'KW' '3873'; +KW3874 : 'KW' '3874'; +KW3875 : 'KW' '3875'; +KW3876 : 'KW' '3876'; +KW3877 : 'KW' '3877'; +KW3878 : 'KW' '3878'; +KW3879 : 'KW' '3879'; +KW3880 : 'KW' '3880'; +KW3881 : 'KW' '3881'; +KW3882 : 'KW' '3882'; +KW3883 : 'KW' '3883'; +KW3884 : 'KW' '3884'; +KW3885 : 'KW' '3885'; +KW3886 : 'KW' '3886'; +KW3887 : 'KW' '3887'; +KW3888 : 'KW' '3888'; +KW3889 : 'KW' '3889'; +KW3890 : 'KW' '3890'; +KW3891 : 'KW' '3891'; +KW3892 : 'KW' '3892'; +KW3893 : 'KW' '3893'; +KW3894 : 'KW' '3894'; +KW3895 : 'KW' '3895'; +KW3896 : 'KW' '3896'; +KW3897 : 'KW' '3897'; +KW3898 : 'KW' '3898'; +KW3899 : 'KW' '3899'; +KW3900 : 'KW' '3900'; +KW3901 : 'KW' '3901'; +KW3902 : 'KW' '3902'; +KW3903 : 'KW' '3903'; +KW3904 : 'KW' '3904'; +KW3905 : 'KW' '3905'; +KW3906 : 'KW' '3906'; +KW3907 : 'KW' '3907'; +KW3908 : 'KW' '3908'; +KW3909 : 'KW' '3909'; +KW3910 : 
'KW' '3910'; +KW3911 : 'KW' '3911'; +KW3912 : 'KW' '3912'; +KW3913 : 'KW' '3913'; +KW3914 : 'KW' '3914'; +KW3915 : 'KW' '3915'; +KW3916 : 'KW' '3916'; +KW3917 : 'KW' '3917'; +KW3918 : 'KW' '3918'; +KW3919 : 'KW' '3919'; +KW3920 : 'KW' '3920'; +KW3921 : 'KW' '3921'; +KW3922 : 'KW' '3922'; +KW3923 : 'KW' '3923'; +KW3924 : 'KW' '3924'; +KW3925 : 'KW' '3925'; +KW3926 : 'KW' '3926'; +KW3927 : 'KW' '3927'; +KW3928 : 'KW' '3928'; +KW3929 : 'KW' '3929'; +KW3930 : 'KW' '3930'; +KW3931 : 'KW' '3931'; +KW3932 : 'KW' '3932'; +KW3933 : 'KW' '3933'; +KW3934 : 'KW' '3934'; +KW3935 : 'KW' '3935'; +KW3936 : 'KW' '3936'; +KW3937 : 'KW' '3937'; +KW3938 : 'KW' '3938'; +KW3939 : 'KW' '3939'; +KW3940 : 'KW' '3940'; +KW3941 : 'KW' '3941'; +KW3942 : 'KW' '3942'; +KW3943 : 'KW' '3943'; +KW3944 : 'KW' '3944'; +KW3945 : 'KW' '3945'; +KW3946 : 'KW' '3946'; +KW3947 : 'KW' '3947'; +KW3948 : 'KW' '3948'; +KW3949 : 'KW' '3949'; +KW3950 : 'KW' '3950'; +KW3951 : 'KW' '3951'; +KW3952 : 'KW' '3952'; +KW3953 : 'KW' '3953'; +KW3954 : 'KW' '3954'; +KW3955 : 'KW' '3955'; +KW3956 : 'KW' '3956'; +KW3957 : 'KW' '3957'; +KW3958 : 'KW' '3958'; +KW3959 : 'KW' '3959'; +KW3960 : 'KW' '3960'; +KW3961 : 'KW' '3961'; +KW3962 : 'KW' '3962'; +KW3963 : 'KW' '3963'; +KW3964 : 'KW' '3964'; +KW3965 : 'KW' '3965'; +KW3966 : 'KW' '3966'; +KW3967 : 'KW' '3967'; +KW3968 : 'KW' '3968'; +KW3969 : 'KW' '3969'; +KW3970 : 'KW' '3970'; +KW3971 : 'KW' '3971'; +KW3972 : 'KW' '3972'; +KW3973 : 'KW' '3973'; +KW3974 : 'KW' '3974'; +KW3975 : 'KW' '3975'; +KW3976 : 'KW' '3976'; +KW3977 : 'KW' '3977'; +KW3978 : 'KW' '3978'; +KW3979 : 'KW' '3979'; +KW3980 : 'KW' '3980'; +KW3981 : 'KW' '3981'; +KW3982 : 'KW' '3982'; +KW3983 : 'KW' '3983'; +KW3984 : 'KW' '3984'; +KW3985 : 'KW' '3985'; +KW3986 : 'KW' '3986'; +KW3987 : 'KW' '3987'; +KW3988 : 'KW' '3988'; +KW3989 : 'KW' '3989'; +KW3990 : 'KW' '3990'; +KW3991 : 'KW' '3991'; +KW3992 : 'KW' '3992'; +KW3993 : 'KW' '3993'; +KW3994 : 'KW' '3994'; +KW3995 : 'KW' '3995'; +KW3996 : 'KW' '3996'; +KW3997 : 'KW' '3997'; +KW3998 : 'KW' '3998'; +KW3999 : 'KW' '3999'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyClosure.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyClosure.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyClosure.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyClosure.st diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyConfigs.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyConfigs.st new file mode 100644 index 000000000..4b37c0453 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyConfigs.st @@ -0,0 +1,4 @@ +lexer grammar ; +I : .*? ('a' | 'ab') {} ; +WS : (' '|'\n') -> skip ; +J : . 
{}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyOptional.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyOptional.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyOptional.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyOptional.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyPositiveClosure.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyPositiveClosure.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyPositiveClosure.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyPositiveClosure.st diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination1.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination1.st new file mode 100644 index 000000000..25bcd9b13 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination1.st @@ -0,0 +1,2 @@ +lexer grammar ; +STRING : '"' ('""' | .)*? '"'; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination2.st new file mode 100644 index 000000000..2028d9b52 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/NonGreedyTermination2.st @@ -0,0 +1,2 @@ +lexer grammar ; +STRING : '"' ('""' | .)+? '"'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Parentheses.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/Parentheses.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/Parentheses.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/Parentheses.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/PositionAdjustingLexer.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/PositionAdjustingLexer.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/PositionAdjustingLexer.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/PositionAdjustingLexer.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/QuoteTranslation.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/QuoteTranslation.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/QuoteTranslation.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/QuoteTranslation.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.st similarity index 76% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.st index c6fece229..0eeb4b13f 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.ST +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardPlus.st @@ -1,3 +1,3 @@ lexer grammar ; CMT : '/*' (CMT | .)+? 
'*/' ; -WS : (' '|'\t')+; +WS : (' '|'\n')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st similarity index 76% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st index 5f9b7c9bb..865beab3e 100644 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RecursiveLexerRuleRefWithWildcardStar.st @@ -1,3 +1,3 @@ lexer grammar ; CMT : '/*' (CMT | .)*? '*/' ; -WS : (' '|'\t')+; +WS : (' '|'\n')+; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/RefToRuleDoesNotSetTokenNorEmitAnother.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/Slashes.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/Slashes.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/LexerExec/Slashes.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/Slashes.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/Basic.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Listeners/Basic.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/Basic.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LR.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Listeners/LR.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LR.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LRWithLabels.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Listeners/LRWithLabels.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LRWithLabels.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/RuleGetters.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Listeners/RuleGetters.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/RuleGetters.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/TokenGetters.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Listeners/TokenGetters.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/TokenGetters.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/2AltLoop.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/2AltLoop.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/2AltLoop.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/ExtraToken.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/ExtraToken.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/ExtraToken.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/NoViableAlt.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/NoViableAlt.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/NoViableAlt.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/RuleRef.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/RuleRef.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/RuleRef.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/Sync.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Sync.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/Sync.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/Token2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/Token2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/Token2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/TokenAndRuleContextString.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/TokenAndRuleContextString.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/TokenAndRuleContextString.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/test2Alts.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParseTrees/test2Alts.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/test2Alts.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpToken.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpToken.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpToken.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpToken.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpTokenFromSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpTokenFromSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ConjuringUpTokenFromSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpTokenFromSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ContextListGetters.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/ContextListGetters.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ContextListGetters.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st similarity index 100% rename from 
tool/test/org/antlr/v4/testgen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidATNStateRemoval.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidATNStateRemoval.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidATNStateRemoval.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidEmptyInput.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidEmptyInput.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/InvalidEmptyInput.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidEmptyInput.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LL1ErrorInfo.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL1ErrorInfo.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LL1ErrorInfo.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LL2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LL2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL3.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LL3.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LL3.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LL3.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LLStar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LLStar.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/LLStar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/LLStar.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionBeforeLoop2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionDuringLoop.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/MultiTokenDeletionDuringLoop2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/NoViableAltAvoidance.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/NoViableAltAvoidance.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/NoViableAltAvoidance.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/NoViableAltAvoidance.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleSetInsertion.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleSetInsertion.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleSetInsertion.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleSetInsertion.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletion.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletion.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletion.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletion.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionBeforeLoop2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionDuringLoop.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionDuringLoop2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenDeletionExpectingSet.st diff --git 
a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenInsertion.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenInsertion.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/SingleTokenInsertion.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/SingleTokenInsertion.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserErrors/TokenMismatch.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/TokenMismatch.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserErrors/TokenMismatch.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/TokenMismatch.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/APlus.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/APlus.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/APlus.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/APlus.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AStar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AStar.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/AStar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AStar.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAPlus.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorAPlus.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAPlus.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorAPlus.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAStar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorAStar.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorAStar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorAStar.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorB.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorB.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorB.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorB.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBPlus.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorBPlus.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBPlus.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorBPlus.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBStar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorBStar.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/AorBStar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AorBStar.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Basic.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/Basic.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/Basic.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/Basic.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding1.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseGreedyBinding1.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding1.st rename to 
tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseGreedyBinding1.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseGreedyBinding2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseGreedyBinding2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseGreedyBinding2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseNonGreedyBinding1.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/IfIfElseNonGreedyBinding2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LL1OptionalBlock.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LL1OptionalBlock.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/LL1OptionalBlock.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LL1OptionalBlock.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Labels.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/Labels.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/Labels.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/Labels.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelForClosureContext.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelsOnSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelsOnSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/ListLabelsOnSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelsOnSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/Optional.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/Optional.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/Optional.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/Optional.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/PredicatedIfIfElse.st 
similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredicatedIfIfElse.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/PredicatedIfIfElse.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredictionIssue334.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/PredictionIssue334.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/PredictionIssue334.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/PredictionIssue334.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/StartRuleWithoutEOF.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/ParserExec/StartRuleWithoutEOF.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/StartRuleWithoutEOF.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/DisableRule.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/DisableRule.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/DisableRule.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/EnumNotID.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/EnumNotID.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/EnumNotID.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/EnumNotID.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDnotEnum.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDnotEnum.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDnotEnum.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDvsEnum.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/IDvsEnum.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDvsEnum.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/Indent.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/Indent.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/Indent.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/Indent.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/LexerInputPositionSensitivePredicates.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/PredicatedKeywords.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/PredicatedKeywords.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalLexer/PredicatedKeywords.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/PredicatedKeywords.st diff --git 
a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAlts.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAlts.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAlts.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAlts.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionHidesPreds.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionHidesPreds.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionHidesPreds.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionHidesPreds.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/AtomWithClosureInTranslatedLRRule.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DepedentPredsInGlobalFOLLOW.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DisabledAlternative.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DisabledAlternative.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/DisabledAlternative.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DisabledAlternative.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Order.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Order.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Order.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Order.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredFromAltTestedInLoopBack.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/RewindBeforePredEval.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/RewindBeforePredEval.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/RewindBeforePredEval.st rename to 
tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/RewindBeforePredEval.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Simple.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Simple.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/Simple.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Simple.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate2.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/SimpleValidate2.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate2.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeft.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeft.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeft.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeft.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ValidateInDFA.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ValidateInDFA.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/SemPredEvalParser/ValidateInDFA.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ValidateInDFA.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/CharSetLiteral.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/CharSetLiteral.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/CharSetLiteral.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/CharSetLiteral.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerOptionalSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerOptionalSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/LexerOptionalSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerOptionalSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerPlusSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerPlusSet.st similarity index 100% rename from 
tool/test/org/antlr/v4/testgen/grammars/Sets/LexerPlusSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerPlusSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/LexerStarSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerStarSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/LexerStarSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerStarSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotChar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotChar.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/NotChar.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotChar.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithLabel.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithLabel.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithLabel.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithRuleRef3.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithRuleRef3.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/NotCharSetWithRuleRef3.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithRuleRef3.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalLexerSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalLexerSingleElement.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalLexerSingleElement.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalLexerSingleElement.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSingleElement.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/OptionalSingleElement.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSingleElement.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserNotSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserNotSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotToken.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserNotToken.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotToken.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserNotToken.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotTokenWithLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserNotTokenWithLabel.st 
similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/ParserNotTokenWithLabel.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserNotTokenWithLabel.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/ParserSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/ParserSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/ParserSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusLexerSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusLexerSingleElement.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/PlusLexerSingleElement.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusLexerSingleElement.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/PlusSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/PlusSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/RuleAsSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/RuleAsSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/RuleAsSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/RuleAsSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/SeqDoesNotBecomeSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/SeqDoesNotBecomeSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/SeqDoesNotBecomeSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/SeqDoesNotBecomeSet.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/StarLexerSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarLexerSingleElement.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/StarLexerSingleElement.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarLexerSingleElement.st diff --git a/tool/test/org/antlr/v4/testgen/grammars/Sets/StarSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarSet.st similarity index 100% rename from tool/test/org/antlr/v4/testgen/grammars/Sets/StarSet.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarSet.st diff --git a/tool/test/org/antlr/v4/test/rt/java/BaseTest.java b/tool/test/org/antlr/v4/test/rt/java/BaseTest.java new file mode 100644 index 000000000..e389de09d --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/BaseTest.java @@ -0,0 +1,1416 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.rt.java; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.PipedInputStream; +import java.io.PipedOutputStream; +import java.io.PrintStream; +import java.io.StringReader; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.tools.JavaCompiler; +import javax.tools.JavaFileObject; +import javax.tools.StandardJavaFileManager; +import javax.tools.ToolProvider; + +import org.antlr.v4.Tool; +import org.antlr.v4.automata.ATNFactory; +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.codegen.CodeGenerator; +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonToken; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.IntStream; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.WritableToken; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.runtime.atn.DecisionState; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.runtime.misc.NotNull; +import org.antlr.v4.runtime.misc.Nullable; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.misc.Utils; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.test.tool.ErrorQueue; +import org.antlr.v4.tool.ANTLRMessage; +import org.antlr.v4.tool.DOTGenerator; +import 
org.antlr.v4.tool.DefaultToolListener; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.GrammarSemanticsMessage; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.Rule; +import org.junit.Before; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.STGroupString; + +public abstract class BaseTest { + // -J-Dorg.antlr.v4.test.BaseTest.level=FINE + private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); + + public static final String newline = System.getProperty("line.separator"); + public static final String pathSep = System.getProperty("path.separator"); + + /** + * When the {@code antlr.testinprocess} runtime property is set to + * {@code true}, the test suite will attempt to load generated classes into + * the test process for direct execution rather than invoking the JVM in a + * new process for testing. + * + *
+ * <p>
+ * In-process testing results in a substantial performance improvement, but + * some test environments created by IDEs do not support the mechanisms + * currently used by the tests to dynamically load compiled code. Therefore, + * the default behavior (used in all other cases) favors reliable + * cross-system test execution by executing generated test code in a + * separate process.
+ * </p>
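A minimal illustrative sketch (not from the patch itself) of what this flag gates, assuming the generated driver class is named Test and was compiled into tmpdir; the real logic lives in execClass further down:

    // Sketch only: run the generated Test class either in this JVM or in a forked one,
    // depending on the antlr.testinprocess system property described above.
    static String runGenerated(String tmpdir, String inputFile) throws Exception {
        if (Boolean.parseBoolean(System.getProperty("antlr.testinprocess"))) {
            // same-process path: load the compiled classes from tmpdir and call Test.main directly
            java.net.URLClassLoader loader = new java.net.URLClassLoader(
                new java.net.URL[] { new java.io.File(tmpdir).toURI().toURL() },
                ClassLoader.getSystemClassLoader());
            loader.loadClass("Test")
                  .getDeclaredMethod("main", String[].class)
                  .invoke(null, (Object) new String[] { inputFile });
            return "ran in-process";
        }
        // default path: fork a fresh JVM (the real code also appends the full test classpath)
        Process p = new ProcessBuilder("java", "-classpath", tmpdir, "Test", inputFile)
            .inheritIO().start();
        p.waitFor();
        return "ran in a separate process";
    }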
+ */ + public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess")); + + /** + * When the {@code antlr.preserve-test-dir} runtime property is set to + * {@code true}, the temporary directories created by the test run will not + * be removed at the end of the test run, even for tests that completed + * successfully. + * + *
+ * <p>
+ * The default behavior (used in all other cases) is removing the temporary + * directories for all tests which completed successfully, and preserving + * the directories for tests which failed.
+ * </p>
+ */ + public static final boolean PRESERVE_TEST_DIR = Boolean.parseBoolean(System.getProperty("antlr.preserve-test-dir")); + + /** + * The base test directory is the directory where generated files get placed + * during unit test execution. + * + *
+ * <p>
+ * The default value for this property is the {@code java.io.tmpdir} system + * property, and can be overridden by setting the + * {@code antlr.java-test-dir} property to a custom location. Note that the + * {@code antlr.java-test-dir} property directly affects the + * {@link #CREATE_PER_TEST_DIRECTORIES} value as well.
+ * </p>
+ */ + public static final String BASE_TEST_DIR; + + /** + * When {@code true}, a temporary directory will be created for each test + * executed during the test run. + * + *
+ * <p>
+ * This value is {@code true} when the {@code antlr.java-test-dir} system + * property is not set (each test then gets its own directory under + * {@code java.io.tmpdir}), and otherwise {@code false}.
+ * </p>
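These are ordinary JVM system properties, so (as an illustrative example, not taken from the patch) a run that keeps its output in a fixed directory could be launched with JUnit 4's console runner and a couple of -D flags, which the static initializer below then resolves:

    // e.g.  java -cp "$CLASSPATH" -Dantlr.java-test-dir=/var/tmp/antlr-tests \
    //             -Dantlr.preserve-test-dir=true \
    //             org.junit.runner.JUnitCore org.antlr.v4.test.rt.java.TestCompositeLexers
    // Rough sketch of how the values resolve when the properties are absent:
    String base = System.getProperty("antlr.java-test-dir", System.getProperty("java.io.tmpdir"));
    boolean perTestDirs = System.getProperty("antlr.java-test-dir") == null;  // roughly CREATE_PER_TEST_DIRECTORIES
    boolean preserve = Boolean.parseBoolean(System.getProperty("antlr.preserve-test-dir"));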
+ */ + public static final boolean CREATE_PER_TEST_DIRECTORIES; + + static { + String baseTestDir = System.getProperty("antlr.java-test-dir"); + boolean perTestDirectories = false; + if (baseTestDir == null || baseTestDir.isEmpty()) { + baseTestDir = System.getProperty("java.io.tmpdir"); + perTestDirectories = true; + } + + if (!new File(baseTestDir).isDirectory()) { + throw new UnsupportedOperationException("The specified base test directory does not exist: " + baseTestDir); + } + + BASE_TEST_DIR = baseTestDir; + CREATE_PER_TEST_DIRECTORIES = perTestDirectories; + } + + /** + * Build up the full classpath we need, including the surefire path (if present) + */ + public static final String CLASSPATH = System.getProperty("java.class.path"); + + public String tmpdir = null; + + /** If error during parser execution, store stderr here; can't return + * stdout and stderr. This doesn't trap errors from running antlr. + */ + protected String stderrDuringParse; + + @org.junit.Rule + public final TestRule testWatcher = new TestWatcher() { + + @Override + protected void succeeded(Description description) { + // remove tmpdir if no error. + if (!PRESERVE_TEST_DIR) { + eraseTempDir(); + } + } + + }; + + @Before + public void setUp() throws Exception { + if (CREATE_PER_TEST_DIRECTORIES) { + // new output dir for each test + String testDirectory = getClass().getSimpleName() + "-" + System.currentTimeMillis(); + tmpdir = new File(BASE_TEST_DIR, testDirectory).getAbsolutePath(); + } + else { + tmpdir = new File(BASE_TEST_DIR).getAbsolutePath(); + if (!PRESERVE_TEST_DIR && new File(tmpdir).exists()) { + eraseFiles(); + } + } + } + + protected org.antlr.v4.Tool newTool(String[] args) { + Tool tool = new Tool(args); + return tool; + } + + protected Tool newTool() { + org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); + return tool; + } + + protected ATN createATN(Grammar g, boolean useSerializer) { + if ( g.atn==null ) { + semanticProcess(g); + assertEquals(0, g.tool.getNumErrors()); + + ParserATNFactory f; + if ( g.isLexer() ) { + f = new LexerATNFactory((LexerGrammar)g); + } + else { + f = new ParserATNFactory(g); + } + + g.atn = f.createATN(); + assertEquals(0, g.tool.getNumErrors()); + } + + ATN atn = g.atn; + if (useSerializer) { + char[] serialized = ATNSerializer.getSerializedAsChars(atn); + return new ATNDeserializer().deserialize(serialized); + } + + return atn; + } + + protected void semanticProcess(Grammar g) { + if ( g.ast!=null && !g.ast.hasErrors ) { + System.out.println(g.ast.toStringTree()); + Tool antlr = new Tool(); + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) + for (Grammar imp : g.getImportedGrammars()) { + antlr.processNonCombinedGrammar(imp, false); + } + } + } + } + + public DFA createDFA(Grammar g, DecisionState s) { +// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); +// DFA dfa = conv.createDFA(); +// conv.issueAmbiguityWarnings(); +// System.out.print("DFA="+dfa); +// return dfa; + return null; + } + +// public void minimizeDFA(DFA dfa) { +// DFAMinimizer dmin = new DFAMinimizer(dfa); +// dfa.minimized = dmin.minimize(); +// } + + IntegerList getTypesFromString(Grammar g, String expecting) { + IntegerList expectingTokenTypes = new IntegerList(); + if ( expecting!=null && !expecting.trim().isEmpty() ) { + for (String tname : expecting.replace(" ", "").split(",")) { + int ttype = g.getTokenType(tname); + expectingTokenTypes.add(ttype); + } + } + return 
expectingTokenTypes; + } + + public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { + ANTLRInputStream in = new ANTLRInputStream(input); + IntegerList tokenTypes = new IntegerList(); + int ttype; + do { + ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); + tokenTypes.add(ttype); + } while ( ttype!= Token.EOF ); + return tokenTypes; + } + + public List getTokenTypes(LexerGrammar lg, + ATN atn, + CharStream input) + { + LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); + List tokenTypes = new ArrayList(); + int ttype; + boolean hitEOF = false; + do { + if ( hitEOF ) { + tokenTypes.add("EOF"); + break; + } + int t = input.LA(1); + ttype = interp.match(input, Lexer.DEFAULT_MODE); + if ( ttype == Token.EOF ) { + tokenTypes.add("EOF"); + } + else { + tokenTypes.add(lg.typeToTokenList.get(ttype)); + } + + if ( t==IntStream.EOF ) { + hitEOF = true; + } + } while ( ttype!=Token.EOF ); + return tokenTypes; + } + + List checkRuleDFA(String gtext, String ruleName, String expecting) + throws Exception + { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(gtext, equeue); + ATN atn = createATN(g, false); + ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; + if ( s==null ) { + System.err.println("no such rule: "+ruleName); + return null; + } + ATNState t = s.transition(0).target; + if ( !(t instanceof DecisionState) ) { + System.out.println(ruleName+" has no decision"); + return null; + } + DecisionState blk = (DecisionState)t; + checkRuleDFA(g, blk, expecting); + return equeue.all; + } + + List checkRuleDFA(String gtext, int decision, String expecting) + throws Exception + { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(gtext, equeue); + ATN atn = createATN(g, false); + DecisionState blk = atn.decisionToState.get(decision); + checkRuleDFA(g, blk, expecting); + return equeue.all; + } + + void checkRuleDFA(Grammar g, DecisionState blk, String expecting) + throws Exception + { + DFA dfa = createDFA(g, blk); + String result = null; + if ( dfa!=null ) result = dfa.toString(); + assertEquals(expecting, result); + } + + List checkLexerDFA(String gtext, String expecting) + throws Exception + { + return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); + } + + List checkLexerDFA(String gtext, String modeName, String expecting) + throws Exception + { + ErrorQueue equeue = new ErrorQueue(); + LexerGrammar g = new LexerGrammar(gtext, equeue); + g.atn = createATN(g, false); +// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); +// DFA dfa = conv.createDFA(modeName); +// g.setLookaheadDFA(0, dfa); // only one decision to worry about +// +// String result = null; +// if ( dfa!=null ) result = dfa.toString(); +// assertEquals(expecting, result); +// +// return equeue.all; + return null; + } + + protected String load(String fileName, @Nullable String encoding) + throws IOException + { + if ( fileName==null ) { + return null; + } + + String fullFileName = getClass().getPackage().getName().replace('.', '/') + '/' + fileName; + int size = 65000; + InputStreamReader isr; + InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); + if ( encoding!=null ) { + isr = new InputStreamReader(fis, encoding); + } + else { + isr = new InputStreamReader(fis); + } + try { + char[] data = new char[size]; + int n = isr.read(data); + return new String(data, 0, n); + } + finally { + isr.close(); + } + } + + /** Wow! 
much faster than compiling outside of VM. Finicky though. + * Had rules called r and modulo. Wouldn't compile til I changed to 'a'. + */ + protected boolean compile(String... fileNames) { + List files = new ArrayList(); + for (String fileName : fileNames) { + File f = new File(tmpdir, fileName); + files.add(f); + } + + JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); +// DiagnosticCollector diagnostics = +// new DiagnosticCollector(); + + StandardJavaFileManager fileManager = + compiler.getStandardFileManager(null, null, null); + + Iterable compilationUnits = + fileManager.getJavaFileObjectsFromFiles(files); + + Iterable compileOptions = + Arrays.asList("-g", "-source", "1.6", "-target", "1.6", "-implicit:class", "-Xlint:-options", "-d", tmpdir, "-cp", tmpdir+pathSep+CLASSPATH); + + JavaCompiler.CompilationTask task = + compiler.getTask(null, fileManager, null, compileOptions, null, + compilationUnits); + boolean ok = task.call(); + + try { + fileManager.close(); + } + catch (IOException ioe) { + ioe.printStackTrace(System.err); + } + +// List errors = new ArrayList(); +// for (Diagnostic diagnostic : diagnostics.getDiagnostics()) { +// errors.add( +// String.valueOf(diagnostic.getLineNumber())+ +// ": " + diagnostic.getMessage(null)); +// } +// if ( errors.size()>0 ) { +// System.err.println("compile stderr from: "+cmdLine); +// System.err.println(errors); +// return false; +// } + return ok; + + /* + File outputDir = new File(tmpdir); + try { + Process process = + Runtime.getRuntime().exec(args, null, outputDir); + StreamVacuum stdout = new StreamVacuum(process.getInputStream()); + StreamVacuum stderr = new StreamVacuum(process.getErrorStream()); + stdout.start(); + stderr.start(); + process.waitFor(); + stdout.join(); + stderr.join(); + if ( stdout.toString().length()>0 ) { + System.err.println("compile stdout from: "+cmdLine); + System.err.println(stdout); + } + if ( stderr.toString().length()>0 ) { + System.err.println("compile stderr from: "+cmdLine); + System.err.println(stderr); + } + int ret = process.exitValue(); + return ret==0; + } + catch (Exception e) { + System.err.println("can't exec compilation"); + e.printStackTrace(System.err); + return false; + } + */ + } + + protected ErrorQueue antlr(String grammarFileName, boolean defaultListener, String... 
extraOptions) { + final List options = new ArrayList(); + Collections.addAll(options, extraOptions); + if ( !options.contains("-o") ) { + options.add("-o"); + options.add(tmpdir); + } + if ( !options.contains("-lib") ) { + options.add("-lib"); + options.add(tmpdir); + } + if ( !options.contains("-encoding") ) { + options.add("-encoding"); + options.add("UTF-8"); + } + options.add(new File(tmpdir,grammarFileName).toString()); + + final String[] optionsA = new String[options.size()]; + options.toArray(optionsA); + Tool antlr = newTool(optionsA); + ErrorQueue equeue = new ErrorQueue(antlr); + antlr.addListener(equeue); + if (defaultListener) { + antlr.addListener(new DefaultToolListener(antlr)); + } + antlr.processGrammarsOnCommandLine(); + + if ( !defaultListener && !equeue.errors.isEmpty() ) { + System.err.println("antlr reports errors from "+options); + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage msg = equeue.errors.get(i); + System.err.println(msg); + } + System.out.println("!!!\ngrammar:"); + try { + System.out.println(new String(Utils.readFile(tmpdir+"/"+grammarFileName))); + } + catch (IOException ioe) { + System.err.println(ioe.toString()); + } + System.out.println("###"); + } + if ( !defaultListener && !equeue.warnings.isEmpty() ) { + System.err.println("antlr reports warnings from "+options); + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage msg = equeue.warnings.get(i); + System.err.println(msg); + } + } + + return equeue; + } + + protected ErrorQueue antlr(String grammarFileName, String grammarStr, boolean defaultListener, String... extraOptions) { + System.out.println("dir "+tmpdir); + mkdir(tmpdir); + writeFile(tmpdir, grammarFileName, grammarStr); + return antlr(grammarFileName, defaultListener, extraOptions); + } + + protected String execLexer(String grammarFileName, + String grammarStr, + String lexerName, + String input) + { + return execLexer(grammarFileName, grammarStr, lexerName, input, false); + } + + protected String execLexer(String grammarFileName, + String grammarStr, + String lexerName, + String input, + boolean showDFA) + { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, + null, + lexerName); + assertTrue(success); + writeFile(tmpdir, "input", input); + writeLexerTestFile(lexerName, showDFA); + compile("Test.java"); + String output = execClass("Test"); + if ( stderrDuringParse!=null && stderrDuringParse.length()>0 ) { + System.err.println(stderrDuringParse); + } + return output; + } + + public ParseTree execParser(String startRuleName, String input, + String parserName, String lexerName) + throws Exception + { + Pair pl = getParserAndLexer(input, parserName, lexerName); + Parser parser = pl.a; + return execStartRule(startRuleName, parser); + } + + public ParseTree execStartRule(String startRuleName, Parser parser) + throws IllegalAccessException, InvocationTargetException, + NoSuchMethodException + { + Method startRule = null; + Object[] args = null; + try { + startRule = parser.getClass().getMethod(startRuleName); + } + catch (NoSuchMethodException nsme) { + // try with int _p arg for recursive func + startRule = parser.getClass().getMethod(startRuleName, int.class); + args = new Integer[] {0}; + } + ParseTree result = (ParseTree)startRule.invoke(parser, args); +// System.out.println("parse tree = "+result.toStringTree(parser)); + return result; + } + + public Pair getParserAndLexer(String input, + String parserName, String lexerName) + throws Exception + { + final Class lexerClass = 
loadLexerClassFromTempDir(lexerName); + final Class parserClass = loadParserClassFromTempDir(parserName); + + ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); + + Class c = lexerClass.asSubclass(Lexer.class); + Constructor ctor = c.getConstructor(CharStream.class); + Lexer lexer = ctor.newInstance(in); + + Class pc = parserClass.asSubclass(Parser.class); + Constructor pctor = pc.getConstructor(TokenStream.class); + CommonTokenStream tokens = new CommonTokenStream(lexer); + Parser parser = pctor.newInstance(tokens); + return new Pair(parser, lexer); + } + + public Class loadClassFromTempDir(String name) throws Exception { + ClassLoader loader = + new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, + ClassLoader.getSystemClassLoader()); + return loader.loadClass(name); + } + + public Class loadLexerClassFromTempDir(String name) throws Exception { + return loadClassFromTempDir(name).asSubclass(Lexer.class); + } + + public Class loadParserClassFromTempDir(String name) throws Exception { + return loadClassFromTempDir(name).asSubclass(Parser.class); + } + + protected String execParser(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String startRuleName, + String input, boolean debug) + { + return execParser(grammarFileName, grammarStr, parserName, + lexerName, startRuleName, input, debug, false); + } + + protected String execParser(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String startRuleName, + String input, boolean debug, + boolean profile) + { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, + parserName, + lexerName, + "-visitor"); + assertTrue(success); + writeFile(tmpdir, "input", input); + return rawExecRecognizer(parserName, + lexerName, + startRuleName, + debug, + profile); + } + + /** Return true if all is well */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, + @Nullable String parserName, + String lexerName, + String... extraOptions) + { + return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); + } + + /** Return true if all is well */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, + @Nullable String parserName, + String lexerName, + boolean defaultListener, + String... 
extraOptions) + { + ErrorQueue equeue = + antlr(grammarFileName, grammarStr, defaultListener, extraOptions); + if (!equeue.errors.isEmpty()) { + return false; + } + + List files = new ArrayList(); + if ( lexerName!=null ) { + files.add(lexerName+".java"); + } + if ( parserName!=null ) { + files.add(parserName+".java"); + Set optionsSet = new HashSet(Arrays.asList(extraOptions)); + if (!optionsSet.contains("-no-listener")) { + files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseListener.java"); + } + if (optionsSet.contains("-visitor")) { + files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseVisitor.java"); + } + } + boolean allIsWell = compile(files.toArray(new String[files.size()])); + return allIsWell; + } + + protected String rawExecRecognizer(String parserName, + String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) + { + this.stderrDuringParse = null; + if ( parserName==null ) { + writeLexerTestFile(lexerName, false); + } + else { + writeTestFile(parserName, + lexerName, + parserStartRuleName, + debug, + profile); + } + + compile("Test.java"); + return execClass("Test"); + } + + public String execRecognizer() { + return execClass("Test"); + } + + public String execClass(String className) { + if (TEST_IN_SAME_PROCESS) { + try { + ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader()); + final Class mainClass = (Class)loader.loadClass(className); + final Method mainMethod = mainClass.getDeclaredMethod("main", String[].class); + PipedInputStream stdoutIn = new PipedInputStream(); + PipedInputStream stderrIn = new PipedInputStream(); + PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn); + PipedOutputStream stderrOut = new PipedOutputStream(stderrIn); + StreamVacuum stdoutVacuum = new StreamVacuum(stdoutIn); + StreamVacuum stderrVacuum = new StreamVacuum(stderrIn); + + PrintStream originalOut = System.out; + System.setOut(new PrintStream(stdoutOut)); + try { + PrintStream originalErr = System.err; + try { + System.setErr(new PrintStream(stderrOut)); + stdoutVacuum.start(); + stderrVacuum.start(); + mainMethod.invoke(null, (Object)new String[] { new File(tmpdir, "input").getAbsolutePath() }); + } + finally { + System.setErr(originalErr); + } + } + finally { + System.setOut(originalOut); + } + + stdoutOut.close(); + stderrOut.close(); + stdoutVacuum.join(); + stderrVacuum.join(); + String output = stdoutVacuum.toString(); + if ( stderrVacuum.toString().length()>0 ) { + this.stderrDuringParse = stderrVacuum.toString(); + System.err.println("exec stderrVacuum: "+ stderrVacuum); + } + return output; + } catch (MalformedURLException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (IOException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (InterruptedException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (IllegalAccessException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (IllegalArgumentException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (InvocationTargetException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (NoSuchMethodException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (SecurityException ex) { + LOGGER.log(Level.SEVERE, null, ex); + 
throw new RuntimeException(ex); + } catch (ClassNotFoundException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } + } + + try { + String[] args = new String[] { + "java", "-classpath", tmpdir+pathSep+CLASSPATH, + className, new File(tmpdir, "input").getAbsolutePath() + }; + //String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath(); + //System.out.println("execParser: "+cmdLine); + Process process = + Runtime.getRuntime().exec(args, null, new File(tmpdir)); + StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); + StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); + stdoutVacuum.start(); + stderrVacuum.start(); + process.waitFor(); + stdoutVacuum.join(); + stderrVacuum.join(); + String output = stdoutVacuum.toString(); + if ( stderrVacuum.toString().length()>0 ) { + this.stderrDuringParse = stderrVacuum.toString(); + System.err.println("exec stderrVacuum: "+ stderrVacuum); + } + return output; + } + catch (Exception e) { + System.err.println("can't exec recognizer"); + e.printStackTrace(System.err); + } + return null; + } + + public void testErrors(String[] pairs, boolean printTree) { + for (int i = 0; i < pairs.length; i+=2) { + String input = pairs[i]; + String expect = pairs[i+1]; + + String[] lines = input.split("\n"); + String fileName = getFilenameFromFirstLineOfGrammar(lines[0]); + ErrorQueue equeue = antlr(fileName, input, false); + + String actual = equeue.toString(true); + actual = actual.replace(tmpdir + File.separator, ""); + System.err.println(actual); + String msg = input; + msg = msg.replace("\n","\\n"); + msg = msg.replace("\r","\\r"); + msg = msg.replace("\t","\\t"); + + assertEquals("error in: "+msg,expect,actual); + } + } + + public String getFilenameFromFirstLineOfGrammar(String line) { + String fileName = "A" + Tool.GRAMMAR_EXTENSION; + int grIndex = line.lastIndexOf("grammar"); + int semi = line.lastIndexOf(';'); + if ( grIndex>=0 && semi>=0 ) { + int space = line.indexOf(' ', grIndex); + fileName = line.substring(space+1, semi)+Tool.GRAMMAR_EXTENSION; + } + if ( fileName.length()==Tool.GRAMMAR_EXTENSION.length() ) fileName = "A" + Tool.GRAMMAR_EXTENSION; + return fileName; + } + +// void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) +// throws Exception +// { +// ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); +// } + +// void ambig(List msgs, int i, int[] expectedAmbigAlts, String expectedAmbigInput) +// throws Exception +// { +// List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); +// AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); +// if ( a==null ) assertNull(expectedAmbigAlts); +// else { +// assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); +// } +// assertEquals(expectedAmbigInput, a.input); +// } + +// void unreachable(List msgs, int[] expectedUnreachableAlts) +// throws Exception +// { +// unreachable(msgs, 0, expectedUnreachableAlts); +// } + +// void unreachable(List msgs, int i, int[] expectedUnreachableAlts) +// throws Exception +// { +// List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); +// UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); +// if ( u==null ) assertNull(expectedUnreachableAlts); +// else { +// assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); +// } +// } + + List getMessagesOfType(List msgs, Class c) { + List filtered = new ArrayList(); + for (ANTLRMessage m : msgs) 
{ + if ( m.getClass() == c ) filtered.add(m); + } + return filtered; + } + + void checkRuleATN(Grammar g, String ruleName, String expecting) { + DOTGenerator dot = new DOTGenerator(g); + System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index])); + + Rule r = g.getRule(ruleName); + ATNState startState = g.atn.ruleToStartState[r.index]; + ATNPrinter serializer = new ATNPrinter(g, startState); + String result = serializer.asString(); + + //System.out.print(result); + assertEquals(expecting, result); + } + + public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { + int lp = templates.indexOf('('); + String name = templates.substring(0, lp); + STGroup group = new STGroupString(templates); + ST st = group.getInstanceOf(name); + st.add(actionName, action); + String grammar = st.render(); + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(grammar, equeue); + if ( g.ast!=null && !g.ast.hasErrors ) { + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + + ATNFactory factory = new ParserATNFactory(g); + if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); + g.atn = factory.createATN(); + + CodeGenerator gen = new CodeGenerator(g); + ST outputFileST = gen.generateParser(); + String output = outputFileST.render(); + //System.out.println(output); + String b = "#" + actionName + "#"; + int start = output.indexOf(b); + String e = "#end-" + actionName + "#"; + int end = output.indexOf(e); + String snippet = output.substring(start+b.length(),end); + assertEquals(expected, snippet); + } + if ( equeue.size()>0 ) { + System.err.println(equeue.toString()); + } + } + + public static class StreamVacuum implements Runnable { + StringBuilder buf = new StringBuilder(); + BufferedReader in; + Thread sucker; + public StreamVacuum(InputStream in) { + this.in = new BufferedReader( new InputStreamReader(in) ); + } + public void start() { + sucker = new Thread(this); + sucker.start(); + } + @Override + public void run() { + try { + String line = in.readLine(); + while (line!=null) { + buf.append(line); + buf.append('\n'); + line = in.readLine(); + } + } + catch (IOException ioe) { + System.err.println("can't read output from process"); + } + } + /** wait for the thread to finish */ + public void join() throws InterruptedException { + sucker.join(); + } + @Override + public String toString() { + return buf.toString(); + } + } + + protected void checkGrammarSemanticsError(ErrorQueue equeue, + GrammarSemanticsMessage expectedMessage) + throws Exception + { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage m = equeue.errors.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if ( equeue.size()!=1 ) { + System.err.println(equeue); + } + } + + protected void checkGrammarSemanticsWarning(ErrorQueue equeue, + GrammarSemanticsMessage expectedMessage) + throws Exception + { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage m = equeue.warnings.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertNotNull("no error; 
"+expectedMessage.getErrorType()+" expected", foundMsg); + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if ( equeue.size()!=1 ) { + System.err.println(equeue); + } + } + + protected void checkError(ErrorQueue equeue, + ANTLRMessage expectedMessage) + throws Exception + { + //System.out.println("errors="+equeue); + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage m = equeue.errors.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertTrue("no error; "+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); + assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); + assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); + /* + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + */ + assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); + } + + public static class FilteringTokenStream extends CommonTokenStream { + public FilteringTokenStream(TokenSource src) { super(src); } + Set hide = new HashSet(); + @Override + protected boolean sync(int i) { + if (!super.sync(i)) { + return false; + } + + Token t = get(i); + if ( hide.contains(t.getType()) ) { + ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); + } + + return true; + } + public void setTokenTypeChannel(int ttype, int channel) { + hide.add(ttype); + } + } + + public static void writeFile(String dir, String fileName, String content) { + try { + Utils.writeFile(dir+"/"+fileName, content, "UTF-8"); + } + catch (IOException ioe) { + System.err.println("can't write file"); + ioe.printStackTrace(System.err); + } + } + + protected void mkdir(String dir) { + File f = new File(dir); + f.mkdirs(); + } + + protected void writeTestFile(String parserName, + String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) + { + ST outputFileST = new ST( + "import org.antlr.v4.runtime.*;\n" + + "import org.antlr.v4.runtime.tree.*;\n" + + "import org.antlr.v4.runtime.atn.*;\n" + + "import java.util.Arrays;\n"+ + "\n" + + "public class Test {\n" + + " public static void main(String[] args) throws Exception {\n" + + " CharStream input = new ANTLRFileStream(args[0]);\n" + + " lex = new (input);\n" + + " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + + " \n"+ + " parser.setBuildParseTree(true);\n" + + " \n"+ + " ParserRuleContext tree = parser.();\n" + + " System.out.println(Arrays.toString(profiler.getDecisionInfo()));\n" + + " ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" + + " }\n" + + "\n" + + " static class TreeShapeListener implements ParseTreeListener {\n" + + " @Override public void visitTerminal(TerminalNode node) { }\n" + + " @Override public void visitErrorNode(ErrorNode node) { }\n" + + " @Override public void exitEveryRule(ParserRuleContext ctx) { }\n" + + "\n" + + " @Override\n" + + " public void enterEveryRule(ParserRuleContext ctx) {\n" + + " for (int i = 0; i \\< ctx.getChildCount(); i++) {\n" + + " ParseTree parent = ctx.getChild(i).getParent();\n" + + " if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) {\n" + + " throw new IllegalStateException(\"Invalid parse tree shape detected.\");\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}" + ); + ST createParserST = new ST(" parser = new 
(tokens);\n"); + if ( debug ) { + createParserST = + new ST( + " parser = new (tokens);\n" + + " parser.addErrorListener(new DiagnosticErrorListener());\n"); + } + if ( profile ) { + outputFileST.add("profile", + "ProfilingATNSimulator profiler = new ProfilingATNSimulator(parser);\n" + + "parser.setInterpreter(profiler);"); + } + else { + outputFileST.add("profile", new ArrayList()); + } + outputFileST.add("createParser", createParserST); + outputFileST.add("parserName", parserName); + outputFileST.add("lexerName", lexerName); + outputFileST.add("parserStartRuleName", parserStartRuleName); + writeFile(tmpdir, "Test.java", outputFileST.render()); + } + + protected void writeLexerTestFile(String lexerName, boolean showDFA) { + ST outputFileST = new ST( + "import org.antlr.v4.runtime.*;\n" + + "\n" + + "public class Test {\n" + + " public static void main(String[] args) throws Exception {\n" + + " CharStream input = new ANTLRFileStream(args[0]);\n" + + " lex = new (input);\n" + + " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + + " tokens.fill();\n" + + " for (Object t : tokens.getTokens()) System.out.println(t);\n" + + (showDFA?"System.out.print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString());\n":"")+ + " }\n" + + "}" + ); + + outputFileST.add("lexerName", lexerName); + writeFile(tmpdir, "Test.java", outputFileST.render()); + } + + public void writeRecognizerAndCompile(String parserName, String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) { + if ( parserName==null ) { + writeLexerTestFile(lexerName, debug); + } + else { + writeTestFile(parserName, + lexerName, + parserStartRuleName, + debug, + profile); + } + + compile("Test.java"); + } + + + protected void eraseFiles(final String filesEndingWith) { + File tmpdirF = new File(tmpdir); + String[] files = tmpdirF.list(); + for(int i = 0; files!=null && i < files.length; i++) { + if ( files[i].endsWith(filesEndingWith) ) { + new File(tmpdir+"/"+files[i]).delete(); + } + } + } + + protected void eraseFiles() { + if (tmpdir == null) { + return; + } + + File tmpdirF = new File(tmpdir); + String[] files = tmpdirF.list(); + for(int i = 0; files!=null && i < files.length; i++) { + new File(tmpdir+"/"+files[i]).delete(); + } + } + + protected void eraseTempDir() { + File tmpdirF = new File(tmpdir); + if ( tmpdirF.exists() ) { + eraseFiles(); + tmpdirF.delete(); + } + } + + public String getFirstLineOfException() { + if ( this.stderrDuringParse ==null ) { + return null; + } + String[] lines = this.stderrDuringParse.split("\n"); + String prefix="Exception in thread \"main\" "; + return lines[0].substring(prefix.length(),lines[0].length()); + } + + /** + * When looking at a result set that consists of a Map/HashTable + * we cannot rely on the output order, as the hashing algorithm or other aspects + * of the implementation may be different on differnt JDKs or platforms. Hence + * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a + * bit of a hack, but guarantees that we get the same order on all systems. We assume that + * the keys are strings. + * + * @param m The Map that contains keys we wish to return in sorted order + * @return A string that represents all the keys in sorted order. 
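For instance (illustrative only), copying a HashMap into a TreeMap makes the rendering deterministic regardless of hash iteration order:

    // TreeMap sorts its string keys, so toString() is stable across JDKs and platforms.
    java.util.Map<String, Integer> m = new java.util.HashMap<String, Integer>();
    m.put("b", 2);
    m.put("a", 1);
    System.out.println(new java.util.TreeMap<String, Integer>(m));  // always prints {a=1, b=2}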
+ */ + public String sortMapToString(Map m) { + // Pass in crap, and get nothing back + // + if (m == null) { + return null; + } + + System.out.println("Map toString looks like: " + m.toString()); + + // Sort the keys in the Map + // + TreeMap nset = new TreeMap(m); + + System.out.println("Tree map looks like: " + nset.toString()); + return nset.toString(); + } + + public List realElements(List elements) { + return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); + } + + public void assertNotNullOrEmpty(String message, String text) { + assertNotNull(message, text); + assertFalse(message, text.isEmpty()); + } + + public void assertNotNullOrEmpty(String text) { + assertNotNull(text); + assertFalse(text.isEmpty()); + } + + public static class IntTokenStream implements TokenStream { + IntegerList types; + int p=0; + public IntTokenStream(IntegerList types) { this.types = types; } + + @Override + public void consume() { p++; } + + @Override + public int LA(int i) { return LT(i).getType(); } + + @Override + public int mark() { + return index(); + } + + @Override + public int index() { return p; } + + @Override + public void release(int marker) { + seek(marker); + } + + @Override + public void seek(int index) { + p = index; + } + + @Override + public int size() { + return types.size(); + } + + @Override + public String getSourceName() { + return UNKNOWN_SOURCE_NAME; + } + + @Override + public Token LT(int i) { + CommonToken t; + int rawIndex = p + i - 1; + if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); + else t = new CommonToken(types.get(rawIndex)); + t.setTokenIndex(rawIndex); + return t; + } + + @Override + public Token get(int i) { + return new org.antlr.v4.runtime.CommonToken(types.get(i)); + } + + @Override + public TokenSource getTokenSource() { + return null; + } + + @NotNull + @Override + public String getText() { + throw new UnsupportedOperationException("can't give strings"); + } + + @NotNull + @Override + public String getText(Interval interval) { + throw new UnsupportedOperationException("can't give strings"); + } + + @NotNull + @Override + public String getText(RuleContext ctx) { + throw new UnsupportedOperationException("can't give strings"); + } + + @NotNull + @Override + public String getText(Token start, Token stop) { + throw new UnsupportedOperationException("can't give strings"); + } + } + + /** Sort a list */ + public > List sort(List data) { + List dup = new ArrayList(); + dup.addAll(data); + Collections.sort(dup); + return dup; + } + + /** Return map sorted by key */ + public ,V> LinkedHashMap sort(Map data) { + LinkedHashMap dup = new LinkedHashMap(); + List keys = new ArrayList(); + keys.addAll(data.keySet()); + Collections.sort(keys); + for (K k : keys) { + dup.put(k, data.get(k)); + } + return dup; + } +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg new file mode 100644 index 000000000..4bea570e7 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -0,0 +1,349 @@ +TestFile(file) ::= << +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + + +import org.antlr.v4.test.tool.ErrorQueue; + + +import org.antlr.v4.tool.Grammar; + + +public class Test extends BaseTest { + + }; separator="\n", wrap, anchor> + +} +>> + +LexerTestMethod(test) ::= << +@Test +public void test() throws Exception { + = };separator="\\n\" +\n", wrap, anchor>"; + mkdir(tmpdir); + writeFile(tmpdir, ".g4", slave_); + 
};separator="\n", wrap, anchor> + String grammar = };separator="\\n\" +\n", wrap, anchor>"; + + String found = execLexer(".g4", grammar, "", ""); + assertEquals(\\n"};separator=" + \n", wrap, anchor>, found); + + assertEquals("", this.stderrDuringParse); + + assertNull(this.stderrDuringParse); + +} + +>> + +CompositeLexerTestMethod(test) ::= << + +>> + + +ParserTestMethod(test) ::= << +@Test +public void test() throws Exception { + = };separator="\\n\" +\n", wrap, anchor>"; + mkdir(tmpdir); + writeFile(tmpdir, ".g4", slave_); + };separator="\n", wrap, anchor> + String grammar = };separator="\\n\" +\n", wrap, anchor>"; + + String found = execParser(".g4", grammar, "Parser", "Lexer", "", "", false); + assertEquals("", found); + + assertEquals("", this.stderrDuringParse); + + assertNull(this.stderrDuringParse); + +} + +>> + +CompositeParserTestMethod(test) ::= << + +>> + +AbstractParserTestMethod(test) ::= << +String test(String input) throws Exception { + String grammar = };separator="\\n\" +\n", wrap, anchor>"; + return execParser(".g4", grammar, "Parser", "Lexer", "", input, false); +} + +>> + +ConcreteParserTestMethod(test) ::= << +@Test +public void test() throws Exception { + String found = test(""); + assertEquals("", found); + + assertEquals("", this.stderrDuringParse); + + assertNull(this.stderrDuringParse); + +} + +>> + +writeln(s) ::= <);>> + +write(s) ::= <;>> + +assert(s) ::= <);>> + +LocalVar() ::= "var " + +InitMember(n,v) ::= <%this. = ;%> + +GetMember(n) ::= <%this.%> + +SetMember(n,v) ::= <%this. = ;%> + +AddMember(n,v) ::= <%this. += ;%> + +PlusMember(v,n) ::= <% + this.%> + +MemberEquals(n,v) ::= <%this. === %> + +ModMemberEquals(n,m,v) ::= <%this. % m === %> + +ModMemberNotEquals(n,m,v) ::= <%this. % m != %> + +DumpDFA() ::= "this.dumpDFA();" + +Pass() ::= "" + +BuildParseTrees() ::= "this.buildParseTrees = true;" + +BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> + +ToStringTree(s) ::= <%.toStringTree(null, this);%> + +Column() ::= "this.column" + +Text() ::= "this.getText()" + +ValEquals(a,b) ::= <%===%> + +TextEquals(a) ::= <%this.text===""%> + +PlusText(a) ::= <%"" + this.getText()%> + +InputText() ::= "this._input.getText()" + +LTEquals(i, v) ::= <%this._input.LT().text===%> + +LANotEquals(i, v) ::= <%this._input.LA()!=%> + +TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> + +ImportListener(X) ::= <Listener = require('./Listener').Listener;>> + +GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames));" + +WriteRuleInvocationStack() ::= "document.getElementById('output').value += antlr4.Utils.arrayToString(this.getRuleInvocationStack()) + '\\n';" + +LL_EXACT_AMBIG_DETECTION() ::= <> + +PositionAdjustingLexer() ::= << + +@Override +public Token nextToken() { + if (!(_interp instanceof PositionAdjustingLexerATNSimulator)) { + _interp = new PositionAdjustingLexerATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache); + } + + return super.nextToken(); +} + +@Override +public Token emit() { + switch (_type) { + case TOKENS: + handleAcceptPositionForKeyword("tokens"); + break; + + case LABEL: + handleAcceptPositionForIdentifier(); + break; + + default: + break; + } + + return super.emit(); +} + +private boolean handleAcceptPositionForIdentifier() { + String tokenText = getText(); + int identifierLength = 0; + while (identifierLength \< tokenText.length() && isIdentifierChar(tokenText.charAt(identifierLength))) { + identifierLength++; + } + + if (getInputStream().index() > _tokenStartCharIndex + 
identifierLength) { + int offset = identifierLength - 1; + getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset); + return true; + } + + return false; +} + +private boolean handleAcceptPositionForKeyword(String keyword) { + if (getInputStream().index() > _tokenStartCharIndex + keyword.length()) { + int offset = keyword.length() - 1; + getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset); + return true; + } + + return false; +} + +@Override +public PositionAdjustingLexerATNSimulator getInterpreter() { + return (PositionAdjustingLexerATNSimulator)super.getInterpreter(); +} + +private static boolean isIdentifierChar(char c) { + return Character.isLetterOrDigit(c) || c == '_'; +} + +protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimulator { + + public PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn, + DFA[] decisionToDFA, + PredictionContextCache sharedContextCache) + { + super(recog, atn, decisionToDFA, sharedContextCache); + } + + protected void resetAcceptPosition(CharStream input, int index, int line, int charPositionInLine) { + input.seek(index); + this.line = line; + this.charPositionInLine = charPositionInLine; + consume(input); + } + +} + +>> + +BasicListener(X) ::= << +this.LeafListener = function() { + this.visitTerminal = function(node) { + document.getElementById('output').value += node.symbol.text + '\\n'; + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; + +>> + +walkListener(s) ::= << +var walker = new antlr4.tree.ParseTreeWalker(); +walker.walk(new this.LeafListener(), ); + +>> + +TokenGetterListener(X) ::= << +this.LeafListener = function() { + this.exitA = function(ctx) { + var str; + if(ctx.getChildCount()===2) { + str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); + } else { + str = ctx.ID().symbol.toString(); + } + document.getElementById('output').value += str + '\\n'; + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype);\n" + +this.LeafListener.prototype.constructor = this.LeafListener;\n" + + +>> + +RuleGetterListener(X) ::= << +this.LeafListener = function() { + this.exitA = function(ctx) { + var str; + if(ctx.getChildCount()===2) { + str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; + } else { + str = ctx.b(0).start.text; + } + document.getElementById('output').value += str + '\\n'; + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype);\n" + +this.LeafListener.prototype.constructor = this.LeafListener;\n" + + +>> + + +LRListener(X) ::= << +this.LeafListener = function() { + this.exitE = function(ctx) { + var str; + if(ctx.getChildCount()===3) { + str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; + } else { + str = ctx.INT().symbol.text; + } + document.getElementById('output').value += str + '\\n'; + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; + +>> + +LRWithLabelsListener(X) ::= << +this.LeafListener = function() { + this.exitCall = function(ctx) { + var str = ctx.e().start.text + ' ' + ctx.eList(); + document.getElementById('output').value += str + '\\n'; + }; 
+ this.exitInt = function(ctx) { + var str = ctx.INT().symbol.text; + document.getElementById('output').value += str + '\\n'; + }; + return this; +}; +this.LeafListener.prototype = Object.create(Listener.prototype); +this.LeafListener.prototype.constructor = this.LeafListener; + +>> + +DeclareContextListGettersFunction() ::= << + function foo() { + var s = new SContext(); + var a = s.a(); + var b = s.b(); + }; +>> + +Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo\\n'};" + +Invoke_foo() ::= "this.foo();" + +Declare_pred() ::= <> + +Invoke_pred(v) ::= <)>> + + + diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java new file mode 100644 index 000000000..bc0343ea0 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java @@ -0,0 +1,49 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestCompositeLexers extends BaseTest { + + @Test + public void testLexerDelegatorInvokesDelegateRule() throws Exception { + String slave_S = "lexer grammar S;\n" + + "A : 'a' {document.getElementById('output').value += \"S.a\";};\n" + + "C : 'c' ;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "lexer grammar M;\n" + + "import S;\n" + + "B : 'b';\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("M.g4", grammar, "M", "abc"); + assertEquals("S.A\n" + + "[@0,0:0='a',<3>,1:0]\n" + + "[@1,1:1='b',<1>,1:1]\n" + + "[@2,2:2='c',<4>,1:2]\n" + + "[@3,3:2='',<-1>,1:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLexerDelegatorRuleOverridesDelegate() throws Exception { + String slave_S = "lexer grammar S;\n" + + "A : 'a' {document.getElementById('output').value += \"S.A\";};\n" + + "B : 'b' {document.getElementById('output').value += \"S.B\";};"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "lexer grammar M;\n" + + "import S;\n" + + "A : 'a' B {document.getElementById('output').value += \"M.A\";};\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("M.g4", grammar, "M", "ab"); + assertEquals("M.A\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java new file mode 100644 index 000000000..96a4b7d3d --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java @@ -0,0 +1,328 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +import org.antlr.v4.test.tool.ErrorQueue; +import org.antlr.v4.tool.Grammar; + +public class TestCompositeParsers extends BaseTest { + + @Test + public void testDelegatorInvokesDelegateRule() throws Exception { + String slave_S = "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");};"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ; // defines B from inherited token space\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); + assertEquals("S.a\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testBringInLiteralsFromDelegate() throws Exception { + String slave_S = "parser grammar 
S;\n" + + "a : '=' 'a' {document.getElementById('output').value += \"S.a\";};"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "=a", false); + assertEquals("S.a\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception { + String slave_S = "parser grammar S;\n" + + "a[int x] returns [int y] : B {document.getElementById('output').value += \"S.a\";;$y=1000;};"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : label=a[3] {System.out.println($label.y);} ;\n" + + "B : 'b' ; // defines B from inherited token space\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "a", false); + assertEquals("S.a1000\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { + String slave_S = "parser grammar S;\n" + + "A : B {document.getElementById('output').value += \"S.a\";};"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : a {document.getElementById('output').value += $a.text;} ;\n" + + "B : 'b' ; // defines B from inherited token space\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); + assertEquals("S.ab\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatorAccessesDelegateMembers() throws Exception { + String slave_S = "parser grammar S;\n" + + "@members {\n" + + "this.foo = function() {document.getElementById('output').value += 'foo\\n'};\n" + + "}\n" + + "a : B;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M; // uses no rules from the import\n" + + "import S;\n" + + "s : 'b'{this.foo();}; // gS is import pointer\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); + assertEquals("foo\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception { + String slave_S = "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");};\n" + + "b : B;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String slave_T = "parser grammar T;\n" + + "a : B {System.out.println(\"T.a\");};"; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave_T); + + String grammar = "grammar M;\n" + + "import S,T;\n" + + "s : a ;\n" + + "B : 'b' ; // defines B from inherited token space\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); + assertEquals("S.a\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatesSeeSameTokenType() throws Exception { + String slave_S = "parser grammar S;\n" + + "tokens { A, B, C }\n" + + "x : A {document.getElementById('output').value += \"S.x\";};"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String slave_T = "parser grammar S;\n" + + "tokens { C, B, A } // reverse order\n" + + "y : A {document.getElementById('output').value += \"T.y\";};"; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave_T); + + String grammar = "// The lexer will create 
rules to match letters a, b, c.\n" + + "// The associated token types A, B, C must have the same value\n" + + "// and all import'd parsers. Since ANTLR regenerates all imports\n" + + "// for use with the delegator M, it can generate the same token type\n" + + "// mapping in each parser:\n" + + "// public static final int C=6;\n" + + "// public static final int EOF=-1;\n" + + "// public static final int B=5;\n" + + "// public static final int WS=7;\n" + + "// public static final int A=4;\n" + + "grammar M;\n" + + "import S,T;\n" + + "s : x y ; // matches AA, which should be 'aa'\n" + + "B : 'b' ; // another order: B, A, C\n" + + "A : 'a' ; \n" + + "C : 'c' ; \n" + + "WS : (' '|'\\n') -> skip ;"; + writeFile(tmpdir, "M.g4", grammar); + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(tmpdir+"/M.g4", grammar, equeue); + String expectedTokenIDToTypeMap = "{EOF=-1, B=1, A=2, C=3, WS=4}"; + String expectedStringLiteralToTypeMap = "{'a'=2, 'b'=1, 'c'=3}"; + String expectedTypeToTokenList = "[B, A, C, WS]"; + assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString()); + assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "aa", false); + assertEquals("S.x\nT.y\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCombinedImportsCombined() throws Exception { + String slave_S = "parser grammar S;\n" + + "tokens { A, B, C }\n" + + "x : 'x' INT {System.out.println(\"S.x\");};\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : x INT;"; + writeFile(tmpdir, "M.g4", grammar); + ErrorQueue equeue = new ErrorQueue(); + new Grammar(tmpdir+"/M.g4", grammar, equeue); + assertEquals("unexpected errors: " + equeue, 0, equeue.errors.size()); + + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "x 34 9", false); + assertEquals("S.x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatorRuleOverridesDelegate() throws Exception { + String slave_S = "parser grammar S;\n" + + "a : b {document.getElementById('output').value += \"S.a\";};\n" + + "b : B ;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "b : 'b'|'c';\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "a", "c", false); + assertEquals("S.a\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception { + String slave_S = "parser grammar S;\n" + + "type_ : 'int' ;\n" + + "decl : type_ ID ';'\n" + + " | type_ ID init ';' {\n" + + " document.getElementById('output').value += \"decl: \" + $text;\n" + + " };\n" + + "init : '=' INT;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "prog : decl ;\n" + + "type_ : 'int' | 'float' ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "prog", "float x = 3;", false); + assertEquals("Decl: floatx=3;\n", found); + assertNull(this.stderrDuringParse); + } + + @Test 
+ public void testDelegatorRuleOverridesDelegates() throws Exception { + String slave_S = "parser grammar S;\n" + + "a : b {document.getElementById('output').value += \"S.a\";};\n" + + "b : 'b' ;\n" + + " "; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String slave_T = "parser grammar S;\n" + + "tokens { A }\n" + + "b : 'b' {document.getElementById('output').value += \"T.b\";};"; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave_T); + + String grammar = "grammar M;\n" + + "import S, T;\n" + + "b : 'b'|'c' {document.getElementById('output').value += \"M.b\";}|B|A;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "a", "c", false); + assertEquals("M.b\nS.a\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testKeywordVSIDOrder() throws Exception { + String slave_S = "lexer grammar S;\n" + + "ID : 'a'..'z'+;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "a : A {System.out.println(\"M.a: \"+$A);};\n" + + "A : 'abc' {System.out.println(\"M.A\");};\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "a", "abc", false); + assertEquals("M.A\nM.a: [@0,0:2='abc',<1>,1:0]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testImportedRuleWithAction() throws Exception { + String slave_S = "parser grammar S;\n" + + "a : @after {} : B;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : a;\n" + + "B : 'b';\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testImportedGrammarWithEmptyOptions() throws Exception { + String slave_S = "parser grammar S;\n" + + "options {}\n" + + "a : B;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "s : a;\n" + + "B : 'b';\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testImportLexerWithOnlyFragmentRules() throws Exception { + String slave_S = "lexer grammar S;\n" + + "fragment\n" + + "UNICODE_CLASS_Zs : '\\u0020' | '\\u00A0' | '\\u1680' | '\\u180E'\n" + + " | '\\u2000'..'\\u200A'\n" + + " | '\\u202F' | '\\u205F' | '\\u3000'\n" + + " ;"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave_S); + + String grammar = "grammar M;\n" + + "import S;\n" + + "program : 'test' 'test';\n" + + "WS : (UNICODE_CLASS_Zs)+ -> skip;"; + String found = execParser("M.g4", grammar, "MParser", "MLexer", "program", "test test", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java new file mode 100644 index 000000000..de6bed7b3 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java @@ -0,0 +1,208 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestFullContextParsing extends BaseTest { + + @Test + public void testAmbigYieldsCtxSensitiveDFA() throws Exception { + String grammar = "grammar T;\n" + + "s @after 
{this.dumpDFA();}\n" + + " : ID | ID {} ;\n" + + "ID : 'a'..'z'+;\n" + + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); + assertEquals("Decision 0:\ns0-ID->:s1^=>1\n", found); + assertEquals("line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", this.stderrDuringParse); + } + + String testCtxSensitiveDFA(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {this.dumpDFA();}\n" + + " : '$' a | '@' b ;\n" + + "a : e ID ;\n" + + "b : e INT ID ;\n" + + "e : INT | ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testCtxSensitiveDFA_1() throws Exception { + String found = testCtxSensitiveDFA("$ 34 abc"); + assertEquals("Decision 1:\ns0-INT->s1\ns1-ID->:s2^=>1\n", found); + assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\nline 1:2 reportContextSensitivity d=1 (e), input='34'\n", this.stderrDuringParse); + } + + @Test + public void testCtxSensitiveDFA_2() throws Exception { + String found = testCtxSensitiveDFA("@ 34 abc"); + assertEquals("Decision 1:\ns0-INT->s1\ns1-ID->:s2^=>1\n", found); + assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\nline 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", this.stderrDuringParse); + } + + @Test + public void testCtxSensitiveDFATwoDiffInput() throws Exception { + String grammar = "grammar T;\n" + + "s @after {this.dumpDFA();}\n" + + " : ('$' a | '@' b)+ ;\n" + + "a : e ID ;\n" + + "b : e INT ID ;\n" + + "e : INT | ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "$ 34 abc @ 34 abc", false); + assertEquals("Decision 2:\ns0-INT->s1\ns1-ID->:s2^=>1\n", found); + assertEquals("line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\nline 1:2 reportContextSensitivity d=2 (e), input='34'\nline 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\nline 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", this.stderrDuringParse); + } + + @Test + public void testSLLSeesEOFInLLGrammar() throws Exception { + String grammar = "grammar T;\n" + + "s @after {this.dumpDFA();}\n" + + " : a;\n" + + "a : e ID ;\n" + + "b : e INT ID ;\n" + + "e : INT | ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "34 abc", false); + assertEquals("Decision 0:\ns0-INT->s1\ns1-ID->:s2^=>1\n", found); + assertEquals("line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\nline 1:0 reportContextSensitivity d=0 (e), input='34'\n", this.stderrDuringParse); + } + + String testFullContextIF_THEN_ELSEParse(String input) throws Exception { + String grammar = "grammar T;\n" + + "s \n" + + "@init {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;}\n" + + "@after {this.dumpDFA();}\n" + + " : '{' stat* '}' ;\n" + + "stat: 'if' ID 'then' stat ('else' ID)?\n" + + " | 'return\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testFullContextIF_THEN_ELSEParse_1() throws Exception { + String found = testFullContextIF_THEN_ELSEParse("{ if x then return }"); + assertEquals("Decision 
1:\ns0-'}'->:s1=>2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testFullContextIF_THEN_ELSEParse_2() throws Exception { + String found = testFullContextIF_THEN_ELSEParse("{ if x then return else foo }"); + assertEquals("Decision 1:\ns0-'else'->:s1^=>1\n", found); + assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:19 reportContextSensitivity d=1 (stat), input='else'\n", this.stderrDuringParse); + } + + @Test + public void testFullContextIF_THEN_ELSEParse_3() throws Exception { + String found = testFullContextIF_THEN_ELSEParse("{ if x then if y then return else foo }"); + assertEquals("Decision 1:\ns0-'else'->:s1^=>1\ns0-'}'->:s2=>2\n", found); + assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse); + } + + @Test + public void testFullContextIF_THEN_ELSEParse_4() throws Exception { + String found = testFullContextIF_THEN_ELSEParse("{ if x then if y then return else foo else bar }"); + assertEquals("Decision 1:\ns0-'else'->:s1^=>1\n", found); + assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\nline 1:38 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:38 reportContextSensitivity d=1 (stat), input='else'\n", this.stderrDuringParse); + } + + @Test + public void testFullContextIF_THEN_ELSEParse_5() throws Exception { + String found = testFullContextIF_THEN_ELSEParse("{ if x then return else foo\nif x then if y then return else foo }"); + assertEquals("Decision 1:\ns0-'else'->:s1^=>1\ns0-'}'->:s2=>2\n", found); + assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:19 reportContextSensitivity d=1 (stat), input='else'\nline 2:27 reportAttemptingFullContext d=1 (stat), input='else'\nline 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse); + } + + @Test + public void testFullContextIF_THEN_ELSEParse_6() throws Exception { + String found = testFullContextIF_THEN_ELSEParse("{ if x then return else foo\nif x then if y then return else foo }"); + assertEquals("Decision 1:\ns0-'else'->:s1^=>1\ns0-'}'->:s2=>2\n", found); + assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:19 reportContextSensitivity d=1 (stat), input='else'\nline 2:27 reportAttemptingFullContext d=1 (stat), input='else'\nline 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse); + } + + @Test + public void testLoopsSimulateTailRecursion() throws Exception { + String grammar = "grammar T;\n" + + "prog\n" + + "@init {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;}\n" + + " : expr_or_assign*;\n" + + "expr_or_assign\n" + + " : expr '++' {System.out.println(\"fail.\");}\n" + + " | expr {System.out.println(\"pass: \"+$expr.text);}\n" + + " ;\n" + + "expr: expr_primary ('<-' ID)?;\n" + + "expr_primary\n" + + " : '(' ID ')'\n" + + " | ID '(' ID ')'\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", false); + assertEquals("pass: a(i)<-x\n", found); + assertEquals("line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\nline 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n", this.stderrDuringParse); + } + + @Test + public void 
testAmbiguityNoLoop() throws Exception { + String grammar = "grammar T;\n" + + "prog\n" + + "@init {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;}\n" + + " : expr expr {System.out.println(\"alt 1\");}\n" + + " | expr\n" + + " ;\n" + + "expr: '@'\n" + + " | ID '@'\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a@", false); + assertEquals("alt 1\n", found); + assertEquals("line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\nline 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\nline 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\nline 1:2 reportContextSensitivity d=1 (expr), input='a@'\n", this.stderrDuringParse); + } + + String testExprAmbiguity(String input) throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;}\n" + + ": expr[0] {System.out.println($expr.ctx.toStringTree(null, this););};\n" + + " expr[int _p]\n" + + " : ID \n" + + " ( \n" + + " {5 >= $_p}? '*' expr[6]\n" + + " | {4 >= $_p}? '+' expr[5]\n" + + " )*\n" + + " ;\n" + + "ID : [a-zA-Z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testExprAmbiguity_1() throws Exception { + String found = testExprAmbiguity("a+b"); + assertEquals("(expr a + (expr b))\n", found); + assertEquals("line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\nline 1:2 reportContextSensitivity d=1 (expr), input='+b'\n", this.stderrDuringParse); + } + + @Test + public void testExprAmbiguity_2() throws Exception { + String found = testExprAmbiguity("a+b*c"); + assertEquals("(expr a + (expr b * (expr c)))\n", found); + assertEquals("line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\nline 1:2 reportContextSensitivity d=1 (expr), input='+b'\nline 1:3 reportAttemptingFullContext d=1 (expr), input='*'\nline 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n", this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java new file mode 100644 index 000000000..7b2b4a756 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java @@ -0,0 +1,805 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestLeftRecursion extends BaseTest { + + String testSimple(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : a ;\n" + + "a : a ID\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testSimple_1() throws Exception { + String found = testSimple("x"); + assertEquals("(s (a x))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSimple_2() throws Exception { + String found = testSimple("x y"); + assertEquals("(s (a (a x) y))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSimple_3() throws Exception { + String found = testSimple("x y z"); + assertEquals("(s (a (a (a x) y) z))\n", found); + assertNull(this.stderrDuringParse); + } + + String 
testDirectCallToLeftRecursiveRule(String input) throws Exception { + String grammar = "grammar T;\n" + + "a @after {System.out.println($ctx.toStringTree(null, this););} : a ID\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testDirectCallToLeftRecursiveRule_1() throws Exception { + String found = testDirectCallToLeftRecursiveRule("x"); + assertEquals("(a x)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDirectCallToLeftRecursiveRule_2() throws Exception { + String found = testDirectCallToLeftRecursiveRule("x y"); + assertEquals("(a (a x) y)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDirectCallToLeftRecursiveRule_3() throws Exception { + String found = testDirectCallToLeftRecursiveRule("x y z"); + assertEquals("(a (a (a x) y) z)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSemPred() throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : a ;\n" + + "a : a {true}? ID\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x y z", false); + assertEquals("(s (a (a (a x) y) z))\n", found); + assertNull(this.stderrDuringParse); + } + + String testTernaryExpr(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : e EOF ; // must indicate EOF can follow or 'a' won't match\n" + + "e : e '*' e\n" + + " | e '+' e\n" + + " | e '?' e ':' e\n" + + " | e '=' e\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testTernaryExpr_1() throws Exception { + String found = testTernaryExpr("a"); + assertEquals("(s (e a) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_2() throws Exception { + String found = testTernaryExpr("a+b"); + assertEquals("(s (e (e a) + (e b)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_3() throws Exception { + String found = testTernaryExpr("a*b"); + assertEquals("(s (e (e a) * (e b)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_4() throws Exception { + String found = testTernaryExpr("a?b:c"); + assertEquals("(s (e (e a) ? (e b) : (e c)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_5() throws Exception { + String found = testTernaryExpr("a=b=c"); + assertEquals("(s (e (e a) = (e (e b) = (e c))) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_6() throws Exception { + String found = testTernaryExpr("a?b+c:d"); + assertEquals("(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_7() throws Exception { + String found = testTernaryExpr("a?b=c:d"); + assertEquals("(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_8() throws Exception { + String found = testTernaryExpr("a? b?c:d : e"); + assertEquals("(s (e (e a) ? (e (e b) ? 
(e c) : (e d)) : (e e)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExpr_9() throws Exception { + String found = testTernaryExpr("a?b: c?d:e"); + assertEquals("(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", found); + assertNull(this.stderrDuringParse); + } + + String testExpressions(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : e EOF ; // must indicate EOF can follow\n" + + "e : e '.' ID\n" + + " | e '.' 'this'\n" + + " | '-' e\n" + + " | e '*' e\n" + + " | e ('+'|'-') e\n" + + " | INT\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testExpressions_1() throws Exception { + String found = testExpressions("a"); + assertEquals("(s (e a) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExpressions_2() throws Exception { + String found = testExpressions("1"); + assertEquals("(s (e 1) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExpressions_3() throws Exception { + String found = testExpressions("a-1"); + assertEquals("(s (e (e a) - (e 1)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExpressions_4() throws Exception { + String found = testExpressions("a.b"); + assertEquals("(s (e (e a) . b) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExpressions_5() throws Exception { + String found = testExpressions("a.this"); + assertEquals("(s (e (e a) . this) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExpressions_6() throws Exception { + String found = testExpressions("-a"); + assertEquals("(s (e - (e a)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExpressions_7() throws Exception { + String found = testExpressions("-a+b"); + assertEquals("(s (e (e - (e a)) + (e b)) )", found); + assertNull(this.stderrDuringParse); + } + + String testJavaExpressions(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : e EOF ; // must indicate EOF can follow\n" + + "expressionList\n" + + " : e (',' e)*\n" + + " ;\n" + + "e : '(' e ')'\n" + + " | 'this' \n" + + " | 'super'\n" + + " | INT\n" + + " | ID\n" + + " | type '.' 'class'\n" + + " | e '.' ID\n" + + " | e '.' 'this'\n" + + " | e '.' 'super' '(' expressionList? ')'\n" + + " | e '.' 'new' ID '(' expressionList? ')'\n" + + " | 'new' type ( '(' expressionList? ')' | ('[' e ']')+)\n" + + " | e '[' e ']'\n" + + " | '(' type ')' e\n" + + " | e ('++' | '--')\n" + + " | e '(' expressionList? ')'\n" + + " | ('+'|'-'|'++'|'--') e\n" + + " | ('~'|'!') e\n" + + " | e ('*'|'/'|'%') e\n" + + " | e ('+'|'-') e\n" + + " | e ('<<' | '>>>' | '>>') e\n" + + " | e ('<=' | '>=' | '>' | '<') e\n" + + " | e 'instanceof' e\n" + + " | e ('==' | '!=') e\n" + + " | e '&' e\n" + + " | e '^' e\n" + + " | e '|' e\n" + + " | e '&&' e\n" + + " | e '||' e\n" + + " | e '?' 
e ':' e\n" + + " |\n" + + " e ('='\n" + + " |'+='\n" + + " |'-='\n" + + " |'*='\n" + + " |'/='\n" + + " |'&='\n" + + " |'|='\n" + + " |'^='\n" + + " |'>>='\n" + + " |'>>>='\n" + + " |'<<='\n" + + " |'%=') e\n" + + " ;\n" + + "type: ID \n" + + " | ID '[' ']'\n" + + " | 'int'\n" + + " | 'int' '[' ']' \n" + + " ;\n" + + "ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testJavaExpressions_1() throws Exception { + String found = testJavaExpressions("a|b&c"); + assertEquals("(s (e (e a) | (e (e b) & (e c))) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_2() throws Exception { + String found = testJavaExpressions("(a|b)&c"); + assertEquals("(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_3() throws Exception { + String found = testJavaExpressions("a > b"); + assertEquals("(s (e (e a) > (e b)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_4() throws Exception { + String found = testJavaExpressions("a >> b"); + assertEquals("(s (e (e a) >> (e b)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_5() throws Exception { + String found = testJavaExpressions("a=b=c"); + assertEquals("(s (e (e a) = (e (e b) = (e c))) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_6() throws Exception { + String found = testJavaExpressions("a^b^c"); + assertEquals("(s (e (e a) ^ (e (e b) ^ (e c))) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_7() throws Exception { + String found = testJavaExpressions("(T)x"); + assertEquals("(s (e ( (type T) ) (e x)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_8() throws Exception { + String found = testJavaExpressions("new A().b"); + assertEquals("(s (e (e new (type A) ( )) . b) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_9() throws Exception { + String found = testJavaExpressions("(T)t.f()"); + assertEquals("(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_10() throws Exception { + String found = testJavaExpressions("a.f(x)==T.c"); + assertEquals("(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_11() throws Exception { + String found = testJavaExpressions("a.f().g(x,1)"); + assertEquals("(s (e (e (e (e (e a) . f) ( )) . 
g) ( (expressionList (e x) , (e 1)) )) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testJavaExpressions_12() throws Exception { + String found = testJavaExpressions("new T[((n-1) * x) + 1]"); + assertEquals("(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )", found); + assertNull(this.stderrDuringParse); + } + + String testDeclarations(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : declarator EOF ; // must indicate EOF can follow\n" + + "declarator\n" + + " : declarator '[' e ']'\n" + + " | declarator '[' ']'\n" + + " | declarator '(' ')'\n" + + " | '*' declarator // binds less tight than suffixes\n" + + " | '(' declarator ')'\n" + + " | ID\n" + + " ;\n" + + "e : INT ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testDeclarations_1() throws Exception { + String found = testDeclarations("a"); + assertEquals("(s (declarator a) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_2() throws Exception { + String found = testDeclarations("*a"); + assertEquals("(s (declarator * (declarator a)) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_3() throws Exception { + String found = testDeclarations("**a"); + assertEquals("(s (declarator * (declarator * (declarator a))) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_4() throws Exception { + String found = testDeclarations("a[3]"); + assertEquals("(s (declarator (declarator a) [ (e 3) ]) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_5() throws Exception { + String found = testDeclarations("b[]"); + assertEquals("(s (declarator (declarator b) [ ]) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_6() throws Exception { + String found = testDeclarations("(a)"); + assertEquals("(s (declarator ( (declarator a) )) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_7() throws Exception { + String found = testDeclarations("a[]()"); + assertEquals("(s (declarator (declarator (declarator a) [ ]) ( )) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_8() throws Exception { + String found = testDeclarations("a[][]"); + assertEquals("(s (declarator (declarator (declarator a) [ ]) [ ]) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_9() throws Exception { + String found = testDeclarations("*a[]"); + assertEquals("(s (declarator * (declarator (declarator a) [ ])) )", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDeclarations_10() throws Exception { + String found = testDeclarations("(*a)[]"); + assertEquals("(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )", found); + assertNull(this.stderrDuringParse); + } + + String testReturnValueAndActions(String input) throws Exception { + String grammar = "grammar T;\n" + + "s : e {System.out.println($e.v);}; \n" + + "e returns [int v, list ignored]\n" + + " : a=e '*' b=e {$v = $a.v * $b.v;}\n" + + " | a=e '+' b=e {$v = $a.v + $b.v;}\n" + + " | INT {$v = $INT.int;}\n" + + " | '(' x=e ')' {$v = $x.v;}\n" + + " 
;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testReturnValueAndActions_1() throws Exception { + String found = testReturnValueAndActions("4"); + assertEquals("4", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActions_2() throws Exception { + String found = testReturnValueAndActions("1+2"); + assertEquals("3", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActions_3() throws Exception { + String found = testReturnValueAndActions("1+2*3"); + assertEquals("7", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActions_4() throws Exception { + String found = testReturnValueAndActions("(1+2)*3"); + assertEquals("9", found); + assertNull(this.stderrDuringParse); + } + + String testLabelsOnOpSubrule(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(null, this););} : e;\n" + + "e : a=e op=('*'|'/') b=e {}\n" + + " | INT {}\n" + + " | '(' x=e ')' {}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testLabelsOnOpSubrule_1() throws Exception { + String found = testLabelsOnOpSubrule("4"); + assertEquals("(s (e 4))", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLabelsOnOpSubrule_2() throws Exception { + String found = testLabelsOnOpSubrule("1*2/3"); + assertEquals("(s (e (e (e 1) * (e 2)) / (e 3)))", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLabelsOnOpSubrule_3() throws Exception { + String found = testLabelsOnOpSubrule("(1/2)*3"); + assertEquals("(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", found); + assertNull(this.stderrDuringParse); + } + + String testReturnValueAndActionsAndLabels(String input) throws Exception { + String grammar = "grammar T;\n" + + "s : q=e {System.out.println($e.v);}; \n" + + "e returns [int v]\n" + + " : a=e op='*' b=e {$v = $a.v * $b.v;} # mult\n" + + " | a=e '+' b=e {$v = $a.v + $b.v;} # add\n" + + " | INT {$v = $INT.int;} # anInt\n" + + " | '(' x=e ')' {$v = $x.v;} # parens\n" + + " | x=e '++' {$v = $x.v+1;} # inc\n" + + " | e '--' # dec\n" + + " | ID {$v = 3;} # anID\n" + + " ; \n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testReturnValueAndActionsAndLabels_1() throws Exception { + String found = testReturnValueAndActionsAndLabels("4"); + assertEquals("4", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsAndLabels_2() throws Exception { + String found = testReturnValueAndActionsAndLabels("1+2"); + assertEquals("3", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsAndLabels_3() throws Exception { + String found = testReturnValueAndActionsAndLabels("1+2*3"); + assertEquals("7", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsAndLabels_4() throws Exception { + String found = testReturnValueAndActionsAndLabels("i++*3"); + assertEquals("12", found); + assertNull(this.stderrDuringParse); + } + + String testMultipleAlternativesWithCommonLabel(String input) throws 
Exception { + String grammar = "grammar T;\n" + + "s : e {System.out.println($e.v);}; \n" + + "e returns [int v]\n" + + " : e '*' e {$v = $ctx.e(0).v * $ctx.e(1).v;} # binary\n" + + " | e '+' e {$v = $ctx.e(0).v + $ctx.e(1).v;} # binary\n" + + " | INT {$v = $INT.int;} # anInt\n" + + " | '(' e ')' {$v = $e.v;} # parens\n" + + " | left=e INC {console.assert($ctx.INC() !== null);;$v = $left.v + 1;} # unary\n" + + " | left=e DEC {console.assert($ctx.DEC() !== null);;$v = $left.v - 1;} # unary\n" + + " | ID {$v = 3} # anID\n" + + " ; \n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "INC : '++' ;\n" + + "DEC : '--' ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testMultipleAlternativesWithCommonLabel_1() throws Exception { + String found = testMultipleAlternativesWithCommonLabel("4"); + assertEquals("4", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleAlternativesWithCommonLabel_2() throws Exception { + String found = testMultipleAlternativesWithCommonLabel("1+2"); + assertEquals("3", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleAlternativesWithCommonLabel_3() throws Exception { + String found = testMultipleAlternativesWithCommonLabel("1+2*3"); + assertEquals("7", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleAlternativesWithCommonLabel_4() throws Exception { + String found = testMultipleAlternativesWithCommonLabel("i++*3"); + assertEquals("12", found); + assertNull(this.stderrDuringParse); + } + + String testPrefixOpWithActionAndLabel(String input) throws Exception { + String grammar = "grammar T;\n" + + "s : e {System.out.println($e.result);} ;\n" + + "e returns [String result]\n" + + " : ID '=' e1=e {$result = \\\"(\\\" + $ID.text + \\\"=\\\" + $e1.result + \\\")\\\";}\n" + + " | ID {$result = $ID.text;}\n" + + " | e1=e '+' e2=e {$result = \\\"(\\\" + $e1.result + \\\"+\\\" + $e2.result + \\\")\\\";}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testPrefixOpWithActionAndLabel_1() throws Exception { + String found = testPrefixOpWithActionAndLabel("a"); + assertEquals("a", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPrefixOpWithActionAndLabel_2() throws Exception { + String found = testPrefixOpWithActionAndLabel("a+b"); + assertEquals("(a+b)", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPrefixOpWithActionAndLabel_3() throws Exception { + String found = testPrefixOpWithActionAndLabel("a=b+c"); + assertEquals("((a=b)+c)", found); + assertNull(this.stderrDuringParse); + } + + String testAmbigLR(String input) throws Exception { + String grammar = "grammar Expr;\n" + + "prog: stat ;\n" + + "stat: expr NEWLINE # printExpr\n" + + " | ID '=' expr NEWLINE # assign\n" + + " | NEWLINE # blank\n" + + " ;\n" + + "expr: expr ('*'|'/') expr # MulDiv\n" + + " | expr ('+'|'-') expr # AddSub\n" + + " | INT # int\n" + + " | ID # id\n" + + " | '(' expr ')' # parens\n" + + " ;\n" + + "\n" + + "MUL : '*' ; // assigns token name to '*' used above in grammar\n" + + "DIV : '/' ;\n" + + "ADD : '+' ;\n" + + "SUB : '-' ;\n" + + "ID : [a-zA-Z]+ ; // match identifiers\n" + + "INT : [0-9]+ ; // match integers\n" + + "NEWLINE:'\\r'? 
'\\n' ; // return newlines to parser (is end-statement signal)\n" + + "WS : [ \\t]+ -> skip ; // toss out whitespace"; + return execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", input, false); + } + + @Test + public void testAmbigLR_1() throws Exception { + String found = testAmbigLR("1\n"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAmbigLR_2() throws Exception { + String found = testAmbigLR("a = 5\n"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAmbigLR_3() throws Exception { + String found = testAmbigLR("b = 6\n"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAmbigLR_4() throws Exception { + String found = testAmbigLR("a+b*2\n"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAmbigLR_5() throws Exception { + String found = testAmbigLR("(1+2)*3\n"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + String testWhitespaceInfluence(String input) throws Exception { + String grammar = "grammar Expr;\n" + + "prog : expression EOF;\n" + + "expression\n" + + " : ID '(' expression (',' expression)* ')' # doFunction\n" + + " | '(' expression ')' # doParenthesis\n" + + " | '!' expression # doNot\n" + + " | '-' expression # doNegate\n" + + " | '+' expression # doPositiv\n" + + " | expression '^' expression # doPower\n" + + " | expression '*' expression # doMultipy\n" + + " | expression '/' expression # doDivide\n" + + " | expression '%' expression # doModulo\n" + + " | expression '-' expression # doMinus\n" + + " | expression '+' expression # doPlus\n" + + " | expression '=' expression # doEqual\n" + + " | expression '!=' expression # doNotEqual\n" + + " | expression '>' expression # doGreather\n" + + " | expression '>=' expression # doGreatherEqual\n" + + " | expression '<' expression # doLesser\n" + + " | expression '<=' expression # doLesserEqual\n" + + " | expression K_IN '(' expression (',' expression)* ')' # doIn\n" + + " | expression ( '&' | K_AND) expression # doAnd\n" + + " | expression ( '|' | K_OR) expression # doOr\n" + + " | '[' expression (',' expression)* ']' # newArray\n" + + " | K_TRUE # newTrueBoolean\n" + + " | K_FALSE # newFalseBoolean\n" + + " | NUMBER # newNumber\n" + + " | DATE # newDateTime\n" + + " | ID # newIdentifier\n" + + " | SQ_STRING # newString\n" + + " | K_NULL # newNull\n" + + " ;\n" + + "\n" + + "// Fragments\n" + + "fragment DIGIT : '0' .. '9'; \n" + + "fragment UPPER : 'A' .. 'Z';\n" + + "fragment LOWER : 'a' .. 'z';\n" + + "fragment LETTER : LOWER | UPPER;\n" + + "fragment WORD : LETTER | '_' | '$' | '#' | '.';\n" + + "fragment ALPHANUM : WORD | DIGIT; \n" + + "\n" + + "// Tokens\n" + + "ID : LETTER ALPHANUM*;\n" + + "NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?;\n" + + "DATE : '\\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\\'';\n" + + "SQ_STRING : '\\'' ('\\'\\'' | ~'\\'')* '\\'';\n" + + "DQ_STRING : '\\\"' ('\\\\\"' | ~'\\\"')* '\\\"';\n" + + "WS : [ \\t\\n\\r]+ -> skip ;\n" + + "COMMENTS : ('/*' .*? 
'*/' | '//' ~'\\n'* '\\n' ) -> skip;\\n\";"; + return execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", input, false); + } + + @Test + public void testWhitespaceInfluence_1() throws Exception { + String found = testWhitespaceInfluence("Test(1,3)"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testWhitespaceInfluence_2() throws Exception { + String found = testWhitespaceInfluence("Test(1, 3)"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPrecedenceFilterConsidersContext() throws Exception { + String grammar = "grammar T;\n" + + "prog \n" + + "@after {System.out.println($ctx.toStringTree(null, this););}\n" + + ": statement* EOF {};\n" + + "statement: letterA | statement letterA 'b' ;\n" + + "letterA: 'a';"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "aa", false); + assertEquals("(prog (statement (letterA a)) (statement (letterA a)) )\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java new file mode 100644 index 000000000..b42587de7 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java @@ -0,0 +1,141 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestLexerErrors extends BaseTest { + + @Test + public void testInvalidCharAtStart() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'a' 'b' ;"; + String found = execLexer("L.g4", grammar, "L", "x"); + assertEquals("[@0,1:0='',<-1>,1:1]\n", found); + assertEquals("line 1:0 token recognition error at: 'x'\n", this.stderrDuringParse); + } + + @Test + public void testStringsEmbeddedInActions_1() throws Exception { + String grammar = "lexer grammar L;\n" + + "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" + + "STRING : '\"' ('\\\"' | .)*? '\"';\n" + + "WS : [ \\t\\r\\n]+ -> skip;"; + String found = execLexer("L.g4", grammar, "L", "[\"foo\"]"); + assertEquals("[@0,0:6='[\"foo\"]',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testStringsEmbeddedInActions_2() throws Exception { + String grammar = "lexer grammar L;\n" + + "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" + + "STRING : '\"' ('\\\"' | .)*? 
'\"';\n" + + "WS : [ \\t\\r\\n]+ -> skip;"; + String found = execLexer("L.g4", grammar, "L", "[\"foo]"); + assertEquals("[@0,6:5='',<-1>,1:6]\n", found); + assertEquals("line 1:0 token recognition error at: '[\"foo]'\n", this.stderrDuringParse); + } + + @Test + public void testEnforcedGreedyNestedBrances_1() throws Exception { + String grammar = "lexer grammar L;\n" + + "ACTION : '{' (ACTION | ~[{}])* '}';\n" + + "WS : [ \\r\\n\\t]+ -> skip;"; + String found = execLexer("L.g4", grammar, "L", "{ { } }"); + assertEquals("[@0,0:6='{ { } }',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testEnforcedGreedyNestedBrances_2() throws Exception { + String grammar = "lexer grammar L;\n" + + "ACTION : '{' (ACTION | ~[{}])* '}';\n" + + "WS : [ \\r\\n\\t]+ -> skip;"; + String found = execLexer("L.g4", grammar, "L", "{ { }"); + assertEquals("[@0,5:4='',<-1>,1:5]\n", found); + assertEquals("line 1:0 token recognition error at: '{ { }'\n", this.stderrDuringParse); + } + + @Test + public void testInvalidCharAtStartAfterDFACache() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'a' 'b' ;"; + String found = execLexer("L.g4", grammar, "L", "abx"); + assertEquals("[@0,0:1='ab',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", found); + assertEquals("line 1:2 token recognition error at: 'x'\n", this.stderrDuringParse); + } + + @Test + public void testInvalidCharInToken() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'a' 'b' ;"; + String found = execLexer("L.g4", grammar, "L", "ax"); + assertEquals("[@0,2:1='',<-1>,1:2]\n", found); + assertEquals("line 1:0 token recognition error at: 'ax'\n", this.stderrDuringParse); + } + + @Test + public void testInvalidCharInTokenAfterDFACache() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'a' 'b' ;"; + String found = execLexer("L.g4", grammar, "L", "abax"); + assertEquals("[@0,0:1='ab',<1>,1:0]\n" + + "[@1,4:3='',<-1>,1:4]\n", found); + assertEquals("line 1:2 token recognition error at: 'ax'\n", this.stderrDuringParse); + } + + @Test + public void testDFAToATNThatFailsBackToDFA() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'ab' ;\n" + + "B : 'abc' ;"; + String found = execLexer("L.g4", grammar, "L", "ababx"); + assertEquals("[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:3='ab',<1>,1:2]\n" + + "[@2,5:4='',<-1>,1:5]\n", found); + assertEquals("line 1:4 token recognition error at: 'x'\n", this.stderrDuringParse); + } + + @Test + public void testDFAToATNThatMatchesThenFailsInATN() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'ab' ;\n" + + "B : 'abc' ;\n" + + "C : 'abcd' ;"; + String found = execLexer("L.g4", grammar, "L", "ababcx"); + assertEquals("[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:4='abc',<2>,1:2]\n" + + "[@2,6:5='',<-1>,1:6]\n", found); + assertEquals("line 1:5 token recognition error at: 'x'\n", this.stderrDuringParse); + } + + @Test + public void testErrorInMiddle() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'abc' ;"; + String found = execLexer("L.g4", grammar, "L", "abx"); + assertEquals("[@0,3:2='',<-1>,1:3]\n", found); + assertEquals("line 1:0 token recognition error at: 'abx'\n", this.stderrDuringParse); + } + + @Test + public void testLexerExecDFA() throws Exception { + String grammar = "lexer grammar L;\n" + + "start : ID ':' expr;\n" + + "expr : primary expr? 
{} | expr '->' ID;\n" + + "primary : ID;\n" + + "ID : [a-z]+;\n" + + ";"; + String found = execLexer("L.g4", grammar, "L", "x : x"); + assertEquals("[@0,0:0='x',<3>,1:0]\n" + + "[@1,2:2=':',<2>,1:2]\n" + + "[@2,4:4='x',<3>,1:4]\n" + + "[@3,5:4='',<-1>,1:5]\n", found); + assertEquals("line 1:1 token recognition error at: ' '\nline 1:3 token recognition error at: ' '\n", this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java new file mode 100644 index 000000000..de92cced3 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java @@ -0,0 +1,4627 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestLexerExec extends BaseTest { + + @Test + public void testQuoteTranslation() throws Exception { + String grammar = "lexer grammar L;\n" + + "QUOTE : '\"' ; // make sure this compiles"; + String found = execLexer("L.g4", grammar, "L", "\""); + assertEquals("[@0,0:0='\"',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : '-' I ;\n" + + "I : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 -21 3"); + assertEquals("[@0,0:1='34',<2>,1:0]\n" + + "[@1,3:5='-21',<1>,1:3]\n" + + "[@2,7:7='3',<2>,1:7]\n" + + "[@3,8:7='',<-1>,1:8]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSlashes() throws Exception { + String grammar = "lexer grammar L;\n" + + "Backslash : '\\\\';\n" + + "Slash : '/';\n" + + "Vee : '\\\\/';\n" + + "Wedge : '/\\\\';\n" + + "WS : [ \\t] -> skip;"; + String found = execLexer("L.g4", grammar, "L", "\\ / \\/ /\\"); + assertEquals("[@0,0:0='\\',<1>,1:0]\n" + + "[@1,2:2='/',<2>,1:2]\n" + + "[@2,4:5='\\/',<3>,1:4]\n" + + "[@3,7:8='/\\',<4>,1:7]\n" + + "[@4,9:8='',<-1>,1:9]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testParentheses() throws Exception { + String grammar = "lexer grammar L;\n" + + "START_BLOCK: '-.-.-';\n" + + "ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;\n" + + "fragment LETTER: L_A|L_K;\n" + + "fragment L_A: '.-';\n" + + "fragment L_K: '-.-';\n" + + "SEPARATOR: '!';"; + String found = execLexer("L.g4", grammar, "L", "-.-.-!"); + assertEquals("[@0,0:4='-.-.-',<1>,1:0]\n" + + "[@1,5:5='!',<3>,1:5]\n" + + "[@2,6:5='',<-1>,1:6]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNonGreedyTermination1() throws Exception { + String grammar = "lexer grammar L;\n" + + "STRING : '\"' ('\"\"' | .)*? '\"';"; + String found = execLexer("L.g4", grammar, "L", "\"hi\"\"mom\""); + assertEquals("[@0,0:3='\"hi\"',<1>,1:0]\n" + + "[@1,4:8='\"mom\"',<1>,1:4]\n" + + "[@2,9:8='',<-1>,1:9]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNonGreedyTermination2() throws Exception { + String grammar = "lexer grammar L;\n" + + "STRING : '\"' ('\"\"' | .)+? '\"';"; + String found = execLexer("L.g4", grammar, "L", "\"\"\"mom\""); + assertEquals("[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testGreedyOptional() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '//' .*? 
'\\n' CMT?;\n" + + "WS : (' '|'\\t')+;"; + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals("[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNonGreedyOptional() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT??;\n" + + "WS : (' '|'\\t')+;"; + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals("[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testGreedyClosure() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT*;\n" + + "WS : (' '|'\\t')+;"; + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals("[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNonGreedyClosure() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT*?;\n" + + "WS : (' '|'\\t')+;"; + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals("[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testGreedyPositiveClosure() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : ('//' .*? '\\n')+;\n" + + "WS : (' '|'\\t')+;"; + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals("[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNonGreedyPositiveClosure() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : ('//' .*? '\\n')+?;\n" + + "WS : (' '|'\\t')+;"; + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals("[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardStar_1() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '/*' (CMT | .)*? '*/' ;\n" + + "WS : (' '|'\\n')+;"; + String found = execLexer("L.g4", grammar, "L", "/* ick */\n/* /* */\n/* /*nested*/ */\n"); + assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,9:9='\\n',<2>,1:9]\n" + + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,35:35='\\n',<2>,3:16]\n" + + "[@4,36:35='',<-1>,4:17]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardStar_2() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '/*' (CMT | .)*? 
'*/' ;\n" + + "WS : (' '|'\\n')+;"; + String found = execLexer("L.g4", grammar, "L", "/* ick */x\n/* /* */x\n/* /*nested*/ */x\n"); + assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,10:10='\\n',<2>,1:10]\n" + + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,38:38='\\n',<2>,3:17]\n" + + "[@4,39:38='',<-1>,4:18]\n", found); + assertEquals("line 1:9 token recognition error at: 'x'\nline 3:16 token recognition error at: 'x'\n", this.stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardPlus_1() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '/*' (CMT | .)+? '*/' ;\n" + + "WS : (' '|'\\n')+;"; + String found = execLexer("L.g4", grammar, "L", "/* ick */\n/* /* */\n/* /*nested*/ */\n"); + assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,9:9='\\n',<2>,1:9]\n" + + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,35:35='\\n',<2>,3:16]\n" + + "[@4,36:35='',<-1>,4:17]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardPlus_2() throws Exception { + String grammar = "lexer grammar L;\n" + + "CMT : '/*' (CMT | .)+? '*/' ;\n" + + "WS : (' '|'\\n')+;"; + String found = execLexer("L.g4", grammar, "L", "/* ick */x\n/* /* */x\n/* /*nested*/ */x\n"); + assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,10:10='\\n',<2>,1:10]\n" + + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,38:38='\\n',<2>,3:17]\n" + + "[@4,39:38='',<-1>,4:18]\n", found); + assertEquals("line 1:9 token recognition error at: 'x'\nline 3:16 token recognition error at: 'x'\n", this.stderrDuringParse); + } + + @Test + public void testActionPlacement() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : ({System.out.println(\"stuff fail: \" + this.getText());} 'a'\n" + + "| {System.out.println(\"stuff0: \" + this.getText());}\n" + + " 'a' {System.out.println(\"stuff1: \" + this.getText());}\n" + + " 'b' {System.out.println(\"stuff2: \" + this.getText());})\n" + + " {System.out.println(this.getText());} ;\n" + + "WS : (' '|'\\n') -> skip ;\n" + + "J : .;"; + String found = execLexer("L.g4", grammar, "L", "ab"); + assertEquals("stuff0: \n" + + "stuff1: a\n" + + "stuff2: ab\n" + + "ab\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testGreedyConfigs() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : ('a' | 'ab') {System.out.println(this.getText());} ;\n" + + "WS : (' '|'\\n') -> skip ;\n" + + "J : .;"; + String found = execLexer("L.g4", grammar, "L", "ab"); + assertEquals("ab\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNonGreedyConfigs() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : .*? ('a' | 'ab') {System.out.println(this.getText());} ;\n" + + "WS : (' '|'\\n') -> skip ;\n" + + "J : . 
{System.out.println(this.getText());};"; + String found = execLexer("L.g4", grammar, "L", "ab"); + assertEquals("a\n" + + "b\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,1:1='b',<3>,1:1]\n" + + "[@2,2:1='',<-1>,1:2]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testKeywordID() throws Exception { + String grammar = "lexer grammar L;\n" + + "KEND : 'end' ; // has priority\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n')+;"; + String found = execLexer("L.g4", grammar, "L", "end eend ending a"); + assertEquals("[@0,0:2='end',<1>,1:0]\n" + + "[@1,3:3=' ',<3>,1:3]\n" + + "[@2,4:7='eend',<2>,1:4]\n" + + "[@3,8:8=' ',<3>,1:8]\n" + + "[@4,9:14='ending',<2>,1:9]\n" + + "[@5,15:15=' ',<3>,1:15]\n" + + "[@6,16:16='a',<2>,1:16]\n" + + "[@7,17:16='',<-1>,1:17]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testHexVsID() throws Exception { + String grammar = "lexer grammar L;\n" + + "HexLiteral : '0' ('x'|'X') HexDigit+ ;\n" + + "DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;\n" + + "FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;\n" + + "DOT : '.' ;\n" + + "ID : 'a'..'z'+ ;\n" + + "fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;\n" + + "WS : (' '|'\\n')+;"; + String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l"); + assertEquals("[@0,0:0='x',<5>,1:0]\n" + + "[@1,1:1=' ',<6>,1:1]\n" + + "[@2,2:2='0',<2>,1:2]\n" + + "[@3,3:3=' ',<6>,1:3]\n" + + "[@4,4:4='1',<2>,1:4]\n" + + "[@5,5:5=' ',<6>,1:5]\n" + + "[@6,6:6='a',<5>,1:6]\n" + + "[@7,7:7='.',<4>,1:7]\n" + + "[@8,8:8='b',<5>,1:8]\n" + + "[@9,9:9=' ',<6>,1:9]\n" + + "[@10,10:10='a',<5>,1:10]\n" + + "[@11,11:11='.',<4>,1:11]\n" + + "[@12,12:12='l',<5>,1:12]\n" + + "[@13,13:12='',<-1>,1:13]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testEOFByItself() throws Exception { + String grammar = "lexer grammar L;\n" + + "DONE : EOF ;\n" + + "A : 'a';"; + String found = execLexer("L.g4", grammar, "L", ""); + assertEquals("[@0,0:-1='',<1>,1:0]\n" + + "[@1,0:-1='',<-1>,1:0]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testEOFSuffixInFirstRule_1() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'a' EOF ;\n" + + "B : 'a';\n" + + "C : 'c';"; + String found = execLexer("L.g4", grammar, "L", ""); + assertEquals("[@0,0:-1='',<-1>,1:0]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testEOFSuffixInFirstRule_2() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : 'a' EOF ;\n" + + "B : 'a';\n" + + "C : 'c';"; + String found = execLexer("L.g4", grammar, "L", "a"); + assertEquals("[@0,0:0='a',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSet() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n" + + "WS : [ \\n\\u000D] -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); + assertEquals("I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,5:6='34',<1>,2:1]\n" + + "[@2,7:6='',<-1>,2:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetPlus() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n" + + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); + assertEquals("I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,5:6='34',<1>,2:1]\n" + + 
"[@2,7:6='',<-1>,2:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetNot() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : ~[ab \\n] ~[ \\ncd]* {System.out.println(\"I\");} ;\n" + + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "xaf"); + assertEquals("I\n" + + "[@0,0:2='xaf',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetInSet() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : (~[ab \\n]|'a') {System.out.println(\"I\");} ;\n" + + "WS : [ \\n\\u000D]+ -> skip ;\n" + + " "; + String found = execLexer("L.g4", grammar, "L", "a x"); + assertEquals("I\n" + + "I\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,2:2='x',<1>,1:2]\n" + + "[@2,3:2='',<-1>,1:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetRange() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : [0-9]+ {System.out.println(\"I\");} ;\n" + + "ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n" + + "WS : [ \\n\\u0009\\r]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n "); + assertEquals("I\n" + + "I\n" + + "ID\n" + + "ID\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,4:5='34',<1>,1:4]\n" + + "[@2,7:8='a2',<2>,1:7]\n" + + "[@3,10:12='abc',<2>,1:10]\n" + + "[@4,18:17='',<-1>,2:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetWithMissingEndRange() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : [0-]+ {System.out.println(\"I\");} ;\n" + + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "00\r\n"); + assertEquals("I\n" + + "[@0,0:1='00',<1>,1:0]\n" + + "[@1,4:3='',<-1>,2:0]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetWithMissingEscapeChar() throws Exception { + String grammar = "lexer grammar L;\n" + + "I : [0-9]+ {System.out.println(\"I\");} ;\n" + + "WS : [ \\u]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 "); + assertEquals("I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetWithEscapedChar() throws Exception { + String grammar = "lexer grammar L;\n" + + "DASHBRACK : [\\-\\]]+ {System.out.println(\"DASHBRACK\");} ;\n" + + "WS : [ \\u]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "- ] "); + assertEquals("DASHBRACK\n" + + "DASHBRACK\n" + + "[@0,0:0='-',<1>,1:0]\n" + + "[@1,2:2=']',<1>,1:2]\n" + + "[@2,4:3='',<-1>,1:4]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetWithReversedRange() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : [z-a9]+ {System.out.println(\"A\");} ;\n" + + "WS : [ \\u]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "9"); + assertEquals("A\n" + + "[@0,0:0='9',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetWithQuote1() throws Exception { + String grammar = "lexer grammar L;\n" + + "A : [\"a-z]+ {System.out.println(\"A\");} ;\n" + + "WS : [ \\n\\t]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "b\"a"); + assertEquals("A\n" + + "[@0,0:2='b\"a',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetWithQuote2() throws Exception { + 
String grammar = "lexer grammar L;\n" + + "A : [\"\\ab]+ {System.out.println(\"A\");} ;\n" + + "WS : [ \\n\\t]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "b\"\\a"); + assertEquals("A\n" + + "[@0,0:3='b\"\\a',<1>,1:0]\n" + + "[@1,4:3='',<-1>,1:4]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPositionAdjustingLexer() throws Exception { + String grammar = "lexer grammar PositionAdjustingLexer;\n" + + "\n" + + "@members {\n" + + "@Override\n" + + "public Token nextToken() {\n" + + " if (!(_interp instanceof PositionAdjustingLexerATNSimulator)) {\n" + + " _interp = new PositionAdjustingLexerATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache);\n" + + " }\n" + + "\n" + + " return super.nextToken();\n" + + "}\n" + + "\n" + + "@Override\n" + + "public Token emit() {\n" + + " switch (_type) {\n" + + " case TOKENS:\n" + + " handleAcceptPositionForKeyword(\"tokens\");\n" + + " break;\n" + + "\n" + + " case LABEL:\n" + + " handleAcceptPositionForIdentifier();\n" + + " break;\n" + + "\n" + + " default:\n" + + " break;\n" + + " }\n" + + "\n" + + " return super.emit();\n" + + "}\n" + + "\n" + + "private boolean handleAcceptPositionForIdentifier() {\n" + + " String tokenText = getText();\n" + + " int identifierLength = 0;\n" + + " while (identifierLength < tokenText.length() && isIdentifierChar(tokenText.charAt(identifierLength))) {\n" + + " identifierLength++;\n" + + " }\n" + + "\n" + + " if (getInputStream().index() > _tokenStartCharIndex + identifierLength) {\n" + + " int offset = identifierLength - 1;\n" + + " getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset);\n" + + " return true;\n" + + " }\n" + + "\n" + + " return false;\n" + + "}\n" + + "\n" + + "private boolean handleAcceptPositionForKeyword(String keyword) {\n" + + " if (getInputStream().index() > _tokenStartCharIndex + keyword.length()) {\n" + + " int offset = keyword.length() - 1;\n" + + " getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset);\n" + + " return true;\n" + + " }\n" + + "\n" + + " return false;\n" + + "}\n" + + "\n" + + "@Override\n" + + "public PositionAdjustingLexerATNSimulator getInterpreter() {\n" + + " return (PositionAdjustingLexerATNSimulator)super.getInterpreter();\n" + + "}\n" + + "\n" + + "private static boolean isIdentifierChar(char c) {\n" + + " return Character.isLetterOrDigit(c) || c == '_';\n" + + "}\n" + + "\n" + + "protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimulator {\n" + + "\n" + + " public PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn,\n" + + " DFA[] decisionToDFA,\n" + + " PredictionContextCache sharedContextCache)\n" + + " {\n" + + " super(recog, atn, decisionToDFA, sharedContextCache);\n" + + " }\n" + + "\n" + + " protected void resetAcceptPosition(CharStream input, int index, int line, int charPositionInLine) {\n" + + " input.seek(index);\n" + + " this.line = line;\n" + + " this.charPositionInLine = charPositionInLine;\n" + + " consume(input);\n" + + " }\n" + + "\n" + + "}\n" + + "\n" + + "}\n" + + "\n" + + "ASSIGN : '=' ;\n" + + "PLUS_ASSIGN : '+=' ;\n" + + "LCURLY: '{';\n" + + "\n" + + "// 'tokens' followed by '{'\n" + + "TOKENS : 'tokens' IGNORED '{';\n" + + "\n" + + "// IDENTIFIER followed by '+=' or '='\n" + + "LABEL\n" + + " : IDENTIFIER IGNORED '+'? 
'='\n" + + " ;\n" + + "\n" + + "IDENTIFIER\n" + + " : [a-zA-Z_] [a-zA-Z0-9_]*\n" + + " ;\n" + + "\n" + + "fragment\n" + + "IGNORED\n" + + " : [ \\t\\r\\n]*\n" + + " ;\n" + + "\n" + + "NEWLINE\n" + + " : [\\r\\n]+ -> skip\n" + + " ;\n" + + "\n" + + "WS\n" + + " : [ \\t]+ -> skip\n" + + " ;"; + String found = execLexer("PositionAdjustingLexer.g4", grammar, "PositionAdjustingLexer", "tokens\ntokens {\nnotLabel\nlabel1 =\nlabel2 +=\nnotLabel\n"); + assertEquals("[@0,0:5='tokens',<6>,1:0]\n" + + "[@1,7:12='tokens',<4>,2:0]\n" + + "[@2,14:14='{',<3>,2:7]\n" + + "[@3,16:23='notLabel',<6>,3:0]\n" + + "[@4,25:30='label1',<5>,4:0]\n" + + "[@5,32:32='=',<1>,4:7]\n" + + "[@6,34:39='label2',<5>,5:0]\n" + + "[@7,41:42='+=',<2>,5:7]\n" + + "[@8,44:51='notLabel',<6>,6:0]\n" + + "[@9,53:52='',<-1>,7:0]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLargeLexer() throws Exception { + String grammar = "lexer grammar L;\n" + + "WS : [ \\t\\r\\n]+ -> skip;\n" + + "KW0 : 'KW' '0';\n" + + "KW1 : 'KW' '1';\n" + + "KW2 : 'KW' '2';\n" + + "KW3 : 'KW' '3';\n" + + "KW4 : 'KW' '4';\n" + + "KW5 : 'KW' '5';\n" + + "KW6 : 'KW' '6';\n" + + "KW7 : 'KW' '7';\n" + + "KW8 : 'KW' '8';\n" + + "KW9 : 'KW' '9';\n" + + "KW10 : 'KW' '10';\n" + + "KW11 : 'KW' '11';\n" + + "KW12 : 'KW' '12';\n" + + "KW13 : 'KW' '13';\n" + + "KW14 : 'KW' '14';\n" + + "KW15 : 'KW' '15';\n" + + "KW16 : 'KW' '16';\n" + + "KW17 : 'KW' '17';\n" + + "KW18 : 'KW' '18';\n" + + "KW19 : 'KW' '19';\n" + + "KW20 : 'KW' '20';\n" + + "KW21 : 'KW' '21';\n" + + "KW22 : 'KW' '22';\n" + + "KW23 : 'KW' '23';\n" + + "KW24 : 'KW' '24';\n" + + "KW25 : 'KW' '25';\n" + + "KW26 : 'KW' '26';\n" + + "KW27 : 'KW' '27';\n" + + "KW28 : 'KW' '28';\n" + + "KW29 : 'KW' '29';\n" + + "KW30 : 'KW' '30';\n" + + "KW31 : 'KW' '31';\n" + + "KW32 : 'KW' '32';\n" + + "KW33 : 'KW' '33';\n" + + "KW34 : 'KW' '34';\n" + + "KW35 : 'KW' '35';\n" + + "KW36 : 'KW' '36';\n" + + "KW37 : 'KW' '37';\n" + + "KW38 : 'KW' '38';\n" + + "KW39 : 'KW' '39';\n" + + "KW40 : 'KW' '40';\n" + + "KW41 : 'KW' '41';\n" + + "KW42 : 'KW' '42';\n" + + "KW43 : 'KW' '43';\n" + + "KW44 : 'KW' '44';\n" + + "KW45 : 'KW' '45';\n" + + "KW46 : 'KW' '46';\n" + + "KW47 : 'KW' '47';\n" + + "KW48 : 'KW' '48';\n" + + "KW49 : 'KW' '49';\n" + + "KW50 : 'KW' '50';\n" + + "KW51 : 'KW' '51';\n" + + "KW52 : 'KW' '52';\n" + + "KW53 : 'KW' '53';\n" + + "KW54 : 'KW' '54';\n" + + "KW55 : 'KW' '55';\n" + + "KW56 : 'KW' '56';\n" + + "KW57 : 'KW' '57';\n" + + "KW58 : 'KW' '58';\n" + + "KW59 : 'KW' '59';\n" + + "KW60 : 'KW' '60';\n" + + "KW61 : 'KW' '61';\n" + + "KW62 : 'KW' '62';\n" + + "KW63 : 'KW' '63';\n" + + "KW64 : 'KW' '64';\n" + + "KW65 : 'KW' '65';\n" + + "KW66 : 'KW' '66';\n" + + "KW67 : 'KW' '67';\n" + + "KW68 : 'KW' '68';\n" + + "KW69 : 'KW' '69';\n" + + "KW70 : 'KW' '70';\n" + + "KW71 : 'KW' '71';\n" + + "KW72 : 'KW' '72';\n" + + "KW73 : 'KW' '73';\n" + + "KW74 : 'KW' '74';\n" + + "KW75 : 'KW' '75';\n" + + "KW76 : 'KW' '76';\n" + + "KW77 : 'KW' '77';\n" + + "KW78 : 'KW' '78';\n" + + "KW79 : 'KW' '79';\n" + + "KW80 : 'KW' '80';\n" + + "KW81 : 'KW' '81';\n" + + "KW82 : 'KW' '82';\n" + + "KW83 : 'KW' '83';\n" + + "KW84 : 'KW' '84';\n" + + "KW85 : 'KW' '85';\n" + + "KW86 : 'KW' '86';\n" + + "KW87 : 'KW' '87';\n" + + "KW88 : 'KW' '88';\n" + + "KW89 : 'KW' '89';\n" + + "KW90 : 'KW' '90';\n" + + "KW91 : 'KW' '91';\n" + + "KW92 : 'KW' '92';\n" + + "KW93 : 'KW' '93';\n" + + "KW94 : 'KW' '94';\n" + + "KW95 : 'KW' '95';\n" + + "KW96 : 'KW' '96';\n" + + "KW97 : 'KW' '97';\n" + + "KW98 : 'KW' '98';\n" + + "KW99 : 
'KW' '99';\n" + + "KW100 : 'KW' '100';\n" + + "KW101 : 'KW' '101';\n" + + "KW102 : 'KW' '102';\n" + + "KW103 : 'KW' '103';\n" + + "KW104 : 'KW' '104';\n" + + "KW105 : 'KW' '105';\n" + + "KW106 : 'KW' '106';\n" + + "KW107 : 'KW' '107';\n" + + "KW108 : 'KW' '108';\n" + + "KW109 : 'KW' '109';\n" + + "KW110 : 'KW' '110';\n" + + "KW111 : 'KW' '111';\n" + + "KW112 : 'KW' '112';\n" + + "KW113 : 'KW' '113';\n" + + "KW114 : 'KW' '114';\n" + + "KW115 : 'KW' '115';\n" + + "KW116 : 'KW' '116';\n" + + "KW117 : 'KW' '117';\n" + + "KW118 : 'KW' '118';\n" + + "KW119 : 'KW' '119';\n" + + "KW120 : 'KW' '120';\n" + + "KW121 : 'KW' '121';\n" + + "KW122 : 'KW' '122';\n" + + "KW123 : 'KW' '123';\n" + + "KW124 : 'KW' '124';\n" + + "KW125 : 'KW' '125';\n" + + "KW126 : 'KW' '126';\n" + + "KW127 : 'KW' '127';\n" + + "KW128 : 'KW' '128';\n" + + "KW129 : 'KW' '129';\n" + + "KW130 : 'KW' '130';\n" + + "KW131 : 'KW' '131';\n" + + "KW132 : 'KW' '132';\n" + + "KW133 : 'KW' '133';\n" + + "KW134 : 'KW' '134';\n" + + "KW135 : 'KW' '135';\n" + + "KW136 : 'KW' '136';\n" + + "KW137 : 'KW' '137';\n" + + "KW138 : 'KW' '138';\n" + + "KW139 : 'KW' '139';\n" + + "KW140 : 'KW' '140';\n" + + "KW141 : 'KW' '141';\n" + + "KW142 : 'KW' '142';\n" + + "KW143 : 'KW' '143';\n" + + "KW144 : 'KW' '144';\n" + + "KW145 : 'KW' '145';\n" + + "KW146 : 'KW' '146';\n" + + "KW147 : 'KW' '147';\n" + + "KW148 : 'KW' '148';\n" + + "KW149 : 'KW' '149';\n" + + "KW150 : 'KW' '150';\n" + + "KW151 : 'KW' '151';\n" + + "KW152 : 'KW' '152';\n" + + "KW153 : 'KW' '153';\n" + + "KW154 : 'KW' '154';\n" + + "KW155 : 'KW' '155';\n" + + "KW156 : 'KW' '156';\n" + + "KW157 : 'KW' '157';\n" + + "KW158 : 'KW' '158';\n" + + "KW159 : 'KW' '159';\n" + + "KW160 : 'KW' '160';\n" + + "KW161 : 'KW' '161';\n" + + "KW162 : 'KW' '162';\n" + + "KW163 : 'KW' '163';\n" + + "KW164 : 'KW' '164';\n" + + "KW165 : 'KW' '165';\n" + + "KW166 : 'KW' '166';\n" + + "KW167 : 'KW' '167';\n" + + "KW168 : 'KW' '168';\n" + + "KW169 : 'KW' '169';\n" + + "KW170 : 'KW' '170';\n" + + "KW171 : 'KW' '171';\n" + + "KW172 : 'KW' '172';\n" + + "KW173 : 'KW' '173';\n" + + "KW174 : 'KW' '174';\n" + + "KW175 : 'KW' '175';\n" + + "KW176 : 'KW' '176';\n" + + "KW177 : 'KW' '177';\n" + + "KW178 : 'KW' '178';\n" + + "KW179 : 'KW' '179';\n" + + "KW180 : 'KW' '180';\n" + + "KW181 : 'KW' '181';\n" + + "KW182 : 'KW' '182';\n" + + "KW183 : 'KW' '183';\n" + + "KW184 : 'KW' '184';\n" + + "KW185 : 'KW' '185';\n" + + "KW186 : 'KW' '186';\n" + + "KW187 : 'KW' '187';\n" + + "KW188 : 'KW' '188';\n" + + "KW189 : 'KW' '189';\n" + + "KW190 : 'KW' '190';\n" + + "KW191 : 'KW' '191';\n" + + "KW192 : 'KW' '192';\n" + + "KW193 : 'KW' '193';\n" + + "KW194 : 'KW' '194';\n" + + "KW195 : 'KW' '195';\n" + + "KW196 : 'KW' '196';\n" + + "KW197 : 'KW' '197';\n" + + "KW198 : 'KW' '198';\n" + + "KW199 : 'KW' '199';\n" + + "KW200 : 'KW' '200';\n" + + "KW201 : 'KW' '201';\n" + + "KW202 : 'KW' '202';\n" + + "KW203 : 'KW' '203';\n" + + "KW204 : 'KW' '204';\n" + + "KW205 : 'KW' '205';\n" + + "KW206 : 'KW' '206';\n" + + "KW207 : 'KW' '207';\n" + + "KW208 : 'KW' '208';\n" + + "KW209 : 'KW' '209';\n" + + "KW210 : 'KW' '210';\n" + + "KW211 : 'KW' '211';\n" + + "KW212 : 'KW' '212';\n" + + "KW213 : 'KW' '213';\n" + + "KW214 : 'KW' '214';\n" + + "KW215 : 'KW' '215';\n" + + "KW216 : 'KW' '216';\n" + + "KW217 : 'KW' '217';\n" + + "KW218 : 'KW' '218';\n" + + "KW219 : 'KW' '219';\n" + + "KW220 : 'KW' '220';\n" + + "KW221 : 'KW' '221';\n" + + "KW222 : 'KW' '222';\n" + + "KW223 : 'KW' '223';\n" + + "KW224 : 'KW' '224';\n" + + "KW225 : 'KW' '225';\n" + + "KW226 : 
'KW' '226';\n" + + "KW227 : 'KW' '227';\n" + + "KW228 : 'KW' '228';\n" + + "KW229 : 'KW' '229';\n" + + "KW230 : 'KW' '230';\n" + + "KW231 : 'KW' '231';\n" + + "KW232 : 'KW' '232';\n" + + "KW233 : 'KW' '233';\n" + + "KW234 : 'KW' '234';\n" + + "KW235 : 'KW' '235';\n" + + "KW236 : 'KW' '236';\n" + + "KW237 : 'KW' '237';\n" + + "KW238 : 'KW' '238';\n" + + "KW239 : 'KW' '239';\n" + + "KW240 : 'KW' '240';\n" + + "KW241 : 'KW' '241';\n" + + "KW242 : 'KW' '242';\n" + + "KW243 : 'KW' '243';\n" + + "KW244 : 'KW' '244';\n" + + "KW245 : 'KW' '245';\n" + + "KW246 : 'KW' '246';\n" + + "KW247 : 'KW' '247';\n" + + "KW248 : 'KW' '248';\n" + + "KW249 : 'KW' '249';\n" + + "KW250 : 'KW' '250';\n" + + "KW251 : 'KW' '251';\n" + + "KW252 : 'KW' '252';\n" + + "KW253 : 'KW' '253';\n" + + "KW254 : 'KW' '254';\n" + + "KW255 : 'KW' '255';\n" + + "KW256 : 'KW' '256';\n" + + "KW257 : 'KW' '257';\n" + + "KW258 : 'KW' '258';\n" + + "KW259 : 'KW' '259';\n" + + "KW260 : 'KW' '260';\n" + + "KW261 : 'KW' '261';\n" + + "KW262 : 'KW' '262';\n" + + "KW263 : 'KW' '263';\n" + + "KW264 : 'KW' '264';\n" + + "KW265 : 'KW' '265';\n" + + "KW266 : 'KW' '266';\n" + + "KW267 : 'KW' '267';\n" + + "KW268 : 'KW' '268';\n" + + "KW269 : 'KW' '269';\n" + + "KW270 : 'KW' '270';\n" + + "KW271 : 'KW' '271';\n" + + "KW272 : 'KW' '272';\n" + + "KW273 : 'KW' '273';\n" + + "KW274 : 'KW' '274';\n" + + "KW275 : 'KW' '275';\n" + + "KW276 : 'KW' '276';\n" + + "KW277 : 'KW' '277';\n" + + "KW278 : 'KW' '278';\n" + + "KW279 : 'KW' '279';\n" + + "KW280 : 'KW' '280';\n" + + "KW281 : 'KW' '281';\n" + + "KW282 : 'KW' '282';\n" + + "KW283 : 'KW' '283';\n" + + "KW284 : 'KW' '284';\n" + + "KW285 : 'KW' '285';\n" + + "KW286 : 'KW' '286';\n" + + "KW287 : 'KW' '287';\n" + + "KW288 : 'KW' '288';\n" + + "KW289 : 'KW' '289';\n" + + "KW290 : 'KW' '290';\n" + + "KW291 : 'KW' '291';\n" + + "KW292 : 'KW' '292';\n" + + "KW293 : 'KW' '293';\n" + + "KW294 : 'KW' '294';\n" + + "KW295 : 'KW' '295';\n" + + "KW296 : 'KW' '296';\n" + + "KW297 : 'KW' '297';\n" + + "KW298 : 'KW' '298';\n" + + "KW299 : 'KW' '299';\n" + + "KW300 : 'KW' '300';\n" + + "KW301 : 'KW' '301';\n" + + "KW302 : 'KW' '302';\n" + + "KW303 : 'KW' '303';\n" + + "KW304 : 'KW' '304';\n" + + "KW305 : 'KW' '305';\n" + + "KW306 : 'KW' '306';\n" + + "KW307 : 'KW' '307';\n" + + "KW308 : 'KW' '308';\n" + + "KW309 : 'KW' '309';\n" + + "KW310 : 'KW' '310';\n" + + "KW311 : 'KW' '311';\n" + + "KW312 : 'KW' '312';\n" + + "KW313 : 'KW' '313';\n" + + "KW314 : 'KW' '314';\n" + + "KW315 : 'KW' '315';\n" + + "KW316 : 'KW' '316';\n" + + "KW317 : 'KW' '317';\n" + + "KW318 : 'KW' '318';\n" + + "KW319 : 'KW' '319';\n" + + "KW320 : 'KW' '320';\n" + + "KW321 : 'KW' '321';\n" + + "KW322 : 'KW' '322';\n" + + "KW323 : 'KW' '323';\n" + + "KW324 : 'KW' '324';\n" + + "KW325 : 'KW' '325';\n" + + "KW326 : 'KW' '326';\n" + + "KW327 : 'KW' '327';\n" + + "KW328 : 'KW' '328';\n" + + "KW329 : 'KW' '329';\n" + + "KW330 : 'KW' '330';\n" + + "KW331 : 'KW' '331';\n" + + "KW332 : 'KW' '332';\n" + + "KW333 : 'KW' '333';\n" + + "KW334 : 'KW' '334';\n" + + "KW335 : 'KW' '335';\n" + + "KW336 : 'KW' '336';\n" + + "KW337 : 'KW' '337';\n" + + "KW338 : 'KW' '338';\n" + + "KW339 : 'KW' '339';\n" + + "KW340 : 'KW' '340';\n" + + "KW341 : 'KW' '341';\n" + + "KW342 : 'KW' '342';\n" + + "KW343 : 'KW' '343';\n" + + "KW344 : 'KW' '344';\n" + + "KW345 : 'KW' '345';\n" + + "KW346 : 'KW' '346';\n" + + "KW347 : 'KW' '347';\n" + + "KW348 : 'KW' '348';\n" + + "KW349 : 'KW' '349';\n" + + "KW350 : 'KW' '350';\n" + + "KW351 : 'KW' '351';\n" + + "KW352 : 'KW' '352';\n" + + "KW353 
: 'KW' '353';\n" + + "KW354 : 'KW' '354';\n" + + "KW355 : 'KW' '355';\n" + + "KW356 : 'KW' '356';\n" + + "KW357 : 'KW' '357';\n" + + "KW358 : 'KW' '358';\n" + + "KW359 : 'KW' '359';\n" + + "KW360 : 'KW' '360';\n" + + "KW361 : 'KW' '361';\n" + + "KW362 : 'KW' '362';\n" + + "KW363 : 'KW' '363';\n" + + "KW364 : 'KW' '364';\n" + + "KW365 : 'KW' '365';\n" + + "KW366 : 'KW' '366';\n" + + "KW367 : 'KW' '367';\n" + + "KW368 : 'KW' '368';\n" + + "KW369 : 'KW' '369';\n" + + "KW370 : 'KW' '370';\n" + + "KW371 : 'KW' '371';\n" + + "KW372 : 'KW' '372';\n" + + "KW373 : 'KW' '373';\n" + + "KW374 : 'KW' '374';\n" + + "KW375 : 'KW' '375';\n" + + "KW376 : 'KW' '376';\n" + + "KW377 : 'KW' '377';\n" + + "KW378 : 'KW' '378';\n" + + "KW379 : 'KW' '379';\n" + + "KW380 : 'KW' '380';\n" + + "KW381 : 'KW' '381';\n" + + "KW382 : 'KW' '382';\n" + + "KW383 : 'KW' '383';\n" + + "KW384 : 'KW' '384';\n" + + "KW385 : 'KW' '385';\n" + + "KW386 : 'KW' '386';\n" + + "KW387 : 'KW' '387';\n" + + "KW388 : 'KW' '388';\n" + + "KW389 : 'KW' '389';\n" + + "KW390 : 'KW' '390';\n" + + "KW391 : 'KW' '391';\n" + + "KW392 : 'KW' '392';\n" + + "KW393 : 'KW' '393';\n" + + "KW394 : 'KW' '394';\n" + + "KW395 : 'KW' '395';\n" + + "KW396 : 'KW' '396';\n" + + "KW397 : 'KW' '397';\n" + + "KW398 : 'KW' '398';\n" + + "KW399 : 'KW' '399';\n" + + "KW400 : 'KW' '400';\n" + + "KW401 : 'KW' '401';\n" + + "KW402 : 'KW' '402';\n" + + "KW403 : 'KW' '403';\n" + + "KW404 : 'KW' '404';\n" + + "KW405 : 'KW' '405';\n" + + "KW406 : 'KW' '406';\n" + + "KW407 : 'KW' '407';\n" + + "KW408 : 'KW' '408';\n" + + "KW409 : 'KW' '409';\n" + + "KW410 : 'KW' '410';\n" + + "KW411 : 'KW' '411';\n" + + "KW412 : 'KW' '412';\n" + + "KW413 : 'KW' '413';\n" + + "KW414 : 'KW' '414';\n" + + "KW415 : 'KW' '415';\n" + + "KW416 : 'KW' '416';\n" + + "KW417 : 'KW' '417';\n" + + "KW418 : 'KW' '418';\n" + + "KW419 : 'KW' '419';\n" + + "KW420 : 'KW' '420';\n" + + "KW421 : 'KW' '421';\n" + + "KW422 : 'KW' '422';\n" + + "KW423 : 'KW' '423';\n" + + "KW424 : 'KW' '424';\n" + + "KW425 : 'KW' '425';\n" + + "KW426 : 'KW' '426';\n" + + "KW427 : 'KW' '427';\n" + + "KW428 : 'KW' '428';\n" + + "KW429 : 'KW' '429';\n" + + "KW430 : 'KW' '430';\n" + + "KW431 : 'KW' '431';\n" + + "KW432 : 'KW' '432';\n" + + "KW433 : 'KW' '433';\n" + + "KW434 : 'KW' '434';\n" + + "KW435 : 'KW' '435';\n" + + "KW436 : 'KW' '436';\n" + + "KW437 : 'KW' '437';\n" + + "KW438 : 'KW' '438';\n" + + "KW439 : 'KW' '439';\n" + + "KW440 : 'KW' '440';\n" + + "KW441 : 'KW' '441';\n" + + "KW442 : 'KW' '442';\n" + + "KW443 : 'KW' '443';\n" + + "KW444 : 'KW' '444';\n" + + "KW445 : 'KW' '445';\n" + + "KW446 : 'KW' '446';\n" + + "KW447 : 'KW' '447';\n" + + "KW448 : 'KW' '448';\n" + + "KW449 : 'KW' '449';\n" + + "KW450 : 'KW' '450';\n" + + "KW451 : 'KW' '451';\n" + + "KW452 : 'KW' '452';\n" + + "KW453 : 'KW' '453';\n" + + "KW454 : 'KW' '454';\n" + + "KW455 : 'KW' '455';\n" + + "KW456 : 'KW' '456';\n" + + "KW457 : 'KW' '457';\n" + + "KW458 : 'KW' '458';\n" + + "KW459 : 'KW' '459';\n" + + "KW460 : 'KW' '460';\n" + + "KW461 : 'KW' '461';\n" + + "KW462 : 'KW' '462';\n" + + "KW463 : 'KW' '463';\n" + + "KW464 : 'KW' '464';\n" + + "KW465 : 'KW' '465';\n" + + "KW466 : 'KW' '466';\n" + + "KW467 : 'KW' '467';\n" + + "KW468 : 'KW' '468';\n" + + "KW469 : 'KW' '469';\n" + + "KW470 : 'KW' '470';\n" + + "KW471 : 'KW' '471';\n" + + "KW472 : 'KW' '472';\n" + + "KW473 : 'KW' '473';\n" + + "KW474 : 'KW' '474';\n" + + "KW475 : 'KW' '475';\n" + + "KW476 : 'KW' '476';\n" + + "KW477 : 'KW' '477';\n" + + "KW478 : 'KW' '478';\n" + + "KW479 : 'KW' '479';\n" + + 
"KW480 : 'KW' '480';\n" + + "KW481 : 'KW' '481';\n" + + "KW482 : 'KW' '482';\n" + + "KW483 : 'KW' '483';\n" + + "KW484 : 'KW' '484';\n" + + "KW485 : 'KW' '485';\n" + + "KW486 : 'KW' '486';\n" + + "KW487 : 'KW' '487';\n" + + "KW488 : 'KW' '488';\n" + + "KW489 : 'KW' '489';\n" + + "KW490 : 'KW' '490';\n" + + "KW491 : 'KW' '491';\n" + + "KW492 : 'KW' '492';\n" + + "KW493 : 'KW' '493';\n" + + "KW494 : 'KW' '494';\n" + + "KW495 : 'KW' '495';\n" + + "KW496 : 'KW' '496';\n" + + "KW497 : 'KW' '497';\n" + + "KW498 : 'KW' '498';\n" + + "KW499 : 'KW' '499';\n" + + "KW500 : 'KW' '500';\n" + + "KW501 : 'KW' '501';\n" + + "KW502 : 'KW' '502';\n" + + "KW503 : 'KW' '503';\n" + + "KW504 : 'KW' '504';\n" + + "KW505 : 'KW' '505';\n" + + "KW506 : 'KW' '506';\n" + + "KW507 : 'KW' '507';\n" + + "KW508 : 'KW' '508';\n" + + "KW509 : 'KW' '509';\n" + + "KW510 : 'KW' '510';\n" + + "KW511 : 'KW' '511';\n" + + "KW512 : 'KW' '512';\n" + + "KW513 : 'KW' '513';\n" + + "KW514 : 'KW' '514';\n" + + "KW515 : 'KW' '515';\n" + + "KW516 : 'KW' '516';\n" + + "KW517 : 'KW' '517';\n" + + "KW518 : 'KW' '518';\n" + + "KW519 : 'KW' '519';\n" + + "KW520 : 'KW' '520';\n" + + "KW521 : 'KW' '521';\n" + + "KW522 : 'KW' '522';\n" + + "KW523 : 'KW' '523';\n" + + "KW524 : 'KW' '524';\n" + + "KW525 : 'KW' '525';\n" + + "KW526 : 'KW' '526';\n" + + "KW527 : 'KW' '527';\n" + + "KW528 : 'KW' '528';\n" + + "KW529 : 'KW' '529';\n" + + "KW530 : 'KW' '530';\n" + + "KW531 : 'KW' '531';\n" + + "KW532 : 'KW' '532';\n" + + "KW533 : 'KW' '533';\n" + + "KW534 : 'KW' '534';\n" + + "KW535 : 'KW' '535';\n" + + "KW536 : 'KW' '536';\n" + + "KW537 : 'KW' '537';\n" + + "KW538 : 'KW' '538';\n" + + "KW539 : 'KW' '539';\n" + + "KW540 : 'KW' '540';\n" + + "KW541 : 'KW' '541';\n" + + "KW542 : 'KW' '542';\n" + + "KW543 : 'KW' '543';\n" + + "KW544 : 'KW' '544';\n" + + "KW545 : 'KW' '545';\n" + + "KW546 : 'KW' '546';\n" + + "KW547 : 'KW' '547';\n" + + "KW548 : 'KW' '548';\n" + + "KW549 : 'KW' '549';\n" + + "KW550 : 'KW' '550';\n" + + "KW551 : 'KW' '551';\n" + + "KW552 : 'KW' '552';\n" + + "KW553 : 'KW' '553';\n" + + "KW554 : 'KW' '554';\n" + + "KW555 : 'KW' '555';\n" + + "KW556 : 'KW' '556';\n" + + "KW557 : 'KW' '557';\n" + + "KW558 : 'KW' '558';\n" + + "KW559 : 'KW' '559';\n" + + "KW560 : 'KW' '560';\n" + + "KW561 : 'KW' '561';\n" + + "KW562 : 'KW' '562';\n" + + "KW563 : 'KW' '563';\n" + + "KW564 : 'KW' '564';\n" + + "KW565 : 'KW' '565';\n" + + "KW566 : 'KW' '566';\n" + + "KW567 : 'KW' '567';\n" + + "KW568 : 'KW' '568';\n" + + "KW569 : 'KW' '569';\n" + + "KW570 : 'KW' '570';\n" + + "KW571 : 'KW' '571';\n" + + "KW572 : 'KW' '572';\n" + + "KW573 : 'KW' '573';\n" + + "KW574 : 'KW' '574';\n" + + "KW575 : 'KW' '575';\n" + + "KW576 : 'KW' '576';\n" + + "KW577 : 'KW' '577';\n" + + "KW578 : 'KW' '578';\n" + + "KW579 : 'KW' '579';\n" + + "KW580 : 'KW' '580';\n" + + "KW581 : 'KW' '581';\n" + + "KW582 : 'KW' '582';\n" + + "KW583 : 'KW' '583';\n" + + "KW584 : 'KW' '584';\n" + + "KW585 : 'KW' '585';\n" + + "KW586 : 'KW' '586';\n" + + "KW587 : 'KW' '587';\n" + + "KW588 : 'KW' '588';\n" + + "KW589 : 'KW' '589';\n" + + "KW590 : 'KW' '590';\n" + + "KW591 : 'KW' '591';\n" + + "KW592 : 'KW' '592';\n" + + "KW593 : 'KW' '593';\n" + + "KW594 : 'KW' '594';\n" + + "KW595 : 'KW' '595';\n" + + "KW596 : 'KW' '596';\n" + + "KW597 : 'KW' '597';\n" + + "KW598 : 'KW' '598';\n" + + "KW599 : 'KW' '599';\n" + + "KW600 : 'KW' '600';\n" + + "KW601 : 'KW' '601';\n" + + "KW602 : 'KW' '602';\n" + + "KW603 : 'KW' '603';\n" + + "KW604 : 'KW' '604';\n" + + "KW605 : 'KW' '605';\n" + + "KW606 : 'KW' '606';\n" + 
+ "KW607 : 'KW' '607';\n" + + "KW608 : 'KW' '608';\n" + + "KW609 : 'KW' '609';\n" + + "KW610 : 'KW' '610';\n" + + "KW611 : 'KW' '611';\n" + + "KW612 : 'KW' '612';\n" + + "KW613 : 'KW' '613';\n" + + "KW614 : 'KW' '614';\n" + + "KW615 : 'KW' '615';\n" + + "KW616 : 'KW' '616';\n" + + "KW617 : 'KW' '617';\n" + + "KW618 : 'KW' '618';\n" + + "KW619 : 'KW' '619';\n" + + "KW620 : 'KW' '620';\n" + + "KW621 : 'KW' '621';\n" + + "KW622 : 'KW' '622';\n" + + "KW623 : 'KW' '623';\n" + + "KW624 : 'KW' '624';\n" + + "KW625 : 'KW' '625';\n" + + "KW626 : 'KW' '626';\n" + + "KW627 : 'KW' '627';\n" + + "KW628 : 'KW' '628';\n" + + "KW629 : 'KW' '629';\n" + + "KW630 : 'KW' '630';\n" + + "KW631 : 'KW' '631';\n" + + "KW632 : 'KW' '632';\n" + + "KW633 : 'KW' '633';\n" + + "KW634 : 'KW' '634';\n" + + "KW635 : 'KW' '635';\n" + + "KW636 : 'KW' '636';\n" + + "KW637 : 'KW' '637';\n" + + "KW638 : 'KW' '638';\n" + + "KW639 : 'KW' '639';\n" + + "KW640 : 'KW' '640';\n" + + "KW641 : 'KW' '641';\n" + + "KW642 : 'KW' '642';\n" + + "KW643 : 'KW' '643';\n" + + "KW644 : 'KW' '644';\n" + + "KW645 : 'KW' '645';\n" + + "KW646 : 'KW' '646';\n" + + "KW647 : 'KW' '647';\n" + + "KW648 : 'KW' '648';\n" + + "KW649 : 'KW' '649';\n" + + "KW650 : 'KW' '650';\n" + + "KW651 : 'KW' '651';\n" + + "KW652 : 'KW' '652';\n" + + "KW653 : 'KW' '653';\n" + + "KW654 : 'KW' '654';\n" + + "KW655 : 'KW' '655';\n" + + "KW656 : 'KW' '656';\n" + + "KW657 : 'KW' '657';\n" + + "KW658 : 'KW' '658';\n" + + "KW659 : 'KW' '659';\n" + + "KW660 : 'KW' '660';\n" + + "KW661 : 'KW' '661';\n" + + "KW662 : 'KW' '662';\n" + + "KW663 : 'KW' '663';\n" + + "KW664 : 'KW' '664';\n" + + "KW665 : 'KW' '665';\n" + + "KW666 : 'KW' '666';\n" + + "KW667 : 'KW' '667';\n" + + "KW668 : 'KW' '668';\n" + + "KW669 : 'KW' '669';\n" + + "KW670 : 'KW' '670';\n" + + "KW671 : 'KW' '671';\n" + + "KW672 : 'KW' '672';\n" + + "KW673 : 'KW' '673';\n" + + "KW674 : 'KW' '674';\n" + + "KW675 : 'KW' '675';\n" + + "KW676 : 'KW' '676';\n" + + "KW677 : 'KW' '677';\n" + + "KW678 : 'KW' '678';\n" + + "KW679 : 'KW' '679';\n" + + "KW680 : 'KW' '680';\n" + + "KW681 : 'KW' '681';\n" + + "KW682 : 'KW' '682';\n" + + "KW683 : 'KW' '683';\n" + + "KW684 : 'KW' '684';\n" + + "KW685 : 'KW' '685';\n" + + "KW686 : 'KW' '686';\n" + + "KW687 : 'KW' '687';\n" + + "KW688 : 'KW' '688';\n" + + "KW689 : 'KW' '689';\n" + + "KW690 : 'KW' '690';\n" + + "KW691 : 'KW' '691';\n" + + "KW692 : 'KW' '692';\n" + + "KW693 : 'KW' '693';\n" + + "KW694 : 'KW' '694';\n" + + "KW695 : 'KW' '695';\n" + + "KW696 : 'KW' '696';\n" + + "KW697 : 'KW' '697';\n" + + "KW698 : 'KW' '698';\n" + + "KW699 : 'KW' '699';\n" + + "KW700 : 'KW' '700';\n" + + "KW701 : 'KW' '701';\n" + + "KW702 : 'KW' '702';\n" + + "KW703 : 'KW' '703';\n" + + "KW704 : 'KW' '704';\n" + + "KW705 : 'KW' '705';\n" + + "KW706 : 'KW' '706';\n" + + "KW707 : 'KW' '707';\n" + + "KW708 : 'KW' '708';\n" + + "KW709 : 'KW' '709';\n" + + "KW710 : 'KW' '710';\n" + + "KW711 : 'KW' '711';\n" + + "KW712 : 'KW' '712';\n" + + "KW713 : 'KW' '713';\n" + + "KW714 : 'KW' '714';\n" + + "KW715 : 'KW' '715';\n" + + "KW716 : 'KW' '716';\n" + + "KW717 : 'KW' '717';\n" + + "KW718 : 'KW' '718';\n" + + "KW719 : 'KW' '719';\n" + + "KW720 : 'KW' '720';\n" + + "KW721 : 'KW' '721';\n" + + "KW722 : 'KW' '722';\n" + + "KW723 : 'KW' '723';\n" + + "KW724 : 'KW' '724';\n" + + "KW725 : 'KW' '725';\n" + + "KW726 : 'KW' '726';\n" + + "KW727 : 'KW' '727';\n" + + "KW728 : 'KW' '728';\n" + + "KW729 : 'KW' '729';\n" + + "KW730 : 'KW' '730';\n" + + "KW731 : 'KW' '731';\n" + + "KW732 : 'KW' '732';\n" + + "KW733 : 'KW' '733';\n" 
+ + "KW734 : 'KW' '734';\n" + + "KW735 : 'KW' '735';\n" + + "KW736 : 'KW' '736';\n" + + "KW737 : 'KW' '737';\n" + + "KW738 : 'KW' '738';\n" + + "KW739 : 'KW' '739';\n" + + "KW740 : 'KW' '740';\n" + + "KW741 : 'KW' '741';\n" + + "KW742 : 'KW' '742';\n" + + "KW743 : 'KW' '743';\n" + + "KW744 : 'KW' '744';\n" + + "KW745 : 'KW' '745';\n" + + "KW746 : 'KW' '746';\n" + + "KW747 : 'KW' '747';\n" + + "KW748 : 'KW' '748';\n" + + "KW749 : 'KW' '749';\n" + + "KW750 : 'KW' '750';\n" + + "KW751 : 'KW' '751';\n" + + "KW752 : 'KW' '752';\n" + + "KW753 : 'KW' '753';\n" + + "KW754 : 'KW' '754';\n" + + "KW755 : 'KW' '755';\n" + + "KW756 : 'KW' '756';\n" + + "KW757 : 'KW' '757';\n" + + "KW758 : 'KW' '758';\n" + + "KW759 : 'KW' '759';\n" + + "KW760 : 'KW' '760';\n" + + "KW761 : 'KW' '761';\n" + + "KW762 : 'KW' '762';\n" + + "KW763 : 'KW' '763';\n" + + "KW764 : 'KW' '764';\n" + + "KW765 : 'KW' '765';\n" + + "KW766 : 'KW' '766';\n" + + "KW767 : 'KW' '767';\n" + + "KW768 : 'KW' '768';\n" + + "KW769 : 'KW' '769';\n" + + "KW770 : 'KW' '770';\n" + + "KW771 : 'KW' '771';\n" + + "KW772 : 'KW' '772';\n" + + "KW773 : 'KW' '773';\n" + + "KW774 : 'KW' '774';\n" + + "KW775 : 'KW' '775';\n" + + "KW776 : 'KW' '776';\n" + + "KW777 : 'KW' '777';\n" + + "KW778 : 'KW' '778';\n" + + "KW779 : 'KW' '779';\n" + + "KW780 : 'KW' '780';\n" + + "KW781 : 'KW' '781';\n" + + "KW782 : 'KW' '782';\n" + + "KW783 : 'KW' '783';\n" + + "KW784 : 'KW' '784';\n" + + "KW785 : 'KW' '785';\n" + + "KW786 : 'KW' '786';\n" + + "KW787 : 'KW' '787';\n" + + "KW788 : 'KW' '788';\n" + + "KW789 : 'KW' '789';\n" + + "KW790 : 'KW' '790';\n" + + "KW791 : 'KW' '791';\n" + + "KW792 : 'KW' '792';\n" + + "KW793 : 'KW' '793';\n" + + "KW794 : 'KW' '794';\n" + + "KW795 : 'KW' '795';\n" + + "KW796 : 'KW' '796';\n" + + "KW797 : 'KW' '797';\n" + + "KW798 : 'KW' '798';\n" + + "KW799 : 'KW' '799';\n" + + "KW800 : 'KW' '800';\n" + + "KW801 : 'KW' '801';\n" + + "KW802 : 'KW' '802';\n" + + "KW803 : 'KW' '803';\n" + + "KW804 : 'KW' '804';\n" + + "KW805 : 'KW' '805';\n" + + "KW806 : 'KW' '806';\n" + + "KW807 : 'KW' '807';\n" + + "KW808 : 'KW' '808';\n" + + "KW809 : 'KW' '809';\n" + + "KW810 : 'KW' '810';\n" + + "KW811 : 'KW' '811';\n" + + "KW812 : 'KW' '812';\n" + + "KW813 : 'KW' '813';\n" + + "KW814 : 'KW' '814';\n" + + "KW815 : 'KW' '815';\n" + + "KW816 : 'KW' '816';\n" + + "KW817 : 'KW' '817';\n" + + "KW818 : 'KW' '818';\n" + + "KW819 : 'KW' '819';\n" + + "KW820 : 'KW' '820';\n" + + "KW821 : 'KW' '821';\n" + + "KW822 : 'KW' '822';\n" + + "KW823 : 'KW' '823';\n" + + "KW824 : 'KW' '824';\n" + + "KW825 : 'KW' '825';\n" + + "KW826 : 'KW' '826';\n" + + "KW827 : 'KW' '827';\n" + + "KW828 : 'KW' '828';\n" + + "KW829 : 'KW' '829';\n" + + "KW830 : 'KW' '830';\n" + + "KW831 : 'KW' '831';\n" + + "KW832 : 'KW' '832';\n" + + "KW833 : 'KW' '833';\n" + + "KW834 : 'KW' '834';\n" + + "KW835 : 'KW' '835';\n" + + "KW836 : 'KW' '836';\n" + + "KW837 : 'KW' '837';\n" + + "KW838 : 'KW' '838';\n" + + "KW839 : 'KW' '839';\n" + + "KW840 : 'KW' '840';\n" + + "KW841 : 'KW' '841';\n" + + "KW842 : 'KW' '842';\n" + + "KW843 : 'KW' '843';\n" + + "KW844 : 'KW' '844';\n" + + "KW845 : 'KW' '845';\n" + + "KW846 : 'KW' '846';\n" + + "KW847 : 'KW' '847';\n" + + "KW848 : 'KW' '848';\n" + + "KW849 : 'KW' '849';\n" + + "KW850 : 'KW' '850';\n" + + "KW851 : 'KW' '851';\n" + + "KW852 : 'KW' '852';\n" + + "KW853 : 'KW' '853';\n" + + "KW854 : 'KW' '854';\n" + + "KW855 : 'KW' '855';\n" + + "KW856 : 'KW' '856';\n" + + "KW857 : 'KW' '857';\n" + + "KW858 : 'KW' '858';\n" + + "KW859 : 'KW' '859';\n" + + "KW860 : 'KW' 
'860';\n" + + "KW861 : 'KW' '861';\n" + + "KW862 : 'KW' '862';\n" + + "KW863 : 'KW' '863';\n" + + "KW864 : 'KW' '864';\n" + + "KW865 : 'KW' '865';\n" + + "KW866 : 'KW' '866';\n" + + "KW867 : 'KW' '867';\n" + + "KW868 : 'KW' '868';\n" + + "KW869 : 'KW' '869';\n" + + "KW870 : 'KW' '870';\n" + + "KW871 : 'KW' '871';\n" + + "KW872 : 'KW' '872';\n" + + "KW873 : 'KW' '873';\n" + + "KW874 : 'KW' '874';\n" + + "KW875 : 'KW' '875';\n" + + "KW876 : 'KW' '876';\n" + + "KW877 : 'KW' '877';\n" + + "KW878 : 'KW' '878';\n" + + "KW879 : 'KW' '879';\n" + + "KW880 : 'KW' '880';\n" + + "KW881 : 'KW' '881';\n" + + "KW882 : 'KW' '882';\n" + + "KW883 : 'KW' '883';\n" + + "KW884 : 'KW' '884';\n" + + "KW885 : 'KW' '885';\n" + + "KW886 : 'KW' '886';\n" + + "KW887 : 'KW' '887';\n" + + "KW888 : 'KW' '888';\n" + + "KW889 : 'KW' '889';\n" + + "KW890 : 'KW' '890';\n" + + "KW891 : 'KW' '891';\n" + + "KW892 : 'KW' '892';\n" + + "KW893 : 'KW' '893';\n" + + "KW894 : 'KW' '894';\n" + + "KW895 : 'KW' '895';\n" + + "KW896 : 'KW' '896';\n" + + "KW897 : 'KW' '897';\n" + + "KW898 : 'KW' '898';\n" + + "KW899 : 'KW' '899';\n" + + "KW900 : 'KW' '900';\n" + + "KW901 : 'KW' '901';\n" + + "KW902 : 'KW' '902';\n" + + "KW903 : 'KW' '903';\n" + + "KW904 : 'KW' '904';\n" + + "KW905 : 'KW' '905';\n" + + "KW906 : 'KW' '906';\n" + + "KW907 : 'KW' '907';\n" + + "KW908 : 'KW' '908';\n" + + "KW909 : 'KW' '909';\n" + + "KW910 : 'KW' '910';\n" + + "KW911 : 'KW' '911';\n" + + "KW912 : 'KW' '912';\n" + + "KW913 : 'KW' '913';\n" + + "KW914 : 'KW' '914';\n" + + "KW915 : 'KW' '915';\n" + + "KW916 : 'KW' '916';\n" + + "KW917 : 'KW' '917';\n" + + "KW918 : 'KW' '918';\n" + + "KW919 : 'KW' '919';\n" + + "KW920 : 'KW' '920';\n" + + "KW921 : 'KW' '921';\n" + + "KW922 : 'KW' '922';\n" + + "KW923 : 'KW' '923';\n" + + "KW924 : 'KW' '924';\n" + + "KW925 : 'KW' '925';\n" + + "KW926 : 'KW' '926';\n" + + "KW927 : 'KW' '927';\n" + + "KW928 : 'KW' '928';\n" + + "KW929 : 'KW' '929';\n" + + "KW930 : 'KW' '930';\n" + + "KW931 : 'KW' '931';\n" + + "KW932 : 'KW' '932';\n" + + "KW933 : 'KW' '933';\n" + + "KW934 : 'KW' '934';\n" + + "KW935 : 'KW' '935';\n" + + "KW936 : 'KW' '936';\n" + + "KW937 : 'KW' '937';\n" + + "KW938 : 'KW' '938';\n" + + "KW939 : 'KW' '939';\n" + + "KW940 : 'KW' '940';\n" + + "KW941 : 'KW' '941';\n" + + "KW942 : 'KW' '942';\n" + + "KW943 : 'KW' '943';\n" + + "KW944 : 'KW' '944';\n" + + "KW945 : 'KW' '945';\n" + + "KW946 : 'KW' '946';\n" + + "KW947 : 'KW' '947';\n" + + "KW948 : 'KW' '948';\n" + + "KW949 : 'KW' '949';\n" + + "KW950 : 'KW' '950';\n" + + "KW951 : 'KW' '951';\n" + + "KW952 : 'KW' '952';\n" + + "KW953 : 'KW' '953';\n" + + "KW954 : 'KW' '954';\n" + + "KW955 : 'KW' '955';\n" + + "KW956 : 'KW' '956';\n" + + "KW957 : 'KW' '957';\n" + + "KW958 : 'KW' '958';\n" + + "KW959 : 'KW' '959';\n" + + "KW960 : 'KW' '960';\n" + + "KW961 : 'KW' '961';\n" + + "KW962 : 'KW' '962';\n" + + "KW963 : 'KW' '963';\n" + + "KW964 : 'KW' '964';\n" + + "KW965 : 'KW' '965';\n" + + "KW966 : 'KW' '966';\n" + + "KW967 : 'KW' '967';\n" + + "KW968 : 'KW' '968';\n" + + "KW969 : 'KW' '969';\n" + + "KW970 : 'KW' '970';\n" + + "KW971 : 'KW' '971';\n" + + "KW972 : 'KW' '972';\n" + + "KW973 : 'KW' '973';\n" + + "KW974 : 'KW' '974';\n" + + "KW975 : 'KW' '975';\n" + + "KW976 : 'KW' '976';\n" + + "KW977 : 'KW' '977';\n" + + "KW978 : 'KW' '978';\n" + + "KW979 : 'KW' '979';\n" + + "KW980 : 'KW' '980';\n" + + "KW981 : 'KW' '981';\n" + + "KW982 : 'KW' '982';\n" + + "KW983 : 'KW' '983';\n" + + "KW984 : 'KW' '984';\n" + + "KW985 : 'KW' '985';\n" + + "KW986 : 'KW' '986';\n" + + "KW987 : 
'KW' '987';\n" + + "KW988 : 'KW' '988';\n" + + "KW989 : 'KW' '989';\n" + + "KW990 : 'KW' '990';\n" + + "KW991 : 'KW' '991';\n" + + "KW992 : 'KW' '992';\n" + + "KW993 : 'KW' '993';\n" + + "KW994 : 'KW' '994';\n" + + "KW995 : 'KW' '995';\n" + + "KW996 : 'KW' '996';\n" + + "KW997 : 'KW' '997';\n" + + "KW998 : 'KW' '998';\n" + + "KW999 : 'KW' '999';\n" + + "KW1000 : 'KW' '1000';\n" + + "KW1001 : 'KW' '1001';\n" + + "KW1002 : 'KW' '1002';\n" + + "KW1003 : 'KW' '1003';\n" + + "KW1004 : 'KW' '1004';\n" + + "KW1005 : 'KW' '1005';\n" + + "KW1006 : 'KW' '1006';\n" + + "KW1007 : 'KW' '1007';\n" + + "KW1008 : 'KW' '1008';\n" + + "KW1009 : 'KW' '1009';\n" + + "KW1010 : 'KW' '1010';\n" + + "KW1011 : 'KW' '1011';\n" + + "KW1012 : 'KW' '1012';\n" + + "KW1013 : 'KW' '1013';\n" + + "KW1014 : 'KW' '1014';\n" + + "KW1015 : 'KW' '1015';\n" + + "KW1016 : 'KW' '1016';\n" + + "KW1017 : 'KW' '1017';\n" + + "KW1018 : 'KW' '1018';\n" + + "KW1019 : 'KW' '1019';\n" + + "KW1020 : 'KW' '1020';\n" + + "KW1021 : 'KW' '1021';\n" + + "KW1022 : 'KW' '1022';\n" + + "KW1023 : 'KW' '1023';\n" + + "KW1024 : 'KW' '1024';\n" + + "KW1025 : 'KW' '1025';\n" + + "KW1026 : 'KW' '1026';\n" + + "KW1027 : 'KW' '1027';\n" + + "KW1028 : 'KW' '1028';\n" + + "KW1029 : 'KW' '1029';\n" + + "KW1030 : 'KW' '1030';\n" + + "KW1031 : 'KW' '1031';\n" + + "KW1032 : 'KW' '1032';\n" + + "KW1033 : 'KW' '1033';\n" + + "KW1034 : 'KW' '1034';\n" + + "KW1035 : 'KW' '1035';\n" + + "KW1036 : 'KW' '1036';\n" + + "KW1037 : 'KW' '1037';\n" + + "KW1038 : 'KW' '1038';\n" + + "KW1039 : 'KW' '1039';\n" + + "KW1040 : 'KW' '1040';\n" + + "KW1041 : 'KW' '1041';\n" + + "KW1042 : 'KW' '1042';\n" + + "KW1043 : 'KW' '1043';\n" + + "KW1044 : 'KW' '1044';\n" + + "KW1045 : 'KW' '1045';\n" + + "KW1046 : 'KW' '1046';\n" + + "KW1047 : 'KW' '1047';\n" + + "KW1048 : 'KW' '1048';\n" + + "KW1049 : 'KW' '1049';\n" + + "KW1050 : 'KW' '1050';\n" + + "KW1051 : 'KW' '1051';\n" + + "KW1052 : 'KW' '1052';\n" + + "KW1053 : 'KW' '1053';\n" + + "KW1054 : 'KW' '1054';\n" + + "KW1055 : 'KW' '1055';\n" + + "KW1056 : 'KW' '1056';\n" + + "KW1057 : 'KW' '1057';\n" + + "KW1058 : 'KW' '1058';\n" + + "KW1059 : 'KW' '1059';\n" + + "KW1060 : 'KW' '1060';\n" + + "KW1061 : 'KW' '1061';\n" + + "KW1062 : 'KW' '1062';\n" + + "KW1063 : 'KW' '1063';\n" + + "KW1064 : 'KW' '1064';\n" + + "KW1065 : 'KW' '1065';\n" + + "KW1066 : 'KW' '1066';\n" + + "KW1067 : 'KW' '1067';\n" + + "KW1068 : 'KW' '1068';\n" + + "KW1069 : 'KW' '1069';\n" + + "KW1070 : 'KW' '1070';\n" + + "KW1071 : 'KW' '1071';\n" + + "KW1072 : 'KW' '1072';\n" + + "KW1073 : 'KW' '1073';\n" + + "KW1074 : 'KW' '1074';\n" + + "KW1075 : 'KW' '1075';\n" + + "KW1076 : 'KW' '1076';\n" + + "KW1077 : 'KW' '1077';\n" + + "KW1078 : 'KW' '1078';\n" + + "KW1079 : 'KW' '1079';\n" + + "KW1080 : 'KW' '1080';\n" + + "KW1081 : 'KW' '1081';\n" + + "KW1082 : 'KW' '1082';\n" + + "KW1083 : 'KW' '1083';\n" + + "KW1084 : 'KW' '1084';\n" + + "KW1085 : 'KW' '1085';\n" + + "KW1086 : 'KW' '1086';\n" + + "KW1087 : 'KW' '1087';\n" + + "KW1088 : 'KW' '1088';\n" + + "KW1089 : 'KW' '1089';\n" + + "KW1090 : 'KW' '1090';\n" + + "KW1091 : 'KW' '1091';\n" + + "KW1092 : 'KW' '1092';\n" + + "KW1093 : 'KW' '1093';\n" + + "KW1094 : 'KW' '1094';\n" + + "KW1095 : 'KW' '1095';\n" + + "KW1096 : 'KW' '1096';\n" + + "KW1097 : 'KW' '1097';\n" + + "KW1098 : 'KW' '1098';\n" + + "KW1099 : 'KW' '1099';\n" + + "KW1100 : 'KW' '1100';\n" + + "KW1101 : 'KW' '1101';\n" + + "KW1102 : 'KW' '1102';\n" + + "KW1103 : 'KW' '1103';\n" + + "KW1104 : 'KW' '1104';\n" + + "KW1105 : 'KW' '1105';\n" + + "KW1106 : 'KW' 
'1106';\n" + + "KW1107 : 'KW' '1107';\n" + + "KW1108 : 'KW' '1108';\n" + + "KW1109 : 'KW' '1109';\n" + + "KW1110 : 'KW' '1110';\n" + + "KW1111 : 'KW' '1111';\n" + + "KW1112 : 'KW' '1112';\n" + + "KW1113 : 'KW' '1113';\n" + + "KW1114 : 'KW' '1114';\n" + + "KW1115 : 'KW' '1115';\n" + + "KW1116 : 'KW' '1116';\n" + + "KW1117 : 'KW' '1117';\n" + + "KW1118 : 'KW' '1118';\n" + + "KW1119 : 'KW' '1119';\n" + + "KW1120 : 'KW' '1120';\n" + + "KW1121 : 'KW' '1121';\n" + + "KW1122 : 'KW' '1122';\n" + + "KW1123 : 'KW' '1123';\n" + + "KW1124 : 'KW' '1124';\n" + + "KW1125 : 'KW' '1125';\n" + + "KW1126 : 'KW' '1126';\n" + + "KW1127 : 'KW' '1127';\n" + + "KW1128 : 'KW' '1128';\n" + + "KW1129 : 'KW' '1129';\n" + + "KW1130 : 'KW' '1130';\n" + + "KW1131 : 'KW' '1131';\n" + + "KW1132 : 'KW' '1132';\n" + + "KW1133 : 'KW' '1133';\n" + + "KW1134 : 'KW' '1134';\n" + + "KW1135 : 'KW' '1135';\n" + + "KW1136 : 'KW' '1136';\n" + + "KW1137 : 'KW' '1137';\n" + + "KW1138 : 'KW' '1138';\n" + + "KW1139 : 'KW' '1139';\n" + + "KW1140 : 'KW' '1140';\n" + + "KW1141 : 'KW' '1141';\n" + + "KW1142 : 'KW' '1142';\n" + + "KW1143 : 'KW' '1143';\n" + + "KW1144 : 'KW' '1144';\n" + + "KW1145 : 'KW' '1145';\n" + + "KW1146 : 'KW' '1146';\n" + + "KW1147 : 'KW' '1147';\n" + + "KW1148 : 'KW' '1148';\n" + + "KW1149 : 'KW' '1149';\n" + + "KW1150 : 'KW' '1150';\n" + + "KW1151 : 'KW' '1151';\n" + + "KW1152 : 'KW' '1152';\n" + + "KW1153 : 'KW' '1153';\n" + + "KW1154 : 'KW' '1154';\n" + + "KW1155 : 'KW' '1155';\n" + + "KW1156 : 'KW' '1156';\n" + + "KW1157 : 'KW' '1157';\n" + + "KW1158 : 'KW' '1158';\n" + + "KW1159 : 'KW' '1159';\n" + + "KW1160 : 'KW' '1160';\n" + + "KW1161 : 'KW' '1161';\n" + + "KW1162 : 'KW' '1162';\n" + + "KW1163 : 'KW' '1163';\n" + + "KW1164 : 'KW' '1164';\n" + + "KW1165 : 'KW' '1165';\n" + + "KW1166 : 'KW' '1166';\n" + + "KW1167 : 'KW' '1167';\n" + + "KW1168 : 'KW' '1168';\n" + + "KW1169 : 'KW' '1169';\n" + + "KW1170 : 'KW' '1170';\n" + + "KW1171 : 'KW' '1171';\n" + + "KW1172 : 'KW' '1172';\n" + + "KW1173 : 'KW' '1173';\n" + + "KW1174 : 'KW' '1174';\n" + + "KW1175 : 'KW' '1175';\n" + + "KW1176 : 'KW' '1176';\n" + + "KW1177 : 'KW' '1177';\n" + + "KW1178 : 'KW' '1178';\n" + + "KW1179 : 'KW' '1179';\n" + + "KW1180 : 'KW' '1180';\n" + + "KW1181 : 'KW' '1181';\n" + + "KW1182 : 'KW' '1182';\n" + + "KW1183 : 'KW' '1183';\n" + + "KW1184 : 'KW' '1184';\n" + + "KW1185 : 'KW' '1185';\n" + + "KW1186 : 'KW' '1186';\n" + + "KW1187 : 'KW' '1187';\n" + + "KW1188 : 'KW' '1188';\n" + + "KW1189 : 'KW' '1189';\n" + + "KW1190 : 'KW' '1190';\n" + + "KW1191 : 'KW' '1191';\n" + + "KW1192 : 'KW' '1192';\n" + + "KW1193 : 'KW' '1193';\n" + + "KW1194 : 'KW' '1194';\n" + + "KW1195 : 'KW' '1195';\n" + + "KW1196 : 'KW' '1196';\n" + + "KW1197 : 'KW' '1197';\n" + + "KW1198 : 'KW' '1198';\n" + + "KW1199 : 'KW' '1199';\n" + + "KW1200 : 'KW' '1200';\n" + + "KW1201 : 'KW' '1201';\n" + + "KW1202 : 'KW' '1202';\n" + + "KW1203 : 'KW' '1203';\n" + + "KW1204 : 'KW' '1204';\n" + + "KW1205 : 'KW' '1205';\n" + + "KW1206 : 'KW' '1206';\n" + + "KW1207 : 'KW' '1207';\n" + + "KW1208 : 'KW' '1208';\n" + + "KW1209 : 'KW' '1209';\n" + + "KW1210 : 'KW' '1210';\n" + + "KW1211 : 'KW' '1211';\n" + + "KW1212 : 'KW' '1212';\n" + + "KW1213 : 'KW' '1213';\n" + + "KW1214 : 'KW' '1214';\n" + + "KW1215 : 'KW' '1215';\n" + + "KW1216 : 'KW' '1216';\n" + + "KW1217 : 'KW' '1217';\n" + + "KW1218 : 'KW' '1218';\n" + + "KW1219 : 'KW' '1219';\n" + + "KW1220 : 'KW' '1220';\n" + + "KW1221 : 'KW' '1221';\n" + + "KW1222 : 'KW' '1222';\n" + + "KW1223 : 'KW' '1223';\n" + + "KW1224 : 'KW' '1224';\n" + + 
"KW1225 : 'KW' '1225';\n" + + "KW1226 : 'KW' '1226';\n" + + "KW1227 : 'KW' '1227';\n" + + "KW1228 : 'KW' '1228';\n" + + "KW1229 : 'KW' '1229';\n" + + "KW1230 : 'KW' '1230';\n" + + "KW1231 : 'KW' '1231';\n" + + "KW1232 : 'KW' '1232';\n" + + "KW1233 : 'KW' '1233';\n" + + "KW1234 : 'KW' '1234';\n" + + "KW1235 : 'KW' '1235';\n" + + "KW1236 : 'KW' '1236';\n" + + "KW1237 : 'KW' '1237';\n" + + "KW1238 : 'KW' '1238';\n" + + "KW1239 : 'KW' '1239';\n" + + "KW1240 : 'KW' '1240';\n" + + "KW1241 : 'KW' '1241';\n" + + "KW1242 : 'KW' '1242';\n" + + "KW1243 : 'KW' '1243';\n" + + "KW1244 : 'KW' '1244';\n" + + "KW1245 : 'KW' '1245';\n" + + "KW1246 : 'KW' '1246';\n" + + "KW1247 : 'KW' '1247';\n" + + "KW1248 : 'KW' '1248';\n" + + "KW1249 : 'KW' '1249';\n" + + "KW1250 : 'KW' '1250';\n" + + "KW1251 : 'KW' '1251';\n" + + "KW1252 : 'KW' '1252';\n" + + "KW1253 : 'KW' '1253';\n" + + "KW1254 : 'KW' '1254';\n" + + "KW1255 : 'KW' '1255';\n" + + "KW1256 : 'KW' '1256';\n" + + "KW1257 : 'KW' '1257';\n" + + "KW1258 : 'KW' '1258';\n" + + "KW1259 : 'KW' '1259';\n" + + "KW1260 : 'KW' '1260';\n" + + "KW1261 : 'KW' '1261';\n" + + "KW1262 : 'KW' '1262';\n" + + "KW1263 : 'KW' '1263';\n" + + "KW1264 : 'KW' '1264';\n" + + "KW1265 : 'KW' '1265';\n" + + "KW1266 : 'KW' '1266';\n" + + "KW1267 : 'KW' '1267';\n" + + "KW1268 : 'KW' '1268';\n" + + "KW1269 : 'KW' '1269';\n" + + "KW1270 : 'KW' '1270';\n" + + "KW1271 : 'KW' '1271';\n" + + "KW1272 : 'KW' '1272';\n" + + "KW1273 : 'KW' '1273';\n" + + "KW1274 : 'KW' '1274';\n" + + "KW1275 : 'KW' '1275';\n" + + "KW1276 : 'KW' '1276';\n" + + "KW1277 : 'KW' '1277';\n" + + "KW1278 : 'KW' '1278';\n" + + "KW1279 : 'KW' '1279';\n" + + "KW1280 : 'KW' '1280';\n" + + "KW1281 : 'KW' '1281';\n" + + "KW1282 : 'KW' '1282';\n" + + "KW1283 : 'KW' '1283';\n" + + "KW1284 : 'KW' '1284';\n" + + "KW1285 : 'KW' '1285';\n" + + "KW1286 : 'KW' '1286';\n" + + "KW1287 : 'KW' '1287';\n" + + "KW1288 : 'KW' '1288';\n" + + "KW1289 : 'KW' '1289';\n" + + "KW1290 : 'KW' '1290';\n" + + "KW1291 : 'KW' '1291';\n" + + "KW1292 : 'KW' '1292';\n" + + "KW1293 : 'KW' '1293';\n" + + "KW1294 : 'KW' '1294';\n" + + "KW1295 : 'KW' '1295';\n" + + "KW1296 : 'KW' '1296';\n" + + "KW1297 : 'KW' '1297';\n" + + "KW1298 : 'KW' '1298';\n" + + "KW1299 : 'KW' '1299';\n" + + "KW1300 : 'KW' '1300';\n" + + "KW1301 : 'KW' '1301';\n" + + "KW1302 : 'KW' '1302';\n" + + "KW1303 : 'KW' '1303';\n" + + "KW1304 : 'KW' '1304';\n" + + "KW1305 : 'KW' '1305';\n" + + "KW1306 : 'KW' '1306';\n" + + "KW1307 : 'KW' '1307';\n" + + "KW1308 : 'KW' '1308';\n" + + "KW1309 : 'KW' '1309';\n" + + "KW1310 : 'KW' '1310';\n" + + "KW1311 : 'KW' '1311';\n" + + "KW1312 : 'KW' '1312';\n" + + "KW1313 : 'KW' '1313';\n" + + "KW1314 : 'KW' '1314';\n" + + "KW1315 : 'KW' '1315';\n" + + "KW1316 : 'KW' '1316';\n" + + "KW1317 : 'KW' '1317';\n" + + "KW1318 : 'KW' '1318';\n" + + "KW1319 : 'KW' '1319';\n" + + "KW1320 : 'KW' '1320';\n" + + "KW1321 : 'KW' '1321';\n" + + "KW1322 : 'KW' '1322';\n" + + "KW1323 : 'KW' '1323';\n" + + "KW1324 : 'KW' '1324';\n" + + "KW1325 : 'KW' '1325';\n" + + "KW1326 : 'KW' '1326';\n" + + "KW1327 : 'KW' '1327';\n" + + "KW1328 : 'KW' '1328';\n" + + "KW1329 : 'KW' '1329';\n" + + "KW1330 : 'KW' '1330';\n" + + "KW1331 : 'KW' '1331';\n" + + "KW1332 : 'KW' '1332';\n" + + "KW1333 : 'KW' '1333';\n" + + "KW1334 : 'KW' '1334';\n" + + "KW1335 : 'KW' '1335';\n" + + "KW1336 : 'KW' '1336';\n" + + "KW1337 : 'KW' '1337';\n" + + "KW1338 : 'KW' '1338';\n" + + "KW1339 : 'KW' '1339';\n" + + "KW1340 : 'KW' '1340';\n" + + "KW1341 : 'KW' '1341';\n" + + "KW1342 : 'KW' '1342';\n" + + "KW1343 : 'KW' 
'1343';\n" + + "KW1344 : 'KW' '1344';\n" + + "KW1345 : 'KW' '1345';\n" + + "KW1346 : 'KW' '1346';\n" + + "KW1347 : 'KW' '1347';\n" + + "KW1348 : 'KW' '1348';\n" + + "KW1349 : 'KW' '1349';\n" + + "KW1350 : 'KW' '1350';\n" + + "KW1351 : 'KW' '1351';\n" + + "KW1352 : 'KW' '1352';\n" + + "KW1353 : 'KW' '1353';\n" + + "KW1354 : 'KW' '1354';\n" + + "KW1355 : 'KW' '1355';\n" + + "KW1356 : 'KW' '1356';\n" + + "KW1357 : 'KW' '1357';\n" + + "KW1358 : 'KW' '1358';\n" + + "KW1359 : 'KW' '1359';\n" + + "KW1360 : 'KW' '1360';\n" + + "KW1361 : 'KW' '1361';\n" + + "KW1362 : 'KW' '1362';\n" + + "KW1363 : 'KW' '1363';\n" + + "KW1364 : 'KW' '1364';\n" + + "KW1365 : 'KW' '1365';\n" + + "KW1366 : 'KW' '1366';\n" + + "KW1367 : 'KW' '1367';\n" + + "KW1368 : 'KW' '1368';\n" + + "KW1369 : 'KW' '1369';\n" + + "KW1370 : 'KW' '1370';\n" + + "KW1371 : 'KW' '1371';\n" + + "KW1372 : 'KW' '1372';\n" + + "KW1373 : 'KW' '1373';\n" + + "KW1374 : 'KW' '1374';\n" + + "KW1375 : 'KW' '1375';\n" + + "KW1376 : 'KW' '1376';\n" + + "KW1377 : 'KW' '1377';\n" + + "KW1378 : 'KW' '1378';\n" + + "KW1379 : 'KW' '1379';\n" + + "KW1380 : 'KW' '1380';\n" + + "KW1381 : 'KW' '1381';\n" + + "KW1382 : 'KW' '1382';\n" + + "KW1383 : 'KW' '1383';\n" + + "KW1384 : 'KW' '1384';\n" + + "KW1385 : 'KW' '1385';\n" + + "KW1386 : 'KW' '1386';\n" + + "KW1387 : 'KW' '1387';\n" + + "KW1388 : 'KW' '1388';\n" + + "KW1389 : 'KW' '1389';\n" + + "KW1390 : 'KW' '1390';\n" + + "KW1391 : 'KW' '1391';\n" + + "KW1392 : 'KW' '1392';\n" + + "KW1393 : 'KW' '1393';\n" + + "KW1394 : 'KW' '1394';\n" + + "KW1395 : 'KW' '1395';\n" + + "KW1396 : 'KW' '1396';\n" + + "KW1397 : 'KW' '1397';\n" + + "KW1398 : 'KW' '1398';\n" + + "KW1399 : 'KW' '1399';\n" + + "KW1400 : 'KW' '1400';\n" + + "KW1401 : 'KW' '1401';\n" + + "KW1402 : 'KW' '1402';\n" + + "KW1403 : 'KW' '1403';\n" + + "KW1404 : 'KW' '1404';\n" + + "KW1405 : 'KW' '1405';\n" + + "KW1406 : 'KW' '1406';\n" + + "KW1407 : 'KW' '1407';\n" + + "KW1408 : 'KW' '1408';\n" + + "KW1409 : 'KW' '1409';\n" + + "KW1410 : 'KW' '1410';\n" + + "KW1411 : 'KW' '1411';\n" + + "KW1412 : 'KW' '1412';\n" + + "KW1413 : 'KW' '1413';\n" + + "KW1414 : 'KW' '1414';\n" + + "KW1415 : 'KW' '1415';\n" + + "KW1416 : 'KW' '1416';\n" + + "KW1417 : 'KW' '1417';\n" + + "KW1418 : 'KW' '1418';\n" + + "KW1419 : 'KW' '1419';\n" + + "KW1420 : 'KW' '1420';\n" + + "KW1421 : 'KW' '1421';\n" + + "KW1422 : 'KW' '1422';\n" + + "KW1423 : 'KW' '1423';\n" + + "KW1424 : 'KW' '1424';\n" + + "KW1425 : 'KW' '1425';\n" + + "KW1426 : 'KW' '1426';\n" + + "KW1427 : 'KW' '1427';\n" + + "KW1428 : 'KW' '1428';\n" + + "KW1429 : 'KW' '1429';\n" + + "KW1430 : 'KW' '1430';\n" + + "KW1431 : 'KW' '1431';\n" + + "KW1432 : 'KW' '1432';\n" + + "KW1433 : 'KW' '1433';\n" + + "KW1434 : 'KW' '1434';\n" + + "KW1435 : 'KW' '1435';\n" + + "KW1436 : 'KW' '1436';\n" + + "KW1437 : 'KW' '1437';\n" + + "KW1438 : 'KW' '1438';\n" + + "KW1439 : 'KW' '1439';\n" + + "KW1440 : 'KW' '1440';\n" + + "KW1441 : 'KW' '1441';\n" + + "KW1442 : 'KW' '1442';\n" + + "KW1443 : 'KW' '1443';\n" + + "KW1444 : 'KW' '1444';\n" + + "KW1445 : 'KW' '1445';\n" + + "KW1446 : 'KW' '1446';\n" + + "KW1447 : 'KW' '1447';\n" + + "KW1448 : 'KW' '1448';\n" + + "KW1449 : 'KW' '1449';\n" + + "KW1450 : 'KW' '1450';\n" + + "KW1451 : 'KW' '1451';\n" + + "KW1452 : 'KW' '1452';\n" + + "KW1453 : 'KW' '1453';\n" + + "KW1454 : 'KW' '1454';\n" + + "KW1455 : 'KW' '1455';\n" + + "KW1456 : 'KW' '1456';\n" + + "KW1457 : 'KW' '1457';\n" + + "KW1458 : 'KW' '1458';\n" + + "KW1459 : 'KW' '1459';\n" + + "KW1460 : 'KW' '1460';\n" + + "KW1461 : 'KW' '1461';\n" + + 
"KW1462 : 'KW' '1462';\n" + + "KW1463 : 'KW' '1463';\n" + + "KW1464 : 'KW' '1464';\n" + + "KW1465 : 'KW' '1465';\n" + + "KW1466 : 'KW' '1466';\n" + + "KW1467 : 'KW' '1467';\n" + + "KW1468 : 'KW' '1468';\n" + + "KW1469 : 'KW' '1469';\n" + + "KW1470 : 'KW' '1470';\n" + + "KW1471 : 'KW' '1471';\n" + + "KW1472 : 'KW' '1472';\n" + + "KW1473 : 'KW' '1473';\n" + + "KW1474 : 'KW' '1474';\n" + + "KW1475 : 'KW' '1475';\n" + + "KW1476 : 'KW' '1476';\n" + + "KW1477 : 'KW' '1477';\n" + + "KW1478 : 'KW' '1478';\n" + + "KW1479 : 'KW' '1479';\n" + + "KW1480 : 'KW' '1480';\n" + + "KW1481 : 'KW' '1481';\n" + + "KW1482 : 'KW' '1482';\n" + + "KW1483 : 'KW' '1483';\n" + + "KW1484 : 'KW' '1484';\n" + + "KW1485 : 'KW' '1485';\n" + + "KW1486 : 'KW' '1486';\n" + + "KW1487 : 'KW' '1487';\n" + + "KW1488 : 'KW' '1488';\n" + + "KW1489 : 'KW' '1489';\n" + + "KW1490 : 'KW' '1490';\n" + + "KW1491 : 'KW' '1491';\n" + + "KW1492 : 'KW' '1492';\n" + + "KW1493 : 'KW' '1493';\n" + + "KW1494 : 'KW' '1494';\n" + + "KW1495 : 'KW' '1495';\n" + + "KW1496 : 'KW' '1496';\n" + + "KW1497 : 'KW' '1497';\n" + + "KW1498 : 'KW' '1498';\n" + + "KW1499 : 'KW' '1499';\n" + + "KW1500 : 'KW' '1500';\n" + + "KW1501 : 'KW' '1501';\n" + + "KW1502 : 'KW' '1502';\n" + + "KW1503 : 'KW' '1503';\n" + + "KW1504 : 'KW' '1504';\n" + + "KW1505 : 'KW' '1505';\n" + + "KW1506 : 'KW' '1506';\n" + + "KW1507 : 'KW' '1507';\n" + + "KW1508 : 'KW' '1508';\n" + + "KW1509 : 'KW' '1509';\n" + + "KW1510 : 'KW' '1510';\n" + + "KW1511 : 'KW' '1511';\n" + + "KW1512 : 'KW' '1512';\n" + + "KW1513 : 'KW' '1513';\n" + + "KW1514 : 'KW' '1514';\n" + + "KW1515 : 'KW' '1515';\n" + + "KW1516 : 'KW' '1516';\n" + + "KW1517 : 'KW' '1517';\n" + + "KW1518 : 'KW' '1518';\n" + + "KW1519 : 'KW' '1519';\n" + + "KW1520 : 'KW' '1520';\n" + + "KW1521 : 'KW' '1521';\n" + + "KW1522 : 'KW' '1522';\n" + + "KW1523 : 'KW' '1523';\n" + + "KW1524 : 'KW' '1524';\n" + + "KW1525 : 'KW' '1525';\n" + + "KW1526 : 'KW' '1526';\n" + + "KW1527 : 'KW' '1527';\n" + + "KW1528 : 'KW' '1528';\n" + + "KW1529 : 'KW' '1529';\n" + + "KW1530 : 'KW' '1530';\n" + + "KW1531 : 'KW' '1531';\n" + + "KW1532 : 'KW' '1532';\n" + + "KW1533 : 'KW' '1533';\n" + + "KW1534 : 'KW' '1534';\n" + + "KW1535 : 'KW' '1535';\n" + + "KW1536 : 'KW' '1536';\n" + + "KW1537 : 'KW' '1537';\n" + + "KW1538 : 'KW' '1538';\n" + + "KW1539 : 'KW' '1539';\n" + + "KW1540 : 'KW' '1540';\n" + + "KW1541 : 'KW' '1541';\n" + + "KW1542 : 'KW' '1542';\n" + + "KW1543 : 'KW' '1543';\n" + + "KW1544 : 'KW' '1544';\n" + + "KW1545 : 'KW' '1545';\n" + + "KW1546 : 'KW' '1546';\n" + + "KW1547 : 'KW' '1547';\n" + + "KW1548 : 'KW' '1548';\n" + + "KW1549 : 'KW' '1549';\n" + + "KW1550 : 'KW' '1550';\n" + + "KW1551 : 'KW' '1551';\n" + + "KW1552 : 'KW' '1552';\n" + + "KW1553 : 'KW' '1553';\n" + + "KW1554 : 'KW' '1554';\n" + + "KW1555 : 'KW' '1555';\n" + + "KW1556 : 'KW' '1556';\n" + + "KW1557 : 'KW' '1557';\n" + + "KW1558 : 'KW' '1558';\n" + + "KW1559 : 'KW' '1559';\n" + + "KW1560 : 'KW' '1560';\n" + + "KW1561 : 'KW' '1561';\n" + + "KW1562 : 'KW' '1562';\n" + + "KW1563 : 'KW' '1563';\n" + + "KW1564 : 'KW' '1564';\n" + + "KW1565 : 'KW' '1565';\n" + + "KW1566 : 'KW' '1566';\n" + + "KW1567 : 'KW' '1567';\n" + + "KW1568 : 'KW' '1568';\n" + + "KW1569 : 'KW' '1569';\n" + + "KW1570 : 'KW' '1570';\n" + + "KW1571 : 'KW' '1571';\n" + + "KW1572 : 'KW' '1572';\n" + + "KW1573 : 'KW' '1573';\n" + + "KW1574 : 'KW' '1574';\n" + + "KW1575 : 'KW' '1575';\n" + + "KW1576 : 'KW' '1576';\n" + + "KW1577 : 'KW' '1577';\n" + + "KW1578 : 'KW' '1578';\n" + + "KW1579 : 'KW' '1579';\n" + + "KW1580 : 'KW' 
'1580';\n" + + "KW1581 : 'KW' '1581';\n" + + "KW1582 : 'KW' '1582';\n" + + "KW1583 : 'KW' '1583';\n" + + "KW1584 : 'KW' '1584';\n" + + "KW1585 : 'KW' '1585';\n" + + "KW1586 : 'KW' '1586';\n" + + "KW1587 : 'KW' '1587';\n" + + "KW1588 : 'KW' '1588';\n" + + "KW1589 : 'KW' '1589';\n" + + "KW1590 : 'KW' '1590';\n" + + "KW1591 : 'KW' '1591';\n" + + "KW1592 : 'KW' '1592';\n" + + "KW1593 : 'KW' '1593';\n" + + "KW1594 : 'KW' '1594';\n" + + "KW1595 : 'KW' '1595';\n" + + "KW1596 : 'KW' '1596';\n" + + "KW1597 : 'KW' '1597';\n" + + "KW1598 : 'KW' '1598';\n" + + "KW1599 : 'KW' '1599';\n" + + "KW1600 : 'KW' '1600';\n" + + "KW1601 : 'KW' '1601';\n" + + "KW1602 : 'KW' '1602';\n" + + "KW1603 : 'KW' '1603';\n" + + "KW1604 : 'KW' '1604';\n" + + "KW1605 : 'KW' '1605';\n" + + "KW1606 : 'KW' '1606';\n" + + "KW1607 : 'KW' '1607';\n" + + "KW1608 : 'KW' '1608';\n" + + "KW1609 : 'KW' '1609';\n" + + "KW1610 : 'KW' '1610';\n" + + "KW1611 : 'KW' '1611';\n" + + "KW1612 : 'KW' '1612';\n" + + "KW1613 : 'KW' '1613';\n" + + "KW1614 : 'KW' '1614';\n" + + "KW1615 : 'KW' '1615';\n" + + "KW1616 : 'KW' '1616';\n" + + "KW1617 : 'KW' '1617';\n" + + "KW1618 : 'KW' '1618';\n" + + "KW1619 : 'KW' '1619';\n" + + "KW1620 : 'KW' '1620';\n" + + "KW1621 : 'KW' '1621';\n" + + "KW1622 : 'KW' '1622';\n" + + "KW1623 : 'KW' '1623';\n" + + "KW1624 : 'KW' '1624';\n" + + "KW1625 : 'KW' '1625';\n" + + "KW1626 : 'KW' '1626';\n" + + "KW1627 : 'KW' '1627';\n" + + "KW1628 : 'KW' '1628';\n" + + "KW1629 : 'KW' '1629';\n" + + "KW1630 : 'KW' '1630';\n" + + "KW1631 : 'KW' '1631';\n" + + "KW1632 : 'KW' '1632';\n" + + "KW1633 : 'KW' '1633';\n" + + "KW1634 : 'KW' '1634';\n" + + "KW1635 : 'KW' '1635';\n" + + "KW1636 : 'KW' '1636';\n" + + "KW1637 : 'KW' '1637';\n" + + "KW1638 : 'KW' '1638';\n" + + "KW1639 : 'KW' '1639';\n" + + "KW1640 : 'KW' '1640';\n" + + "KW1641 : 'KW' '1641';\n" + + "KW1642 : 'KW' '1642';\n" + + "KW1643 : 'KW' '1643';\n" + + "KW1644 : 'KW' '1644';\n" + + "KW1645 : 'KW' '1645';\n" + + "KW1646 : 'KW' '1646';\n" + + "KW1647 : 'KW' '1647';\n" + + "KW1648 : 'KW' '1648';\n" + + "KW1649 : 'KW' '1649';\n" + + "KW1650 : 'KW' '1650';\n" + + "KW1651 : 'KW' '1651';\n" + + "KW1652 : 'KW' '1652';\n" + + "KW1653 : 'KW' '1653';\n" + + "KW1654 : 'KW' '1654';\n" + + "KW1655 : 'KW' '1655';\n" + + "KW1656 : 'KW' '1656';\n" + + "KW1657 : 'KW' '1657';\n" + + "KW1658 : 'KW' '1658';\n" + + "KW1659 : 'KW' '1659';\n" + + "KW1660 : 'KW' '1660';\n" + + "KW1661 : 'KW' '1661';\n" + + "KW1662 : 'KW' '1662';\n" + + "KW1663 : 'KW' '1663';\n" + + "KW1664 : 'KW' '1664';\n" + + "KW1665 : 'KW' '1665';\n" + + "KW1666 : 'KW' '1666';\n" + + "KW1667 : 'KW' '1667';\n" + + "KW1668 : 'KW' '1668';\n" + + "KW1669 : 'KW' '1669';\n" + + "KW1670 : 'KW' '1670';\n" + + "KW1671 : 'KW' '1671';\n" + + "KW1672 : 'KW' '1672';\n" + + "KW1673 : 'KW' '1673';\n" + + "KW1674 : 'KW' '1674';\n" + + "KW1675 : 'KW' '1675';\n" + + "KW1676 : 'KW' '1676';\n" + + "KW1677 : 'KW' '1677';\n" + + "KW1678 : 'KW' '1678';\n" + + "KW1679 : 'KW' '1679';\n" + + "KW1680 : 'KW' '1680';\n" + + "KW1681 : 'KW' '1681';\n" + + "KW1682 : 'KW' '1682';\n" + + "KW1683 : 'KW' '1683';\n" + + "KW1684 : 'KW' '1684';\n" + + "KW1685 : 'KW' '1685';\n" + + "KW1686 : 'KW' '1686';\n" + + "KW1687 : 'KW' '1687';\n" + + "KW1688 : 'KW' '1688';\n" + + "KW1689 : 'KW' '1689';\n" + + "KW1690 : 'KW' '1690';\n" + + "KW1691 : 'KW' '1691';\n" + + "KW1692 : 'KW' '1692';\n" + + "KW1693 : 'KW' '1693';\n" + + "KW1694 : 'KW' '1694';\n" + + "KW1695 : 'KW' '1695';\n" + + "KW1696 : 'KW' '1696';\n" + + "KW1697 : 'KW' '1697';\n" + + "KW1698 : 'KW' '1698';\n" + + 
"KW1699 : 'KW' '1699';\n" + + "KW1700 : 'KW' '1700';\n" + + "KW1701 : 'KW' '1701';\n" + + "KW1702 : 'KW' '1702';\n" + + "KW1703 : 'KW' '1703';\n" + + "KW1704 : 'KW' '1704';\n" + + "KW1705 : 'KW' '1705';\n" + + "KW1706 : 'KW' '1706';\n" + + "KW1707 : 'KW' '1707';\n" + + "KW1708 : 'KW' '1708';\n" + + "KW1709 : 'KW' '1709';\n" + + "KW1710 : 'KW' '1710';\n" + + "KW1711 : 'KW' '1711';\n" + + "KW1712 : 'KW' '1712';\n" + + "KW1713 : 'KW' '1713';\n" + + "KW1714 : 'KW' '1714';\n" + + "KW1715 : 'KW' '1715';\n" + + "KW1716 : 'KW' '1716';\n" + + "KW1717 : 'KW' '1717';\n" + + "KW1718 : 'KW' '1718';\n" + + "KW1719 : 'KW' '1719';\n" + + "KW1720 : 'KW' '1720';\n" + + "KW1721 : 'KW' '1721';\n" + + "KW1722 : 'KW' '1722';\n" + + "KW1723 : 'KW' '1723';\n" + + "KW1724 : 'KW' '1724';\n" + + "KW1725 : 'KW' '1725';\n" + + "KW1726 : 'KW' '1726';\n" + + "KW1727 : 'KW' '1727';\n" + + "KW1728 : 'KW' '1728';\n" + + "KW1729 : 'KW' '1729';\n" + + "KW1730 : 'KW' '1730';\n" + + "KW1731 : 'KW' '1731';\n" + + "KW1732 : 'KW' '1732';\n" + + "KW1733 : 'KW' '1733';\n" + + "KW1734 : 'KW' '1734';\n" + + "KW1735 : 'KW' '1735';\n" + + "KW1736 : 'KW' '1736';\n" + + "KW1737 : 'KW' '1737';\n" + + "KW1738 : 'KW' '1738';\n" + + "KW1739 : 'KW' '1739';\n" + + "KW1740 : 'KW' '1740';\n" + + "KW1741 : 'KW' '1741';\n" + + "KW1742 : 'KW' '1742';\n" + + "KW1743 : 'KW' '1743';\n" + + "KW1744 : 'KW' '1744';\n" + + "KW1745 : 'KW' '1745';\n" + + "KW1746 : 'KW' '1746';\n" + + "KW1747 : 'KW' '1747';\n" + + "KW1748 : 'KW' '1748';\n" + + "KW1749 : 'KW' '1749';\n" + + "KW1750 : 'KW' '1750';\n" + + "KW1751 : 'KW' '1751';\n" + + "KW1752 : 'KW' '1752';\n" + + "KW1753 : 'KW' '1753';\n" + + "KW1754 : 'KW' '1754';\n" + + "KW1755 : 'KW' '1755';\n" + + "KW1756 : 'KW' '1756';\n" + + "KW1757 : 'KW' '1757';\n" + + "KW1758 : 'KW' '1758';\n" + + "KW1759 : 'KW' '1759';\n" + + "KW1760 : 'KW' '1760';\n" + + "KW1761 : 'KW' '1761';\n" + + "KW1762 : 'KW' '1762';\n" + + "KW1763 : 'KW' '1763';\n" + + "KW1764 : 'KW' '1764';\n" + + "KW1765 : 'KW' '1765';\n" + + "KW1766 : 'KW' '1766';\n" + + "KW1767 : 'KW' '1767';\n" + + "KW1768 : 'KW' '1768';\n" + + "KW1769 : 'KW' '1769';\n" + + "KW1770 : 'KW' '1770';\n" + + "KW1771 : 'KW' '1771';\n" + + "KW1772 : 'KW' '1772';\n" + + "KW1773 : 'KW' '1773';\n" + + "KW1774 : 'KW' '1774';\n" + + "KW1775 : 'KW' '1775';\n" + + "KW1776 : 'KW' '1776';\n" + + "KW1777 : 'KW' '1777';\n" + + "KW1778 : 'KW' '1778';\n" + + "KW1779 : 'KW' '1779';\n" + + "KW1780 : 'KW' '1780';\n" + + "KW1781 : 'KW' '1781';\n" + + "KW1782 : 'KW' '1782';\n" + + "KW1783 : 'KW' '1783';\n" + + "KW1784 : 'KW' '1784';\n" + + "KW1785 : 'KW' '1785';\n" + + "KW1786 : 'KW' '1786';\n" + + "KW1787 : 'KW' '1787';\n" + + "KW1788 : 'KW' '1788';\n" + + "KW1789 : 'KW' '1789';\n" + + "KW1790 : 'KW' '1790';\n" + + "KW1791 : 'KW' '1791';\n" + + "KW1792 : 'KW' '1792';\n" + + "KW1793 : 'KW' '1793';\n" + + "KW1794 : 'KW' '1794';\n" + + "KW1795 : 'KW' '1795';\n" + + "KW1796 : 'KW' '1796';\n" + + "KW1797 : 'KW' '1797';\n" + + "KW1798 : 'KW' '1798';\n" + + "KW1799 : 'KW' '1799';\n" + + "KW1800 : 'KW' '1800';\n" + + "KW1801 : 'KW' '1801';\n" + + "KW1802 : 'KW' '1802';\n" + + "KW1803 : 'KW' '1803';\n" + + "KW1804 : 'KW' '1804';\n" + + "KW1805 : 'KW' '1805';\n" + + "KW1806 : 'KW' '1806';\n" + + "KW1807 : 'KW' '1807';\n" + + "KW1808 : 'KW' '1808';\n" + + "KW1809 : 'KW' '1809';\n" + + "KW1810 : 'KW' '1810';\n" + + "KW1811 : 'KW' '1811';\n" + + "KW1812 : 'KW' '1812';\n" + + "KW1813 : 'KW' '1813';\n" + + "KW1814 : 'KW' '1814';\n" + + "KW1815 : 'KW' '1815';\n" + + "KW1816 : 'KW' '1816';\n" + + "KW1817 : 'KW' 
'1817';\n" + + "KW1818 : 'KW' '1818';\n" + + "KW1819 : 'KW' '1819';\n" + + "KW1820 : 'KW' '1820';\n" + + "KW1821 : 'KW' '1821';\n" + + "KW1822 : 'KW' '1822';\n" + + "KW1823 : 'KW' '1823';\n" + + "KW1824 : 'KW' '1824';\n" + + "KW1825 : 'KW' '1825';\n" + + "KW1826 : 'KW' '1826';\n" + + "KW1827 : 'KW' '1827';\n" + + "KW1828 : 'KW' '1828';\n" + + "KW1829 : 'KW' '1829';\n" + + "KW1830 : 'KW' '1830';\n" + + "KW1831 : 'KW' '1831';\n" + + "KW1832 : 'KW' '1832';\n" + + "KW1833 : 'KW' '1833';\n" + + "KW1834 : 'KW' '1834';\n" + + "KW1835 : 'KW' '1835';\n" + + "KW1836 : 'KW' '1836';\n" + + "KW1837 : 'KW' '1837';\n" + + "KW1838 : 'KW' '1838';\n" + + "KW1839 : 'KW' '1839';\n" + + "KW1840 : 'KW' '1840';\n" + + "KW1841 : 'KW' '1841';\n" + + "KW1842 : 'KW' '1842';\n" + + "KW1843 : 'KW' '1843';\n" + + "KW1844 : 'KW' '1844';\n" + + "KW1845 : 'KW' '1845';\n" + + "KW1846 : 'KW' '1846';\n" + + "KW1847 : 'KW' '1847';\n" + + "KW1848 : 'KW' '1848';\n" + + "KW1849 : 'KW' '1849';\n" + + "KW1850 : 'KW' '1850';\n" + + "KW1851 : 'KW' '1851';\n" + + "KW1852 : 'KW' '1852';\n" + + "KW1853 : 'KW' '1853';\n" + + "KW1854 : 'KW' '1854';\n" + + "KW1855 : 'KW' '1855';\n" + + "KW1856 : 'KW' '1856';\n" + + "KW1857 : 'KW' '1857';\n" + + "KW1858 : 'KW' '1858';\n" + + "KW1859 : 'KW' '1859';\n" + + "KW1860 : 'KW' '1860';\n" + + "KW1861 : 'KW' '1861';\n" + + "KW1862 : 'KW' '1862';\n" + + "KW1863 : 'KW' '1863';\n" + + "KW1864 : 'KW' '1864';\n" + + "KW1865 : 'KW' '1865';\n" + + "KW1866 : 'KW' '1866';\n" + + "KW1867 : 'KW' '1867';\n" + + "KW1868 : 'KW' '1868';\n" + + "KW1869 : 'KW' '1869';\n" + + "KW1870 : 'KW' '1870';\n" + + "KW1871 : 'KW' '1871';\n" + + "KW1872 : 'KW' '1872';\n" + + "KW1873 : 'KW' '1873';\n" + + "KW1874 : 'KW' '1874';\n" + + "KW1875 : 'KW' '1875';\n" + + "KW1876 : 'KW' '1876';\n" + + "KW1877 : 'KW' '1877';\n" + + "KW1878 : 'KW' '1878';\n" + + "KW1879 : 'KW' '1879';\n" + + "KW1880 : 'KW' '1880';\n" + + "KW1881 : 'KW' '1881';\n" + + "KW1882 : 'KW' '1882';\n" + + "KW1883 : 'KW' '1883';\n" + + "KW1884 : 'KW' '1884';\n" + + "KW1885 : 'KW' '1885';\n" + + "KW1886 : 'KW' '1886';\n" + + "KW1887 : 'KW' '1887';\n" + + "KW1888 : 'KW' '1888';\n" + + "KW1889 : 'KW' '1889';\n" + + "KW1890 : 'KW' '1890';\n" + + "KW1891 : 'KW' '1891';\n" + + "KW1892 : 'KW' '1892';\n" + + "KW1893 : 'KW' '1893';\n" + + "KW1894 : 'KW' '1894';\n" + + "KW1895 : 'KW' '1895';\n" + + "KW1896 : 'KW' '1896';\n" + + "KW1897 : 'KW' '1897';\n" + + "KW1898 : 'KW' '1898';\n" + + "KW1899 : 'KW' '1899';\n" + + "KW1900 : 'KW' '1900';\n" + + "KW1901 : 'KW' '1901';\n" + + "KW1902 : 'KW' '1902';\n" + + "KW1903 : 'KW' '1903';\n" + + "KW1904 : 'KW' '1904';\n" + + "KW1905 : 'KW' '1905';\n" + + "KW1906 : 'KW' '1906';\n" + + "KW1907 : 'KW' '1907';\n" + + "KW1908 : 'KW' '1908';\n" + + "KW1909 : 'KW' '1909';\n" + + "KW1910 : 'KW' '1910';\n" + + "KW1911 : 'KW' '1911';\n" + + "KW1912 : 'KW' '1912';\n" + + "KW1913 : 'KW' '1913';\n" + + "KW1914 : 'KW' '1914';\n" + + "KW1915 : 'KW' '1915';\n" + + "KW1916 : 'KW' '1916';\n" + + "KW1917 : 'KW' '1917';\n" + + "KW1918 : 'KW' '1918';\n" + + "KW1919 : 'KW' '1919';\n" + + "KW1920 : 'KW' '1920';\n" + + "KW1921 : 'KW' '1921';\n" + + "KW1922 : 'KW' '1922';\n" + + "KW1923 : 'KW' '1923';\n" + + "KW1924 : 'KW' '1924';\n" + + "KW1925 : 'KW' '1925';\n" + + "KW1926 : 'KW' '1926';\n" + + "KW1927 : 'KW' '1927';\n" + + "KW1928 : 'KW' '1928';\n" + + "KW1929 : 'KW' '1929';\n" + + "KW1930 : 'KW' '1930';\n" + + "KW1931 : 'KW' '1931';\n" + + "KW1932 : 'KW' '1932';\n" + + "KW1933 : 'KW' '1933';\n" + + "KW1934 : 'KW' '1934';\n" + + "KW1935 : 'KW' '1935';\n" + + 
"KW1936 : 'KW' '1936';\n" + + "KW1937 : 'KW' '1937';\n" + + "KW1938 : 'KW' '1938';\n" + + "KW1939 : 'KW' '1939';\n" + + "KW1940 : 'KW' '1940';\n" + + "KW1941 : 'KW' '1941';\n" + + "KW1942 : 'KW' '1942';\n" + + "KW1943 : 'KW' '1943';\n" + + "KW1944 : 'KW' '1944';\n" + + "KW1945 : 'KW' '1945';\n" + + "KW1946 : 'KW' '1946';\n" + + "KW1947 : 'KW' '1947';\n" + + "KW1948 : 'KW' '1948';\n" + + "KW1949 : 'KW' '1949';\n" + + "KW1950 : 'KW' '1950';\n" + + "KW1951 : 'KW' '1951';\n" + + "KW1952 : 'KW' '1952';\n" + + "KW1953 : 'KW' '1953';\n" + + "KW1954 : 'KW' '1954';\n" + + "KW1955 : 'KW' '1955';\n" + + "KW1956 : 'KW' '1956';\n" + + "KW1957 : 'KW' '1957';\n" + + "KW1958 : 'KW' '1958';\n" + + "KW1959 : 'KW' '1959';\n" + + "KW1960 : 'KW' '1960';\n" + + "KW1961 : 'KW' '1961';\n" + + "KW1962 : 'KW' '1962';\n" + + "KW1963 : 'KW' '1963';\n" + + "KW1964 : 'KW' '1964';\n" + + "KW1965 : 'KW' '1965';\n" + + "KW1966 : 'KW' '1966';\n" + + "KW1967 : 'KW' '1967';\n" + + "KW1968 : 'KW' '1968';\n" + + "KW1969 : 'KW' '1969';\n" + + "KW1970 : 'KW' '1970';\n" + + "KW1971 : 'KW' '1971';\n" + + "KW1972 : 'KW' '1972';\n" + + "KW1973 : 'KW' '1973';\n" + + "KW1974 : 'KW' '1974';\n" + + "KW1975 : 'KW' '1975';\n" + + "KW1976 : 'KW' '1976';\n" + + "KW1977 : 'KW' '1977';\n" + + "KW1978 : 'KW' '1978';\n" + + "KW1979 : 'KW' '1979';\n" + + "KW1980 : 'KW' '1980';\n" + + "KW1981 : 'KW' '1981';\n" + + "KW1982 : 'KW' '1982';\n" + + "KW1983 : 'KW' '1983';\n" + + "KW1984 : 'KW' '1984';\n" + + "KW1985 : 'KW' '1985';\n" + + "KW1986 : 'KW' '1986';\n" + + "KW1987 : 'KW' '1987';\n" + + "KW1988 : 'KW' '1988';\n" + + "KW1989 : 'KW' '1989';\n" + + "KW1990 : 'KW' '1990';\n" + + "KW1991 : 'KW' '1991';\n" + + "KW1992 : 'KW' '1992';\n" + + "KW1993 : 'KW' '1993';\n" + + "KW1994 : 'KW' '1994';\n" + + "KW1995 : 'KW' '1995';\n" + + "KW1996 : 'KW' '1996';\n" + + "KW1997 : 'KW' '1997';\n" + + "KW1998 : 'KW' '1998';\n" + + "KW1999 : 'KW' '1999';\n" + + "KW2000 : 'KW' '2000';\n" + + "KW2001 : 'KW' '2001';\n" + + "KW2002 : 'KW' '2002';\n" + + "KW2003 : 'KW' '2003';\n" + + "KW2004 : 'KW' '2004';\n" + + "KW2005 : 'KW' '2005';\n" + + "KW2006 : 'KW' '2006';\n" + + "KW2007 : 'KW' '2007';\n" + + "KW2008 : 'KW' '2008';\n" + + "KW2009 : 'KW' '2009';\n" + + "KW2010 : 'KW' '2010';\n" + + "KW2011 : 'KW' '2011';\n" + + "KW2012 : 'KW' '2012';\n" + + "KW2013 : 'KW' '2013';\n" + + "KW2014 : 'KW' '2014';\n" + + "KW2015 : 'KW' '2015';\n" + + "KW2016 : 'KW' '2016';\n" + + "KW2017 : 'KW' '2017';\n" + + "KW2018 : 'KW' '2018';\n" + + "KW2019 : 'KW' '2019';\n" + + "KW2020 : 'KW' '2020';\n" + + "KW2021 : 'KW' '2021';\n" + + "KW2022 : 'KW' '2022';\n" + + "KW2023 : 'KW' '2023';\n" + + "KW2024 : 'KW' '2024';\n" + + "KW2025 : 'KW' '2025';\n" + + "KW2026 : 'KW' '2026';\n" + + "KW2027 : 'KW' '2027';\n" + + "KW2028 : 'KW' '2028';\n" + + "KW2029 : 'KW' '2029';\n" + + "KW2030 : 'KW' '2030';\n" + + "KW2031 : 'KW' '2031';\n" + + "KW2032 : 'KW' '2032';\n" + + "KW2033 : 'KW' '2033';\n" + + "KW2034 : 'KW' '2034';\n" + + "KW2035 : 'KW' '2035';\n" + + "KW2036 : 'KW' '2036';\n" + + "KW2037 : 'KW' '2037';\n" + + "KW2038 : 'KW' '2038';\n" + + "KW2039 : 'KW' '2039';\n" + + "KW2040 : 'KW' '2040';\n" + + "KW2041 : 'KW' '2041';\n" + + "KW2042 : 'KW' '2042';\n" + + "KW2043 : 'KW' '2043';\n" + + "KW2044 : 'KW' '2044';\n" + + "KW2045 : 'KW' '2045';\n" + + "KW2046 : 'KW' '2046';\n" + + "KW2047 : 'KW' '2047';\n" + + "KW2048 : 'KW' '2048';\n" + + "KW2049 : 'KW' '2049';\n" + + "KW2050 : 'KW' '2050';\n" + + "KW2051 : 'KW' '2051';\n" + + "KW2052 : 'KW' '2052';\n" + + "KW2053 : 'KW' '2053';\n" + + "KW2054 : 'KW' 
'2054';\n" + + "KW2055 : 'KW' '2055';\n" + + "KW2056 : 'KW' '2056';\n" + + "KW2057 : 'KW' '2057';\n" + + "KW2058 : 'KW' '2058';\n" + + "KW2059 : 'KW' '2059';\n" + + "KW2060 : 'KW' '2060';\n" + + "KW2061 : 'KW' '2061';\n" + + "KW2062 : 'KW' '2062';\n" + + "KW2063 : 'KW' '2063';\n" + + "KW2064 : 'KW' '2064';\n" + + "KW2065 : 'KW' '2065';\n" + + "KW2066 : 'KW' '2066';\n" + + "KW2067 : 'KW' '2067';\n" + + "KW2068 : 'KW' '2068';\n" + + "KW2069 : 'KW' '2069';\n" + + "KW2070 : 'KW' '2070';\n" + + "KW2071 : 'KW' '2071';\n" + + "KW2072 : 'KW' '2072';\n" + + "KW2073 : 'KW' '2073';\n" + + "KW2074 : 'KW' '2074';\n" + + "KW2075 : 'KW' '2075';\n" + + "KW2076 : 'KW' '2076';\n" + + "KW2077 : 'KW' '2077';\n" + + "KW2078 : 'KW' '2078';\n" + + "KW2079 : 'KW' '2079';\n" + + "KW2080 : 'KW' '2080';\n" + + "KW2081 : 'KW' '2081';\n" + + "KW2082 : 'KW' '2082';\n" + + "KW2083 : 'KW' '2083';\n" + + "KW2084 : 'KW' '2084';\n" + + "KW2085 : 'KW' '2085';\n" + + "KW2086 : 'KW' '2086';\n" + + "KW2087 : 'KW' '2087';\n" + + "KW2088 : 'KW' '2088';\n" + + "KW2089 : 'KW' '2089';\n" + + "KW2090 : 'KW' '2090';\n" + + "KW2091 : 'KW' '2091';\n" + + "KW2092 : 'KW' '2092';\n" + + "KW2093 : 'KW' '2093';\n" + + "KW2094 : 'KW' '2094';\n" + + "KW2095 : 'KW' '2095';\n" + + "KW2096 : 'KW' '2096';\n" + + "KW2097 : 'KW' '2097';\n" + + "KW2098 : 'KW' '2098';\n" + + "KW2099 : 'KW' '2099';\n" + + "KW2100 : 'KW' '2100';\n" + + "KW2101 : 'KW' '2101';\n" + + "KW2102 : 'KW' '2102';\n" + + "KW2103 : 'KW' '2103';\n" + + "KW2104 : 'KW' '2104';\n" + + "KW2105 : 'KW' '2105';\n" + + "KW2106 : 'KW' '2106';\n" + + "KW2107 : 'KW' '2107';\n" + + "KW2108 : 'KW' '2108';\n" + + "KW2109 : 'KW' '2109';\n" + + "KW2110 : 'KW' '2110';\n" + + "KW2111 : 'KW' '2111';\n" + + "KW2112 : 'KW' '2112';\n" + + "KW2113 : 'KW' '2113';\n" + + "KW2114 : 'KW' '2114';\n" + + "KW2115 : 'KW' '2115';\n" + + "KW2116 : 'KW' '2116';\n" + + "KW2117 : 'KW' '2117';\n" + + "KW2118 : 'KW' '2118';\n" + + "KW2119 : 'KW' '2119';\n" + + "KW2120 : 'KW' '2120';\n" + + "KW2121 : 'KW' '2121';\n" + + "KW2122 : 'KW' '2122';\n" + + "KW2123 : 'KW' '2123';\n" + + "KW2124 : 'KW' '2124';\n" + + "KW2125 : 'KW' '2125';\n" + + "KW2126 : 'KW' '2126';\n" + + "KW2127 : 'KW' '2127';\n" + + "KW2128 : 'KW' '2128';\n" + + "KW2129 : 'KW' '2129';\n" + + "KW2130 : 'KW' '2130';\n" + + "KW2131 : 'KW' '2131';\n" + + "KW2132 : 'KW' '2132';\n" + + "KW2133 : 'KW' '2133';\n" + + "KW2134 : 'KW' '2134';\n" + + "KW2135 : 'KW' '2135';\n" + + "KW2136 : 'KW' '2136';\n" + + "KW2137 : 'KW' '2137';\n" + + "KW2138 : 'KW' '2138';\n" + + "KW2139 : 'KW' '2139';\n" + + "KW2140 : 'KW' '2140';\n" + + "KW2141 : 'KW' '2141';\n" + + "KW2142 : 'KW' '2142';\n" + + "KW2143 : 'KW' '2143';\n" + + "KW2144 : 'KW' '2144';\n" + + "KW2145 : 'KW' '2145';\n" + + "KW2146 : 'KW' '2146';\n" + + "KW2147 : 'KW' '2147';\n" + + "KW2148 : 'KW' '2148';\n" + + "KW2149 : 'KW' '2149';\n" + + "KW2150 : 'KW' '2150';\n" + + "KW2151 : 'KW' '2151';\n" + + "KW2152 : 'KW' '2152';\n" + + "KW2153 : 'KW' '2153';\n" + + "KW2154 : 'KW' '2154';\n" + + "KW2155 : 'KW' '2155';\n" + + "KW2156 : 'KW' '2156';\n" + + "KW2157 : 'KW' '2157';\n" + + "KW2158 : 'KW' '2158';\n" + + "KW2159 : 'KW' '2159';\n" + + "KW2160 : 'KW' '2160';\n" + + "KW2161 : 'KW' '2161';\n" + + "KW2162 : 'KW' '2162';\n" + + "KW2163 : 'KW' '2163';\n" + + "KW2164 : 'KW' '2164';\n" + + "KW2165 : 'KW' '2165';\n" + + "KW2166 : 'KW' '2166';\n" + + "KW2167 : 'KW' '2167';\n" + + "KW2168 : 'KW' '2168';\n" + + "KW2169 : 'KW' '2169';\n" + + "KW2170 : 'KW' '2170';\n" + + "KW2171 : 'KW' '2171';\n" + + "KW2172 : 'KW' '2172';\n" + + 
"KW2173 : 'KW' '2173';\n" + + "KW2174 : 'KW' '2174';\n" + + "KW2175 : 'KW' '2175';\n" + + "KW2176 : 'KW' '2176';\n" + + "KW2177 : 'KW' '2177';\n" + + "KW2178 : 'KW' '2178';\n" + + "KW2179 : 'KW' '2179';\n" + + "KW2180 : 'KW' '2180';\n" + + "KW2181 : 'KW' '2181';\n" + + "KW2182 : 'KW' '2182';\n" + + "KW2183 : 'KW' '2183';\n" + + "KW2184 : 'KW' '2184';\n" + + "KW2185 : 'KW' '2185';\n" + + "KW2186 : 'KW' '2186';\n" + + "KW2187 : 'KW' '2187';\n" + + "KW2188 : 'KW' '2188';\n" + + "KW2189 : 'KW' '2189';\n" + + "KW2190 : 'KW' '2190';\n" + + "KW2191 : 'KW' '2191';\n" + + "KW2192 : 'KW' '2192';\n" + + "KW2193 : 'KW' '2193';\n" + + "KW2194 : 'KW' '2194';\n" + + "KW2195 : 'KW' '2195';\n" + + "KW2196 : 'KW' '2196';\n" + + "KW2197 : 'KW' '2197';\n" + + "KW2198 : 'KW' '2198';\n" + + "KW2199 : 'KW' '2199';\n" + + "KW2200 : 'KW' '2200';\n" + + "KW2201 : 'KW' '2201';\n" + + "KW2202 : 'KW' '2202';\n" + + "KW2203 : 'KW' '2203';\n" + + "KW2204 : 'KW' '2204';\n" + + "KW2205 : 'KW' '2205';\n" + + "KW2206 : 'KW' '2206';\n" + + "KW2207 : 'KW' '2207';\n" + + "KW2208 : 'KW' '2208';\n" + + "KW2209 : 'KW' '2209';\n" + + "KW2210 : 'KW' '2210';\n" + + "KW2211 : 'KW' '2211';\n" + + "KW2212 : 'KW' '2212';\n" + + "KW2213 : 'KW' '2213';\n" + + "KW2214 : 'KW' '2214';\n" + + "KW2215 : 'KW' '2215';\n" + + "KW2216 : 'KW' '2216';\n" + + "KW2217 : 'KW' '2217';\n" + + "KW2218 : 'KW' '2218';\n" + + "KW2219 : 'KW' '2219';\n" + + "KW2220 : 'KW' '2220';\n" + + "KW2221 : 'KW' '2221';\n" + + "KW2222 : 'KW' '2222';\n" + + "KW2223 : 'KW' '2223';\n" + + "KW2224 : 'KW' '2224';\n" + + "KW2225 : 'KW' '2225';\n" + + "KW2226 : 'KW' '2226';\n" + + "KW2227 : 'KW' '2227';\n" + + "KW2228 : 'KW' '2228';\n" + + "KW2229 : 'KW' '2229';\n" + + "KW2230 : 'KW' '2230';\n" + + "KW2231 : 'KW' '2231';\n" + + "KW2232 : 'KW' '2232';\n" + + "KW2233 : 'KW' '2233';\n" + + "KW2234 : 'KW' '2234';\n" + + "KW2235 : 'KW' '2235';\n" + + "KW2236 : 'KW' '2236';\n" + + "KW2237 : 'KW' '2237';\n" + + "KW2238 : 'KW' '2238';\n" + + "KW2239 : 'KW' '2239';\n" + + "KW2240 : 'KW' '2240';\n" + + "KW2241 : 'KW' '2241';\n" + + "KW2242 : 'KW' '2242';\n" + + "KW2243 : 'KW' '2243';\n" + + "KW2244 : 'KW' '2244';\n" + + "KW2245 : 'KW' '2245';\n" + + "KW2246 : 'KW' '2246';\n" + + "KW2247 : 'KW' '2247';\n" + + "KW2248 : 'KW' '2248';\n" + + "KW2249 : 'KW' '2249';\n" + + "KW2250 : 'KW' '2250';\n" + + "KW2251 : 'KW' '2251';\n" + + "KW2252 : 'KW' '2252';\n" + + "KW2253 : 'KW' '2253';\n" + + "KW2254 : 'KW' '2254';\n" + + "KW2255 : 'KW' '2255';\n" + + "KW2256 : 'KW' '2256';\n" + + "KW2257 : 'KW' '2257';\n" + + "KW2258 : 'KW' '2258';\n" + + "KW2259 : 'KW' '2259';\n" + + "KW2260 : 'KW' '2260';\n" + + "KW2261 : 'KW' '2261';\n" + + "KW2262 : 'KW' '2262';\n" + + "KW2263 : 'KW' '2263';\n" + + "KW2264 : 'KW' '2264';\n" + + "KW2265 : 'KW' '2265';\n" + + "KW2266 : 'KW' '2266';\n" + + "KW2267 : 'KW' '2267';\n" + + "KW2268 : 'KW' '2268';\n" + + "KW2269 : 'KW' '2269';\n" + + "KW2270 : 'KW' '2270';\n" + + "KW2271 : 'KW' '2271';\n" + + "KW2272 : 'KW' '2272';\n" + + "KW2273 : 'KW' '2273';\n" + + "KW2274 : 'KW' '2274';\n" + + "KW2275 : 'KW' '2275';\n" + + "KW2276 : 'KW' '2276';\n" + + "KW2277 : 'KW' '2277';\n" + + "KW2278 : 'KW' '2278';\n" + + "KW2279 : 'KW' '2279';\n" + + "KW2280 : 'KW' '2280';\n" + + "KW2281 : 'KW' '2281';\n" + + "KW2282 : 'KW' '2282';\n" + + "KW2283 : 'KW' '2283';\n" + + "KW2284 : 'KW' '2284';\n" + + "KW2285 : 'KW' '2285';\n" + + "KW2286 : 'KW' '2286';\n" + + "KW2287 : 'KW' '2287';\n" + + "KW2288 : 'KW' '2288';\n" + + "KW2289 : 'KW' '2289';\n" + + "KW2290 : 'KW' '2290';\n" + + "KW2291 : 'KW' 
'2291';\n" + + "KW2292 : 'KW' '2292';\n" + + "KW2293 : 'KW' '2293';\n" + + "KW2294 : 'KW' '2294';\n" + + "KW2295 : 'KW' '2295';\n" + + "KW2296 : 'KW' '2296';\n" + + "KW2297 : 'KW' '2297';\n" + + "KW2298 : 'KW' '2298';\n" + + "KW2299 : 'KW' '2299';\n" + + "KW2300 : 'KW' '2300';\n" + + "KW2301 : 'KW' '2301';\n" + + "KW2302 : 'KW' '2302';\n" + + "KW2303 : 'KW' '2303';\n" + + "KW2304 : 'KW' '2304';\n" + + "KW2305 : 'KW' '2305';\n" + + "KW2306 : 'KW' '2306';\n" + + "KW2307 : 'KW' '2307';\n" + + "KW2308 : 'KW' '2308';\n" + + "KW2309 : 'KW' '2309';\n" + + "KW2310 : 'KW' '2310';\n" + + "KW2311 : 'KW' '2311';\n" + + "KW2312 : 'KW' '2312';\n" + + "KW2313 : 'KW' '2313';\n" + + "KW2314 : 'KW' '2314';\n" + + "KW2315 : 'KW' '2315';\n" + + "KW2316 : 'KW' '2316';\n" + + "KW2317 : 'KW' '2317';\n" + + "KW2318 : 'KW' '2318';\n" + + "KW2319 : 'KW' '2319';\n" + + "KW2320 : 'KW' '2320';\n" + + "KW2321 : 'KW' '2321';\n" + + "KW2322 : 'KW' '2322';\n" + + "KW2323 : 'KW' '2323';\n" + + "KW2324 : 'KW' '2324';\n" + + "KW2325 : 'KW' '2325';\n" + + "KW2326 : 'KW' '2326';\n" + + "KW2327 : 'KW' '2327';\n" + + "KW2328 : 'KW' '2328';\n" + + "KW2329 : 'KW' '2329';\n" + + "KW2330 : 'KW' '2330';\n" + + "KW2331 : 'KW' '2331';\n" + + "KW2332 : 'KW' '2332';\n" + + "KW2333 : 'KW' '2333';\n" + + "KW2334 : 'KW' '2334';\n" + + "KW2335 : 'KW' '2335';\n" + + "KW2336 : 'KW' '2336';\n" + + "KW2337 : 'KW' '2337';\n" + + "KW2338 : 'KW' '2338';\n" + + "KW2339 : 'KW' '2339';\n" + + "KW2340 : 'KW' '2340';\n" + + "KW2341 : 'KW' '2341';\n" + + "KW2342 : 'KW' '2342';\n" + + "KW2343 : 'KW' '2343';\n" + + "KW2344 : 'KW' '2344';\n" + + "KW2345 : 'KW' '2345';\n" + + "KW2346 : 'KW' '2346';\n" + + "KW2347 : 'KW' '2347';\n" + + "KW2348 : 'KW' '2348';\n" + + "KW2349 : 'KW' '2349';\n" + + "KW2350 : 'KW' '2350';\n" + + "KW2351 : 'KW' '2351';\n" + + "KW2352 : 'KW' '2352';\n" + + "KW2353 : 'KW' '2353';\n" + + "KW2354 : 'KW' '2354';\n" + + "KW2355 : 'KW' '2355';\n" + + "KW2356 : 'KW' '2356';\n" + + "KW2357 : 'KW' '2357';\n" + + "KW2358 : 'KW' '2358';\n" + + "KW2359 : 'KW' '2359';\n" + + "KW2360 : 'KW' '2360';\n" + + "KW2361 : 'KW' '2361';\n" + + "KW2362 : 'KW' '2362';\n" + + "KW2363 : 'KW' '2363';\n" + + "KW2364 : 'KW' '2364';\n" + + "KW2365 : 'KW' '2365';\n" + + "KW2366 : 'KW' '2366';\n" + + "KW2367 : 'KW' '2367';\n" + + "KW2368 : 'KW' '2368';\n" + + "KW2369 : 'KW' '2369';\n" + + "KW2370 : 'KW' '2370';\n" + + "KW2371 : 'KW' '2371';\n" + + "KW2372 : 'KW' '2372';\n" + + "KW2373 : 'KW' '2373';\n" + + "KW2374 : 'KW' '2374';\n" + + "KW2375 : 'KW' '2375';\n" + + "KW2376 : 'KW' '2376';\n" + + "KW2377 : 'KW' '2377';\n" + + "KW2378 : 'KW' '2378';\n" + + "KW2379 : 'KW' '2379';\n" + + "KW2380 : 'KW' '2380';\n" + + "KW2381 : 'KW' '2381';\n" + + "KW2382 : 'KW' '2382';\n" + + "KW2383 : 'KW' '2383';\n" + + "KW2384 : 'KW' '2384';\n" + + "KW2385 : 'KW' '2385';\n" + + "KW2386 : 'KW' '2386';\n" + + "KW2387 : 'KW' '2387';\n" + + "KW2388 : 'KW' '2388';\n" + + "KW2389 : 'KW' '2389';\n" + + "KW2390 : 'KW' '2390';\n" + + "KW2391 : 'KW' '2391';\n" + + "KW2392 : 'KW' '2392';\n" + + "KW2393 : 'KW' '2393';\n" + + "KW2394 : 'KW' '2394';\n" + + "KW2395 : 'KW' '2395';\n" + + "KW2396 : 'KW' '2396';\n" + + "KW2397 : 'KW' '2397';\n" + + "KW2398 : 'KW' '2398';\n" + + "KW2399 : 'KW' '2399';\n" + + "KW2400 : 'KW' '2400';\n" + + "KW2401 : 'KW' '2401';\n" + + "KW2402 : 'KW' '2402';\n" + + "KW2403 : 'KW' '2403';\n" + + "KW2404 : 'KW' '2404';\n" + + "KW2405 : 'KW' '2405';\n" + + "KW2406 : 'KW' '2406';\n" + + "KW2407 : 'KW' '2407';\n" + + "KW2408 : 'KW' '2408';\n" + + "KW2409 : 'KW' '2409';\n" + + 
"KW2410 : 'KW' '2410';\n" + + "KW2411 : 'KW' '2411';\n" + + "KW2412 : 'KW' '2412';\n" + + "KW2413 : 'KW' '2413';\n" + + "KW2414 : 'KW' '2414';\n" + + "KW2415 : 'KW' '2415';\n" + + "KW2416 : 'KW' '2416';\n" + + "KW2417 : 'KW' '2417';\n" + + "KW2418 : 'KW' '2418';\n" + + "KW2419 : 'KW' '2419';\n" + + "KW2420 : 'KW' '2420';\n" + + "KW2421 : 'KW' '2421';\n" + + "KW2422 : 'KW' '2422';\n" + + "KW2423 : 'KW' '2423';\n" + + "KW2424 : 'KW' '2424';\n" + + "KW2425 : 'KW' '2425';\n" + + "KW2426 : 'KW' '2426';\n" + + "KW2427 : 'KW' '2427';\n" + + "KW2428 : 'KW' '2428';\n" + + "KW2429 : 'KW' '2429';\n" + + "KW2430 : 'KW' '2430';\n" + + "KW2431 : 'KW' '2431';\n" + + "KW2432 : 'KW' '2432';\n" + + "KW2433 : 'KW' '2433';\n" + + "KW2434 : 'KW' '2434';\n" + + "KW2435 : 'KW' '2435';\n" + + "KW2436 : 'KW' '2436';\n" + + "KW2437 : 'KW' '2437';\n" + + "KW2438 : 'KW' '2438';\n" + + "KW2439 : 'KW' '2439';\n" + + "KW2440 : 'KW' '2440';\n" + + "KW2441 : 'KW' '2441';\n" + + "KW2442 : 'KW' '2442';\n" + + "KW2443 : 'KW' '2443';\n" + + "KW2444 : 'KW' '2444';\n" + + "KW2445 : 'KW' '2445';\n" + + "KW2446 : 'KW' '2446';\n" + + "KW2447 : 'KW' '2447';\n" + + "KW2448 : 'KW' '2448';\n" + + "KW2449 : 'KW' '2449';\n" + + "KW2450 : 'KW' '2450';\n" + + "KW2451 : 'KW' '2451';\n" + + "KW2452 : 'KW' '2452';\n" + + "KW2453 : 'KW' '2453';\n" + + "KW2454 : 'KW' '2454';\n" + + "KW2455 : 'KW' '2455';\n" + + "KW2456 : 'KW' '2456';\n" + + "KW2457 : 'KW' '2457';\n" + + "KW2458 : 'KW' '2458';\n" + + "KW2459 : 'KW' '2459';\n" + + "KW2460 : 'KW' '2460';\n" + + "KW2461 : 'KW' '2461';\n" + + "KW2462 : 'KW' '2462';\n" + + "KW2463 : 'KW' '2463';\n" + + "KW2464 : 'KW' '2464';\n" + + "KW2465 : 'KW' '2465';\n" + + "KW2466 : 'KW' '2466';\n" + + "KW2467 : 'KW' '2467';\n" + + "KW2468 : 'KW' '2468';\n" + + "KW2469 : 'KW' '2469';\n" + + "KW2470 : 'KW' '2470';\n" + + "KW2471 : 'KW' '2471';\n" + + "KW2472 : 'KW' '2472';\n" + + "KW2473 : 'KW' '2473';\n" + + "KW2474 : 'KW' '2474';\n" + + "KW2475 : 'KW' '2475';\n" + + "KW2476 : 'KW' '2476';\n" + + "KW2477 : 'KW' '2477';\n" + + "KW2478 : 'KW' '2478';\n" + + "KW2479 : 'KW' '2479';\n" + + "KW2480 : 'KW' '2480';\n" + + "KW2481 : 'KW' '2481';\n" + + "KW2482 : 'KW' '2482';\n" + + "KW2483 : 'KW' '2483';\n" + + "KW2484 : 'KW' '2484';\n" + + "KW2485 : 'KW' '2485';\n" + + "KW2486 : 'KW' '2486';\n" + + "KW2487 : 'KW' '2487';\n" + + "KW2488 : 'KW' '2488';\n" + + "KW2489 : 'KW' '2489';\n" + + "KW2490 : 'KW' '2490';\n" + + "KW2491 : 'KW' '2491';\n" + + "KW2492 : 'KW' '2492';\n" + + "KW2493 : 'KW' '2493';\n" + + "KW2494 : 'KW' '2494';\n" + + "KW2495 : 'KW' '2495';\n" + + "KW2496 : 'KW' '2496';\n" + + "KW2497 : 'KW' '2497';\n" + + "KW2498 : 'KW' '2498';\n" + + "KW2499 : 'KW' '2499';\n" + + "KW2500 : 'KW' '2500';\n" + + "KW2501 : 'KW' '2501';\n" + + "KW2502 : 'KW' '2502';\n" + + "KW2503 : 'KW' '2503';\n" + + "KW2504 : 'KW' '2504';\n" + + "KW2505 : 'KW' '2505';\n" + + "KW2506 : 'KW' '2506';\n" + + "KW2507 : 'KW' '2507';\n" + + "KW2508 : 'KW' '2508';\n" + + "KW2509 : 'KW' '2509';\n" + + "KW2510 : 'KW' '2510';\n" + + "KW2511 : 'KW' '2511';\n" + + "KW2512 : 'KW' '2512';\n" + + "KW2513 : 'KW' '2513';\n" + + "KW2514 : 'KW' '2514';\n" + + "KW2515 : 'KW' '2515';\n" + + "KW2516 : 'KW' '2516';\n" + + "KW2517 : 'KW' '2517';\n" + + "KW2518 : 'KW' '2518';\n" + + "KW2519 : 'KW' '2519';\n" + + "KW2520 : 'KW' '2520';\n" + + "KW2521 : 'KW' '2521';\n" + + "KW2522 : 'KW' '2522';\n" + + "KW2523 : 'KW' '2523';\n" + + "KW2524 : 'KW' '2524';\n" + + "KW2525 : 'KW' '2525';\n" + + "KW2526 : 'KW' '2526';\n" + + "KW2527 : 'KW' '2527';\n" + + "KW2528 : 'KW' 
'2528';\n" + + "KW2529 : 'KW' '2529';\n" + + "KW2530 : 'KW' '2530';\n" + + "KW2531 : 'KW' '2531';\n" + + "KW2532 : 'KW' '2532';\n" + + "KW2533 : 'KW' '2533';\n" + + "KW2534 : 'KW' '2534';\n" + + "KW2535 : 'KW' '2535';\n" + + "KW2536 : 'KW' '2536';\n" + + "KW2537 : 'KW' '2537';\n" + + "KW2538 : 'KW' '2538';\n" + + "KW2539 : 'KW' '2539';\n" + + "KW2540 : 'KW' '2540';\n" + + "KW2541 : 'KW' '2541';\n" + + "KW2542 : 'KW' '2542';\n" + + "KW2543 : 'KW' '2543';\n" + + "KW2544 : 'KW' '2544';\n" + + "KW2545 : 'KW' '2545';\n" + + "KW2546 : 'KW' '2546';\n" + + "KW2547 : 'KW' '2547';\n" + + "KW2548 : 'KW' '2548';\n" + + "KW2549 : 'KW' '2549';\n" + + "KW2550 : 'KW' '2550';\n" + + "KW2551 : 'KW' '2551';\n" + + "KW2552 : 'KW' '2552';\n" + + "KW2553 : 'KW' '2553';\n" + + "KW2554 : 'KW' '2554';\n" + + "KW2555 : 'KW' '2555';\n" + + "KW2556 : 'KW' '2556';\n" + + "KW2557 : 'KW' '2557';\n" + + "KW2558 : 'KW' '2558';\n" + + "KW2559 : 'KW' '2559';\n" + + "KW2560 : 'KW' '2560';\n" + + "KW2561 : 'KW' '2561';\n" + + "KW2562 : 'KW' '2562';\n" + + "KW2563 : 'KW' '2563';\n" + + "KW2564 : 'KW' '2564';\n" + + "KW2565 : 'KW' '2565';\n" + + "KW2566 : 'KW' '2566';\n" + + "KW2567 : 'KW' '2567';\n" + + "KW2568 : 'KW' '2568';\n" + + "KW2569 : 'KW' '2569';\n" + + "KW2570 : 'KW' '2570';\n" + + "KW2571 : 'KW' '2571';\n" + + "KW2572 : 'KW' '2572';\n" + + "KW2573 : 'KW' '2573';\n" + + "KW2574 : 'KW' '2574';\n" + + "KW2575 : 'KW' '2575';\n" + + "KW2576 : 'KW' '2576';\n" + + "KW2577 : 'KW' '2577';\n" + + "KW2578 : 'KW' '2578';\n" + + "KW2579 : 'KW' '2579';\n" + + "KW2580 : 'KW' '2580';\n" + + "KW2581 : 'KW' '2581';\n" + + "KW2582 : 'KW' '2582';\n" + + "KW2583 : 'KW' '2583';\n" + + "KW2584 : 'KW' '2584';\n" + + "KW2585 : 'KW' '2585';\n" + + "KW2586 : 'KW' '2586';\n" + + "KW2587 : 'KW' '2587';\n" + + "KW2588 : 'KW' '2588';\n" + + "KW2589 : 'KW' '2589';\n" + + "KW2590 : 'KW' '2590';\n" + + "KW2591 : 'KW' '2591';\n" + + "KW2592 : 'KW' '2592';\n" + + "KW2593 : 'KW' '2593';\n" + + "KW2594 : 'KW' '2594';\n" + + "KW2595 : 'KW' '2595';\n" + + "KW2596 : 'KW' '2596';\n" + + "KW2597 : 'KW' '2597';\n" + + "KW2598 : 'KW' '2598';\n" + + "KW2599 : 'KW' '2599';\n" + + "KW2600 : 'KW' '2600';\n" + + "KW2601 : 'KW' '2601';\n" + + "KW2602 : 'KW' '2602';\n" + + "KW2603 : 'KW' '2603';\n" + + "KW2604 : 'KW' '2604';\n" + + "KW2605 : 'KW' '2605';\n" + + "KW2606 : 'KW' '2606';\n" + + "KW2607 : 'KW' '2607';\n" + + "KW2608 : 'KW' '2608';\n" + + "KW2609 : 'KW' '2609';\n" + + "KW2610 : 'KW' '2610';\n" + + "KW2611 : 'KW' '2611';\n" + + "KW2612 : 'KW' '2612';\n" + + "KW2613 : 'KW' '2613';\n" + + "KW2614 : 'KW' '2614';\n" + + "KW2615 : 'KW' '2615';\n" + + "KW2616 : 'KW' '2616';\n" + + "KW2617 : 'KW' '2617';\n" + + "KW2618 : 'KW' '2618';\n" + + "KW2619 : 'KW' '2619';\n" + + "KW2620 : 'KW' '2620';\n" + + "KW2621 : 'KW' '2621';\n" + + "KW2622 : 'KW' '2622';\n" + + "KW2623 : 'KW' '2623';\n" + + "KW2624 : 'KW' '2624';\n" + + "KW2625 : 'KW' '2625';\n" + + "KW2626 : 'KW' '2626';\n" + + "KW2627 : 'KW' '2627';\n" + + "KW2628 : 'KW' '2628';\n" + + "KW2629 : 'KW' '2629';\n" + + "KW2630 : 'KW' '2630';\n" + + "KW2631 : 'KW' '2631';\n" + + "KW2632 : 'KW' '2632';\n" + + "KW2633 : 'KW' '2633';\n" + + "KW2634 : 'KW' '2634';\n" + + "KW2635 : 'KW' '2635';\n" + + "KW2636 : 'KW' '2636';\n" + + "KW2637 : 'KW' '2637';\n" + + "KW2638 : 'KW' '2638';\n" + + "KW2639 : 'KW' '2639';\n" + + "KW2640 : 'KW' '2640';\n" + + "KW2641 : 'KW' '2641';\n" + + "KW2642 : 'KW' '2642';\n" + + "KW2643 : 'KW' '2643';\n" + + "KW2644 : 'KW' '2644';\n" + + "KW2645 : 'KW' '2645';\n" + + "KW2646 : 'KW' '2646';\n" + + 
"KW2647 : 'KW' '2647';\n" + + "KW2648 : 'KW' '2648';\n" + + "KW2649 : 'KW' '2649';\n" + + "KW2650 : 'KW' '2650';\n" + + "KW2651 : 'KW' '2651';\n" + + "KW2652 : 'KW' '2652';\n" + + "KW2653 : 'KW' '2653';\n" + + "KW2654 : 'KW' '2654';\n" + + "KW2655 : 'KW' '2655';\n" + + "KW2656 : 'KW' '2656';\n" + + "KW2657 : 'KW' '2657';\n" + + "KW2658 : 'KW' '2658';\n" + + "KW2659 : 'KW' '2659';\n" + + "KW2660 : 'KW' '2660';\n" + + "KW2661 : 'KW' '2661';\n" + + "KW2662 : 'KW' '2662';\n" + + "KW2663 : 'KW' '2663';\n" + + "KW2664 : 'KW' '2664';\n" + + "KW2665 : 'KW' '2665';\n" + + "KW2666 : 'KW' '2666';\n" + + "KW2667 : 'KW' '2667';\n" + + "KW2668 : 'KW' '2668';\n" + + "KW2669 : 'KW' '2669';\n" + + "KW2670 : 'KW' '2670';\n" + + "KW2671 : 'KW' '2671';\n" + + "KW2672 : 'KW' '2672';\n" + + "KW2673 : 'KW' '2673';\n" + + "KW2674 : 'KW' '2674';\n" + + "KW2675 : 'KW' '2675';\n" + + "KW2676 : 'KW' '2676';\n" + + "KW2677 : 'KW' '2677';\n" + + "KW2678 : 'KW' '2678';\n" + + "KW2679 : 'KW' '2679';\n" + + "KW2680 : 'KW' '2680';\n" + + "KW2681 : 'KW' '2681';\n" + + "KW2682 : 'KW' '2682';\n" + + "KW2683 : 'KW' '2683';\n" + + "KW2684 : 'KW' '2684';\n" + + "KW2685 : 'KW' '2685';\n" + + "KW2686 : 'KW' '2686';\n" + + "KW2687 : 'KW' '2687';\n" + + "KW2688 : 'KW' '2688';\n" + + "KW2689 : 'KW' '2689';\n" + + "KW2690 : 'KW' '2690';\n" + + "KW2691 : 'KW' '2691';\n" + + "KW2692 : 'KW' '2692';\n" + + "KW2693 : 'KW' '2693';\n" + + "KW2694 : 'KW' '2694';\n" + + "KW2695 : 'KW' '2695';\n" + + "KW2696 : 'KW' '2696';\n" + + "KW2697 : 'KW' '2697';\n" + + "KW2698 : 'KW' '2698';\n" + + "KW2699 : 'KW' '2699';\n" + + "KW2700 : 'KW' '2700';\n" + + "KW2701 : 'KW' '2701';\n" + + "KW2702 : 'KW' '2702';\n" + + "KW2703 : 'KW' '2703';\n" + + "KW2704 : 'KW' '2704';\n" + + "KW2705 : 'KW' '2705';\n" + + "KW2706 : 'KW' '2706';\n" + + "KW2707 : 'KW' '2707';\n" + + "KW2708 : 'KW' '2708';\n" + + "KW2709 : 'KW' '2709';\n" + + "KW2710 : 'KW' '2710';\n" + + "KW2711 : 'KW' '2711';\n" + + "KW2712 : 'KW' '2712';\n" + + "KW2713 : 'KW' '2713';\n" + + "KW2714 : 'KW' '2714';\n" + + "KW2715 : 'KW' '2715';\n" + + "KW2716 : 'KW' '2716';\n" + + "KW2717 : 'KW' '2717';\n" + + "KW2718 : 'KW' '2718';\n" + + "KW2719 : 'KW' '2719';\n" + + "KW2720 : 'KW' '2720';\n" + + "KW2721 : 'KW' '2721';\n" + + "KW2722 : 'KW' '2722';\n" + + "KW2723 : 'KW' '2723';\n" + + "KW2724 : 'KW' '2724';\n" + + "KW2725 : 'KW' '2725';\n" + + "KW2726 : 'KW' '2726';\n" + + "KW2727 : 'KW' '2727';\n" + + "KW2728 : 'KW' '2728';\n" + + "KW2729 : 'KW' '2729';\n" + + "KW2730 : 'KW' '2730';\n" + + "KW2731 : 'KW' '2731';\n" + + "KW2732 : 'KW' '2732';\n" + + "KW2733 : 'KW' '2733';\n" + + "KW2734 : 'KW' '2734';\n" + + "KW2735 : 'KW' '2735';\n" + + "KW2736 : 'KW' '2736';\n" + + "KW2737 : 'KW' '2737';\n" + + "KW2738 : 'KW' '2738';\n" + + "KW2739 : 'KW' '2739';\n" + + "KW2740 : 'KW' '2740';\n" + + "KW2741 : 'KW' '2741';\n" + + "KW2742 : 'KW' '2742';\n" + + "KW2743 : 'KW' '2743';\n" + + "KW2744 : 'KW' '2744';\n" + + "KW2745 : 'KW' '2745';\n" + + "KW2746 : 'KW' '2746';\n" + + "KW2747 : 'KW' '2747';\n" + + "KW2748 : 'KW' '2748';\n" + + "KW2749 : 'KW' '2749';\n" + + "KW2750 : 'KW' '2750';\n" + + "KW2751 : 'KW' '2751';\n" + + "KW2752 : 'KW' '2752';\n" + + "KW2753 : 'KW' '2753';\n" + + "KW2754 : 'KW' '2754';\n" + + "KW2755 : 'KW' '2755';\n" + + "KW2756 : 'KW' '2756';\n" + + "KW2757 : 'KW' '2757';\n" + + "KW2758 : 'KW' '2758';\n" + + "KW2759 : 'KW' '2759';\n" + + "KW2760 : 'KW' '2760';\n" + + "KW2761 : 'KW' '2761';\n" + + "KW2762 : 'KW' '2762';\n" + + "KW2763 : 'KW' '2763';\n" + + "KW2764 : 'KW' '2764';\n" + + "KW2765 : 'KW' 
'2765';\n" + + "KW2766 : 'KW' '2766';\n" + + "KW2767 : 'KW' '2767';\n" + + "KW2768 : 'KW' '2768';\n" + + "KW2769 : 'KW' '2769';\n" + + "KW2770 : 'KW' '2770';\n" + + "KW2771 : 'KW' '2771';\n" + + "KW2772 : 'KW' '2772';\n" + + "KW2773 : 'KW' '2773';\n" + + "KW2774 : 'KW' '2774';\n" + + "KW2775 : 'KW' '2775';\n" + + "KW2776 : 'KW' '2776';\n" + + "KW2777 : 'KW' '2777';\n" + + "KW2778 : 'KW' '2778';\n" + + "KW2779 : 'KW' '2779';\n" + + "KW2780 : 'KW' '2780';\n" + + "KW2781 : 'KW' '2781';\n" + + "KW2782 : 'KW' '2782';\n" + + "KW2783 : 'KW' '2783';\n" + + "KW2784 : 'KW' '2784';\n" + + "KW2785 : 'KW' '2785';\n" + + "KW2786 : 'KW' '2786';\n" + + "KW2787 : 'KW' '2787';\n" + + "KW2788 : 'KW' '2788';\n" + + "KW2789 : 'KW' '2789';\n" + + "KW2790 : 'KW' '2790';\n" + + "KW2791 : 'KW' '2791';\n" + + "KW2792 : 'KW' '2792';\n" + + "KW2793 : 'KW' '2793';\n" + + "KW2794 : 'KW' '2794';\n" + + "KW2795 : 'KW' '2795';\n" + + "KW2796 : 'KW' '2796';\n" + + "KW2797 : 'KW' '2797';\n" + + "KW2798 : 'KW' '2798';\n" + + "KW2799 : 'KW' '2799';\n" + + "KW2800 : 'KW' '2800';\n" + + "KW2801 : 'KW' '2801';\n" + + "KW2802 : 'KW' '2802';\n" + + "KW2803 : 'KW' '2803';\n" + + "KW2804 : 'KW' '2804';\n" + + "KW2805 : 'KW' '2805';\n" + + "KW2806 : 'KW' '2806';\n" + + "KW2807 : 'KW' '2807';\n" + + "KW2808 : 'KW' '2808';\n" + + "KW2809 : 'KW' '2809';\n" + + "KW2810 : 'KW' '2810';\n" + + "KW2811 : 'KW' '2811';\n" + + "KW2812 : 'KW' '2812';\n" + + "KW2813 : 'KW' '2813';\n" + + "KW2814 : 'KW' '2814';\n" + + "KW2815 : 'KW' '2815';\n" + + "KW2816 : 'KW' '2816';\n" + + "KW2817 : 'KW' '2817';\n" + + "KW2818 : 'KW' '2818';\n" + + "KW2819 : 'KW' '2819';\n" + + "KW2820 : 'KW' '2820';\n" + + "KW2821 : 'KW' '2821';\n" + + "KW2822 : 'KW' '2822';\n" + + "KW2823 : 'KW' '2823';\n" + + "KW2824 : 'KW' '2824';\n" + + "KW2825 : 'KW' '2825';\n" + + "KW2826 : 'KW' '2826';\n" + + "KW2827 : 'KW' '2827';\n" + + "KW2828 : 'KW' '2828';\n" + + "KW2829 : 'KW' '2829';\n" + + "KW2830 : 'KW' '2830';\n" + + "KW2831 : 'KW' '2831';\n" + + "KW2832 : 'KW' '2832';\n" + + "KW2833 : 'KW' '2833';\n" + + "KW2834 : 'KW' '2834';\n" + + "KW2835 : 'KW' '2835';\n" + + "KW2836 : 'KW' '2836';\n" + + "KW2837 : 'KW' '2837';\n" + + "KW2838 : 'KW' '2838';\n" + + "KW2839 : 'KW' '2839';\n" + + "KW2840 : 'KW' '2840';\n" + + "KW2841 : 'KW' '2841';\n" + + "KW2842 : 'KW' '2842';\n" + + "KW2843 : 'KW' '2843';\n" + + "KW2844 : 'KW' '2844';\n" + + "KW2845 : 'KW' '2845';\n" + + "KW2846 : 'KW' '2846';\n" + + "KW2847 : 'KW' '2847';\n" + + "KW2848 : 'KW' '2848';\n" + + "KW2849 : 'KW' '2849';\n" + + "KW2850 : 'KW' '2850';\n" + + "KW2851 : 'KW' '2851';\n" + + "KW2852 : 'KW' '2852';\n" + + "KW2853 : 'KW' '2853';\n" + + "KW2854 : 'KW' '2854';\n" + + "KW2855 : 'KW' '2855';\n" + + "KW2856 : 'KW' '2856';\n" + + "KW2857 : 'KW' '2857';\n" + + "KW2858 : 'KW' '2858';\n" + + "KW2859 : 'KW' '2859';\n" + + "KW2860 : 'KW' '2860';\n" + + "KW2861 : 'KW' '2861';\n" + + "KW2862 : 'KW' '2862';\n" + + "KW2863 : 'KW' '2863';\n" + + "KW2864 : 'KW' '2864';\n" + + "KW2865 : 'KW' '2865';\n" + + "KW2866 : 'KW' '2866';\n" + + "KW2867 : 'KW' '2867';\n" + + "KW2868 : 'KW' '2868';\n" + + "KW2869 : 'KW' '2869';\n" + + "KW2870 : 'KW' '2870';\n" + + "KW2871 : 'KW' '2871';\n" + + "KW2872 : 'KW' '2872';\n" + + "KW2873 : 'KW' '2873';\n" + + "KW2874 : 'KW' '2874';\n" + + "KW2875 : 'KW' '2875';\n" + + "KW2876 : 'KW' '2876';\n" + + "KW2877 : 'KW' '2877';\n" + + "KW2878 : 'KW' '2878';\n" + + "KW2879 : 'KW' '2879';\n" + + "KW2880 : 'KW' '2880';\n" + + "KW2881 : 'KW' '2881';\n" + + "KW2882 : 'KW' '2882';\n" + + "KW2883 : 'KW' '2883';\n" + + 
"KW2884 : 'KW' '2884';\n" + + "KW2885 : 'KW' '2885';\n" + + "KW2886 : 'KW' '2886';\n" + + "KW2887 : 'KW' '2887';\n" + + "KW2888 : 'KW' '2888';\n" + + "KW2889 : 'KW' '2889';\n" + + "KW2890 : 'KW' '2890';\n" + + "KW2891 : 'KW' '2891';\n" + + "KW2892 : 'KW' '2892';\n" + + "KW2893 : 'KW' '2893';\n" + + "KW2894 : 'KW' '2894';\n" + + "KW2895 : 'KW' '2895';\n" + + "KW2896 : 'KW' '2896';\n" + + "KW2897 : 'KW' '2897';\n" + + "KW2898 : 'KW' '2898';\n" + + "KW2899 : 'KW' '2899';\n" + + "KW2900 : 'KW' '2900';\n" + + "KW2901 : 'KW' '2901';\n" + + "KW2902 : 'KW' '2902';\n" + + "KW2903 : 'KW' '2903';\n" + + "KW2904 : 'KW' '2904';\n" + + "KW2905 : 'KW' '2905';\n" + + "KW2906 : 'KW' '2906';\n" + + "KW2907 : 'KW' '2907';\n" + + "KW2908 : 'KW' '2908';\n" + + "KW2909 : 'KW' '2909';\n" + + "KW2910 : 'KW' '2910';\n" + + "KW2911 : 'KW' '2911';\n" + + "KW2912 : 'KW' '2912';\n" + + "KW2913 : 'KW' '2913';\n" + + "KW2914 : 'KW' '2914';\n" + + "KW2915 : 'KW' '2915';\n" + + "KW2916 : 'KW' '2916';\n" + + "KW2917 : 'KW' '2917';\n" + + "KW2918 : 'KW' '2918';\n" + + "KW2919 : 'KW' '2919';\n" + + "KW2920 : 'KW' '2920';\n" + + "KW2921 : 'KW' '2921';\n" + + "KW2922 : 'KW' '2922';\n" + + "KW2923 : 'KW' '2923';\n" + + "KW2924 : 'KW' '2924';\n" + + "KW2925 : 'KW' '2925';\n" + + "KW2926 : 'KW' '2926';\n" + + "KW2927 : 'KW' '2927';\n" + + "KW2928 : 'KW' '2928';\n" + + "KW2929 : 'KW' '2929';\n" + + "KW2930 : 'KW' '2930';\n" + + "KW2931 : 'KW' '2931';\n" + + "KW2932 : 'KW' '2932';\n" + + "KW2933 : 'KW' '2933';\n" + + "KW2934 : 'KW' '2934';\n" + + "KW2935 : 'KW' '2935';\n" + + "KW2936 : 'KW' '2936';\n" + + "KW2937 : 'KW' '2937';\n" + + "KW2938 : 'KW' '2938';\n" + + "KW2939 : 'KW' '2939';\n" + + "KW2940 : 'KW' '2940';\n" + + "KW2941 : 'KW' '2941';\n" + + "KW2942 : 'KW' '2942';\n" + + "KW2943 : 'KW' '2943';\n" + + "KW2944 : 'KW' '2944';\n" + + "KW2945 : 'KW' '2945';\n" + + "KW2946 : 'KW' '2946';\n" + + "KW2947 : 'KW' '2947';\n" + + "KW2948 : 'KW' '2948';\n" + + "KW2949 : 'KW' '2949';\n" + + "KW2950 : 'KW' '2950';\n" + + "KW2951 : 'KW' '2951';\n" + + "KW2952 : 'KW' '2952';\n" + + "KW2953 : 'KW' '2953';\n" + + "KW2954 : 'KW' '2954';\n" + + "KW2955 : 'KW' '2955';\n" + + "KW2956 : 'KW' '2956';\n" + + "KW2957 : 'KW' '2957';\n" + + "KW2958 : 'KW' '2958';\n" + + "KW2959 : 'KW' '2959';\n" + + "KW2960 : 'KW' '2960';\n" + + "KW2961 : 'KW' '2961';\n" + + "KW2962 : 'KW' '2962';\n" + + "KW2963 : 'KW' '2963';\n" + + "KW2964 : 'KW' '2964';\n" + + "KW2965 : 'KW' '2965';\n" + + "KW2966 : 'KW' '2966';\n" + + "KW2967 : 'KW' '2967';\n" + + "KW2968 : 'KW' '2968';\n" + + "KW2969 : 'KW' '2969';\n" + + "KW2970 : 'KW' '2970';\n" + + "KW2971 : 'KW' '2971';\n" + + "KW2972 : 'KW' '2972';\n" + + "KW2973 : 'KW' '2973';\n" + + "KW2974 : 'KW' '2974';\n" + + "KW2975 : 'KW' '2975';\n" + + "KW2976 : 'KW' '2976';\n" + + "KW2977 : 'KW' '2977';\n" + + "KW2978 : 'KW' '2978';\n" + + "KW2979 : 'KW' '2979';\n" + + "KW2980 : 'KW' '2980';\n" + + "KW2981 : 'KW' '2981';\n" + + "KW2982 : 'KW' '2982';\n" + + "KW2983 : 'KW' '2983';\n" + + "KW2984 : 'KW' '2984';\n" + + "KW2985 : 'KW' '2985';\n" + + "KW2986 : 'KW' '2986';\n" + + "KW2987 : 'KW' '2987';\n" + + "KW2988 : 'KW' '2988';\n" + + "KW2989 : 'KW' '2989';\n" + + "KW2990 : 'KW' '2990';\n" + + "KW2991 : 'KW' '2991';\n" + + "KW2992 : 'KW' '2992';\n" + + "KW2993 : 'KW' '2993';\n" + + "KW2994 : 'KW' '2994';\n" + + "KW2995 : 'KW' '2995';\n" + + "KW2996 : 'KW' '2996';\n" + + "KW2997 : 'KW' '2997';\n" + + "KW2998 : 'KW' '2998';\n" + + "KW2999 : 'KW' '2999';\n" + + "KW3000 : 'KW' '3000';\n" + + "KW3001 : 'KW' '3001';\n" + + "KW3002 : 'KW' 
'3002';\n" + + "KW3003 : 'KW' '3003';\n" + + "KW3004 : 'KW' '3004';\n" + + "KW3005 : 'KW' '3005';\n" + + "KW3006 : 'KW' '3006';\n" + + "KW3007 : 'KW' '3007';\n" + + "KW3008 : 'KW' '3008';\n" + + "KW3009 : 'KW' '3009';\n" + + "KW3010 : 'KW' '3010';\n" + + "KW3011 : 'KW' '3011';\n" + + "KW3012 : 'KW' '3012';\n" + + "KW3013 : 'KW' '3013';\n" + + "KW3014 : 'KW' '3014';\n" + + "KW3015 : 'KW' '3015';\n" + + "KW3016 : 'KW' '3016';\n" + + "KW3017 : 'KW' '3017';\n" + + "KW3018 : 'KW' '3018';\n" + + "KW3019 : 'KW' '3019';\n" + + "KW3020 : 'KW' '3020';\n" + + "KW3021 : 'KW' '3021';\n" + + "KW3022 : 'KW' '3022';\n" + + "KW3023 : 'KW' '3023';\n" + + "KW3024 : 'KW' '3024';\n" + + "KW3025 : 'KW' '3025';\n" + + "KW3026 : 'KW' '3026';\n" + + "KW3027 : 'KW' '3027';\n" + + "KW3028 : 'KW' '3028';\n" + + "KW3029 : 'KW' '3029';\n" + + "KW3030 : 'KW' '3030';\n" + + "KW3031 : 'KW' '3031';\n" + + "KW3032 : 'KW' '3032';\n" + + "KW3033 : 'KW' '3033';\n" + + "KW3034 : 'KW' '3034';\n" + + "KW3035 : 'KW' '3035';\n" + + "KW3036 : 'KW' '3036';\n" + + "KW3037 : 'KW' '3037';\n" + + "KW3038 : 'KW' '3038';\n" + + "KW3039 : 'KW' '3039';\n" + + "KW3040 : 'KW' '3040';\n" + + "KW3041 : 'KW' '3041';\n" + + "KW3042 : 'KW' '3042';\n" + + "KW3043 : 'KW' '3043';\n" + + "KW3044 : 'KW' '3044';\n" + + "KW3045 : 'KW' '3045';\n" + + "KW3046 : 'KW' '3046';\n" + + "KW3047 : 'KW' '3047';\n" + + "KW3048 : 'KW' '3048';\n" + + "KW3049 : 'KW' '3049';\n" + + "KW3050 : 'KW' '3050';\n" + + "KW3051 : 'KW' '3051';\n" + + "KW3052 : 'KW' '3052';\n" + + "KW3053 : 'KW' '3053';\n" + + "KW3054 : 'KW' '3054';\n" + + "KW3055 : 'KW' '3055';\n" + + "KW3056 : 'KW' '3056';\n" + + "KW3057 : 'KW' '3057';\n" + + "KW3058 : 'KW' '3058';\n" + + "KW3059 : 'KW' '3059';\n" + + "KW3060 : 'KW' '3060';\n" + + "KW3061 : 'KW' '3061';\n" + + "KW3062 : 'KW' '3062';\n" + + "KW3063 : 'KW' '3063';\n" + + "KW3064 : 'KW' '3064';\n" + + "KW3065 : 'KW' '3065';\n" + + "KW3066 : 'KW' '3066';\n" + + "KW3067 : 'KW' '3067';\n" + + "KW3068 : 'KW' '3068';\n" + + "KW3069 : 'KW' '3069';\n" + + "KW3070 : 'KW' '3070';\n" + + "KW3071 : 'KW' '3071';\n" + + "KW3072 : 'KW' '3072';\n" + + "KW3073 : 'KW' '3073';\n" + + "KW3074 : 'KW' '3074';\n" + + "KW3075 : 'KW' '3075';\n" + + "KW3076 : 'KW' '3076';\n" + + "KW3077 : 'KW' '3077';\n" + + "KW3078 : 'KW' '3078';\n" + + "KW3079 : 'KW' '3079';\n" + + "KW3080 : 'KW' '3080';\n" + + "KW3081 : 'KW' '3081';\n" + + "KW3082 : 'KW' '3082';\n" + + "KW3083 : 'KW' '3083';\n" + + "KW3084 : 'KW' '3084';\n" + + "KW3085 : 'KW' '3085';\n" + + "KW3086 : 'KW' '3086';\n" + + "KW3087 : 'KW' '3087';\n" + + "KW3088 : 'KW' '3088';\n" + + "KW3089 : 'KW' '3089';\n" + + "KW3090 : 'KW' '3090';\n" + + "KW3091 : 'KW' '3091';\n" + + "KW3092 : 'KW' '3092';\n" + + "KW3093 : 'KW' '3093';\n" + + "KW3094 : 'KW' '3094';\n" + + "KW3095 : 'KW' '3095';\n" + + "KW3096 : 'KW' '3096';\n" + + "KW3097 : 'KW' '3097';\n" + + "KW3098 : 'KW' '3098';\n" + + "KW3099 : 'KW' '3099';\n" + + "KW3100 : 'KW' '3100';\n" + + "KW3101 : 'KW' '3101';\n" + + "KW3102 : 'KW' '3102';\n" + + "KW3103 : 'KW' '3103';\n" + + "KW3104 : 'KW' '3104';\n" + + "KW3105 : 'KW' '3105';\n" + + "KW3106 : 'KW' '3106';\n" + + "KW3107 : 'KW' '3107';\n" + + "KW3108 : 'KW' '3108';\n" + + "KW3109 : 'KW' '3109';\n" + + "KW3110 : 'KW' '3110';\n" + + "KW3111 : 'KW' '3111';\n" + + "KW3112 : 'KW' '3112';\n" + + "KW3113 : 'KW' '3113';\n" + + "KW3114 : 'KW' '3114';\n" + + "KW3115 : 'KW' '3115';\n" + + "KW3116 : 'KW' '3116';\n" + + "KW3117 : 'KW' '3117';\n" + + "KW3118 : 'KW' '3118';\n" + + "KW3119 : 'KW' '3119';\n" + + "KW3120 : 'KW' '3120';\n" + + 
"KW3121 : 'KW' '3121';\n" + + "KW3122 : 'KW' '3122';\n" + + "KW3123 : 'KW' '3123';\n" + + "KW3124 : 'KW' '3124';\n" + + "KW3125 : 'KW' '3125';\n" + + "KW3126 : 'KW' '3126';\n" + + "KW3127 : 'KW' '3127';\n" + + "KW3128 : 'KW' '3128';\n" + + "KW3129 : 'KW' '3129';\n" + + "KW3130 : 'KW' '3130';\n" + + "KW3131 : 'KW' '3131';\n" + + "KW3132 : 'KW' '3132';\n" + + "KW3133 : 'KW' '3133';\n" + + "KW3134 : 'KW' '3134';\n" + + "KW3135 : 'KW' '3135';\n" + + "KW3136 : 'KW' '3136';\n" + + "KW3137 : 'KW' '3137';\n" + + "KW3138 : 'KW' '3138';\n" + + "KW3139 : 'KW' '3139';\n" + + "KW3140 : 'KW' '3140';\n" + + "KW3141 : 'KW' '3141';\n" + + "KW3142 : 'KW' '3142';\n" + + "KW3143 : 'KW' '3143';\n" + + "KW3144 : 'KW' '3144';\n" + + "KW3145 : 'KW' '3145';\n" + + "KW3146 : 'KW' '3146';\n" + + "KW3147 : 'KW' '3147';\n" + + "KW3148 : 'KW' '3148';\n" + + "KW3149 : 'KW' '3149';\n" + + "KW3150 : 'KW' '3150';\n" + + "KW3151 : 'KW' '3151';\n" + + "KW3152 : 'KW' '3152';\n" + + "KW3153 : 'KW' '3153';\n" + + "KW3154 : 'KW' '3154';\n" + + "KW3155 : 'KW' '3155';\n" + + "KW3156 : 'KW' '3156';\n" + + "KW3157 : 'KW' '3157';\n" + + "KW3158 : 'KW' '3158';\n" + + "KW3159 : 'KW' '3159';\n" + + "KW3160 : 'KW' '3160';\n" + + "KW3161 : 'KW' '3161';\n" + + "KW3162 : 'KW' '3162';\n" + + "KW3163 : 'KW' '3163';\n" + + "KW3164 : 'KW' '3164';\n" + + "KW3165 : 'KW' '3165';\n" + + "KW3166 : 'KW' '3166';\n" + + "KW3167 : 'KW' '3167';\n" + + "KW3168 : 'KW' '3168';\n" + + "KW3169 : 'KW' '3169';\n" + + "KW3170 : 'KW' '3170';\n" + + "KW3171 : 'KW' '3171';\n" + + "KW3172 : 'KW' '3172';\n" + + "KW3173 : 'KW' '3173';\n" + + "KW3174 : 'KW' '3174';\n" + + "KW3175 : 'KW' '3175';\n" + + "KW3176 : 'KW' '3176';\n" + + "KW3177 : 'KW' '3177';\n" + + "KW3178 : 'KW' '3178';\n" + + "KW3179 : 'KW' '3179';\n" + + "KW3180 : 'KW' '3180';\n" + + "KW3181 : 'KW' '3181';\n" + + "KW3182 : 'KW' '3182';\n" + + "KW3183 : 'KW' '3183';\n" + + "KW3184 : 'KW' '3184';\n" + + "KW3185 : 'KW' '3185';\n" + + "KW3186 : 'KW' '3186';\n" + + "KW3187 : 'KW' '3187';\n" + + "KW3188 : 'KW' '3188';\n" + + "KW3189 : 'KW' '3189';\n" + + "KW3190 : 'KW' '3190';\n" + + "KW3191 : 'KW' '3191';\n" + + "KW3192 : 'KW' '3192';\n" + + "KW3193 : 'KW' '3193';\n" + + "KW3194 : 'KW' '3194';\n" + + "KW3195 : 'KW' '3195';\n" + + "KW3196 : 'KW' '3196';\n" + + "KW3197 : 'KW' '3197';\n" + + "KW3198 : 'KW' '3198';\n" + + "KW3199 : 'KW' '3199';\n" + + "KW3200 : 'KW' '3200';\n" + + "KW3201 : 'KW' '3201';\n" + + "KW3202 : 'KW' '3202';\n" + + "KW3203 : 'KW' '3203';\n" + + "KW3204 : 'KW' '3204';\n" + + "KW3205 : 'KW' '3205';\n" + + "KW3206 : 'KW' '3206';\n" + + "KW3207 : 'KW' '3207';\n" + + "KW3208 : 'KW' '3208';\n" + + "KW3209 : 'KW' '3209';\n" + + "KW3210 : 'KW' '3210';\n" + + "KW3211 : 'KW' '3211';\n" + + "KW3212 : 'KW' '3212';\n" + + "KW3213 : 'KW' '3213';\n" + + "KW3214 : 'KW' '3214';\n" + + "KW3215 : 'KW' '3215';\n" + + "KW3216 : 'KW' '3216';\n" + + "KW3217 : 'KW' '3217';\n" + + "KW3218 : 'KW' '3218';\n" + + "KW3219 : 'KW' '3219';\n" + + "KW3220 : 'KW' '3220';\n" + + "KW3221 : 'KW' '3221';\n" + + "KW3222 : 'KW' '3222';\n" + + "KW3223 : 'KW' '3223';\n" + + "KW3224 : 'KW' '3224';\n" + + "KW3225 : 'KW' '3225';\n" + + "KW3226 : 'KW' '3226';\n" + + "KW3227 : 'KW' '3227';\n" + + "KW3228 : 'KW' '3228';\n" + + "KW3229 : 'KW' '3229';\n" + + "KW3230 : 'KW' '3230';\n" + + "KW3231 : 'KW' '3231';\n" + + "KW3232 : 'KW' '3232';\n" + + "KW3233 : 'KW' '3233';\n" + + "KW3234 : 'KW' '3234';\n" + + "KW3235 : 'KW' '3235';\n" + + "KW3236 : 'KW' '3236';\n" + + "KW3237 : 'KW' '3237';\n" + + "KW3238 : 'KW' '3238';\n" + + "KW3239 : 'KW' 
'3239';\n" + + "KW3240 : 'KW' '3240';\n" + + "KW3241 : 'KW' '3241';\n" + + "KW3242 : 'KW' '3242';\n" + + "KW3243 : 'KW' '3243';\n" + + "KW3244 : 'KW' '3244';\n" + + "KW3245 : 'KW' '3245';\n" + + "KW3246 : 'KW' '3246';\n" + + "KW3247 : 'KW' '3247';\n" + + "KW3248 : 'KW' '3248';\n" + + "KW3249 : 'KW' '3249';\n" + + "KW3250 : 'KW' '3250';\n" + + "KW3251 : 'KW' '3251';\n" + + "KW3252 : 'KW' '3252';\n" + + "KW3253 : 'KW' '3253';\n" + + "KW3254 : 'KW' '3254';\n" + + "KW3255 : 'KW' '3255';\n" + + "KW3256 : 'KW' '3256';\n" + + "KW3257 : 'KW' '3257';\n" + + "KW3258 : 'KW' '3258';\n" + + "KW3259 : 'KW' '3259';\n" + + "KW3260 : 'KW' '3260';\n" + + "KW3261 : 'KW' '3261';\n" + + "KW3262 : 'KW' '3262';\n" + + "KW3263 : 'KW' '3263';\n" + + "KW3264 : 'KW' '3264';\n" + + "KW3265 : 'KW' '3265';\n" + + "KW3266 : 'KW' '3266';\n" + + "KW3267 : 'KW' '3267';\n" + + "KW3268 : 'KW' '3268';\n" + + "KW3269 : 'KW' '3269';\n" + + "KW3270 : 'KW' '3270';\n" + + "KW3271 : 'KW' '3271';\n" + + "KW3272 : 'KW' '3272';\n" + + "KW3273 : 'KW' '3273';\n" + + "KW3274 : 'KW' '3274';\n" + + "KW3275 : 'KW' '3275';\n" + + "KW3276 : 'KW' '3276';\n" + + "KW3277 : 'KW' '3277';\n" + + "KW3278 : 'KW' '3278';\n" + + "KW3279 : 'KW' '3279';\n" + + "KW3280 : 'KW' '3280';\n" + + "KW3281 : 'KW' '3281';\n" + + "KW3282 : 'KW' '3282';\n" + + "KW3283 : 'KW' '3283';\n" + + "KW3284 : 'KW' '3284';\n" + + "KW3285 : 'KW' '3285';\n" + + "KW3286 : 'KW' '3286';\n" + + "KW3287 : 'KW' '3287';\n" + + "KW3288 : 'KW' '3288';\n" + + "KW3289 : 'KW' '3289';\n" + + "KW3290 : 'KW' '3290';\n" + + "KW3291 : 'KW' '3291';\n" + + "KW3292 : 'KW' '3292';\n" + + "KW3293 : 'KW' '3293';\n" + + "KW3294 : 'KW' '3294';\n" + + "KW3295 : 'KW' '3295';\n" + + "KW3296 : 'KW' '3296';\n" + + "KW3297 : 'KW' '3297';\n" + + "KW3298 : 'KW' '3298';\n" + + "KW3299 : 'KW' '3299';\n" + + "KW3300 : 'KW' '3300';\n" + + "KW3301 : 'KW' '3301';\n" + + "KW3302 : 'KW' '3302';\n" + + "KW3303 : 'KW' '3303';\n" + + "KW3304 : 'KW' '3304';\n" + + "KW3305 : 'KW' '3305';\n" + + "KW3306 : 'KW' '3306';\n" + + "KW3307 : 'KW' '3307';\n" + + "KW3308 : 'KW' '3308';\n" + + "KW3309 : 'KW' '3309';\n" + + "KW3310 : 'KW' '3310';\n" + + "KW3311 : 'KW' '3311';\n" + + "KW3312 : 'KW' '3312';\n" + + "KW3313 : 'KW' '3313';\n" + + "KW3314 : 'KW' '3314';\n" + + "KW3315 : 'KW' '3315';\n" + + "KW3316 : 'KW' '3316';\n" + + "KW3317 : 'KW' '3317';\n" + + "KW3318 : 'KW' '3318';\n" + + "KW3319 : 'KW' '3319';\n" + + "KW3320 : 'KW' '3320';\n" + + "KW3321 : 'KW' '3321';\n" + + "KW3322 : 'KW' '3322';\n" + + "KW3323 : 'KW' '3323';\n" + + "KW3324 : 'KW' '3324';\n" + + "KW3325 : 'KW' '3325';\n" + + "KW3326 : 'KW' '3326';\n" + + "KW3327 : 'KW' '3327';\n" + + "KW3328 : 'KW' '3328';\n" + + "KW3329 : 'KW' '3329';\n" + + "KW3330 : 'KW' '3330';\n" + + "KW3331 : 'KW' '3331';\n" + + "KW3332 : 'KW' '3332';\n" + + "KW3333 : 'KW' '3333';\n" + + "KW3334 : 'KW' '3334';\n" + + "KW3335 : 'KW' '3335';\n" + + "KW3336 : 'KW' '3336';\n" + + "KW3337 : 'KW' '3337';\n" + + "KW3338 : 'KW' '3338';\n" + + "KW3339 : 'KW' '3339';\n" + + "KW3340 : 'KW' '3340';\n" + + "KW3341 : 'KW' '3341';\n" + + "KW3342 : 'KW' '3342';\n" + + "KW3343 : 'KW' '3343';\n" + + "KW3344 : 'KW' '3344';\n" + + "KW3345 : 'KW' '3345';\n" + + "KW3346 : 'KW' '3346';\n" + + "KW3347 : 'KW' '3347';\n" + + "KW3348 : 'KW' '3348';\n" + + "KW3349 : 'KW' '3349';\n" + + "KW3350 : 'KW' '3350';\n" + + "KW3351 : 'KW' '3351';\n" + + "KW3352 : 'KW' '3352';\n" + + "KW3353 : 'KW' '3353';\n" + + "KW3354 : 'KW' '3354';\n" + + "KW3355 : 'KW' '3355';\n" + + "KW3356 : 'KW' '3356';\n" + + "KW3357 : 'KW' '3357';\n" + + 
"KW3358 : 'KW' '3358';\n" + + "KW3359 : 'KW' '3359';\n" + + "KW3360 : 'KW' '3360';\n" + + "KW3361 : 'KW' '3361';\n" + + "KW3362 : 'KW' '3362';\n" + + "KW3363 : 'KW' '3363';\n" + + "KW3364 : 'KW' '3364';\n" + + "KW3365 : 'KW' '3365';\n" + + "KW3366 : 'KW' '3366';\n" + + "KW3367 : 'KW' '3367';\n" + + "KW3368 : 'KW' '3368';\n" + + "KW3369 : 'KW' '3369';\n" + + "KW3370 : 'KW' '3370';\n" + + "KW3371 : 'KW' '3371';\n" + + "KW3372 : 'KW' '3372';\n" + + "KW3373 : 'KW' '3373';\n" + + "KW3374 : 'KW' '3374';\n" + + "KW3375 : 'KW' '3375';\n" + + "KW3376 : 'KW' '3376';\n" + + "KW3377 : 'KW' '3377';\n" + + "KW3378 : 'KW' '3378';\n" + + "KW3379 : 'KW' '3379';\n" + + "KW3380 : 'KW' '3380';\n" + + "KW3381 : 'KW' '3381';\n" + + "KW3382 : 'KW' '3382';\n" + + "KW3383 : 'KW' '3383';\n" + + "KW3384 : 'KW' '3384';\n" + + "KW3385 : 'KW' '3385';\n" + + "KW3386 : 'KW' '3386';\n" + + "KW3387 : 'KW' '3387';\n" + + "KW3388 : 'KW' '3388';\n" + + "KW3389 : 'KW' '3389';\n" + + "KW3390 : 'KW' '3390';\n" + + "KW3391 : 'KW' '3391';\n" + + "KW3392 : 'KW' '3392';\n" + + "KW3393 : 'KW' '3393';\n" + + "KW3394 : 'KW' '3394';\n" + + "KW3395 : 'KW' '3395';\n" + + "KW3396 : 'KW' '3396';\n" + + "KW3397 : 'KW' '3397';\n" + + "KW3398 : 'KW' '3398';\n" + + "KW3399 : 'KW' '3399';\n" + + "KW3400 : 'KW' '3400';\n" + + "KW3401 : 'KW' '3401';\n" + + "KW3402 : 'KW' '3402';\n" + + "KW3403 : 'KW' '3403';\n" + + "KW3404 : 'KW' '3404';\n" + + "KW3405 : 'KW' '3405';\n" + + "KW3406 : 'KW' '3406';\n" + + "KW3407 : 'KW' '3407';\n" + + "KW3408 : 'KW' '3408';\n" + + "KW3409 : 'KW' '3409';\n" + + "KW3410 : 'KW' '3410';\n" + + "KW3411 : 'KW' '3411';\n" + + "KW3412 : 'KW' '3412';\n" + + "KW3413 : 'KW' '3413';\n" + + "KW3414 : 'KW' '3414';\n" + + "KW3415 : 'KW' '3415';\n" + + "KW3416 : 'KW' '3416';\n" + + "KW3417 : 'KW' '3417';\n" + + "KW3418 : 'KW' '3418';\n" + + "KW3419 : 'KW' '3419';\n" + + "KW3420 : 'KW' '3420';\n" + + "KW3421 : 'KW' '3421';\n" + + "KW3422 : 'KW' '3422';\n" + + "KW3423 : 'KW' '3423';\n" + + "KW3424 : 'KW' '3424';\n" + + "KW3425 : 'KW' '3425';\n" + + "KW3426 : 'KW' '3426';\n" + + "KW3427 : 'KW' '3427';\n" + + "KW3428 : 'KW' '3428';\n" + + "KW3429 : 'KW' '3429';\n" + + "KW3430 : 'KW' '3430';\n" + + "KW3431 : 'KW' '3431';\n" + + "KW3432 : 'KW' '3432';\n" + + "KW3433 : 'KW' '3433';\n" + + "KW3434 : 'KW' '3434';\n" + + "KW3435 : 'KW' '3435';\n" + + "KW3436 : 'KW' '3436';\n" + + "KW3437 : 'KW' '3437';\n" + + "KW3438 : 'KW' '3438';\n" + + "KW3439 : 'KW' '3439';\n" + + "KW3440 : 'KW' '3440';\n" + + "KW3441 : 'KW' '3441';\n" + + "KW3442 : 'KW' '3442';\n" + + "KW3443 : 'KW' '3443';\n" + + "KW3444 : 'KW' '3444';\n" + + "KW3445 : 'KW' '3445';\n" + + "KW3446 : 'KW' '3446';\n" + + "KW3447 : 'KW' '3447';\n" + + "KW3448 : 'KW' '3448';\n" + + "KW3449 : 'KW' '3449';\n" + + "KW3450 : 'KW' '3450';\n" + + "KW3451 : 'KW' '3451';\n" + + "KW3452 : 'KW' '3452';\n" + + "KW3453 : 'KW' '3453';\n" + + "KW3454 : 'KW' '3454';\n" + + "KW3455 : 'KW' '3455';\n" + + "KW3456 : 'KW' '3456';\n" + + "KW3457 : 'KW' '3457';\n" + + "KW3458 : 'KW' '3458';\n" + + "KW3459 : 'KW' '3459';\n" + + "KW3460 : 'KW' '3460';\n" + + "KW3461 : 'KW' '3461';\n" + + "KW3462 : 'KW' '3462';\n" + + "KW3463 : 'KW' '3463';\n" + + "KW3464 : 'KW' '3464';\n" + + "KW3465 : 'KW' '3465';\n" + + "KW3466 : 'KW' '3466';\n" + + "KW3467 : 'KW' '3467';\n" + + "KW3468 : 'KW' '3468';\n" + + "KW3469 : 'KW' '3469';\n" + + "KW3470 : 'KW' '3470';\n" + + "KW3471 : 'KW' '3471';\n" + + "KW3472 : 'KW' '3472';\n" + + "KW3473 : 'KW' '3473';\n" + + "KW3474 : 'KW' '3474';\n" + + "KW3475 : 'KW' '3475';\n" + + "KW3476 : 'KW' 
'3476';\n" + + "KW3477 : 'KW' '3477';\n" + + "KW3478 : 'KW' '3478';\n" + + "KW3479 : 'KW' '3479';\n" + + "KW3480 : 'KW' '3480';\n" + + "KW3481 : 'KW' '3481';\n" + + "KW3482 : 'KW' '3482';\n" + + "KW3483 : 'KW' '3483';\n" + + "KW3484 : 'KW' '3484';\n" + + "KW3485 : 'KW' '3485';\n" + + "KW3486 : 'KW' '3486';\n" + + "KW3487 : 'KW' '3487';\n" + + "KW3488 : 'KW' '3488';\n" + + "KW3489 : 'KW' '3489';\n" + + "KW3490 : 'KW' '3490';\n" + + "KW3491 : 'KW' '3491';\n" + + "KW3492 : 'KW' '3492';\n" + + "KW3493 : 'KW' '3493';\n" + + "KW3494 : 'KW' '3494';\n" + + "KW3495 : 'KW' '3495';\n" + + "KW3496 : 'KW' '3496';\n" + + "KW3497 : 'KW' '3497';\n" + + "KW3498 : 'KW' '3498';\n" + + "KW3499 : 'KW' '3499';\n" + + "KW3500 : 'KW' '3500';\n" + + "KW3501 : 'KW' '3501';\n" + + "KW3502 : 'KW' '3502';\n" + + "KW3503 : 'KW' '3503';\n" + + "KW3504 : 'KW' '3504';\n" + + "KW3505 : 'KW' '3505';\n" + + "KW3506 : 'KW' '3506';\n" + + "KW3507 : 'KW' '3507';\n" + + "KW3508 : 'KW' '3508';\n" + + "KW3509 : 'KW' '3509';\n" + + "KW3510 : 'KW' '3510';\n" + + "KW3511 : 'KW' '3511';\n" + + "KW3512 : 'KW' '3512';\n" + + "KW3513 : 'KW' '3513';\n" + + "KW3514 : 'KW' '3514';\n" + + "KW3515 : 'KW' '3515';\n" + + "KW3516 : 'KW' '3516';\n" + + "KW3517 : 'KW' '3517';\n" + + "KW3518 : 'KW' '3518';\n" + + "KW3519 : 'KW' '3519';\n" + + "KW3520 : 'KW' '3520';\n" + + "KW3521 : 'KW' '3521';\n" + + "KW3522 : 'KW' '3522';\n" + + "KW3523 : 'KW' '3523';\n" + + "KW3524 : 'KW' '3524';\n" + + "KW3525 : 'KW' '3525';\n" + + "KW3526 : 'KW' '3526';\n" + + "KW3527 : 'KW' '3527';\n" + + "KW3528 : 'KW' '3528';\n" + + "KW3529 : 'KW' '3529';\n" + + "KW3530 : 'KW' '3530';\n" + + "KW3531 : 'KW' '3531';\n" + + "KW3532 : 'KW' '3532';\n" + + "KW3533 : 'KW' '3533';\n" + + "KW3534 : 'KW' '3534';\n" + + "KW3535 : 'KW' '3535';\n" + + "KW3536 : 'KW' '3536';\n" + + "KW3537 : 'KW' '3537';\n" + + "KW3538 : 'KW' '3538';\n" + + "KW3539 : 'KW' '3539';\n" + + "KW3540 : 'KW' '3540';\n" + + "KW3541 : 'KW' '3541';\n" + + "KW3542 : 'KW' '3542';\n" + + "KW3543 : 'KW' '3543';\n" + + "KW3544 : 'KW' '3544';\n" + + "KW3545 : 'KW' '3545';\n" + + "KW3546 : 'KW' '3546';\n" + + "KW3547 : 'KW' '3547';\n" + + "KW3548 : 'KW' '3548';\n" + + "KW3549 : 'KW' '3549';\n" + + "KW3550 : 'KW' '3550';\n" + + "KW3551 : 'KW' '3551';\n" + + "KW3552 : 'KW' '3552';\n" + + "KW3553 : 'KW' '3553';\n" + + "KW3554 : 'KW' '3554';\n" + + "KW3555 : 'KW' '3555';\n" + + "KW3556 : 'KW' '3556';\n" + + "KW3557 : 'KW' '3557';\n" + + "KW3558 : 'KW' '3558';\n" + + "KW3559 : 'KW' '3559';\n" + + "KW3560 : 'KW' '3560';\n" + + "KW3561 : 'KW' '3561';\n" + + "KW3562 : 'KW' '3562';\n" + + "KW3563 : 'KW' '3563';\n" + + "KW3564 : 'KW' '3564';\n" + + "KW3565 : 'KW' '3565';\n" + + "KW3566 : 'KW' '3566';\n" + + "KW3567 : 'KW' '3567';\n" + + "KW3568 : 'KW' '3568';\n" + + "KW3569 : 'KW' '3569';\n" + + "KW3570 : 'KW' '3570';\n" + + "KW3571 : 'KW' '3571';\n" + + "KW3572 : 'KW' '3572';\n" + + "KW3573 : 'KW' '3573';\n" + + "KW3574 : 'KW' '3574';\n" + + "KW3575 : 'KW' '3575';\n" + + "KW3576 : 'KW' '3576';\n" + + "KW3577 : 'KW' '3577';\n" + + "KW3578 : 'KW' '3578';\n" + + "KW3579 : 'KW' '3579';\n" + + "KW3580 : 'KW' '3580';\n" + + "KW3581 : 'KW' '3581';\n" + + "KW3582 : 'KW' '3582';\n" + + "KW3583 : 'KW' '3583';\n" + + "KW3584 : 'KW' '3584';\n" + + "KW3585 : 'KW' '3585';\n" + + "KW3586 : 'KW' '3586';\n" + + "KW3587 : 'KW' '3587';\n" + + "KW3588 : 'KW' '3588';\n" + + "KW3589 : 'KW' '3589';\n" + + "KW3590 : 'KW' '3590';\n" + + "KW3591 : 'KW' '3591';\n" + + "KW3592 : 'KW' '3592';\n" + + "KW3593 : 'KW' '3593';\n" + + "KW3594 : 'KW' '3594';\n" + + 
"KW3595 : 'KW' '3595';\n" + + "KW3596 : 'KW' '3596';\n" + + "KW3597 : 'KW' '3597';\n" + + "KW3598 : 'KW' '3598';\n" + + "KW3599 : 'KW' '3599';\n" + + "KW3600 : 'KW' '3600';\n" + + "KW3601 : 'KW' '3601';\n" + + "KW3602 : 'KW' '3602';\n" + + "KW3603 : 'KW' '3603';\n" + + "KW3604 : 'KW' '3604';\n" + + "KW3605 : 'KW' '3605';\n" + + "KW3606 : 'KW' '3606';\n" + + "KW3607 : 'KW' '3607';\n" + + "KW3608 : 'KW' '3608';\n" + + "KW3609 : 'KW' '3609';\n" + + "KW3610 : 'KW' '3610';\n" + + "KW3611 : 'KW' '3611';\n" + + "KW3612 : 'KW' '3612';\n" + + "KW3613 : 'KW' '3613';\n" + + "KW3614 : 'KW' '3614';\n" + + "KW3615 : 'KW' '3615';\n" + + "KW3616 : 'KW' '3616';\n" + + "KW3617 : 'KW' '3617';\n" + + "KW3618 : 'KW' '3618';\n" + + "KW3619 : 'KW' '3619';\n" + + "KW3620 : 'KW' '3620';\n" + + "KW3621 : 'KW' '3621';\n" + + "KW3622 : 'KW' '3622';\n" + + "KW3623 : 'KW' '3623';\n" + + "KW3624 : 'KW' '3624';\n" + + "KW3625 : 'KW' '3625';\n" + + "KW3626 : 'KW' '3626';\n" + + "KW3627 : 'KW' '3627';\n" + + "KW3628 : 'KW' '3628';\n" + + "KW3629 : 'KW' '3629';\n" + + "KW3630 : 'KW' '3630';\n" + + "KW3631 : 'KW' '3631';\n" + + "KW3632 : 'KW' '3632';\n" + + "KW3633 : 'KW' '3633';\n" + + "KW3634 : 'KW' '3634';\n" + + "KW3635 : 'KW' '3635';\n" + + "KW3636 : 'KW' '3636';\n" + + "KW3637 : 'KW' '3637';\n" + + "KW3638 : 'KW' '3638';\n" + + "KW3639 : 'KW' '3639';\n" + + "KW3640 : 'KW' '3640';\n" + + "KW3641 : 'KW' '3641';\n" + + "KW3642 : 'KW' '3642';\n" + + "KW3643 : 'KW' '3643';\n" + + "KW3644 : 'KW' '3644';\n" + + "KW3645 : 'KW' '3645';\n" + + "KW3646 : 'KW' '3646';\n" + + "KW3647 : 'KW' '3647';\n" + + "KW3648 : 'KW' '3648';\n" + + "KW3649 : 'KW' '3649';\n" + + "KW3650 : 'KW' '3650';\n" + + "KW3651 : 'KW' '3651';\n" + + "KW3652 : 'KW' '3652';\n" + + "KW3653 : 'KW' '3653';\n" + + "KW3654 : 'KW' '3654';\n" + + "KW3655 : 'KW' '3655';\n" + + "KW3656 : 'KW' '3656';\n" + + "KW3657 : 'KW' '3657';\n" + + "KW3658 : 'KW' '3658';\n" + + "KW3659 : 'KW' '3659';\n" + + "KW3660 : 'KW' '3660';\n" + + "KW3661 : 'KW' '3661';\n" + + "KW3662 : 'KW' '3662';\n" + + "KW3663 : 'KW' '3663';\n" + + "KW3664 : 'KW' '3664';\n" + + "KW3665 : 'KW' '3665';\n" + + "KW3666 : 'KW' '3666';\n" + + "KW3667 : 'KW' '3667';\n" + + "KW3668 : 'KW' '3668';\n" + + "KW3669 : 'KW' '3669';\n" + + "KW3670 : 'KW' '3670';\n" + + "KW3671 : 'KW' '3671';\n" + + "KW3672 : 'KW' '3672';\n" + + "KW3673 : 'KW' '3673';\n" + + "KW3674 : 'KW' '3674';\n" + + "KW3675 : 'KW' '3675';\n" + + "KW3676 : 'KW' '3676';\n" + + "KW3677 : 'KW' '3677';\n" + + "KW3678 : 'KW' '3678';\n" + + "KW3679 : 'KW' '3679';\n" + + "KW3680 : 'KW' '3680';\n" + + "KW3681 : 'KW' '3681';\n" + + "KW3682 : 'KW' '3682';\n" + + "KW3683 : 'KW' '3683';\n" + + "KW3684 : 'KW' '3684';\n" + + "KW3685 : 'KW' '3685';\n" + + "KW3686 : 'KW' '3686';\n" + + "KW3687 : 'KW' '3687';\n" + + "KW3688 : 'KW' '3688';\n" + + "KW3689 : 'KW' '3689';\n" + + "KW3690 : 'KW' '3690';\n" + + "KW3691 : 'KW' '3691';\n" + + "KW3692 : 'KW' '3692';\n" + + "KW3693 : 'KW' '3693';\n" + + "KW3694 : 'KW' '3694';\n" + + "KW3695 : 'KW' '3695';\n" + + "KW3696 : 'KW' '3696';\n" + + "KW3697 : 'KW' '3697';\n" + + "KW3698 : 'KW' '3698';\n" + + "KW3699 : 'KW' '3699';\n" + + "KW3700 : 'KW' '3700';\n" + + "KW3701 : 'KW' '3701';\n" + + "KW3702 : 'KW' '3702';\n" + + "KW3703 : 'KW' '3703';\n" + + "KW3704 : 'KW' '3704';\n" + + "KW3705 : 'KW' '3705';\n" + + "KW3706 : 'KW' '3706';\n" + + "KW3707 : 'KW' '3707';\n" + + "KW3708 : 'KW' '3708';\n" + + "KW3709 : 'KW' '3709';\n" + + "KW3710 : 'KW' '3710';\n" + + "KW3711 : 'KW' '3711';\n" + + "KW3712 : 'KW' '3712';\n" + + "KW3713 : 'KW' 
'3713';\n" + + "KW3714 : 'KW' '3714';\n" + + "KW3715 : 'KW' '3715';\n" + + "KW3716 : 'KW' '3716';\n" + + "KW3717 : 'KW' '3717';\n" + + "KW3718 : 'KW' '3718';\n" + + "KW3719 : 'KW' '3719';\n" + + "KW3720 : 'KW' '3720';\n" + + "KW3721 : 'KW' '3721';\n" + + "KW3722 : 'KW' '3722';\n" + + "KW3723 : 'KW' '3723';\n" + + "KW3724 : 'KW' '3724';\n" + + "KW3725 : 'KW' '3725';\n" + + "KW3726 : 'KW' '3726';\n" + + "KW3727 : 'KW' '3727';\n" + + "KW3728 : 'KW' '3728';\n" + + "KW3729 : 'KW' '3729';\n" + + "KW3730 : 'KW' '3730';\n" + + "KW3731 : 'KW' '3731';\n" + + "KW3732 : 'KW' '3732';\n" + + "KW3733 : 'KW' '3733';\n" + + "KW3734 : 'KW' '3734';\n" + + "KW3735 : 'KW' '3735';\n" + + "KW3736 : 'KW' '3736';\n" + + "KW3737 : 'KW' '3737';\n" + + "KW3738 : 'KW' '3738';\n" + + "KW3739 : 'KW' '3739';\n" + + "KW3740 : 'KW' '3740';\n" + + "KW3741 : 'KW' '3741';\n" + + "KW3742 : 'KW' '3742';\n" + + "KW3743 : 'KW' '3743';\n" + + "KW3744 : 'KW' '3744';\n" + + "KW3745 : 'KW' '3745';\n" + + "KW3746 : 'KW' '3746';\n" + + "KW3747 : 'KW' '3747';\n" + + "KW3748 : 'KW' '3748';\n" + + "KW3749 : 'KW' '3749';\n" + + "KW3750 : 'KW' '3750';\n" + + "KW3751 : 'KW' '3751';\n" + + "KW3752 : 'KW' '3752';\n" + + "KW3753 : 'KW' '3753';\n" + + "KW3754 : 'KW' '3754';\n" + + "KW3755 : 'KW' '3755';\n" + + "KW3756 : 'KW' '3756';\n" + + "KW3757 : 'KW' '3757';\n" + + "KW3758 : 'KW' '3758';\n" + + "KW3759 : 'KW' '3759';\n" + + "KW3760 : 'KW' '3760';\n" + + "KW3761 : 'KW' '3761';\n" + + "KW3762 : 'KW' '3762';\n" + + "KW3763 : 'KW' '3763';\n" + + "KW3764 : 'KW' '3764';\n" + + "KW3765 : 'KW' '3765';\n" + + "KW3766 : 'KW' '3766';\n" + + "KW3767 : 'KW' '3767';\n" + + "KW3768 : 'KW' '3768';\n" + + "KW3769 : 'KW' '3769';\n" + + "KW3770 : 'KW' '3770';\n" + + "KW3771 : 'KW' '3771';\n" + + "KW3772 : 'KW' '3772';\n" + + "KW3773 : 'KW' '3773';\n" + + "KW3774 : 'KW' '3774';\n" + + "KW3775 : 'KW' '3775';\n" + + "KW3776 : 'KW' '3776';\n" + + "KW3777 : 'KW' '3777';\n" + + "KW3778 : 'KW' '3778';\n" + + "KW3779 : 'KW' '3779';\n" + + "KW3780 : 'KW' '3780';\n" + + "KW3781 : 'KW' '3781';\n" + + "KW3782 : 'KW' '3782';\n" + + "KW3783 : 'KW' '3783';\n" + + "KW3784 : 'KW' '3784';\n" + + "KW3785 : 'KW' '3785';\n" + + "KW3786 : 'KW' '3786';\n" + + "KW3787 : 'KW' '3787';\n" + + "KW3788 : 'KW' '3788';\n" + + "KW3789 : 'KW' '3789';\n" + + "KW3790 : 'KW' '3790';\n" + + "KW3791 : 'KW' '3791';\n" + + "KW3792 : 'KW' '3792';\n" + + "KW3793 : 'KW' '3793';\n" + + "KW3794 : 'KW' '3794';\n" + + "KW3795 : 'KW' '3795';\n" + + "KW3796 : 'KW' '3796';\n" + + "KW3797 : 'KW' '3797';\n" + + "KW3798 : 'KW' '3798';\n" + + "KW3799 : 'KW' '3799';\n" + + "KW3800 : 'KW' '3800';\n" + + "KW3801 : 'KW' '3801';\n" + + "KW3802 : 'KW' '3802';\n" + + "KW3803 : 'KW' '3803';\n" + + "KW3804 : 'KW' '3804';\n" + + "KW3805 : 'KW' '3805';\n" + + "KW3806 : 'KW' '3806';\n" + + "KW3807 : 'KW' '3807';\n" + + "KW3808 : 'KW' '3808';\n" + + "KW3809 : 'KW' '3809';\n" + + "KW3810 : 'KW' '3810';\n" + + "KW3811 : 'KW' '3811';\n" + + "KW3812 : 'KW' '3812';\n" + + "KW3813 : 'KW' '3813';\n" + + "KW3814 : 'KW' '3814';\n" + + "KW3815 : 'KW' '3815';\n" + + "KW3816 : 'KW' '3816';\n" + + "KW3817 : 'KW' '3817';\n" + + "KW3818 : 'KW' '3818';\n" + + "KW3819 : 'KW' '3819';\n" + + "KW3820 : 'KW' '3820';\n" + + "KW3821 : 'KW' '3821';\n" + + "KW3822 : 'KW' '3822';\n" + + "KW3823 : 'KW' '3823';\n" + + "KW3824 : 'KW' '3824';\n" + + "KW3825 : 'KW' '3825';\n" + + "KW3826 : 'KW' '3826';\n" + + "KW3827 : 'KW' '3827';\n" + + "KW3828 : 'KW' '3828';\n" + + "KW3829 : 'KW' '3829';\n" + + "KW3830 : 'KW' '3830';\n" + + "KW3831 : 'KW' '3831';\n" + + 
"KW3832 : 'KW' '3832';\n" + + "KW3833 : 'KW' '3833';\n" + + "KW3834 : 'KW' '3834';\n" + + "KW3835 : 'KW' '3835';\n" + + "KW3836 : 'KW' '3836';\n" + + "KW3837 : 'KW' '3837';\n" + + "KW3838 : 'KW' '3838';\n" + + "KW3839 : 'KW' '3839';\n" + + "KW3840 : 'KW' '3840';\n" + + "KW3841 : 'KW' '3841';\n" + + "KW3842 : 'KW' '3842';\n" + + "KW3843 : 'KW' '3843';\n" + + "KW3844 : 'KW' '3844';\n" + + "KW3845 : 'KW' '3845';\n" + + "KW3846 : 'KW' '3846';\n" + + "KW3847 : 'KW' '3847';\n" + + "KW3848 : 'KW' '3848';\n" + + "KW3849 : 'KW' '3849';\n" + + "KW3850 : 'KW' '3850';\n" + + "KW3851 : 'KW' '3851';\n" + + "KW3852 : 'KW' '3852';\n" + + "KW3853 : 'KW' '3853';\n" + + "KW3854 : 'KW' '3854';\n" + + "KW3855 : 'KW' '3855';\n" + + "KW3856 : 'KW' '3856';\n" + + "KW3857 : 'KW' '3857';\n" + + "KW3858 : 'KW' '3858';\n" + + "KW3859 : 'KW' '3859';\n" + + "KW3860 : 'KW' '3860';\n" + + "KW3861 : 'KW' '3861';\n" + + "KW3862 : 'KW' '3862';\n" + + "KW3863 : 'KW' '3863';\n" + + "KW3864 : 'KW' '3864';\n" + + "KW3865 : 'KW' '3865';\n" + + "KW3866 : 'KW' '3866';\n" + + "KW3867 : 'KW' '3867';\n" + + "KW3868 : 'KW' '3868';\n" + + "KW3869 : 'KW' '3869';\n" + + "KW3870 : 'KW' '3870';\n" + + "KW3871 : 'KW' '3871';\n" + + "KW3872 : 'KW' '3872';\n" + + "KW3873 : 'KW' '3873';\n" + + "KW3874 : 'KW' '3874';\n" + + "KW3875 : 'KW' '3875';\n" + + "KW3876 : 'KW' '3876';\n" + + "KW3877 : 'KW' '3877';\n" + + "KW3878 : 'KW' '3878';\n" + + "KW3879 : 'KW' '3879';\n" + + "KW3880 : 'KW' '3880';\n" + + "KW3881 : 'KW' '3881';\n" + + "KW3882 : 'KW' '3882';\n" + + "KW3883 : 'KW' '3883';\n" + + "KW3884 : 'KW' '3884';\n" + + "KW3885 : 'KW' '3885';\n" + + "KW3886 : 'KW' '3886';\n" + + "KW3887 : 'KW' '3887';\n" + + "KW3888 : 'KW' '3888';\n" + + "KW3889 : 'KW' '3889';\n" + + "KW3890 : 'KW' '3890';\n" + + "KW3891 : 'KW' '3891';\n" + + "KW3892 : 'KW' '3892';\n" + + "KW3893 : 'KW' '3893';\n" + + "KW3894 : 'KW' '3894';\n" + + "KW3895 : 'KW' '3895';\n" + + "KW3896 : 'KW' '3896';\n" + + "KW3897 : 'KW' '3897';\n" + + "KW3898 : 'KW' '3898';\n" + + "KW3899 : 'KW' '3899';\n" + + "KW3900 : 'KW' '3900';\n" + + "KW3901 : 'KW' '3901';\n" + + "KW3902 : 'KW' '3902';\n" + + "KW3903 : 'KW' '3903';\n" + + "KW3904 : 'KW' '3904';\n" + + "KW3905 : 'KW' '3905';\n" + + "KW3906 : 'KW' '3906';\n" + + "KW3907 : 'KW' '3907';\n" + + "KW3908 : 'KW' '3908';\n" + + "KW3909 : 'KW' '3909';\n" + + "KW3910 : 'KW' '3910';\n" + + "KW3911 : 'KW' '3911';\n" + + "KW3912 : 'KW' '3912';\n" + + "KW3913 : 'KW' '3913';\n" + + "KW3914 : 'KW' '3914';\n" + + "KW3915 : 'KW' '3915';\n" + + "KW3916 : 'KW' '3916';\n" + + "KW3917 : 'KW' '3917';\n" + + "KW3918 : 'KW' '3918';\n" + + "KW3919 : 'KW' '3919';\n" + + "KW3920 : 'KW' '3920';\n" + + "KW3921 : 'KW' '3921';\n" + + "KW3922 : 'KW' '3922';\n" + + "KW3923 : 'KW' '3923';\n" + + "KW3924 : 'KW' '3924';\n" + + "KW3925 : 'KW' '3925';\n" + + "KW3926 : 'KW' '3926';\n" + + "KW3927 : 'KW' '3927';\n" + + "KW3928 : 'KW' '3928';\n" + + "KW3929 : 'KW' '3929';\n" + + "KW3930 : 'KW' '3930';\n" + + "KW3931 : 'KW' '3931';\n" + + "KW3932 : 'KW' '3932';\n" + + "KW3933 : 'KW' '3933';\n" + + "KW3934 : 'KW' '3934';\n" + + "KW3935 : 'KW' '3935';\n" + + "KW3936 : 'KW' '3936';\n" + + "KW3937 : 'KW' '3937';\n" + + "KW3938 : 'KW' '3938';\n" + + "KW3939 : 'KW' '3939';\n" + + "KW3940 : 'KW' '3940';\n" + + "KW3941 : 'KW' '3941';\n" + + "KW3942 : 'KW' '3942';\n" + + "KW3943 : 'KW' '3943';\n" + + "KW3944 : 'KW' '3944';\n" + + "KW3945 : 'KW' '3945';\n" + + "KW3946 : 'KW' '3946';\n" + + "KW3947 : 'KW' '3947';\n" + + "KW3948 : 'KW' '3948';\n" + + "KW3949 : 'KW' '3949';\n" + + "KW3950 : 'KW' 
'3950';\n" + + "KW3951 : 'KW' '3951';\n" + + "KW3952 : 'KW' '3952';\n" + + "KW3953 : 'KW' '3953';\n" + + "KW3954 : 'KW' '3954';\n" + + "KW3955 : 'KW' '3955';\n" + + "KW3956 : 'KW' '3956';\n" + + "KW3957 : 'KW' '3957';\n" + + "KW3958 : 'KW' '3958';\n" + + "KW3959 : 'KW' '3959';\n" + + "KW3960 : 'KW' '3960';\n" + + "KW3961 : 'KW' '3961';\n" + + "KW3962 : 'KW' '3962';\n" + + "KW3963 : 'KW' '3963';\n" + + "KW3964 : 'KW' '3964';\n" + + "KW3965 : 'KW' '3965';\n" + + "KW3966 : 'KW' '3966';\n" + + "KW3967 : 'KW' '3967';\n" + + "KW3968 : 'KW' '3968';\n" + + "KW3969 : 'KW' '3969';\n" + + "KW3970 : 'KW' '3970';\n" + + "KW3971 : 'KW' '3971';\n" + + "KW3972 : 'KW' '3972';\n" + + "KW3973 : 'KW' '3973';\n" + + "KW3974 : 'KW' '3974';\n" + + "KW3975 : 'KW' '3975';\n" + + "KW3976 : 'KW' '3976';\n" + + "KW3977 : 'KW' '3977';\n" + + "KW3978 : 'KW' '3978';\n" + + "KW3979 : 'KW' '3979';\n" + + "KW3980 : 'KW' '3980';\n" + + "KW3981 : 'KW' '3981';\n" + + "KW3982 : 'KW' '3982';\n" + + "KW3983 : 'KW' '3983';\n" + + "KW3984 : 'KW' '3984';\n" + + "KW3985 : 'KW' '3985';\n" + + "KW3986 : 'KW' '3986';\n" + + "KW3987 : 'KW' '3987';\n" + + "KW3988 : 'KW' '3988';\n" + + "KW3989 : 'KW' '3989';\n" + + "KW3990 : 'KW' '3990';\n" + + "KW3991 : 'KW' '3991';\n" + + "KW3992 : 'KW' '3992';\n" + + "KW3993 : 'KW' '3993';\n" + + "KW3994 : 'KW' '3994';\n" + + "KW3995 : 'KW' '3995';\n" + + "KW3996 : 'KW' '3996';\n" + + "KW3997 : 'KW' '3997';\n" + + "KW3998 : 'KW' '3998';\n" + + "KW3999 : 'KW' '3999';"; + String found = execLexer("L.g4", grammar, "L", "KW400"); + assertEquals("[@0,0:4='KW400',<402>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestListeners.java b/tool/test/org/antlr/v4/test/rt/java/TestListeners.java new file mode 100644 index 000000000..955eca626 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestListeners.java @@ -0,0 +1,257 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestListeners extends BaseTest { + + @Test + public void testBasic() throws Exception { + String grammar = "grammar T;\n" + + "@parser::header {\n" + + "var TListener = require('./TListener').TListener;\n" + + "}\n" + + "\n" + + "@parser::members {\n" + + "this.LeafListener = function() {\n" + + " this.visitTerminal = function(node) {\n" + + " document.getElementById('output').value += node.symbol.text + '\\n';\n" + + " };\n" + + " return this;\n" + + "};\n" + + "this.LeafListener.prototype = Object.create(TListener.prototype);\n" + + "this.LeafListener.prototype.constructor = this.LeafListener;\n" + + "\n" + + "}\n" + + "\n" + + "s\n" + + "@after {\n" + + "System.out.println($ctx.r.toStringTree(null, this););\n" + + "var walker = new antlr4.tree.ParseTreeWalker();\n" + + "walker.walk(new this.LeafListener(), $ctx.r);\n" + + "\n" + + "}\n" + + " : r=a ;\n" + + "a : INT INT\n" + + " | ID\n" + + " ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); + assertEquals("(a 1 2)\n1\n2\n", found); + assertNull(this.stderrDuringParse); + } + + String testTokenGetters(String input) throws Exception { + String grammar = "grammar T;\n" + + "@parser::header {\n" + + "var TListener = require('./TListener').TListener;\n" + + "}\n" + + "\n" + + "@parser::members {\n" + + "this.LeafListener = function() {\n" + + " 
this.exitA = function(ctx) {\n" + + " var str;\n" + + " if(ctx.getChildCount()===2) {\n" + + " str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());\n" + + " } else {\n" + + " str = ctx.ID().symbol.toString();\n" + + " }\n" + + " document.getElementById('output').value += str + '\\n';\n" + + " };\n" + + " return this;\n" + + "};\n" + + "this.LeafListener.prototype = Object.create(TListener.prototype);\\n\" +\n" + + "this.LeafListener.prototype.constructor = this.LeafListener;\\n\" +\n" + + "\n" + + "}\n" + + "\n" + + "s\n" + + "@after {\n" + + "System.out.println($ctx.r.toStringTree(null, this););\n" + + "var walker = new antlr4.tree.ParseTreeWalker();\n" + + "walker.walk(new this.LeafListener(), $ctx.r);\n" + + "\n" + + "}\n" + + " : r=a ;\n" + + "a : INT INT\n" + + " | ID\n" + + " ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testTokenGetters_1() throws Exception { + String found = testTokenGetters("1 2"); + assertEquals("(a 1 2)\n1 2 [1, 2]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTokenGetters_2() throws Exception { + String found = testTokenGetters("abc"); + assertEquals("(a abc)\n[@0,0:2='abc',<4>,1:0]\n", found); + assertNull(this.stderrDuringParse); + } + + String testRuleGetters(String input) throws Exception { + String grammar = "grammar T;\n" + + "@parser::header {\n" + + "var TListener = require('./TListener').TListener;\n" + + "}\n" + + "\n" + + "@parser::members {\n" + + "this.LeafListener = function() {\n" + + " this.exitA = function(ctx) {\n" + + " var str;\n" + + " if(ctx.getChildCount()===2) {\n" + + " str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;\n" + + " } else {\n" + + " str = ctx.b(0).start.text;\n" + + " }\n" + + " document.getElementById('output').value += str + '\\n';\n" + + " };\n" + + " return this;\n" + + "};\n" + + "this.LeafListener.prototype = Object.create(TListener.prototype);\\n\" +\n" + + "this.LeafListener.prototype.constructor = this.LeafListener;\\n\" +\n" + + "\n" + + "}\n" + + "\n" + + "s\n" + + "@after {\n" + + "System.out.println($ctx.r.toStringTree(null, this););\n" + + "var walker = new antlr4.tree.ParseTreeWalker();\n" + + "walker.walk(new this.LeafListener(), $ctx.r);\n" + + "\n" + + "}\n" + + " : r=a ;\n" + + "a : b b // forces list\n" + + " | b // a list still\n" + + " ;\n" + + "b : ID | INT;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testRuleGetters_1() throws Exception { + String found = testRuleGetters("1 2"); + assertEquals("(a (b 1) (b 2))\n1 2 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRuleGetters_2() throws Exception { + String found = testRuleGetters("abc"); + assertEquals("(a (b abc))\nabc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLR() throws Exception { + String grammar = "grammar T;\n" + + "@parser::header {\n" + + "var TListener = require('./TListener').TListener;\n" + + "}\n" + + "\n" + + "@parser::members {\n" + + "this.LeafListener = function() {\n" + + " this.exitE = function(ctx) {\n" + + " var str;\n" + + " if(ctx.getChildCount()===3) {\n" + + " str = 
ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;\n" + + " } else {\n" + + " str = ctx.INT().symbol.text;\n" + + " }\n" + + " document.getElementById('output').value += str + '\\n';\n" + + " };\n" + + " return this;\n" + + "};\n" + + "this.LeafListener.prototype = Object.create(TListener.prototype);\n" + + "this.LeafListener.prototype.constructor = this.LeafListener;\n" + + "\n" + + "}\n" + + "\n" + + "s\n" + + "@after {\n" + + "System.out.println($ctx.r.toStringTree(null, this););\n" + + "var walker = new antlr4.tree.ParseTreeWalker();\n" + + "walker.walk(new this.LeafListener(), $ctx.r);\n" + + "\n" + + "}\n" + + " : r=e ;\n" + + "e : e op='*' e\n" + + " | e op='+' e\n" + + " | INT\n" + + " ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1+2*3", false); + assertEquals("(e (e 1) + (e (e 2) * (e 3)))\n1\n2\n3\n2 3 2\n1 2 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLRWithLabels() throws Exception { + String grammar = "grammar T;\n" + + "@parser::header {\n" + + "var TListener = require('./TListener').TListener;\n" + + "}\n" + + "\n" + + "@parser::members {\n" + + "this.LeafListener = function() {\n" + + " this.exitCall = function(ctx) {\n" + + " var str = ctx.e().start.text + ' ' + ctx.eList();\n" + + " document.getElementById('output').value += str + '\\n';\n" + + " };\n" + + " this.exitInt = function(ctx) {\n" + + " var str = ctx.INT().symbol.text;\n" + + " document.getElementById('output').value += str + '\\n';\n" + + " };\n" + + " return this;\n" + + "};\n" + + "this.LeafListener.prototype = Object.create(TListener.prototype);\n" + + "this.LeafListener.prototype.constructor = this.LeafListener;\n" + + "\n" + + "}\n" + + "\n" + + "s\n" + + "@after {\n" + + "System.out.println($ctx.r.toStringTree(null, this););\n" + + "var walker = new antlr4.tree.ParseTreeWalker();\n" + + "walker.walk(new this.LeafListener(), $ctx.r);\n" + + "\n" + + "}\n" + + " : r=e ;\n" + + "e : e '(' eList ')' # Call\n" + + " | INT # Int\n" + + " ;\n" + + "eList : e (',' e)* ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1(2,3)", false); + assertEquals("(e (e 1) ( (eList (e 2) , (e 3)) ))\n1\n2\n3\n1 [13 6]\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java b/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java new file mode 100644 index 000000000..d707d1f4a --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java @@ -0,0 +1,163 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestParseTrees extends BaseTest { + + @Test + public void testTokenAndRuleContextString() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : 'x' { \n" + + "document.getElementById('output').value += antlr4.Utils.arrayToString(this.getRuleInvocationStack()) + '\\n';\n" + + "} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); + assertEquals("[a, s]\n(a x)\n", found); + 
assertNull(this.stderrDuringParse); + } + + @Test + public void testToken2() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : 'x' 'y'\n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xy", false); + assertEquals("(a x y)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testtest2Alts() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : 'x' | 'y'\n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y", false); + assertEquals("(a y)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void test2AltLoop() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : ('x' | 'y')* 'z'\n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xyyxyxz", false); + assertEquals("(a x y y x y x z)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRuleRef() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : b 'x'\n" + + " ;\n" + + "b : 'y' \n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "yx", false); + assertEquals("(a (b y) x)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testExtraToken() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : 'x' 'y'\n" + + " ;\n" + + "Z : 'z' \n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzy", false); + assertEquals("(a x z y)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNoViableAlt() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : 'x' | 'y'\n" + + " ;\n" + + "Z : 'z' \n" + + " ;\n" + + " "; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "z", false); + assertEquals("(a z)\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSync() throws Exception { + String grammar = "grammar T;\n" + + "s\n" + + "@init {\n" + + "this.buildParseTrees = true;\n" + + "}\n" + + "@after {\n" + + "System.out.println($r.ctx.toStringTree(null, this););\n" + + "}\n" + + " : r=a ;\n" + + "a : 'x' 'y'* '!'\n" + + " ;\n" + + "Z : 'z' \n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzyy!", false); + assertEquals("(a x z y y !)\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git 
a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java new file mode 100644 index 000000000..91bcb633f --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java @@ -0,0 +1,289 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestParserErrors extends BaseTest { + + @Test + public void testTokenMismatch() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aa", false); + assertEquals("", found); + assertEquals("line 1:1 mismatched input 'a' expecting 'b'\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenDeletion() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); + assertEquals("", found); + assertEquals("line 1:1 extraneous input 'a' expecting 'b'\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenDeletionExpectingSet() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' ('b'|'c') ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); + assertEquals("", found); + assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenInsertion() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b' 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); + assertEquals("", found); + assertEquals("line 1:1 missing 'b' at 'c'\n", this.stderrDuringParse); + } + + @Test + public void testConjuringUpToken() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); + assertEquals("conjured=[@-1,-1:-1='',<1>,1:1]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSingleSetInsertion() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' ('b'|'c') 'd' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); + assertEquals("", found); + assertEquals("line 1:1 missing {'b', 'c'} at 'd'\n", this.stderrDuringParse); + } + + @Test + public void testConjuringUpTokenFromSet() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); + assertEquals("conjured=[@-1,-1:-1='',<1>,1:1]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLL2() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b'\n" + + " | 'a' 'c'\n" + + ";\n" + + "q : 'e' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ae", false); + assertEquals("", found); + assertEquals("line 1:1 no viable alternative at input 'ae'\n", this.stderrDuringParse); + } + + @Test + public void testLL3() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b'* 'c'\n" + + " | 'a' 'b' 'd'\n" + + ";\n" + + "q : 'e' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abe", false); + assertEquals("", found); + assertEquals("line 1:2 no viable alternative at input 'abe'\n", this.stderrDuringParse); + } + + @Test + public void testLLStar() 
throws Exception { + String grammar = "grammar T;\n" + + "a : 'a'+ 'b'\n" + + " | 'a'+ 'c'\n" + + ";\n" + + "q : 'e' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aaae", false); + assertEquals("", found); + assertEquals("line 1:3 no viable alternative at input 'aaae'\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenDeletionBeforeLoop() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b'* ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); + assertEquals("", found); + assertEquals("line 1:1 extraneous input 'a' expecting {, 'b'}\nline 1:3 token recognition error at: 'c'\n", this.stderrDuringParse); + } + + @Test + public void testMultiTokenDeletionBeforeLoop() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b'* 'c';"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); + assertEquals("", found); + assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenDeletionDuringLoop() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b'* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); + assertEquals("", found); + assertEquals("line 1:2 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testMultiTokenDeletionDuringLoop() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' 'b'* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); + assertEquals("", found); + assertEquals("line 1:2 extraneous input 'a' expecting {'b', 'c'}\nline 1:6 extraneous input 'a' expecting {'b', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenDeletionBeforeLoop2() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' ('b'|'z'{})*;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); + assertEquals("", found); + assertEquals("line 1:1 extraneous input 'a' expecting {, 'b', 'z'}\nline 1:3 token recognition error at: 'c'\n", this.stderrDuringParse); + } + + @Test + public void testMultiTokenDeletionBeforeLoop2() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' ('b'|'z'{})* 'c';"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); + assertEquals("", found); + assertEquals("line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testSingleTokenDeletionDuringLoop2() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' ('b'|'z'{})* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); + assertEquals("", found); + assertEquals("line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testMultiTokenDeletionDuringLoop2() throws Exception { + String grammar = "grammar T;\n" + + "a : 'a' ('b'|'z'{})* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); + assertEquals("", found); + assertEquals("line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\nline 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n", this.stderrDuringParse); + } + + @Test + public void testLL1ErrorInfo() throws Exception { + String grammar = "grammar T;\n" + + "start : animal (AND acClass)? 
service EOF;\n" + + "animal : (DOG | CAT );\n" + + "service : (HARDWARE | SOFTWARE) ;\n" + + "AND : 'and';\n" + + "DOG : 'dog';\n" + + "CAT : 'cat';\n" + + "HARDWARE: 'hardware';\n" + + "SOFTWARE: 'software';\n" + + "WS : ' ' -> skip ;\n" + + "acClass\n" + + "@init\n" + + "{System.out.println(this.getExpectedTokens().toString(this.tokenNames)););}\n" + + " : ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "dog and software", false); + assertEquals("{'hardware', 'software'}\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testInvalidEmptyInput() throws Exception { + String grammar = "grammar T;\n" + + "start : ID+;\n" + + "ID : [a-z]+;\n" + + ";"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "", false); + assertEquals("", found); + assertEquals("line 1:0 missing ID at ''\n", this.stderrDuringParse); + } + + @Test + public void testContextListGetters() throws Exception { + String grammar = "grammar T;\n" + + "@parser::members{\n" + + " function foo() {\n" + + " var s = new SContext();\n" + + " var a = s.a();\n" + + " var b = s.b();\n" + + " };\n" + + "}\n" + + "s : (a | b)+;\n" + + "a : 'a' {document.getElementById('output').value += 'a';};\n" + + "b : 'b' {document.getElementById('output').value += 'b';};\n" + + ";"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abab", false); + assertEquals("abab\n", found); + assertNull(this.stderrDuringParse); + } + + String testDuplicatedLeftRecursiveCall(String input) throws Exception { + String grammar = "grammar T;\n" + + "start : expr EOF;\n" + + "expr : 'x'\n" + + " | expr expr\n" + + " ;\n" + + ";"; + return execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); + } + + @Test + public void testDuplicatedLeftRecursiveCall_1() throws Exception { + String found = testDuplicatedLeftRecursiveCall("xx"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDuplicatedLeftRecursiveCall_2() throws Exception { + String found = testDuplicatedLeftRecursiveCall("xxx"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDuplicatedLeftRecursiveCall_3() throws Exception { + String found = testDuplicatedLeftRecursiveCall("xxxx"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testInvalidATNStateRemoval() throws Exception { + String grammar = "grammar T;\n" + + "start : ID ':' expr;\n" + + "expr : primary expr? {} | expr '->' ID;\n" + + "primary : ID;\n" + + "ID : [a-z]+;\n" + + ";"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x:x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNoViableAltAvoidance() throws Exception { + String grammar = "grammar T;\n" + + "s : e '!' ;\n" + + "e : 'a' 'b'\n" + + " | 'a'\n" + + " ;\n" + + "DOT : '.' ;\n" + + "WS : [ \\t\\r\\n]+ -> skip;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a.", false); + assertEquals("", found); + assertEquals("line 1:1 mismatched input '.' 
expecting '!'\n", this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java b/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java new file mode 100644 index 000000000..1b3cb8a32 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java @@ -0,0 +1,378 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestParserExec extends BaseTest { + + @Test + public void testLabels() throws Exception { + String grammar = "grammar T;\n" + + "a : b1=b b2+=b* b3+=';' ;\n" + + "b : id_=ID val+=INT*;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testListLabelsOnSet() throws Exception { + String grammar = "grammar T;\n" + + "a : b b* ';' ;\n" + + "b : ID val+=(INT | FLOAT)*;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "FLOAT : [0-9]+ '.' [0-9]+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAorB() throws Exception { + String grammar = "grammar T;\n" + + "a : ID {\n" + + "System.out.println(\"alt 1\");\n" + + "} | INT {\n" + + "System.out.println(\"alt 2\");\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "34", false); + assertEquals("alt 2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testBasic() throws Exception { + String grammar = "grammar T;\n" + + "a : ID INT {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34", false); + assertEquals("abc34\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAPlus() throws Exception { + String grammar = "grammar T;\n" + + "a : ID+ {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "a b c", false); + assertEquals("abc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAorAPlus() throws Exception { + String grammar = "grammar T;\n" + + "a : (ID|ID)+ {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "a b c", false); + assertEquals("abc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIfIfElseGreedyBinding1() throws Exception { + String grammar = "grammar T;\n" + + "start : statement+ ;\n" + + "statement : 'x' | ifStatement;\n" + + "ifStatement : 'if' 'y' statement ('else' statement)? 
{\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> channel(HIDDEN);"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "if y if y x else x", false); + assertEquals("if y x else x\nif y if y x else x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIfIfElseGreedyBinding2() throws Exception { + String grammar = "grammar T;\n" + + "start : statement+ ;\n" + + "statement : 'x' | ifStatement;\n" + + "ifStatement : 'if' 'y' statement ('else' statement|) {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> channel(HIDDEN);"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "if y if y x else x", false); + assertEquals("if y x else x\nif y if y x else x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIfIfElseNonGreedyBinding1() throws Exception { + String grammar = "grammar T;\n" + + "start : statement+ ;\n" + + "statement : 'x' | ifStatement;\n" + + "ifStatement : 'if' 'y' statement ('else' statement)?? {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> channel(HIDDEN);"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "if y if y x else x", false); + assertEquals("if y x\nif y if y x else x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIfIfElseNonGreedyBinding2() throws Exception { + String grammar = "grammar T;\n" + + "start : statement+ ;\n" + + "statement : 'x' | ifStatement;\n" + + "ifStatement : 'if' 'y' statement (|'else' statement) {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> channel(HIDDEN);"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "if y if y x else x", false); + assertEquals("if y x\nif y if y x else x\n", found); + assertNull(this.stderrDuringParse); + } + + String testAStar(String input) throws Exception { + String grammar = "grammar T;\n" + + "a : ID* {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+;\n" + + "WS : (' '|'\\n') -> skip;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testAStar_1() throws Exception { + String found = testAStar(""); + assertEquals("\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAStar_2() throws Exception { + String found = testAStar("a b c"); + assertEquals("abc\n", found); + assertNull(this.stderrDuringParse); + } + + String testLL1OptionalBlock(String input) throws Exception { + String grammar = "grammar T;\n" + + "a : (ID|{}INT)? 
{\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testLL1OptionalBlock_1() throws Exception { + String found = testLL1OptionalBlock(""); + assertEquals("\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLL1OptionalBlock_2() throws Exception { + String found = testLL1OptionalBlock("a"); + assertEquals("a\n", found); + assertNull(this.stderrDuringParse); + } + + String testAorAStar(String input) throws Exception { + String grammar = "grammar T;\n" + + "a : (ID|ID)* {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+;\n" + + "WS : (' '|'\\n') -> skip;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testAorAStar_1() throws Exception { + String found = testAorAStar(""); + assertEquals("\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAorAStar_2() throws Exception { + String found = testAorAStar("a b c"); + assertEquals("abc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAorBPlus() throws Exception { + String grammar = "grammar T;\n" + + "a : (ID|INT{\n" + + "})+ {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "a 34 c", false); + assertEquals("a34c\n", found); + assertNull(this.stderrDuringParse); + } + + String testAorBStar(String input) throws Exception { + String grammar = "grammar T;\n" + + "a : (ID|INT{\n" + + "})* {\n" + + "System.out.println($text);\n" + + "};\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testAorBStar_1() throws Exception { + String found = testAorBStar(""); + assertEquals("\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAorBStar_2() throws Exception { + String found = testAorBStar("a 34 c"); + assertEquals("a34c\n", found); + assertNull(this.stderrDuringParse); + } + + String testOptional(String input) throws Exception { + String grammar = "grammar T;\n" + + "stat : ifstat | 'x';\n" + + "ifstat : 'if' stat ('else' stat)?;\n" + + "WS : [ \\n\\t]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "stat", input, false); + } + + @Test + public void testOptional_1() throws Exception { + String found = testOptional("x"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptional_2() throws Exception { + String found = testOptional("if x"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptional_3() throws Exception { + String found = testOptional("if x else x"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptional_4() throws Exception { + String found = testOptional("if if x else x"); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredicatedIfIfElse() throws Exception { + String grammar = "grammar T;\n" + + "s : stmt EOF ;\n" + + "stmt : ifStmt | ID;\n" + + "ifStmt : 'if' ID stmt ('else' stmt | { this._input.LA(1)!=ELSE }?);\n" + + "ELSE : 'else';\n" + + "ID : 
[a-zA-Z]+;\n" + + "WS : [ \\n\\t]+ -> skip;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "if x if x a else b", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLabelAliasingAcrossLabeledAlternatives() throws Exception { + String grammar = "grammar T;\n" + + "start : a* EOF;\n" + + "a\n" + + " : label=subrule { System.out.println($label.text); } #One\n" + + " | label='y' { System.out.println($label.text); } #Two\n" + + " ;\n" + + "subrule : 'x';\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xy", false); + assertEquals("x\ny\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredictionIssue334() throws Exception { + String grammar = "grammar T;\n" + + "file_ @init{\n" + + "this._errHandler = new antlr4.error.BailErrorStrategy();\n" + + "} \n" + + "@after {\n" + + "System.out.println($ctx.toStringTree(null, this););\n" + + "}\n" + + " : item (SEMICOLON item)* SEMICOLON? EOF ;\n" + + "item : A B?;\n" + + "SEMICOLON: ';';\n" + + "A : 'a'|'A';\n" + + "B : 'b'|'B';\n" + + "WS : [ \\r\\t\\n]+ -> skip;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "file_", "a", false); + assertEquals("(file_ (item a) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testListLabelForClosureContext() throws Exception { + String grammar = "grammar T;\n" + + "ifStatement\n" + + "@after {\n" + + "var items = $ctx.elseIfStatement() \n" + + "}\n" + + " : 'if' expression\n" + + " ( ( 'then'\n" + + " executableStatement*\n" + + " elseIfStatement* // <--- problem is here\n" + + " elseStatement?\n" + + " 'end' 'if'\n" + + " ) | executableStatement )\n" + + " ;\n" + + "\n" + + "elseIfStatement\n" + + " : 'else' 'if' expression 'then' executableStatement*\n" + + " ;\n" + + "expression : 'a' ;\n" + + "executableStatement : 'a' ;\n" + + "elseStatement : 'a' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "expression", "a", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java new file mode 100644 index 000000000..1817a622b --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java @@ -0,0 +1,147 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestSemPredEvalLexer extends BaseTest { + + @Test + public void testDisableRule() throws Exception { + String grammar = "lexer grammar L;\n" + + "E1 : 'enum' { false }? ;\n" + + "E2 : 'enum' { true }? ; // winner not E1 or ID\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execLexer("L.g4", grammar, "L", "enum abc"); + assertEquals("[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<3>,1:5]\n" + + "[@2,8:7='',<-1>,1:8]\n" + + "s0-' '->:s5=>4\n" + + "s0-'a'->:s6=>3\n" + + "s0-'e'->:s1=>3\n" + + ":s1=>3-'n'->:s2=>3\n" + + ":s2=>3-'u'->:s3=>3\n" + + ":s6=>3-'b'->:s6=>3\n" + + ":s6=>3-'c'->:s6=>3\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIDvsEnum() throws Exception { + String grammar = "lexer grammar L;\n" + + "ENUM : 'enum' { false }? 
;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execLexer("L.g4", grammar, "L", "enum abc enum"); + assertEquals("[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<2>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s5=>3\n" + + "s0-'a'->:s4=>2\n" + + "s0-'e'->:s1=>2\n" + + ":s1=>2-'n'->:s2=>2\n" + + ":s2=>2-'u'->:s3=>2\n" + + ":s4=>2-'b'->:s4=>2\n" + + ":s4=>2-'c'->:s4=>2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIDnotEnum() throws Exception { + String grammar = "lexer grammar L;\n" + + "ENUM : [a-z]+ { false }? ;\n" + + "ID : [a-z]+ ;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execLexer("L.g4", grammar, "L", "enum abc enum"); + assertEquals("[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<2>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s2=>3\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testEnumNotID() throws Exception { + String grammar = "lexer grammar L;\n" + + "ENUM : [a-z]+ { this.text===\"enum\" }? ;\n" + + "ID : [a-z]+ ;\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execLexer("L.g4", grammar, "L", "enum abc enum"); + assertEquals("[@0,0:3='enum',<1>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<1>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s3=>3\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIndent() throws Exception { + String grammar = "lexer grammar L;\n" + + "ID : [a-z]+ ;\n" + + "INDENT : [ \\t]+ { this._tokenStartColumn===0 }? \\n\" +\n" + + " { System.out.println(\"INDENT\"); } ;\"+\n" + + "NL : '\\n';\n" + + "WS : [ \\t]+ ;"; + String found = execLexer("L.g4", grammar, "L", "abc\n def \n"); + assertEquals("INDENT\n" + + "[@0,0:2='abc',<1>,1:0]\n" + + "[@1,3:3='\\n',<3>,1:3]\n" + + "[@2,4:5=' ',<2>,2:0]\n" + + "[@3,6:8='def',<1>,2:2]\n" + + "[@4,9:10=' ',<4>,2:5]\n" + + "[@5,11:11='\\n',<3>,2:7]\n" + + "[@6,12:11='',<-1>,3:8]\n" + + "s0-'\n" + + "'->:s2=>3\n" + + "s0-'a'->:s1=>1\n" + + "s0-'d'->:s1=>1\n" + + ":s1=>1-'b'->:s1=>1\n" + + ":s1=>1-'c'->:s1=>1\n" + + ":s1=>1-'e'->:s1=>1\n" + + ":s1=>1-'f'->:s1=>1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLexerInputPositionSensitivePredicates() throws Exception { + String grammar = "lexer grammar L;\n" + + "WORD1 : ID1+ { System.out.println(this.getText()); } ;\n" + + "WORD2 : ID2+ { System.out.println(this.getText()); } ;\n" + + "fragment ID1 : { this.column < 2 }? [a-zA-Z];\n" + + "fragment ID2 : { this.column >= 2 }? [a-zA-Z];\n" + + "WS : (' '|'\\n') -> skip;"; + String found = execLexer("L.g4", grammar, "L", "a cde\nabcde\n"); + assertEquals("a\n" + + "cde\n" + + "ab\n" + + "cde\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,2:4='cde',<2>,1:2]\n" + + "[@2,6:7='ab',<1>,2:0]\n" + + "[@3,8:10='cde',<2>,2:2]\n" + + "[@4,12:11='',<-1>,3:0]\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredicatedKeywords() throws Exception { + String grammar = "lexer grammar L;\n" + + "ENUM : [a-z]+ { this.text===\"enum\" }? 
{ System.out.println(\"enum!\"); } ;\n" + + "ID : [a-z]+ { System.out.println(\"ID\" + this.getText()); } ;\n" + + "WS : [ \\n] -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "enum enu a"); + assertEquals("enum!\n" + + "ID enu\n" + + "ID a\n" + + "[@0,0:3='enum',<1>,1:0]\n" + + "[@1,5:7='enu',<2>,1:5]\n" + + "[@2,9:9='a',<2>,1:9]\n" + + "[@3,10:9='',<-1>,1:10]\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java new file mode 100644 index 000000000..602542aa4 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java @@ -0,0 +1,426 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestSemPredEvalParser extends BaseTest { + + @Test + public void testSimpleValidate() throws Exception { + String grammar = "grammar T;\n" + + "s : a ;\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); + assertEquals("", found); + assertEquals("line 1:0 no viable alternative at input 'x'\n", this.stderrDuringParse); + } + + @Test + public void testSimpleValidate2() throws Exception { + String grammar = "grammar T;\n" + + "s : a a a;\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "3 4 x", false); + assertEquals("alt 2\nalt 2\n", found); + assertEquals("line 1:4 no viable alternative at input 'x'\n", this.stderrDuringParse); + } + + @Test + public void testAtomWithClosureInTranslatedLRRule() throws Exception { + String grammar = "grammar T;\n" + + "start : e[0] EOF;\n" + + "e[int _p]\n" + + " : ( 'a' | 'b'+ ) ( {3 >= $_p}? '+' e[4] )*\n" + + " ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "a+b+a", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testValidateInDFA() throws Exception { + String grammar = "grammar T;\n" + + "s : a ';' a;\n" + + "// ';' helps us to resynchronize without consuming\n" + + "// 2nd 'a' reference. We our testing that the DFA also\n" + + "// throws an exception if the validating predicate fails\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x ; y", false); + assertEquals("", found); + assertEquals("line 1:0 no viable alternative at input 'x'\nline 1:4 no viable alternative at input 'y'\n", this.stderrDuringParse); + } + + @Test + public void testSimple() throws Exception { + String grammar = "grammar T;\n" + + "s : a a a; // do 3x: once in ATN, next in DFA then INT in ATN\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? 
ID {System.out.println(\"alt 2\");}\n" + + " | INT {System.out.println(\"alt 3\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x y 3", false); + assertEquals("alt 2\nalt 2\nalt 3\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOrder() throws Exception { + String grammar = "grammar T;\n" + + "s : a {} a; // do 2x: once in ATN, next in DFA;\n" + + "// action blocks lookahead from falling off of 'a'\n" + + "// and looking into 2nd 'a' ref. !ctx dependent pred\n" + + "a : ID {System.out.println(\"alt 1\");}\n" + + " | {true}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x y", false); + assertEquals("alt 1\nalt 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void test2UnpredicatedAlts() throws Exception { + String grammar = "grammar T;\n" + + "s : {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;} a ';' a; // do 2x: once in ATN, next in DFA\n" + + "a : ID {System.out.println(\"alt 1\");}\n" + + " | ID {System.out.println(\"alt 2\");}\n" + + " | {false}? ID {System.out.println(\"alt 3\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x; y", false); + assertEquals("alt 1\nalt 1\n", found); + assertEquals("line 1:0 reportAttemptingFullContext d=0 (a), input='x'\nline 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\nline 1:3 reportAttemptingFullContext d=0 (a), input='y'\nline 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n", this.stderrDuringParse); + } + + @Test + public void test2UnpredicatedAltsAndOneOrthogonalAlt() throws Exception { + String grammar = "grammar T;\n" + + "s : {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;} a ';' a ';' a;\n" + + "a : INT {System.out.println(\"alt 1\");}\n" + + " | ID {System.out.println(\"alt 2\");} // must pick this one for ID since pred is false\n" + + " | ID {System.out.println(\"alt 3\");}\n" + + " | {false}? ID {console.log(\\\"alt 4\\\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "34; x; y", false); + assertEquals("alt 1\nalt 2\nalt 2\n", found); + assertEquals("line 1:4 reportAttemptingFullContext d=0 (a), input='x'\nline 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\nline 1:7 reportAttemptingFullContext d=0 (a), input='y'\nline 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n", this.stderrDuringParse); + } + + @Test + public void testRewindBeforePredEval() throws Exception { + String grammar = "grammar T;\n" + + "s : a a;\n" + + "a : {this._input.LT(1).text===\"x\"}? ID INT {System.out.println(\"alt 1\");}\n" + + " | {this._input.LT(1).text===\"y\"}? 
ID INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y 3 x 4", false); + assertEquals("alt 2\nalt 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNoTruePredsThrowsNoViableAlt() throws Exception { + String grammar = "grammar T;\n" + + "s : a a;\n" + + "a : {false}? ID INT {System.out.println(\"alt 1\");}\n" + + " | {false}? ID INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y 3 x 4", false); + assertEquals("", found); + assertEquals("line 1:0 no viable alternative at input 'y'\n", this.stderrDuringParse); + } + + @Test + public void testToLeft() throws Exception { + String grammar = "grammar T;\n" + + " s : a+ ;\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", false); + assertEquals("alt 2\nalt 2\nalt 2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testUnpredicatedPathsInAlt() throws Exception { + String grammar = "grammar T;\n" + + "s : a {System.out.println(\"alt 1\");}\n" + + " | b {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "a : {false}? ID INT\n" + + " | ID INT\n" + + " ;\n" + + "b : ID ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x 4", false); + assertEquals("alt 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testActionHidesPreds() throws Exception { + String grammar = "grammar T;\n" + + "@members {this.i = 0;}\n" + + "s : a+ ;\n" + + "a : {this.i = 1;} ID {this.i === 1}? {System.out.println(\"alt 1\");}\n" + + " | {this.i = 2;} ID {this.i === 2}? {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", false); + assertEquals("alt 1\nalt 1\nalt 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testToLeftWithVaryingPredicate() throws Exception { + String grammar = "grammar T;\n" + + "@members {this.i=0}\n" + + "s : ({this.i += 1;\\nSystem.out.println(\"i=\" + this.i);} a)+ ;\n" + + "a : {this.i % m === 0}? ID {System.out.println(\"alt 1\");}\n" + + " | {this.i % m != 0}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", false); + assertEquals("i=1\nalt 2\ni=2\nalt 1\ni=3\nalt 2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredicateDependentOnArg() throws Exception { + String grammar = "grammar T;\n" + + "@members {i=0}\n" + + "s : a[2] a[1];\n" + + "\"a[int i]\" +\n" + + "\" : {$i===1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i===2}? 
ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a b", false); + assertEquals("alt 2\nalt 1\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredicateDependentOnArg2() throws Exception { + String grammar = "grammar T;\n" + + "@members {i=0}\n" + + "s : a[2] a[1];\n" + + "a[int i]\" +\n" + + " : {$i===1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i===2}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a b", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDependentPredNotInOuterCtxShouldBeIgnored() throws Exception { + String grammar = "grammar T;\n" + + "s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a;\n" + + "b[int i] : a[i] ;\n" + + "a[int i]\" +\n" + + " : {$i===1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i===2}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a;", false); + assertEquals("alt 2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testIndependentPredNotPassedOuterCtxToAvoidCastException() throws Exception { + String grammar = "grammar T;\n" + + "s : b ';' | b '.' ;\n" + + "b : a ;\n" + + "a\n" + + " : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a;", false); + assertEquals("alt 2\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredsInGlobalFOLLOW() throws Exception { + String grammar = "grammar T;\n" + + "@members {\n" + + "this.pred = function(v) {\n" + + " document.getElementById('output').value += 'eval=\" + v.toString() + '\\n';\n" + + " return v;\n" + + "};\n" + + "}\n" + + "s : e {this.pred(true)}? {System.out.println(\"parse\");} '!' ;\n" + + "t : e {this.pred(false)}? ID ;\n" + + "e : ID | ; // non-LL(1) so we use ATN\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", false); + assertEquals("eval=true\nparse\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testDepedentPredsInGlobalFOLLOW() throws Exception { + String grammar = "grammar T;\n" + + "@members {\n" + + "this.pred = function(v) {\n" + + " document.getElementById('output').value += 'eval=\" + v.toString() + '\\n';\n" + + " return v;\n" + + "};\n" + + "}\n" + + "s : a[99] ;\n" + + "a[int i] : e {this.pred($i===99)}? {System.out.println(\"parse\");} '!' ;\n" + + "b[int i] : e {this.pred($i===99)}? 
ID ;\n" + + "e : ID | ; // non-LL(1) so we use ATN\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", false); + assertEquals("eval=true\nparse\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testActionsHidePredsInGlobalFOLLOW() throws Exception { + String grammar = "grammar T;\n" + + "@members {\n" + + "this.p = function(v) {\n" + + "this.pred = function(v) {\n" + + " document.getElementById('output').value += 'eval=\" + v.toString() + '\\n';\n" + + " return v;\n" + + "};\n" + + "}\n" + + "s : e {} {this.pred(true)}? {System.out.println(\"parse\");} '!' ;\n" + + "t : e {} {this.pred(false)}? ID ;\n" + + "e : ID | ; // non-LL(1) so we use ATN\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", false); + assertEquals("eval=true\nparse\n", found); + assertNull(this.stderrDuringParse); + } + + String testPredTestedEvenWhenUnAmbig(String input) throws Exception { + String grammar = "grammar T;\n" + + "@members {this.enumKeyword = true;}\n" + + "primary\n" + + " : ID {System.out.println(\"ID \"+$ID.text);}\n" + + " | {!this.enumKeyword}? 'enum' {System.out.println(\"enum\");}\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n\\r]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "primary", input, false); + } + + @Test + public void testPredTestedEvenWhenUnAmbig_1() throws Exception { + String found = testPredTestedEvenWhenUnAmbig("abc"); + assertEquals("ID abc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPredTestedEvenWhenUnAmbig_2() throws Exception { + String found = testPredTestedEvenWhenUnAmbig("enum"); + assertEquals("", found); + assertEquals("line 1:0 no viable alternative at input 'enum'\n", this.stderrDuringParse); + } + + @Test + public void testDisabledAlternative() throws Exception { + String grammar = "grammar T;\n" + + "cppCompilationUnit : content+ EOF;\n" + + "content: anything | {false}? .;\n" + + "anything: ANY_CHAR;\n" + + "ANY_CHAR: [_a-zA-Z0-9];"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "cppCompilationUnit", "hello", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + String testPredFromAltTestedInLoopBack(String input) throws Exception { + String grammar = "grammar T;\n" + + "file_\n" + + "@after {System.out.println($ctx.toStringTree(null, this););}\n" + + " : para para EOF ;\n" + + "para: paraContent NL NL ;\n" + + "paraContent : ('s'|'x'|{this._input.LA(2)!=NL}? 
NL)+ ;\n" + + "NL : '\\n' ;\n" + + "s : 's' ;\n" + + "X : 'x' ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "file_", input, false); + } + + @Test + public void testPredFromAltTestedInLoopBack_1() throws Exception { + String found = testPredFromAltTestedInLoopBack("s\n\n\nx\n"); + assertEquals("(file_ (para (paraContent s) \n \n) (para (paraContent \n x \n)) )\n", found); + assertEquals("line 5:2 mismatched input '' expecting '\n'\n", this.stderrDuringParse); + } + + @Test + public void testPredFromAltTestedInLoopBack_2() throws Exception { + String found = testPredFromAltTestedInLoopBack("s\n\n\nx\n\n"); + assertEquals("(file_ (para (paraContent s) \n \n) (para (paraContent \n x) \n \n) )\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSets.java b/tool/test/org/antlr/v4/test/rt/java/TestSets.java new file mode 100644 index 000000000..2e7dd9e50 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/java/TestSets.java @@ -0,0 +1,227 @@ +package org.antlr.v4.test.rt.java; + +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestSets extends BaseTest { + + @Test + public void testSeqDoesNotBecomeSet() throws Exception { + String grammar = "grammar T;\n" + + "a : C {System.out.println(this._input.getText());} ;\n" + + "fragment A : '1' | '2';\n" + + "fragment B : '3' '4';\n" + + "C : A | B;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "34", false); + assertEquals("34\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testParserSet() throws Exception { + String grammar = "grammar T;\n" + + "a : t=('x'|'y') {System.out.println($t.text);} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); + assertEquals("x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testParserNotSet() throws Exception { + String grammar = "grammar T;\n" + + "a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "zz", false); + assertEquals("z\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testParserNotToken() throws Exception { + String grammar = "grammar T;\n" + + "a : ~'x' 'z' {System.out.println(this._input.getText());} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "zz", false); + assertEquals("zz\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testParserNotTokenWithLabel() throws Exception { + String grammar = "grammar T;\n" + + "a : t=~'x' 'z' {System.out.println($t.text);} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "zz", false); + assertEquals("z\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testRuleAsSet() throws Exception { + String grammar = "grammar T;\n" + + "a @after {System.out.println(this._input.getText());} : 'a' | 'b' |'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "b", false); + assertEquals("b\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNotChar() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "a : ~'b' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); + assertEquals("x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptionalSingleElement() 
throws Exception { + String grammar = "grammar T;\n" + + "a : A? 'c' {System.out.println(this._input.getText());} ;\n" + + "a : 'b' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "bc", false); + assertEquals("bc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptionalLexerSingleElement() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println(this._input.getText());} ;\n" + + "a : 'b'? 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "bc", false); + assertEquals("bc\n", found); + assertNull(this.stderrDuringParse); + } + + String testStarLexerSingleElement(String input) throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println(this._input.getText());} ;\n" + + "a : 'b'* 'c' ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testStarLexerSingleElement_1() throws Exception { + String found = testStarLexerSingleElement("bbbbc"); + assertEquals("bbbbc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testStarLexerSingleElement_2() throws Exception { + String found = testStarLexerSingleElement("c"); + assertEquals("c\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPlusLexerSingleElement() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println(this._input.getText());} ;\n" + + "a : 'b'+ 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "bbbbc", false); + assertEquals("bbbbc\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptionalSet() throws Exception { + String grammar = "grammar T;\n" + + "a : ('a'|'b')? 'c' {System.out.println(this._input.getText());} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); + assertEquals("ac\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testStarSet() throws Exception { + String grammar = "grammar T;\n" + + "a : ('a'|'b')* 'c' {System.out.println(this._input.getText());} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaac", false); + assertEquals("abaac\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testPlusSet() throws Exception { + String grammar = "grammar T;\n" + + "a : ('a'|'b')+ 'c' {System.out.println(this._input.getText());} ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaac", false); + assertEquals("abaac\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLexerOptionalSet() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println(this._input.getText());} ;\n" + + "a : ('a'|'b')? 
'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); + assertEquals("ac\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLexerStarSet() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println(this._input.getText());} ;\n" + + "a : ('a'|'b')* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaac", false); + assertEquals("abaac\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testLexerPlusSet() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println(this._input.getText());} ;\n" + + "a : ('a'|'b')+ 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaac", false); + assertEquals("abaac\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNotCharSet() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "a : ~('b'|'c') ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); + assertEquals("x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNotCharSetWithLabel() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "a : h=~('b'|'c') ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); + assertEquals("x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testNotCharSetWithRuleRef3() throws Exception { + String grammar = "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "a : ('a'|B) ; // this doesn't collapse to set but works\n" + + "fragment\n" + + "B : ~('a'|'c') ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); + assertEquals("x\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testCharSetLiteral() throws Exception { + String grammar = "grammar T;\n" + + "a : (A {System.out.println($A.text);})+ ;\n" + + "a : [AaBb] ;\n" + + "WS : (' '|'\\n')+ -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "A a B b", false); + assertEquals("A\na\nB\nb\n", found); + assertNull(this.stderrDuringParse); + } + + +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/tool/BaseTest.java b/tool/test/org/antlr/v4/test/tool/BaseTest.java new file mode 100644 index 000000000..b25c61588 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/BaseTest.java @@ -0,0 +1,1414 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.automata.ATNFactory; +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.codegen.CodeGenerator; +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonToken; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.IntStream; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.WritableToken; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.runtime.atn.DecisionState; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.runtime.misc.NotNull; +import org.antlr.v4.runtime.misc.Nullable; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.misc.Utils; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.tool.ANTLRMessage; +import org.antlr.v4.tool.DOTGenerator; +import org.antlr.v4.tool.DefaultToolListener; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.GrammarSemanticsMessage; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.Rule; +import org.junit.Before; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.STGroupString; + +import javax.tools.JavaCompiler; +import javax.tools.JavaFileObject; +import javax.tools.StandardJavaFileManager; +import javax.tools.ToolProvider; +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.PipedInputStream; +import java.io.PipedOutputStream; +import java.io.PrintStream; +import java.io.StringReader; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import 
java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public abstract class BaseTest { + // -J-Dorg.antlr.v4.test.BaseTest.level=FINE + private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); + + public static final String newline = System.getProperty("line.separator"); + public static final String pathSep = System.getProperty("path.separator"); + + /** + * When the {@code antlr.testinprocess} runtime property is set to + * {@code true}, the test suite will attempt to load generated classes into + * the test process for direct execution rather than invoking the JVM in a + * new process for testing. + * + *
+ * In-process testing results in a substantial performance improvement, but + * some test environments created by IDEs do not support the mechanisms + * currently used by the tests to dynamically load compiled code. Therefore, + * the default behavior (used in all other cases) favors reliable + * cross-system test execution by executing generated test code in a + * separate process.
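+ * For example (hypothetical invocation): launching the test JVM with
+ * {@code -Dantlr.testinprocess=true} enables the in-process execution path
+ * described above.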
+ */ + public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess")); + + /** + * When the {@code antlr.preserve-test-dir} runtime property is set to + * {@code true}, the temporary directories created by the test run will not + * be removed at the end of the test run, even for tests that completed + * successfully. + * + *
+ * The default behavior (used in all other cases) is removing the temporary + * directories for all tests which completed successfully, and preserving + * the directories for tests which failed.
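+ * For example (hypothetical invocation): running the suite with
+ * {@code -Dantlr.preserve-test-dir=true} keeps every temporary test directory,
+ * including those of tests that passed, for later inspection.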
+ */ + public static final boolean PRESERVE_TEST_DIR = Boolean.parseBoolean(System.getProperty("antlr.preserve-test-dir")); + + /** + * The base test directory is the directory where generated files get placed + * during unit test execution. + * + *
+ * The default value for this property is the {@code java.io.tmpdir} system + * property, and can be overridden by setting the + * {@code antlr.java-test-dir} property to a custom location. Note that the + * {@code antlr.java-test-dir} property directly affects the + * {@link #CREATE_PER_TEST_DIRECTORIES} value as well.
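+ * For example (hypothetical path): passing {@code -Dantlr.java-test-dir=/var/tmp/antlr-tests}
+ * to the test JVM places generated files under {@code /var/tmp/antlr-tests} instead of
+ * the {@code java.io.tmpdir} default.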
+ */ + public static final String BASE_TEST_DIR; + + /** + * When {@code true}, a temporary directory will be created for each test + * executed during the test run. + * + *
+ * This value is {@code true} when the {@code antlr.java-test-dir} system + * property is not set (the suite then creates a per-test subdirectory under + * {@code java.io.tmpdir}, matching the static initializer below), and otherwise {@code false}.
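+ * Per the {@code setUp()} logic further down, each per-test directory is named from
+ * the test class and a timestamp, e.g. {@code TestSets-1413216000000} (illustrative
+ * value only).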
+ */ + public static final boolean CREATE_PER_TEST_DIRECTORIES; + + static { + String baseTestDir = System.getProperty("antlr.java-test-dir"); + boolean perTestDirectories = false; + if (baseTestDir == null || baseTestDir.isEmpty()) { + baseTestDir = System.getProperty("java.io.tmpdir"); + perTestDirectories = true; + } + + if (!new File(baseTestDir).isDirectory()) { + throw new UnsupportedOperationException("The specified base test directory does not exist: " + baseTestDir); + } + + BASE_TEST_DIR = baseTestDir; + CREATE_PER_TEST_DIRECTORIES = perTestDirectories; + } + + /** + * Build up the full classpath we need, including the surefire path (if present) + */ + public static final String CLASSPATH = System.getProperty("java.class.path"); + + public String tmpdir = null; + + /** If error during parser execution, store stderr here; can't return + * stdout and stderr. This doesn't trap errors from running antlr. + */ + protected String stderrDuringParse; + + @org.junit.Rule + public final TestRule testWatcher = new TestWatcher() { + + @Override + protected void succeeded(Description description) { + // remove tmpdir if no error. + if (!PRESERVE_TEST_DIR) { + eraseTempDir(); + } + } + + }; + + @Before + public void setUp() throws Exception { + if (CREATE_PER_TEST_DIRECTORIES) { + // new output dir for each test + String testDirectory = getClass().getSimpleName() + "-" + System.currentTimeMillis(); + tmpdir = new File(BASE_TEST_DIR, testDirectory).getAbsolutePath(); + } + else { + tmpdir = new File(BASE_TEST_DIR).getAbsolutePath(); + if (!PRESERVE_TEST_DIR && new File(tmpdir).exists()) { + eraseFiles(); + } + } + } + + protected org.antlr.v4.Tool newTool(String[] args) { + Tool tool = new Tool(args); + return tool; + } + + protected Tool newTool() { + org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); + return tool; + } + + protected ATN createATN(Grammar g, boolean useSerializer) { + if ( g.atn==null ) { + semanticProcess(g); + assertEquals(0, g.tool.getNumErrors()); + + ParserATNFactory f; + if ( g.isLexer() ) { + f = new LexerATNFactory((LexerGrammar)g); + } + else { + f = new ParserATNFactory(g); + } + + g.atn = f.createATN(); + assertEquals(0, g.tool.getNumErrors()); + } + + ATN atn = g.atn; + if (useSerializer) { + char[] serialized = ATNSerializer.getSerializedAsChars(atn); + return new ATNDeserializer().deserialize(serialized); + } + + return atn; + } + + protected void semanticProcess(Grammar g) { + if ( g.ast!=null && !g.ast.hasErrors ) { + System.out.println(g.ast.toStringTree()); + Tool antlr = new Tool(); + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) + for (Grammar imp : g.getImportedGrammars()) { + antlr.processNonCombinedGrammar(imp, false); + } + } + } + } + + public DFA createDFA(Grammar g, DecisionState s) { +// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); +// DFA dfa = conv.createDFA(); +// conv.issueAmbiguityWarnings(); +// System.out.print("DFA="+dfa); +// return dfa; + return null; + } + +// public void minimizeDFA(DFA dfa) { +// DFAMinimizer dmin = new DFAMinimizer(dfa); +// dfa.minimized = dmin.minimize(); +// } + + IntegerList getTypesFromString(Grammar g, String expecting) { + IntegerList expectingTokenTypes = new IntegerList(); + if ( expecting!=null && !expecting.trim().isEmpty() ) { + for (String tname : expecting.replace(" ", "").split(",")) { + int ttype = g.getTokenType(tname); + expectingTokenTypes.add(ttype); + } + } + return 
expectingTokenTypes; + } + + public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { + ANTLRInputStream in = new ANTLRInputStream(input); + IntegerList tokenTypes = new IntegerList(); + int ttype; + do { + ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); + tokenTypes.add(ttype); + } while ( ttype!= Token.EOF ); + return tokenTypes; + } + + public List getTokenTypes(LexerGrammar lg, + ATN atn, + CharStream input) + { + LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); + List tokenTypes = new ArrayList(); + int ttype; + boolean hitEOF = false; + do { + if ( hitEOF ) { + tokenTypes.add("EOF"); + break; + } + int t = input.LA(1); + ttype = interp.match(input, Lexer.DEFAULT_MODE); + if ( ttype == Token.EOF ) { + tokenTypes.add("EOF"); + } + else { + tokenTypes.add(lg.typeToTokenList.get(ttype)); + } + + if ( t==IntStream.EOF ) { + hitEOF = true; + } + } while ( ttype!=Token.EOF ); + return tokenTypes; + } + + List checkRuleDFA(String gtext, String ruleName, String expecting) + throws Exception + { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(gtext, equeue); + ATN atn = createATN(g, false); + ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; + if ( s==null ) { + System.err.println("no such rule: "+ruleName); + return null; + } + ATNState t = s.transition(0).target; + if ( !(t instanceof DecisionState) ) { + System.out.println(ruleName+" has no decision"); + return null; + } + DecisionState blk = (DecisionState)t; + checkRuleDFA(g, blk, expecting); + return equeue.all; + } + + List checkRuleDFA(String gtext, int decision, String expecting) + throws Exception + { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(gtext, equeue); + ATN atn = createATN(g, false); + DecisionState blk = atn.decisionToState.get(decision); + checkRuleDFA(g, blk, expecting); + return equeue.all; + } + + void checkRuleDFA(Grammar g, DecisionState blk, String expecting) + throws Exception + { + DFA dfa = createDFA(g, blk); + String result = null; + if ( dfa!=null ) result = dfa.toString(); + assertEquals(expecting, result); + } + + List checkLexerDFA(String gtext, String expecting) + throws Exception + { + return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); + } + + List checkLexerDFA(String gtext, String modeName, String expecting) + throws Exception + { + ErrorQueue equeue = new ErrorQueue(); + LexerGrammar g = new LexerGrammar(gtext, equeue); + g.atn = createATN(g, false); +// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); +// DFA dfa = conv.createDFA(modeName); +// g.setLookaheadDFA(0, dfa); // only one decision to worry about +// +// String result = null; +// if ( dfa!=null ) result = dfa.toString(); +// assertEquals(expecting, result); +// +// return equeue.all; + return null; + } + + protected String load(String fileName, @Nullable String encoding) + throws IOException + { + if ( fileName==null ) { + return null; + } + + String fullFileName = getClass().getPackage().getName().replace('.', '/') + '/' + fileName; + int size = 65000; + InputStreamReader isr; + InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); + if ( encoding!=null ) { + isr = new InputStreamReader(fis, encoding); + } + else { + isr = new InputStreamReader(fis); + } + try { + char[] data = new char[size]; + int n = isr.read(data); + return new String(data, 0, n); + } + finally { + isr.close(); + } + } + + /** Wow! 
much faster than compiling outside of VM. Finicky though. + * Had rules called r and modulo. Wouldn't compile til I changed to 'a'. + */ + protected boolean compile(String... fileNames) { + List files = new ArrayList(); + for (String fileName : fileNames) { + File f = new File(tmpdir, fileName); + files.add(f); + } + + JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); +// DiagnosticCollector diagnostics = +// new DiagnosticCollector(); + + StandardJavaFileManager fileManager = + compiler.getStandardFileManager(null, null, null); + + Iterable compilationUnits = + fileManager.getJavaFileObjectsFromFiles(files); + + Iterable compileOptions = + Arrays.asList("-g", "-source", "1.6", "-target", "1.6", "-implicit:class", "-Xlint:-options", "-d", tmpdir, "-cp", tmpdir+pathSep+CLASSPATH); + + JavaCompiler.CompilationTask task = + compiler.getTask(null, fileManager, null, compileOptions, null, + compilationUnits); + boolean ok = task.call(); + + try { + fileManager.close(); + } + catch (IOException ioe) { + ioe.printStackTrace(System.err); + } + +// List errors = new ArrayList(); +// for (Diagnostic diagnostic : diagnostics.getDiagnostics()) { +// errors.add( +// String.valueOf(diagnostic.getLineNumber())+ +// ": " + diagnostic.getMessage(null)); +// } +// if ( errors.size()>0 ) { +// System.err.println("compile stderr from: "+cmdLine); +// System.err.println(errors); +// return false; +// } + return ok; + + /* + File outputDir = new File(tmpdir); + try { + Process process = + Runtime.getRuntime().exec(args, null, outputDir); + StreamVacuum stdout = new StreamVacuum(process.getInputStream()); + StreamVacuum stderr = new StreamVacuum(process.getErrorStream()); + stdout.start(); + stderr.start(); + process.waitFor(); + stdout.join(); + stderr.join(); + if ( stdout.toString().length()>0 ) { + System.err.println("compile stdout from: "+cmdLine); + System.err.println(stdout); + } + if ( stderr.toString().length()>0 ) { + System.err.println("compile stderr from: "+cmdLine); + System.err.println(stderr); + } + int ret = process.exitValue(); + return ret==0; + } + catch (Exception e) { + System.err.println("can't exec compilation"); + e.printStackTrace(System.err); + return false; + } + */ + } + + protected ErrorQueue antlr(String grammarFileName, boolean defaultListener, String... 
extraOptions) { + final List options = new ArrayList(); + Collections.addAll(options, extraOptions); + if ( !options.contains("-o") ) { + options.add("-o"); + options.add(tmpdir); + } + if ( !options.contains("-lib") ) { + options.add("-lib"); + options.add(tmpdir); + } + if ( !options.contains("-encoding") ) { + options.add("-encoding"); + options.add("UTF-8"); + } + options.add(new File(tmpdir,grammarFileName).toString()); + + final String[] optionsA = new String[options.size()]; + options.toArray(optionsA); + Tool antlr = newTool(optionsA); + ErrorQueue equeue = new ErrorQueue(antlr); + antlr.addListener(equeue); + if (defaultListener) { + antlr.addListener(new DefaultToolListener(antlr)); + } + antlr.processGrammarsOnCommandLine(); + + if ( !defaultListener && !equeue.errors.isEmpty() ) { + System.err.println("antlr reports errors from "+options); + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage msg = equeue.errors.get(i); + System.err.println(msg); + } + System.out.println("!!!\ngrammar:"); + try { + System.out.println(new String(Utils.readFile(tmpdir+"/"+grammarFileName))); + } + catch (IOException ioe) { + System.err.println(ioe.toString()); + } + System.out.println("###"); + } + if ( !defaultListener && !equeue.warnings.isEmpty() ) { + System.err.println("antlr reports warnings from "+options); + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage msg = equeue.warnings.get(i); + System.err.println(msg); + } + } + + return equeue; + } + + protected ErrorQueue antlr(String grammarFileName, String grammarStr, boolean defaultListener, String... extraOptions) { + System.out.println("dir "+tmpdir); + mkdir(tmpdir); + writeFile(tmpdir, grammarFileName, grammarStr); + return antlr(grammarFileName, defaultListener, extraOptions); + } + + protected String execLexer(String grammarFileName, + String grammarStr, + String lexerName, + String input) + { + return execLexer(grammarFileName, grammarStr, lexerName, input, false); + } + + protected String execLexer(String grammarFileName, + String grammarStr, + String lexerName, + String input, + boolean showDFA) + { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, + null, + lexerName); + assertTrue(success); + writeFile(tmpdir, "input", input); + writeLexerTestFile(lexerName, showDFA); + compile("Test.java"); + String output = execClass("Test"); + if ( stderrDuringParse!=null && stderrDuringParse.length()>0 ) { + System.err.println(stderrDuringParse); + } + return output; + } + + public ParseTree execParser(String startRuleName, String input, + String parserName, String lexerName) + throws Exception + { + Pair pl = getParserAndLexer(input, parserName, lexerName); + Parser parser = pl.a; + return execStartRule(startRuleName, parser); + } + + public ParseTree execStartRule(String startRuleName, Parser parser) + throws IllegalAccessException, InvocationTargetException, + NoSuchMethodException + { + Method startRule = null; + Object[] args = null; + try { + startRule = parser.getClass().getMethod(startRuleName); + } + catch (NoSuchMethodException nsme) { + // try with int _p arg for recursive func + startRule = parser.getClass().getMethod(startRuleName, int.class); + args = new Integer[] {0}; + } + ParseTree result = (ParseTree)startRule.invoke(parser, args); +// System.out.println("parse tree = "+result.toStringTree(parser)); + return result; + } + + public Pair getParserAndLexer(String input, + String parserName, String lexerName) + throws Exception + { + final Class lexerClass = 
loadLexerClassFromTempDir(lexerName); + final Class parserClass = loadParserClassFromTempDir(parserName); + + ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); + + Class c = lexerClass.asSubclass(Lexer.class); + Constructor ctor = c.getConstructor(CharStream.class); + Lexer lexer = ctor.newInstance(in); + + Class pc = parserClass.asSubclass(Parser.class); + Constructor pctor = pc.getConstructor(TokenStream.class); + CommonTokenStream tokens = new CommonTokenStream(lexer); + Parser parser = pctor.newInstance(tokens); + return new Pair(parser, lexer); + } + + public Class loadClassFromTempDir(String name) throws Exception { + ClassLoader loader = + new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, + ClassLoader.getSystemClassLoader()); + return loader.loadClass(name); + } + + public Class loadLexerClassFromTempDir(String name) throws Exception { + return loadClassFromTempDir(name).asSubclass(Lexer.class); + } + + public Class loadParserClassFromTempDir(String name) throws Exception { + return loadClassFromTempDir(name).asSubclass(Parser.class); + } + + protected String execParser(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String startRuleName, + String input, boolean debug) + { + return execParser(grammarFileName, grammarStr, parserName, + lexerName, startRuleName, input, debug, false); + } + + protected String execParser(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String startRuleName, + String input, boolean debug, + boolean profile) + { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, + parserName, + lexerName, + "-visitor"); + assertTrue(success); + writeFile(tmpdir, "input", input); + return rawExecRecognizer(parserName, + lexerName, + startRuleName, + debug, + profile); + } + + /** Return true if all is well */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, + @Nullable String parserName, + String lexerName, + String... extraOptions) + { + return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); + } + + /** Return true if all is well */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, + @Nullable String parserName, + String lexerName, + boolean defaultListener, + String... 
extraOptions) + { + ErrorQueue equeue = + antlr(grammarFileName, grammarStr, defaultListener, extraOptions); + if (!equeue.errors.isEmpty()) { + return false; + } + + List files = new ArrayList(); + if ( lexerName!=null ) { + files.add(lexerName+".java"); + } + if ( parserName!=null ) { + files.add(parserName+".java"); + Set optionsSet = new HashSet(Arrays.asList(extraOptions)); + if (!optionsSet.contains("-no-listener")) { + files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseListener.java"); + } + if (optionsSet.contains("-visitor")) { + files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseVisitor.java"); + } + } + boolean allIsWell = compile(files.toArray(new String[files.size()])); + return allIsWell; + } + + protected String rawExecRecognizer(String parserName, + String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) + { + this.stderrDuringParse = null; + if ( parserName==null ) { + writeLexerTestFile(lexerName, false); + } + else { + writeTestFile(parserName, + lexerName, + parserStartRuleName, + debug, + profile); + } + + compile("Test.java"); + return execClass("Test"); + } + + public String execRecognizer() { + return execClass("Test"); + } + + public String execClass(String className) { + if (TEST_IN_SAME_PROCESS) { + try { + ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader()); + final Class mainClass = (Class)loader.loadClass(className); + final Method mainMethod = mainClass.getDeclaredMethod("main", String[].class); + PipedInputStream stdoutIn = new PipedInputStream(); + PipedInputStream stderrIn = new PipedInputStream(); + PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn); + PipedOutputStream stderrOut = new PipedOutputStream(stderrIn); + StreamVacuum stdoutVacuum = new StreamVacuum(stdoutIn); + StreamVacuum stderrVacuum = new StreamVacuum(stderrIn); + + PrintStream originalOut = System.out; + System.setOut(new PrintStream(stdoutOut)); + try { + PrintStream originalErr = System.err; + try { + System.setErr(new PrintStream(stderrOut)); + stdoutVacuum.start(); + stderrVacuum.start(); + mainMethod.invoke(null, (Object)new String[] { new File(tmpdir, "input").getAbsolutePath() }); + } + finally { + System.setErr(originalErr); + } + } + finally { + System.setOut(originalOut); + } + + stdoutOut.close(); + stderrOut.close(); + stdoutVacuum.join(); + stderrVacuum.join(); + String output = stdoutVacuum.toString(); + if ( stderrVacuum.toString().length()>0 ) { + this.stderrDuringParse = stderrVacuum.toString(); + System.err.println("exec stderrVacuum: "+ stderrVacuum); + } + return output; + } catch (MalformedURLException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (IOException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (InterruptedException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (IllegalAccessException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (IllegalArgumentException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (InvocationTargetException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (NoSuchMethodException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } catch (SecurityException ex) { + LOGGER.log(Level.SEVERE, null, ex); + 
throw new RuntimeException(ex); + } catch (ClassNotFoundException ex) { + LOGGER.log(Level.SEVERE, null, ex); + throw new RuntimeException(ex); + } + } + + try { + String[] args = new String[] { + "java", "-classpath", tmpdir+pathSep+CLASSPATH, + className, new File(tmpdir, "input").getAbsolutePath() + }; + //String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath(); + //System.out.println("execParser: "+cmdLine); + Process process = + Runtime.getRuntime().exec(args, null, new File(tmpdir)); + StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); + StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); + stdoutVacuum.start(); + stderrVacuum.start(); + process.waitFor(); + stdoutVacuum.join(); + stderrVacuum.join(); + String output = stdoutVacuum.toString(); + if ( stderrVacuum.toString().length()>0 ) { + this.stderrDuringParse = stderrVacuum.toString(); + System.err.println("exec stderrVacuum: "+ stderrVacuum); + } + return output; + } + catch (Exception e) { + System.err.println("can't exec recognizer"); + e.printStackTrace(System.err); + } + return null; + } + + public void testErrors(String[] pairs, boolean printTree) { + for (int i = 0; i < pairs.length; i+=2) { + String input = pairs[i]; + String expect = pairs[i+1]; + + String[] lines = input.split("\n"); + String fileName = getFilenameFromFirstLineOfGrammar(lines[0]); + ErrorQueue equeue = antlr(fileName, input, false); + + String actual = equeue.toString(true); + actual = actual.replace(tmpdir + File.separator, ""); + System.err.println(actual); + String msg = input; + msg = msg.replace("\n","\\n"); + msg = msg.replace("\r","\\r"); + msg = msg.replace("\t","\\t"); + + assertEquals("error in: "+msg,expect,actual); + } + } + + public String getFilenameFromFirstLineOfGrammar(String line) { + String fileName = "A" + Tool.GRAMMAR_EXTENSION; + int grIndex = line.lastIndexOf("grammar"); + int semi = line.lastIndexOf(';'); + if ( grIndex>=0 && semi>=0 ) { + int space = line.indexOf(' ', grIndex); + fileName = line.substring(space+1, semi)+Tool.GRAMMAR_EXTENSION; + } + if ( fileName.length()==Tool.GRAMMAR_EXTENSION.length() ) fileName = "A" + Tool.GRAMMAR_EXTENSION; + return fileName; + } + +// void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) +// throws Exception +// { +// ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); +// } + +// void ambig(List msgs, int i, int[] expectedAmbigAlts, String expectedAmbigInput) +// throws Exception +// { +// List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); +// AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); +// if ( a==null ) assertNull(expectedAmbigAlts); +// else { +// assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); +// } +// assertEquals(expectedAmbigInput, a.input); +// } + +// void unreachable(List msgs, int[] expectedUnreachableAlts) +// throws Exception +// { +// unreachable(msgs, 0, expectedUnreachableAlts); +// } + +// void unreachable(List msgs, int i, int[] expectedUnreachableAlts) +// throws Exception +// { +// List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); +// UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); +// if ( u==null ) assertNull(expectedUnreachableAlts); +// else { +// assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); +// } +// } + + List getMessagesOfType(List msgs, Class c) { + List filtered = new ArrayList(); + for (ANTLRMessage m : msgs) 
{ + if ( m.getClass() == c ) filtered.add(m); + } + return filtered; + } + + void checkRuleATN(Grammar g, String ruleName, String expecting) { + DOTGenerator dot = new DOTGenerator(g); + System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index])); + + Rule r = g.getRule(ruleName); + ATNState startState = g.atn.ruleToStartState[r.index]; + ATNPrinter serializer = new ATNPrinter(g, startState); + String result = serializer.asString(); + + //System.out.print(result); + assertEquals(expecting, result); + } + + public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { + int lp = templates.indexOf('('); + String name = templates.substring(0, lp); + STGroup group = new STGroupString(templates); + ST st = group.getInstanceOf(name); + st.add(actionName, action); + String grammar = st.render(); + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(grammar, equeue); + if ( g.ast!=null && !g.ast.hasErrors ) { + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + + ATNFactory factory = new ParserATNFactory(g); + if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); + g.atn = factory.createATN(); + + CodeGenerator gen = new CodeGenerator(g); + ST outputFileST = gen.generateParser(); + String output = outputFileST.render(); + //System.out.println(output); + String b = "#" + actionName + "#"; + int start = output.indexOf(b); + String e = "#end-" + actionName + "#"; + int end = output.indexOf(e); + String snippet = output.substring(start+b.length(),end); + assertEquals(expected, snippet); + } + if ( equeue.size()>0 ) { + System.err.println(equeue.toString()); + } + } + + public static class StreamVacuum implements Runnable { + StringBuilder buf = new StringBuilder(); + BufferedReader in; + Thread sucker; + public StreamVacuum(InputStream in) { + this.in = new BufferedReader( new InputStreamReader(in) ); + } + public void start() { + sucker = new Thread(this); + sucker.start(); + } + @Override + public void run() { + try { + String line = in.readLine(); + while (line!=null) { + buf.append(line); + buf.append('\n'); + line = in.readLine(); + } + } + catch (IOException ioe) { + System.err.println("can't read output from process"); + } + } + /** wait for the thread to finish */ + public void join() throws InterruptedException { + sucker.join(); + } + @Override + public String toString() { + return buf.toString(); + } + } + + protected void checkGrammarSemanticsError(ErrorQueue equeue, + GrammarSemanticsMessage expectedMessage) + throws Exception + { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage m = equeue.errors.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if ( equeue.size()!=1 ) { + System.err.println(equeue); + } + } + + protected void checkGrammarSemanticsWarning(ErrorQueue equeue, + GrammarSemanticsMessage expectedMessage) + throws Exception + { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage m = equeue.warnings.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertNotNull("no error; 
"+expectedMessage.getErrorType()+" expected", foundMsg); + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if ( equeue.size()!=1 ) { + System.err.println(equeue); + } + } + + protected void checkError(ErrorQueue equeue, + ANTLRMessage expectedMessage) + throws Exception + { + //System.out.println("errors="+equeue); + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage m = equeue.errors.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertTrue("no error; "+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); + assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); + assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); + /* + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + */ + assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); + } + + public static class FilteringTokenStream extends CommonTokenStream { + public FilteringTokenStream(TokenSource src) { super(src); } + Set hide = new HashSet(); + @Override + protected boolean sync(int i) { + if (!super.sync(i)) { + return false; + } + + Token t = get(i); + if ( hide.contains(t.getType()) ) { + ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); + } + + return true; + } + public void setTokenTypeChannel(int ttype, int channel) { + hide.add(ttype); + } + } + + public static void writeFile(String dir, String fileName, String content) { + try { + Utils.writeFile(dir+"/"+fileName, content, "UTF-8"); + } + catch (IOException ioe) { + System.err.println("can't write file"); + ioe.printStackTrace(System.err); + } + } + + protected void mkdir(String dir) { + File f = new File(dir); + f.mkdirs(); + } + + protected void writeTestFile(String parserName, + String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) + { + ST outputFileST = new ST( + "import org.antlr.v4.runtime.*;\n" + + "import org.antlr.v4.runtime.tree.*;\n" + + "import org.antlr.v4.runtime.atn.*;\n" + + "import java.util.Arrays;\n"+ + "\n" + + "public class Test {\n" + + " public static void main(String[] args) throws Exception {\n" + + " CharStream input = new ANTLRFileStream(args[0]);\n" + + " lex = new (input);\n" + + " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + + " \n"+ + " parser.setBuildParseTree(true);\n" + + " \n"+ + " ParserRuleContext tree = parser.();\n" + + " System.out.println(Arrays.toString(profiler.getDecisionInfo()));\n" + + " ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" + + " }\n" + + "\n" + + " static class TreeShapeListener implements ParseTreeListener {\n" + + " @Override public void visitTerminal(TerminalNode node) { }\n" + + " @Override public void visitErrorNode(ErrorNode node) { }\n" + + " @Override public void exitEveryRule(ParserRuleContext ctx) { }\n" + + "\n" + + " @Override\n" + + " public void enterEveryRule(ParserRuleContext ctx) {\n" + + " for (int i = 0; i \\< ctx.getChildCount(); i++) {\n" + + " ParseTree parent = ctx.getChild(i).getParent();\n" + + " if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) {\n" + + " throw new IllegalStateException(\"Invalid parse tree shape detected.\");\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}" + ); + ST createParserST = new ST(" parser = new 
(tokens);\n"); + if ( debug ) { + createParserST = + new ST( + " parser = new (tokens);\n" + + " parser.addErrorListener(new DiagnosticErrorListener());\n"); + } + if ( profile ) { + outputFileST.add("profile", + "ProfilingATNSimulator profiler = new ProfilingATNSimulator(parser);\n" + + "parser.setInterpreter(profiler);"); + } + else { + outputFileST.add("profile", new ArrayList()); + } + outputFileST.add("createParser", createParserST); + outputFileST.add("parserName", parserName); + outputFileST.add("lexerName", lexerName); + outputFileST.add("parserStartRuleName", parserStartRuleName); + writeFile(tmpdir, "Test.java", outputFileST.render()); + } + + protected void writeLexerTestFile(String lexerName, boolean showDFA) { + ST outputFileST = new ST( + "import org.antlr.v4.runtime.*;\n" + + "\n" + + "public class Test {\n" + + " public static void main(String[] args) throws Exception {\n" + + " CharStream input = new ANTLRFileStream(args[0]);\n" + + " lex = new (input);\n" + + " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + + " tokens.fill();\n" + + " for (Object t : tokens.getTokens()) System.out.println(t);\n" + + (showDFA?"System.out.print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString());\n":"")+ + " }\n" + + "}" + ); + + outputFileST.add("lexerName", lexerName); + writeFile(tmpdir, "Test.java", outputFileST.render()); + } + + public void writeRecognizerAndCompile(String parserName, String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) { + if ( parserName==null ) { + writeLexerTestFile(lexerName, debug); + } + else { + writeTestFile(parserName, + lexerName, + parserStartRuleName, + debug, + profile); + } + + compile("Test.java"); + } + + + protected void eraseFiles(final String filesEndingWith) { + File tmpdirF = new File(tmpdir); + String[] files = tmpdirF.list(); + for(int i = 0; files!=null && i < files.length; i++) { + if ( files[i].endsWith(filesEndingWith) ) { + new File(tmpdir+"/"+files[i]).delete(); + } + } + } + + protected void eraseFiles() { + if (tmpdir == null) { + return; + } + + File tmpdirF = new File(tmpdir); + String[] files = tmpdirF.list(); + for(int i = 0; files!=null && i < files.length; i++) { + new File(tmpdir+"/"+files[i]).delete(); + } + } + + protected void eraseTempDir() { + File tmpdirF = new File(tmpdir); + if ( tmpdirF.exists() ) { + eraseFiles(); + tmpdirF.delete(); + } + } + + public String getFirstLineOfException() { + if ( this.stderrDuringParse ==null ) { + return null; + } + String[] lines = this.stderrDuringParse.split("\n"); + String prefix="Exception in thread \"main\" "; + return lines[0].substring(prefix.length(),lines[0].length()); + } + + /** + * When looking at a result set that consists of a Map/HashTable + * we cannot rely on the output order, as the hashing algorithm or other aspects + * of the implementation may be different on differnt JDKs or platforms. Hence + * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a + * bit of a hack, but guarantees that we get the same order on all systems. We assume that + * the keys are strings. + * + * @param m The Map that contains keys we wish to return in sorted order + * @return A string that represents all the keys in sorted order. 
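+ * (Illustration: a map holding {@code b=2} and {@code a=1} is rendered as
+ * {@code {a=1, b=2}}, independent of the JDK's hash iteration order.)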
+ */ + public String sortMapToString(Map m) { + // Pass in crap, and get nothing back + // + if (m == null) { + return null; + } + + System.out.println("Map toString looks like: " + m.toString()); + + // Sort the keys in the Map + // + TreeMap nset = new TreeMap(m); + + System.out.println("Tree map looks like: " + nset.toString()); + return nset.toString(); + } + + public List realElements(List elements) { + return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); + } + + public void assertNotNullOrEmpty(String message, String text) { + assertNotNull(message, text); + assertFalse(message, text.isEmpty()); + } + + public void assertNotNullOrEmpty(String text) { + assertNotNull(text); + assertFalse(text.isEmpty()); + } + + public static class IntTokenStream implements TokenStream { + IntegerList types; + int p=0; + public IntTokenStream(IntegerList types) { this.types = types; } + + @Override + public void consume() { p++; } + + @Override + public int LA(int i) { return LT(i).getType(); } + + @Override + public int mark() { + return index(); + } + + @Override + public int index() { return p; } + + @Override + public void release(int marker) { + seek(marker); + } + + @Override + public void seek(int index) { + p = index; + } + + @Override + public int size() { + return types.size(); + } + + @Override + public String getSourceName() { + return UNKNOWN_SOURCE_NAME; + } + + @Override + public Token LT(int i) { + CommonToken t; + int rawIndex = p + i - 1; + if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); + else t = new CommonToken(types.get(rawIndex)); + t.setTokenIndex(rawIndex); + return t; + } + + @Override + public Token get(int i) { + return new org.antlr.v4.runtime.CommonToken(types.get(i)); + } + + @Override + public TokenSource getTokenSource() { + return null; + } + + @NotNull + @Override + public String getText() { + throw new UnsupportedOperationException("can't give strings"); + } + + @NotNull + @Override + public String getText(Interval interval) { + throw new UnsupportedOperationException("can't give strings"); + } + + @NotNull + @Override + public String getText(RuleContext ctx) { + throw new UnsupportedOperationException("can't give strings"); + } + + @NotNull + @Override + public String getText(Token start, Token stop) { + throw new UnsupportedOperationException("can't give strings"); + } + } + + /** Sort a list */ + public > List sort(List data) { + List dup = new ArrayList(); + dup.addAll(data); + Collections.sort(dup); + return dup; + } + + /** Return map sorted by key */ + public ,V> LinkedHashMap sort(Map data) { + LinkedHashMap dup = new LinkedHashMap(); + List keys = new ArrayList(); + keys.addAll(data.keySet()); + Collections.sort(keys); + for (K k : keys) { + dup.put(k, data.get(k)); + } + return dup; + } +} \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/tool/ErrorQueue.java b/tool/test/org/antlr/v4/test/tool/ErrorQueue.java new file mode 100644 index 000000000..bd78830bf --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/ErrorQueue.java @@ -0,0 +1,108 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.runtime.misc.Utils; +import org.antlr.v4.tool.ANTLRMessage; +import org.antlr.v4.tool.ANTLRToolListener; +import org.antlr.v4.tool.ToolMessage; +import org.stringtemplate.v4.ST; + +import java.util.ArrayList; +import java.util.List; + +public class ErrorQueue implements ANTLRToolListener { + public final Tool tool; + public final List infos = new ArrayList(); + public final List errors = new ArrayList(); + public final List warnings = new ArrayList(); + public final List all = new ArrayList(); + + public ErrorQueue() { + this(null); + } + + public ErrorQueue(Tool tool) { + this.tool = tool; + } + + @Override + public void info(String msg) { + infos.add(msg); + } + + @Override + public void error(ANTLRMessage msg) { + errors.add(msg); + all.add(msg); + } + + @Override + public void warning(ANTLRMessage msg) { + warnings.add(msg); + all.add(msg); + } + + public void error(ToolMessage msg) { + errors.add(msg); + all.add(msg); + } + + public int size() { + return all.size() + infos.size(); + } + + @Override + public String toString() { + return toString(false); + } + + public String toString(boolean rendered) { + if (!rendered) { + return Utils.join(all.iterator(), "\n"); + } + + if (tool == null) { + throw new IllegalStateException(String.format("No %s instance is available.", Tool.class.getName())); + } + + StringBuilder buf = new StringBuilder(); + for (ANTLRMessage m : all) { + ST st = tool.errMgr.getMessageTemplate(m); + buf.append(st.render()); + buf.append("\n"); + } + + return buf.toString(); + } + +} + diff --git a/tool/test/org/antlr/v4/test/Java-LR.g4 b/tool/test/org/antlr/v4/test/tool/Java-LR.g4 similarity index 100% rename from tool/test/org/antlr/v4/test/Java-LR.g4 rename to tool/test/org/antlr/v4/test/tool/Java-LR.g4 diff --git a/tool/test/org/antlr/v4/test/Java.g4 b/tool/test/org/antlr/v4/test/tool/Java.g4 similarity index 100% rename from tool/test/org/antlr/v4/test/Java.g4 rename to tool/test/org/antlr/v4/test/tool/Java.g4 diff --git a/tool/test/org/antlr/v4/test/tool/JavaUnicodeInputStream.java b/tool/test/org/antlr/v4/test/tool/JavaUnicodeInputStream.java new file mode 100644 index 000000000..1a0d539a8 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/JavaUnicodeInputStream.java @@ -0,0 +1,267 @@ +/* + * [The "BSD license"] + * Copyright (c) 2013 Terence Parr + * Copyright (c) 2013 Sam Harwell + 
* All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.runtime.misc.NotNull; + +/** + * + * @author Sam Harwell + */ +public class JavaUnicodeInputStream implements CharStream { + @NotNull + private final CharStream source; + private final IntegerList escapeIndexes = new IntegerList(); + private final IntegerList escapeCharacters = new IntegerList(); + private final IntegerList escapeIndirectionLevels = new IntegerList(); + + private int escapeListIndex; + private int range; + private int slashCount; + + private int la1; + + public JavaUnicodeInputStream(@NotNull CharStream source) { + if (source == null) { + throw new NullPointerException("source"); + } + + this.source = source; + this.la1 = source.LA(1); + } + + @Override + public int size() { + return source.size(); + } + + @Override + public int index() { + return source.index(); + } + + @Override + public String getSourceName() { + return source.getSourceName(); + } + + @Override + public String getText(Interval interval) { + return source.getText(interval); + } + + @Override + public void consume() { + if (la1 != '\\') { + source.consume(); + la1 = source.LA(1); + range = Math.max(range, source.index()); + slashCount = 0; + return; + } + + // make sure the next character has been processed + this.LA(1); + + if (escapeListIndex >= escapeIndexes.size() || escapeIndexes.get(escapeListIndex) != index()) { + source.consume(); + slashCount++; + } + else { + int indirectionLevel = escapeIndirectionLevels.get(escapeListIndex); + for (int i = 0; i < 6 + indirectionLevel; i++) { + source.consume(); + } + + escapeListIndex++; + slashCount = 0; + } + + la1 = source.LA(1); + assert range >= index(); + } + + @Override + public int LA(int i) { + if (i == 1 && la1 != '\\') { + return la1; + } + + if (i <= 0) { + int desiredIndex = index() + i; + for (int j = escapeListIndex - 1; j >= 0; j--) { + if (escapeIndexes.get(j) + 6 + escapeIndirectionLevels.get(j) > desiredIndex) { + desiredIndex -= 5 + 
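+					// a Unicode escape (backslash, one or more 'u', four hex digits)
+					// occupies 6 + indirectionLevel raw chars but stands for a single
+					// logical char, hence the 5 + indirectionLevel step here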
escapeIndirectionLevels.get(j); + } + + if (escapeIndexes.get(j) == desiredIndex) { + return escapeCharacters.get(j); + } + } + + return source.LA(desiredIndex - index()); + } + else { + int desiredIndex = index() + i - 1; + for (int j = escapeListIndex; j < escapeIndexes.size(); j++) { + if (escapeIndexes.get(j) == desiredIndex) { + return escapeCharacters.get(j); + } + else if (escapeIndexes.get(j) < desiredIndex) { + desiredIndex += 5 + escapeIndirectionLevels.get(j); + } + else { + return source.LA(desiredIndex - index() + 1); + } + } + + int[] currentIndex = { index() }; + int[] slashCountPtr = { slashCount }; + int[] indirectionLevelPtr = { 0 }; + for (int j = 0; j < i; j++) { + int previousIndex = currentIndex[0]; + int c = readCharAt(currentIndex, slashCountPtr, indirectionLevelPtr); + if (currentIndex[0] > range) { + if (currentIndex[0] - previousIndex > 1) { + escapeIndexes.add(previousIndex); + escapeCharacters.add(c); + escapeIndirectionLevels.add(indirectionLevelPtr[0]); + } + + range = currentIndex[0]; + } + + if (j == i - 1) { + return c; + } + } + + throw new IllegalStateException("shouldn't be reachable"); + } + } + + @Override + public int mark() { + return source.mark(); + } + + @Override + public void release(int marker) { + source.release(marker); + } + + @Override + public void seek(int index) { + if (index > range) { + throw new UnsupportedOperationException(); + } + + source.seek(index); + la1 = source.LA(1); + + slashCount = 0; + while (source.LA(-slashCount - 1) == '\\') { + slashCount++; + } + + escapeListIndex = escapeIndexes.binarySearch(source.index()); + if (escapeListIndex < 0) { + escapeListIndex = -escapeListIndex - 1; + } + } + + private static boolean isHexDigit(int c) { + return c >= '0' && c <= '9' + || c >= 'a' && c <= 'f' + || c >= 'A' && c <= 'F'; + } + + private static int hexValue(int c) { + if (c >= '0' && c <= '9') { + return c - '0'; + } + + if (c >= 'a' && c <= 'f') { + return c - 'a' + 10; + } + + if (c >= 'A' && c <= 'F') { + return c - 'A' + 10; + } + + throw new IllegalArgumentException("c"); + } + + private int readCharAt(int[] nextIndexPtr, int[] slashCountPtr, int[] indirectionLevelPtr) { + assert nextIndexPtr != null && nextIndexPtr.length == 1; + assert slashCountPtr != null && slashCountPtr.length == 1; + assert indirectionLevelPtr != null && indirectionLevelPtr.length == 1; + + boolean blockUnicodeEscape = (slashCountPtr[0] % 2) != 0; + + int c0 = source.LA(nextIndexPtr[0] - index() + 1); + if (c0 == '\\') { + slashCountPtr[0]++; + + if (!blockUnicodeEscape) { + int c1 = source.LA(nextIndexPtr[0] - index() + 2); + if (c1 == 'u') { + int c2 = source.LA(nextIndexPtr[0] - index() + 3); + indirectionLevelPtr[0] = 0; + while (c2 == 'u') { + indirectionLevelPtr[0]++; + c2 = source.LA(nextIndexPtr[0] - index() + 3 + indirectionLevelPtr[0]); + } + + int c3 = source.LA(nextIndexPtr[0] - index() + 4 + indirectionLevelPtr[0]); + int c4 = source.LA(nextIndexPtr[0] - index() + 5 + indirectionLevelPtr[0]); + int c5 = source.LA(nextIndexPtr[0] - index() + 6 + indirectionLevelPtr[0]); + if (isHexDigit(c2) && isHexDigit(c3) && isHexDigit(c4) && isHexDigit(c5)) { + int value = hexValue(c2); + value = (value << 4) + hexValue(c3); + value = (value << 4) + hexValue(c4); + value = (value << 4) + hexValue(c5); + + nextIndexPtr[0] += 6 + indirectionLevelPtr[0]; + slashCountPtr[0] = 0; + return value; + } + } + } + } + + nextIndexPtr[0]++; + return c0; + } +} diff --git a/tool/test/org/antlr/v4/test/tool/ParserInterpreterForTesting.java 
b/tool/test/org/antlr/v4/test/tool/ParserInterpreterForTesting.java new file mode 100644 index 000000000..e1a36243f --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/ParserInterpreterForTesting.java @@ -0,0 +1,132 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.runtime.atn.DecisionState; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.NotNull; +import org.antlr.v4.runtime.misc.Nullable; +import org.antlr.v4.tool.Grammar; + +public class ParserInterpreterForTesting { + public static class DummyParser extends Parser { + public final ATN atn; + public final DFA[] decisionToDFA; // not shared for interp + public final PredictionContextCache sharedContextCache = + new PredictionContextCache(); + + public Grammar g; + public DummyParser(Grammar g, ATN atn, TokenStream input) { + super(input); + this.g = g; + this.atn = atn; + this.decisionToDFA = new DFA[atn.getNumberOfDecisions()]; + for (int i = 0; i < decisionToDFA.length; i++) { + decisionToDFA[i] = new DFA(atn.getDecisionState(i), i); + } + } + + @Override + public String getGrammarFileName() { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public String[] getRuleNames() { + return g.rules.keySet().toArray(new String[g.rules.size()]); + } + + @Override + @Deprecated + public String[] getTokenNames() { + return g.getTokenNames(); + } + + @Override + public ATN getATN() { + return atn; + } + } + + protected Grammar g; + public DummyParser parser; + protected ParserATNSimulator atnSimulator; + protected TokenStream input; + + public ParserInterpreterForTesting(@NotNull Grammar g) { + this.g = g; + } + + public 
ParserInterpreterForTesting(@NotNull Grammar g, @NotNull TokenStream input) { + Tool antlr = new Tool(); + antlr.process(g,false); + parser = new DummyParser(g, g.atn, input); + atnSimulator = + new ParserATNSimulator(parser, g.atn, parser.decisionToDFA, + parser.sharedContextCache); + } + + public int adaptivePredict(@NotNull TokenStream input, int decision, + @Nullable ParserRuleContext outerContext) + { + return atnSimulator.adaptivePredict(input, decision, outerContext); + } + + public int matchATN(@NotNull TokenStream input, + @NotNull ATNState startState) + { + if (startState.getNumberOfTransitions() == 1) { + return 1; + } + else if (startState instanceof DecisionState) { + return atnSimulator.adaptivePredict(input, ((DecisionState)startState).decision, null); + } + else if (startState.getNumberOfTransitions() > 0) { + return 1; + } + else { + return -1; + } + } + + public ParserATNSimulator getATNSimulator() { + return atnSimulator; + } + +} diff --git a/tool/test/org/antlr/v4/test/PositionAdjustingLexer.g4 b/tool/test/org/antlr/v4/test/tool/PositionAdjustingLexer.g4 similarity index 100% rename from tool/test/org/antlr/v4/test/PositionAdjustingLexer.g4 rename to tool/test/org/antlr/v4/test/tool/PositionAdjustingLexer.g4 diff --git a/tool/test/org/antlr/v4/test/Psl.g4 b/tool/test/org/antlr/v4/test/tool/Psl.g4 similarity index 100% rename from tool/test/org/antlr/v4/test/Psl.g4 rename to tool/test/org/antlr/v4/test/tool/Psl.g4 diff --git a/tool/test/org/antlr/v4/test/TestASTStructure.gunit b/tool/test/org/antlr/v4/test/tool/TestASTStructure.gunit similarity index 100% rename from tool/test/org/antlr/v4/test/TestASTStructure.gunit rename to tool/test/org/antlr/v4/test/tool/TestASTStructure.gunit diff --git a/tool/test/org/antlr/v4/test/tool/TestASTStructure.java b/tool/test/org/antlr/v4/test/tool/TestASTStructure.java new file mode 100644 index 000000000..d081fe8da --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestASTStructure.java @@ -0,0 +1,406 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.runtime.ANTLRStringStream; +import org.antlr.runtime.CharStream; +import org.antlr.runtime.CommonTokenStream; +import org.antlr.runtime.Parser; +import org.antlr.runtime.RuleReturnScope; +import org.antlr.runtime.TokenSource; +import org.antlr.runtime.TokenStream; +import org.antlr.runtime.tree.Tree; +import org.antlr.runtime.tree.TreeAdaptor; +import org.junit.Test; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; + +import static org.junit.Assert.assertEquals; + +// NO LONGER using gunit!!! + +public class TestASTStructure { + String lexerClassName = "org.antlr.v4.parse.ANTLRLexer"; + String parserClassName = "org.antlr.v4.parse.ANTLRParser"; + String adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor"; + + public Object execParser( + String ruleName, + String input, + int scriptLine) + throws Exception + { + ANTLRStringStream is = new ANTLRStringStream(input); + Class lexerClass = Class.forName(lexerClassName).asSubclass(TokenSource.class); + Constructor lexConstructor = lexerClass.getConstructor(CharStream.class); + TokenSource lexer = lexConstructor.newInstance(is); + is.setLine(scriptLine); + + CommonTokenStream tokens = new CommonTokenStream(lexer); + + Class parserClass = Class.forName(parserClassName).asSubclass(Parser.class); + Constructor parConstructor = parserClass.getConstructor(TokenStream.class); + Parser parser = parConstructor.newInstance(tokens); + + // set up customized tree adaptor if necessary + if ( adaptorClassName!=null ) { + Method m = parserClass.getMethod("setTreeAdaptor", TreeAdaptor.class); + Class adaptorClass = Class.forName(adaptorClassName).asSubclass(TreeAdaptor.class); + m.invoke(parser, adaptorClass.newInstance()); + } + + Method ruleMethod = parserClass.getMethod(ruleName); + + // INVOKE RULE + return ruleMethod.invoke(parser); + } + + @Test public void test_grammarSpec1() throws Exception { + // gunit test on line 15 + RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))"; + assertEquals("testing rule grammarSpec", expecting, actual); + } + + @Test public void test_grammarSpec2() throws Exception { + // gunit test on line 18 + RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n tokens { A, B }\n @header {foo}\n a : A;\n ", 18); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(PARSER_GRAMMAR P (tokens { A B) (@ header {foo}) (RULES (RULE a (BLOCK (ALT A)))))"; + assertEquals("testing rule grammarSpec", expecting, actual); + } + + @Test public void test_grammarSpec3() throws Exception { + // gunit test on line 30 + RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n @header {foo}\n tokens { A,B }\n a : A;\n ", 30); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(PARSER_GRAMMAR P (@ header {foo}) (tokens { A B) (RULES (RULE a (BLOCK (ALT A)))))"; + assertEquals("testing rule grammarSpec", expecting, actual); + } + + @Test public void test_grammarSpec4() throws Exception { + // gunit test on line 42 + RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n import A=B, C;\n a : A;\n ", 42); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(PARSER_GRAMMAR P (import (= A 
B) C) (RULES (RULE a (BLOCK (ALT A)))))"; + assertEquals("testing rule grammarSpec", expecting, actual); + } @Test public void test_delegateGrammars1() throws Exception { + // gunit test on line 53 + RuleReturnScope rstruct = (RuleReturnScope)execParser("delegateGrammars", "import A;", 53); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(import A)"; + assertEquals("testing rule delegateGrammars", expecting, actual); + } @Test public void test_rule1() throws Exception { + // gunit test on line 56 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "a : A;", 56); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c))))))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule2() throws Exception { + // gunit test on line 58 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "A : B+;", 58); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE A (BLOCK (ALT (+ (BLOCK (ALT B))))))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule3() throws Exception { + // gunit test on line 60 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n : ID ;\n ", 60); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (BLOCK (ALT ID)))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule4() throws Exception { + // gunit test on line 75 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n options {backtrack=true;}\n : ID;\n ", 75); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (OPTIONS (= backtrack true)) (BLOCK (ALT ID)))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule5() throws Exception { + // gunit test on line 88 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A b] {foo}\n finally {bar}\n ", 88); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A b {foo}) (finally {bar}))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule6() throws Exception { + // gunit test on line 97 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A a] {foo}\n catch[B b] {fu}\n finally {bar}\n ", 97); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A a {foo}) (catch B b {fu}) (finally {bar}))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule7() throws Exception { + // gunit test on line 107 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n\ta[int i]\n\tlocals [int a, float b]\n\t\t:\tA\n\t\t;\n\t", 107); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a int i (locals int a, float b) (BLOCK (ALT A)))"; + assertEquals("testing rule rule", expecting, actual); + } + + @Test public void test_rule8() throws Exception { + // gunit test on line 115 + RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n\ta[int i] throws a.b.c\n\t\t:\tA\n\t\t;\n\t", 115); + Object actual = 
((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(RULE a int i (throws a.b.c) (BLOCK (ALT A)))"; + assertEquals("testing rule rule", expecting, actual); + } @Test public void test_ebnf1() throws Exception { + // gunit test on line 123 + RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)", 123); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(BLOCK (ALT A) (ALT B))"; + assertEquals("testing rule ebnf", expecting, actual); + } + + @Test public void test_ebnf2() throws Exception { + // gunit test on line 124 + RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)?", 124); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(? (BLOCK (ALT A) (ALT B)))"; + assertEquals("testing rule ebnf", expecting, actual); + } + + @Test public void test_ebnf3() throws Exception { + // gunit test on line 125 + RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)*", 125); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT A) (ALT B)))"; + assertEquals("testing rule ebnf", expecting, actual); + } + + @Test public void test_ebnf4() throws Exception { + // gunit test on line 126 + RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)+", 126); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT A) (ALT B)))"; + assertEquals("testing rule ebnf", expecting, actual); + } @Test public void test_element1() throws Exception { + // gunit test on line 129 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "~A", 129); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(~ (SET A))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element2() throws Exception { + // gunit test on line 130 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b+", 130); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT b)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element3() throws Exception { + // gunit test on line 131 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)+", 131); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT b)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element4() throws Exception { + // gunit test on line 132 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b?", 132); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(? (BLOCK (ALT b)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element5() throws Exception { + // gunit test on line 133 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)?", 133); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(? 
(BLOCK (ALT b)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element6() throws Exception { + // gunit test on line 134 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)*", 134); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT b)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element7() throws Exception { + // gunit test on line 135 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b*", 135); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT b)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element8() throws Exception { + // gunit test on line 136 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'while'*", 136); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT 'while')))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element9() throws Exception { + // gunit test on line 137 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'+", 137); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT 'a')))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element10() throws Exception { + // gunit test on line 138 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "a[3]", 138); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(a 3)"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element11() throws Exception { + // gunit test on line 139 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'..'z'+", 139); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT (.. 'a' 'z'))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element12() throws Exception { + // gunit test on line 140 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID", 140); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(= x ID)"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element13() throws Exception { + // gunit test on line 141 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID?", 141); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(? 
(BLOCK (ALT (= x ID))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element14() throws Exception { + // gunit test on line 142 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID*", 142); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT (= x ID))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element15() throws Exception { + // gunit test on line 143 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b", 143); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(= x b)"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element16() throws Exception { + // gunit test on line 144 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=(A|B)", 144); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(= x (BLOCK (ALT A) (ALT B)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element17() throws Exception { + // gunit test on line 145 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=~(A|B)", 145); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(= x (~ (SET A B)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element18() throws Exception { + // gunit test on line 146 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)", 146); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+= x (~ (SET A B)))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element19() throws Exception { + // gunit test on line 147 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)+", 147); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT (+= x (~ (SET A B))))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element20() throws Exception { + // gunit test on line 148 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b+", 148); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT (= x b))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element21() throws Exception { + // gunit test on line 149 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=ID*", 149); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT (+= x ID))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element22() throws Exception { + // gunit test on line 150 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+='int'*", 150); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT (+= x 'int'))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element23() throws Exception { + // gunit test on line 151 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=b+", 151); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(+ (BLOCK (ALT (+= x b))))"; + assertEquals("testing rule element", expecting, actual); + } + + @Test public void test_element24() 
throws Exception { + // gunit test on line 152 + RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "({blort} 'x')*", 152); + Object actual = ((Tree)rstruct.getTree()).toStringTree(); + Object expecting = "(* (BLOCK (ALT {blort} 'x')))"; + assertEquals("testing rule element", expecting, actual); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java b/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java new file mode 100644 index 000000000..3afc2edf0 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java @@ -0,0 +1,981 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.parse.ANTLRParser; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.tool.ErrorType; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.ast.GrammarAST; +import org.antlr.v4.tool.ast.GrammarRootAST; +import org.antlr.v4.tool.ast.RuleAST; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestATNConstruction extends BaseTest { + @Test + public void testA() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A;"); + String expecting = + "RuleStart_a_0->s2\n" + + "s2-A->s3\n" + + "s3->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s4\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAB() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A B ;"); + String expecting = + "RuleStart_a_0->s2\n" + + "s2-A->s3\n" + + "s3-B->s4\n" + + "s4->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s5\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAorB() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A | B {;} ;"); + String expecting = + "RuleStart_a_0->BlockStart_5\n" + + "BlockStart_5->s2\n" + + "BlockStart_5->s3\n" + + "s2-A->BlockEnd_6\n" + + "s3-B->s4\n" + + "BlockEnd_6->RuleStop_a_1\n" + + "s4-action_0:-1->BlockEnd_6\n" + + "RuleStop_a_1-EOF->s7\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testSetAorB() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A | B ;"); + String expecting = + "RuleStart_a_0->s2\n" + + "s2-{A, B}->s3\n" + + "s3->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s4\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testLexerIsntSetMultiCharString() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar P;\n"+ + "A : ('0x' | '0X') ;"); + String expecting = + "s0->RuleStart_A_1\n" + + "RuleStart_A_1->BlockStart_7\n" + + "BlockStart_7->s3\n" + + "BlockStart_7->s5\n" + + "s3-'0'->s4\n" + + "s5-'0'->s6\n" + + "s4-'x'->BlockEnd_8\n" + + "s6-'X'->BlockEnd_8\n" + + "BlockEnd_8->RuleStop_A_2\n"; + checkTokensRule(g, null, expecting); + } + @Test public void testRange() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar P;\n"+ + "A : 'a'..'c' ;" + ); + String expecting = + "s0->RuleStart_A_1\n" + + "RuleStart_A_1->s3\n" + + "s3-'a'..'c'->s4\n" + + "s4->RuleStop_A_2\n"; + checkTokensRule(g, null, expecting); + } + @Test public void testRangeOrRange() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar P;\n"+ + "A : ('a'..'c' 'h' | 'q' 'j'..'l') ;" + ); + String expecting = + "s0->RuleStart_A_1\n" + + "RuleStart_A_1->BlockStart_7\n" + + "BlockStart_7->s3\n" + + "BlockStart_7->s5\n" + + "s3-'a'..'c'->s4\n" + + "s5-'q'->s6\n" + + "s4-'h'->BlockEnd_8\n" + + "s6-'j'..'l'->BlockEnd_8\n" + + "BlockEnd_8->RuleStop_A_2\n"; + checkTokensRule(g, null, expecting); + } + @Test public void testStringLiteralInParser() throws Exception { + Grammar g = new Grammar( + "grammar P;\n"+ + "a : A|'b' ;" + ); + String expecting = + 
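+			// notation in the expected strings (as rendered by ATNPrinter):
+			// "s2-A->s3" is a transition that consumes A, a bare "->" is an
+			// epsilon transition, sets print in braces such as {'b', A}, and
+			// RuleStart_x/RuleStop_x states bracket rule x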
"RuleStart_a_0->s2\n" + + "s2-{'b', A}->s3\n" + + "s3->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s4\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testABorCD() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A B | C D;"); + String expecting = + "RuleStart_a_0->BlockStart_6\n" + + "BlockStart_6->s2\n" + + "BlockStart_6->s4\n" + + "s2-A->s3\n" + + "s4-C->s5\n" + + "s3-B->BlockEnd_7\n" + + "s5-D->BlockEnd_7\n" + + "BlockEnd_7->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s8\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testbA() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : b A ;\n"+ + "b : B ;"); + String expecting = + "RuleStart_a_0->s4\n" + + "s4-b->RuleStart_b_2\n" + + "s5-A->s6\n" + + "s6->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s9\n"; + checkRuleATN(g, "a", expecting); + expecting = + "RuleStart_b_2->s7\n" + + "s7-B->s8\n" + + "s8->RuleStop_b_3\n" + + "RuleStop_b_3->s5\n"; + checkRuleATN(g, "b", expecting); + } + @Test public void testFollow() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : b A ;\n"+ + "b : B ;\n"+ + "c : b C;"); + String expecting = + "RuleStart_b_2->s9\n" + + "s9-B->s10\n" + + "s10->RuleStop_b_3\n" + + "RuleStop_b_3->s7\n" + + "RuleStop_b_3->s12\n"; + checkRuleATN(g, "b", expecting); + } + @Test public void testAorEpsilon() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A | ;"); + String expecting = + "RuleStart_a_0->BlockStart_4\n" + + "BlockStart_4->s2\n" + + "BlockStart_4->s3\n" + + "s2-A->BlockEnd_5\n" + + "s3->BlockEnd_5\n" + + "BlockEnd_5->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s6\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAOptional() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A?;"); + String expecting = + "RuleStart_a_0->BlockStart_3\n" + + "BlockStart_3->s2\n" + + "BlockStart_3->BlockEnd_4\n" + + "s2-A->BlockEnd_4\n" + + "BlockEnd_4->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s5\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAorBoptional() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (A{;}|B)?;"); + String expecting = + "RuleStart_a_0->BlockStart_5\n" + + "BlockStart_5->s2\n" + + "BlockStart_5->s4\n" + + "BlockStart_5->BlockEnd_6\n" + + "s2-A->s3\n" + + "s4-B->BlockEnd_6\n" + + "BlockEnd_6->RuleStop_a_1\n" + + "s3-action_0:-1->BlockEnd_6\n" + + "RuleStop_a_1-EOF->s7\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testSetAorBoptional() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (A|B)?;"); + String expecting = + "RuleStart_a_0->BlockStart_3\n" + + "BlockStart_3->s2\n" + + "BlockStart_3->BlockEnd_4\n" + + "s2-{A, B}->BlockEnd_4\n" + + "BlockEnd_4->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s5\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAorBthenC() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (A | B) C;"); + String expecting = + "RuleStart_a_0->s2\n" + + "s2-{A, B}->s3\n" + + "s3-C->s4\n" + + "s4->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s5\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAplus() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A+;"); + String expecting = + "RuleStart_a_0->PlusBlockStart_3\n" + + "PlusBlockStart_3->s2\n" + + "s2-A->BlockEnd_4\n" + + "BlockEnd_4->PlusLoopBack_5\n" + + "PlusLoopBack_5->PlusBlockStart_3\n" + + "PlusLoopBack_5->s6\n" + + 
"s6->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s7\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAplusSingleAltHasPlusASTPointingAtLoopBackState() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "s : a B ;\n" + // (RULE a (BLOCK (ALT (+ (BLOCK (ALT A)))))) + "a : A+;"); + String expecting = + "RuleStart_a_2->PlusBlockStart_8\n" + + "PlusBlockStart_8->s7\n" + + "s7-A->BlockEnd_9\n" + + "BlockEnd_9->PlusLoopBack_10\n" + + "PlusLoopBack_10->PlusBlockStart_8\n" + + "PlusLoopBack_10->s11\n" + + "s11->RuleStop_a_3\n" + + "RuleStop_a_3->s5\n"; + checkRuleATN(g, "a", expecting); + // Get all AST -> ATNState relationships. Make sure loopback is covered when no loop entry decision + List ruleNodes = g.ast.getNodesWithType(ANTLRParser.RULE); + RuleAST a = (RuleAST)ruleNodes.get(1); + List nodesInRule = a.getNodesWithType(null); + Map covered = new LinkedHashMap(); + for (GrammarAST node : nodesInRule) { + if ( node.atnState != null ) { + covered.put(node, node.atnState); + } + } + assertEquals("{RULE=2, BLOCK=8, +=10, BLOCK=8, A=7}", covered.toString()); + } + @Test public void testAorBplus() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (A|B{;})+;"); + String expecting = + "RuleStart_a_0->PlusBlockStart_5\n" + + "PlusBlockStart_5->s2\n" + + "PlusBlockStart_5->s3\n" + + "s2-A->BlockEnd_6\n" + + "s3-B->s4\n" + + "BlockEnd_6->PlusLoopBack_7\n" + + "s4-action_0:-1->BlockEnd_6\n" + + "PlusLoopBack_7->PlusBlockStart_5\n" + + "PlusLoopBack_7->s8\n" + + "s8->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s9\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAorBorEmptyPlus() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (A | B | )+ ;"); + String expecting = + "RuleStart_a_0->PlusBlockStart_5\n" + + "PlusBlockStart_5->s2\n" + + "PlusBlockStart_5->s3\n" + + "PlusBlockStart_5->s4\n" + + "s2-A->BlockEnd_6\n" + + "s3-B->BlockEnd_6\n" + + "s4->BlockEnd_6\n" + + "BlockEnd_6->PlusLoopBack_7\n" + + "PlusLoopBack_7->PlusBlockStart_5\n" + + "PlusLoopBack_7->s8\n" + + "s8->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s9\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAStar() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : A*;"); + String expecting = + "RuleStart_a_0->StarLoopEntry_5\n" + + "StarLoopEntry_5->StarBlockStart_3\n" + + "StarLoopEntry_5->s6\n" + + "StarBlockStart_3->s2\n" + + "s6->RuleStop_a_1\n" + + "s2-A->BlockEnd_4\n" + + "RuleStop_a_1-EOF->s8\n" + + "BlockEnd_4->StarLoopBack_7\n" + + "StarLoopBack_7->StarLoopEntry_5\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testNestedAstar() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (COMMA ID*)*;"); + String expecting = + "RuleStart_a_0->StarLoopEntry_11\n" + + "StarLoopEntry_11->StarBlockStart_9\n" + + "StarLoopEntry_11->s12\n" + + "StarBlockStart_9->s2\n" + + "s12->RuleStop_a_1\n" + + "s2-COMMA->StarLoopEntry_6\n" + + "RuleStop_a_1-EOF->s14\n" + + "StarLoopEntry_6->StarBlockStart_4\n" + + "StarLoopEntry_6->s7\n" + + "StarBlockStart_4->s3\n" + + "s7->BlockEnd_10\n" + + "s3-ID->BlockEnd_5\n" + + "BlockEnd_10->StarLoopBack_13\n" + + "BlockEnd_5->StarLoopBack_8\n" + + "StarLoopBack_13->StarLoopEntry_11\n" + + "StarLoopBack_8->StarLoopEntry_6\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testAorBstar() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : (A | B{;})* ;"); + String expecting = + 
"RuleStart_a_0->StarLoopEntry_7\n" + + "StarLoopEntry_7->StarBlockStart_5\n" + + "StarLoopEntry_7->s8\n" + + "StarBlockStart_5->s2\n" + + "StarBlockStart_5->s3\n" + + "s8->RuleStop_a_1\n" + + "s2-A->BlockEnd_6\n" + + "s3-B->s4\n" + + "RuleStop_a_1-EOF->s10\n" + + "BlockEnd_6->StarLoopBack_9\n" + + "s4-action_0:-1->BlockEnd_6\n" + + "StarLoopBack_9->StarLoopEntry_7\n"; + checkRuleATN(g, "a", expecting); + } + @Test public void testPredicatedAorB() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : {p1}? A | {p2}? B ;"); + String expecting = + "RuleStart_a_0->BlockStart_6\n" + + "BlockStart_6->s2\n" + + "BlockStart_6->s4\n" + + "s2-pred_0:0->s3\n" + + "s4-pred_0:1->s5\n" + + "s3-A->BlockEnd_7\n" + + "s5-B->BlockEnd_7\n" + + "BlockEnd_7->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s8\n"; + checkRuleATN(g, "a", expecting); + } + + @Test public void testParserRuleRefInLexerRule() throws Exception { + boolean threwException = false; + ErrorQueue errorQueue = new ErrorQueue(); + try { + String gstr = + "grammar U;\n"+ + "a : A;\n"+ + "A : a;\n"; + + Tool tool = new Tool(); + tool.removeListeners(); + tool.addListener(errorQueue); + assertEquals(0, errorQueue.size()); + GrammarRootAST grammarRootAST = tool.parseGrammarFromString(gstr); + assertEquals(0, errorQueue.size()); + Grammar g = tool.createGrammar(grammarRootAST); + assertEquals(0, errorQueue.size()); + g.fileName = ""; + tool.process(g, false); + } + catch (Exception e) { + threwException = true; + e.printStackTrace(); + } + System.out.println(errorQueue); + assertEquals(1, errorQueue.errors.size()); + assertEquals(ErrorType.PARSER_RULE_REF_IN_LEXER_RULE, errorQueue.errors.get(0).getErrorType()); + assertEquals("[a, A]", Arrays.toString(errorQueue.errors.get(0).getArgs())); + assertTrue(!threwException); + } + +/* + @Test public void testMultiplePredicates() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" + + "b : {p4}? B ;"); + String expecting = + "\n"; + checkRule(g, "a", expecting); + } + @Test public void testSets() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "a : ( A | B )+ ;\n" + + "b : ( A | B{;} )+ ;\n" + + "c : (A|B) (A|B) ;\n" + + "d : ( A | B )* ;\n" + + "e : ( A | B )? 
;"); + String expecting = + "\n"; + checkRule(g, "a", expecting); + expecting = + "\n"; + checkRule(g, "b", expecting); + expecting = + "\n"; + checkRule(g, "c", expecting); + expecting = + "\n"; + checkRule(g, "d", expecting); + expecting = + "\n"; + checkRule(g, "e", expecting); + } + @Test public void testNotSet() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "tokens { A; B; C; }\n"+ + "a : ~A ;\n"); + String expecting = + "\n"; + checkRule(g, "a", expecting); + } + @Test public void testNotSingletonBlockSet() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "tokens { A; B; C; }\n"+ + "a : ~(A) ;\n"); + String expecting = + "\n"; + checkRule(g, "a", expecting); + } + @Test public void testNotCharSet() throws Exception { + Grammar g = new Grammar( + "lexer grammar P;\n"+ + "A : ~'3' ;\n"); + String expecting = + "RuleStart_A_1->s5\n" + + "s5-{'\\u0000'..'2', '4'..'\\uFFFE'}->s6\n" + + "s6->RuleStop_A_2\n"; + checkRule(g, "A", expecting); + } + @Test public void testNotBlockSet() throws Exception { + Grammar g = new Grammar( + "lexer grammar P;\n"+ + "A : ~('3'|'b') ;\n"); + String expecting = + "\n"; + checkRule(g, "A", expecting); + } + @Test public void testNotSetLoop() throws Exception { + Grammar g = new Grammar( + "lexer grammar P;\n"+ + "A : ~('3')* ;\n"); + String expecting = + "\n"; + checkRule(g, "A", expecting); + } + @Test public void testNotBlockSetLoop() throws Exception { + Grammar g = new Grammar( + "lexer grammar P;\n"+ + "A : ~('3'|'b')* ;\n"); + String expecting = + "\n"; + checkRule(g, "A", expecting); + } + @Test public void testLabeledNotSet() throws Exception { + Grammar g = new Grammar( + "parser grammar P;\n"+ + "tokens { A; B; C; }\n"+ + "a : t=~A ;\n"); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2-B..C->.s3\n" + + ".s3->:s4\n" + + ":s4-EOF->.s5\n"; + checkRule(g, "a", expecting); + } + @Test public void testLabeledNotCharSet() throws Exception { + Grammar g = new Grammar( + "lexer grammar P;\n"+ + "A : t=~'3' ;\n"); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" + + ".s3->:s4\n" + + ":s4-->.s5\n"; + checkRule(g, "A", expecting); + } + @Test public void testLabeledNotBlockSet() throws Exception { + Grammar g = new Grammar( + "lexer grammar P;\n"+ + "A : t=~('3'|'b') ;\n"); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" + + ".s3->:s4\n" + + ":s4-->.s5\n"; + checkRule(g, "A", expecting); + } + @Test public void testEscapedCharLiteral() throws Exception { + Grammar g = new Grammar( + "grammar P;\n"+ + "a : '\\n';"); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2-'\\n'->.s3\n" + + ".s3->:s4\n" + + ":s4-EOF->.s5\n"; + checkRule(g, "a", expecting); + } + @Test public void testEscapedStringLiteral() throws Exception { + Grammar g = new Grammar( + "grammar P;\n"+ + "a : 'a\\nb\\u0030c\\'';"); + String expecting = + "RuleStart_a_0->s2\n" + + "s2-'a\\nb\\u0030c\\''->s3\n" + + "s3->RuleStop_a_1\n" + + "RuleStop_a_1-EOF->s4\n"; + checkRule(g, "a", expecting); + } + // AUTO BACKTRACKING STUFF + @Test public void testAutoBacktracking_RuleBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : 'a'{;}|'b';" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s1->.s9\n" + + ".s10-'b'->.s11\n" + + ".s11->.s6\n" + + ".s2-{synpred1_t}?->.s3\n" + + ".s3-'a'->.s4\n" + + ".s4-{}->.s5\n" + + ".s5->.s6\n" + + 
".s6->:s7\n" + + ".s9->.s10\n" + + ":s7-EOF->.s8\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_RuleSetBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : 'a'|'b';" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2-'a'..'b'->.s3\n" + + ".s3->:s4\n" + + ":s4-EOF->.s5\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_SimpleBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'{;}|'b') ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s10->.s11\n" + + ".s11-'b'->.s12\n" + + ".s12->.s7\n" + + ".s2->.s10\n" + + ".s2->.s3\n" + + ".s3-{synpred1_t}?->.s4\n" + + ".s4-'a'->.s5\n" + + ".s5-{}->.s6\n" + + ".s6->.s7\n" + + ".s7->:s8\n" + + ":s8-EOF->.s9\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_SetBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'|'b') ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2-'a'..'b'->.s3\n" + + ".s3->:s4\n" + + ":s4-EOF->.s5\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_StarBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'{;}|'b')* ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s12->.s13\n" + + ".s13-{synpred2_t}?->.s14\n" + + ".s14-'b'->.s15\n" + + ".s15->.s8\n" + + ".s16->.s9\n" + + ".s2->.s16\n" + + ".s2->.s3\n" + + ".s3->.s12\n" + + ".s3->.s4\n" + + ".s4-{synpred1_t}?->.s5\n" + + ".s5-'a'->.s6\n" + + ".s6-{}->.s7\n" + + ".s7->.s8\n" + + ".s8->.s3\n" + + ".s8->.s9\n" + + ".s9->:s10\n" + + ":s10-EOF->.s11\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'|'b')* ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2->.s3\n" + + ".s2->.s9\n" + + ".s3->.s4\n" + + ".s4-'a'..'b'->.s5\n" + + ".s5->.s3\n" + + ".s5->.s6\n" + + ".s6->:s7\n" + + ".s9->.s6\n" + + ":s7-EOF->.s8\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_StarSetBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'|'b'{;})* ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s11->.s12\n" + + ".s12-{synpred2_t}?->.s13\n" + + ".s13-'b'->.s14\n" + + ".s14-{}->.s15\n" + + ".s15->.s7\n" + + ".s16->.s8\n" + + ".s2->.s16\n" + + ".s2->.s3\n" + + ".s3->.s11\n" + + ".s3->.s4\n" + + ".s4-{synpred1_t}?->.s5\n" + + ".s5-'a'->.s6\n" + + ".s6->.s7\n" + + ".s7->.s3\n" + + ".s7->.s8\n" + + ".s8->:s9\n" + + ":s9-EOF->.s10\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_StarBlock1Alt() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a')* ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s10->.s7\n" + + ".s2->.s10\n" + + ".s2->.s3\n" + + ".s3->.s4\n" + + ".s4-{synpred1_t}?->.s5\n" + + ".s5-'a'->.s6\n" + + ".s6->.s3\n" + + ".s6->.s7\n" + + ".s7->:s8\n" + + ":s8-EOF->.s9\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_PlusBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : 
('a'{;}|'b')+ ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s12->.s13\n" + + ".s13-{synpred2_t}?->.s14\n" + + ".s14-'b'->.s15\n" + + ".s15->.s8\n" + + ".s2->.s3\n" + + ".s3->.s12\n" + + ".s3->.s4\n" + + ".s4-{synpred1_t}?->.s5\n" + + ".s5-'a'->.s6\n" + + ".s6-{}->.s7\n" + + ".s7->.s8\n" + + ".s8->.s3\n" + + ".s8->.s9\n" + + ".s9->:s10\n" + + ":s10-EOF->.s11\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_PlusSetBlock() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'|'b'{;})+ ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s11->.s12\n" + + ".s12-{synpred2_t}?->.s13\n" + + ".s13-'b'->.s14\n" + + ".s14-{}->.s15\n" + + ".s15->.s7\n" + + ".s2->.s3\n" + + ".s3->.s11\n" + + ".s3->.s4\n" + + ".s4-{synpred1_t}?->.s5\n" + + ".s5-'a'->.s6\n" + + ".s6->.s7\n" + + ".s7->.s3\n" + + ".s7->.s8\n" + + ".s8->:s9\n" + + ":s9-EOF->.s10\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_PlusBlock1Alt() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a')+ ;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2->.s3\n" + + ".s3->.s4\n" + + ".s4-{synpred1_t}?->.s5\n" + + ".s5-'a'->.s6\n" + + ".s6->.s3\n" + + ".s6->.s7\n" + + ".s7->:s8\n" + + ":s8-EOF->.s9\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_OptionalBlock2Alts() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a'{;}|'b')?;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s10->.s11\n" + + ".s10->.s14\n" + + ".s11-{synpred2_t}?->.s12\n" + + ".s12-'b'->.s13\n" + + ".s13->.s7\n" + + ".s14->.s7\n" + + ".s2->.s10\n" + + ".s2->.s3\n" + + ".s3-{synpred1_t}?->.s4\n" + + ".s4-'a'->.s5\n" + + ".s5-{}->.s6\n" + + ".s6->.s7\n" + + ".s7->:s8\n" + + ":s8-EOF->.s9\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_OptionalBlock1Alt() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a')?;" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s2->.s3\n" + + ".s2->.s9\n" + + ".s3-{synpred1_t}?->.s4\n" + + ".s4-'a'->.s5\n" + + ".s5->.s6\n" + + ".s6->:s7\n" + + ".s9->.s6\n" + + ":s7-EOF->.s8\n"; + checkRule(g, "a", expecting); + } + @Test public void testAutoBacktracking_ExistingPred() throws Exception { + Grammar g = new Grammar( + "grammar t;\n" + + "options {backtrack=true;}\n"+ + "a : ('a')=> 'a' | 'b';" + ); + String expecting = + ".s0->.s1\n" + + ".s1->.s2\n" + + ".s1->.s8\n" + + ".s10->.s5\n" + + ".s2-{synpred1_t}?->.s3\n" + + ".s3-'a'->.s4\n" + + ".s4->.s5\n" + + ".s5->:s6\n" + + ".s8->.s9\n" + + ".s9-'b'->.s10\n" + + ":s6-EOF->.s7\n"; + checkRule(g, "a", expecting); + } +*/ + @Test public void testDefaultMode() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' ;\n" + + "X : 'x' ;\n" + + "mode FOO;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + String expecting = + "s0->RuleStart_A_2\n" + + "s0->RuleStart_X_4\n" + + "RuleStart_A_2->s10\n" + + "RuleStart_X_4->s12\n" + + "s10-'a'->s11\n" + + "s12-'x'->s13\n" + + "s11->RuleStop_A_3\n" + + "s13->RuleStop_X_5\n"; + checkTokensRule(g, "DEFAULT_MODE", expecting); + } + @Test public void testMode() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' ;\n" + + "X : 'x' ;\n" + + "mode FOO;\n" + + "B : 'b' ;\n" + + 
"C : 'c' ;\n"); + String expecting = + "s1->RuleStart_B_6\n" + + "s1->RuleStart_C_8\n" + + "RuleStart_B_6->s14\n" + + "RuleStart_C_8->s16\n" + + "s14-'b'->s15\n" + + "s16-'c'->s17\n" + + "s15->RuleStop_B_7\n" + + "s17->RuleStop_C_9\n"; + checkTokensRule(g, "FOO", expecting); + } + void checkTokensRule(LexerGrammar g, String modeName, String expecting) { +// if ( g.ast!=null && !g.ast.hasErrors ) { +// System.out.println(g.ast.toStringTree()); +// Tool antlr = new Tool(); +// SemanticPipeline sem = new SemanticPipeline(g); +// sem.process(); +// if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) +// for (Grammar imp : g.getImportedGrammars()) { +// antlr.processNonCombinedGrammar(imp); +// } +// } +// } + if ( modeName==null ) modeName = "DEFAULT_MODE"; + if ( g.modes.get(modeName)==null ) { + System.err.println("no such mode "+modeName); + return; + } + ParserATNFactory f = new LexerATNFactory(g); + ATN nfa = f.createATN(); + ATNState startState = nfa.modeNameToStartState.get(modeName); + ATNPrinter serializer = new ATNPrinter(g, startState); + String result = serializer.asString(); + //System.out.print(result); + assertEquals(expecting, result); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestATNDeserialization.java b/tool/test/org/antlr/v4/test/tool/TestATNDeserialization.java new file mode 100644 index 000000000..a791849e5 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestATNDeserialization.java @@ -0,0 +1,189 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.antlr.v4.runtime.misc.Utils; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import static org.junit.Assert.*; + +import java.util.Arrays; + +public class TestATNDeserialization extends BaseTest { + @Test public void testSimpleNoBlock() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B ;"); + checkDeserializationIsStable(g); + } + + @Test public void testEOF() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : EOF ;"); + checkDeserializationIsStable(g); + } + + @Test public void testEOFInSet() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (EOF|A) ;"); + checkDeserializationIsStable(g); + } + + @Test public void testNot() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A, B, C}\n" + + "a : ~A ;"); + checkDeserializationIsStable(g); + } + + @Test public void testWildcard() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A, B, C}\n" + + "a : . ;"); + checkDeserializationIsStable(g); + } + + @Test public void testPEGAchillesHeel() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B ;"); + checkDeserializationIsStable(g); + } + + @Test public void test3Alts() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B | A B C ;"); + checkDeserializationIsStable(g); + } + + @Test public void testSimpleLoop() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A+ B ;"); + checkDeserializationIsStable(g); + } + + @Test public void testRuleRef() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : e ;\n" + + "e : E ;\n"); + checkDeserializationIsStable(g); + } + + @Test public void testLexerTwoRules() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' ;\n" + + "B : 'b' ;\n"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerEOF() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' EOF ;\n"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerEOFInSet() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' (EOF|'\\n') ;\n"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerRange() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : '0'..'9' ;\n"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerLoops() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : '0'..'9'+ ;\n"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerNotSet() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b')\n ;"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerNotSetWithRange() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b'|'e'|'p'..'t')\n ;"); + checkDeserializationIsStable(lg); + } + + @Test public void testLexerNotSetWithRange2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b') ~('e'|'p'..'t')\n ;"); + checkDeserializationIsStable(lg); + } + + @Test 
public void test2ModesInLexer() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a'\n ;\n" + + "mode M;\n" + + "B : 'b';\n" + + "mode M2;\n" + + "C : 'c';\n"); + checkDeserializationIsStable(lg); + } + + protected void checkDeserializationIsStable(Grammar g) { + ATN atn = createATN(g, false); + char[] data = Utils.toCharArray(ATNSerializer.getSerialized(atn)); + String atnData = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + ATN atn2 = new ATNDeserializer().deserialize(data); + String atn2Data = ATNSerializer.getDecoded(atn2, Arrays.asList(g.getTokenNames())); + + assertEquals(atnData, atn2Data); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestATNInterpreter.java b/tool/test/org/antlr/v4/test/tool/TestATNInterpreter.java new file mode 100644 index 000000000..7894fb7f7 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestATNInterpreter.java @@ -0,0 +1,409 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.runtime.atn.BlockStartState; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.tool.DOTGenerator; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + + // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH + // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH + // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH + +public class TestATNInterpreter extends BaseTest { + @Test public void testSimpleNoBlock() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B ;"); + checkMatchedAlt(lg, g, "ab", 1); + } + + @Test public void testSet() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A,B,C}\n" + + "a : ~A ;"); + checkMatchedAlt(lg, g, "b", 1); + } + + @Test public void testPEGAchillesHeel() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B ;"); + checkMatchedAlt(lg, g, "a", 1); + checkMatchedAlt(lg, g, "ab", 2); + checkMatchedAlt(lg, g, "abc", 2); + } + + @Test public void testMustTrackPreviousGoodAlt() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B ;"); + + checkMatchedAlt(lg, g, "a", 1); + checkMatchedAlt(lg, g, "ab", 2); + + checkMatchedAlt(lg, g, "ac", 1); + checkMatchedAlt(lg, g, "abc", 2); + } + + @Test(expected = NoViableAltException.class) + public void testMustTrackPreviousGoodAltWithEOF() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A | A B) EOF;"); + + checkMatchedAlt(lg, g, "a", 1); + checkMatchedAlt(lg, g, "ab", 2); + + try { + checkMatchedAlt(lg, g, "ac", 1); + } + catch (NoViableAltException re) { + assertEquals(1, re.getOffendingToken().getTokenIndex()); + assertEquals(3, re.getOffendingToken().getType()); + throw re; + } + } + + @Test public void testMustTrackPreviousGoodAlt2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B | A B C ;"); + + checkMatchedAlt(lg, g, "a", 1); + checkMatchedAlt(lg, g, "ab", 2); + checkMatchedAlt(lg, g, "abc", 3); + + checkMatchedAlt(lg, g, "ad", 1); + checkMatchedAlt(lg, g, "abd", 2); + checkMatchedAlt(lg, g, "abcd", 3); + } + + @Test(expected = NoViableAltException.class) + public void testMustTrackPreviousGoodAlt2WithEOF() throws Exception { + LexerGrammar lg = new 
LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A | A B | A B C) EOF;"); + + checkMatchedAlt(lg, g, "a", 1); + checkMatchedAlt(lg, g, "ab", 2); + checkMatchedAlt(lg, g, "abc", 3); + + try { + checkMatchedAlt(lg, g, "abd", 1); + } + catch (NoViableAltException re) { + assertEquals(2, re.getOffendingToken().getTokenIndex()); + assertEquals(4, re.getOffendingToken().getType()); + throw re; + } + } + + @Test public void testMustTrackPreviousGoodAlt3() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B | A | A B C ;"); + + checkMatchedAlt(lg, g, "a", 2); + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "abc", 3); + + checkMatchedAlt(lg, g, "ad", 2); + checkMatchedAlt(lg, g, "abd", 1); + checkMatchedAlt(lg, g, "abcd", 3); + } + + @Test(expected = NoViableAltException.class) + public void testMustTrackPreviousGoodAlt3WithEOF() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A B | A | A B C) EOF;"); + + checkMatchedAlt(lg, g, "a", 2); + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "abc", 3); + + try { + checkMatchedAlt(lg, g, "abd", 1); + } + catch (NoViableAltException re) { + assertEquals(2, re.getOffendingToken().getTokenIndex()); + assertEquals(4, re.getOffendingToken().getType()); + throw re; + } + } + + @Test public void testAmbigAltChooseFirst() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B | A B ;"); // first alt + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "abc", 1); + } + + @Test public void testAmbigAltChooseFirstWithFollowingToken() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A B | A B) C ;"); // first alt + checkMatchedAlt(lg, g, "abc", 1); + checkMatchedAlt(lg, g, "abcd", 1); + } + + @Test public void testAmbigAltChooseFirstWithFollowingToken2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A B | A B | C) D ;"); + checkMatchedAlt(lg, g, "abd", 1); + checkMatchedAlt(lg, g, "abdc", 1); + checkMatchedAlt(lg, g, "cd", 3); + } + + @Test public void testAmbigAltChooseFirst2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B | A B | A B C ;"); + + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "abc", 3); + + checkMatchedAlt(lg, g, "abd", 1); + checkMatchedAlt(lg, g, "abcd", 3); + } + + @Test(expected = NoViableAltException.class) + public void testAmbigAltChooseFirst2WithEOF() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + 
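+		// checkMatchedAlt() below calls g.importVocab(lg), so A..D keep the same token types in L and T (see NOTICE above)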
Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A B | A B | A B C) EOF;"); + + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "abc", 3); + + try { + checkMatchedAlt(lg, g, "abd", 1); + } + catch (NoViableAltException re) { + assertEquals(2, re.getOffendingToken().getTokenIndex()); + assertEquals(4, re.getOffendingToken().getType()); + throw re; + } + } + + @Test public void testSimpleLoop() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "D : 'd' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A+ B ;"); + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "aab", 1); + checkMatchedAlt(lg, g, "aaaaaab", 1); + checkMatchedAlt(lg, g, "aabd", 1); + } + + @Test public void testCommonLeftPrefix() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B | A C ;"); + checkMatchedAlt(lg, g, "ab", 1); + checkMatchedAlt(lg, g, "ac", 2); + } + + @Test public void testArbitraryLeftPrefix() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A+ B | A+ C ;"); + checkMatchedAlt(lg, g, "aac", 2); + } + + @Test public void testRecursiveLeftPrefix() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "LP : '(' ;\n" + + "RP : ')' ;\n" + + "INT : '0'..'9'+ ;\n" + ); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A,B,C,LP,RP,INT}\n" + + "a : e B | e C ;\n" + + "e : LP e RP\n" + + " | INT\n" + + " ;"); + checkMatchedAlt(lg, g, "34b", 1); + checkMatchedAlt(lg, g, "34c", 2); + checkMatchedAlt(lg, g, "(34)b", 1); + checkMatchedAlt(lg, g, "(34)c", 2); + checkMatchedAlt(lg, g, "((34))b", 1); + checkMatchedAlt(lg, g, "((34))c", 2); + } + + public void checkMatchedAlt(LexerGrammar lg, final Grammar g, + String inputString, + int expected) + { + ATN lexatn = createATN(lg, true); + LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); + IntegerList types = getTokenTypesViaATN(inputString, lexInterp); + System.out.println(types); + + g.importVocab(lg); + + ParserATNFactory f = new ParserATNFactory(g); + ATN atn = f.createATN(); + + IntTokenStream input = new IntTokenStream(types); + System.out.println("input="+input.types); + ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input); + ATNState startState = atn.ruleToStartState[g.getRule("a").index]; + if ( startState.transition(0).target instanceof BlockStartState ) { + startState = startState.transition(0).target; + } + + DOTGenerator dot = new DOTGenerator(g); + System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule("a").index])); + Rule r = g.getRule("e"); + if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); + + int result = interp.matchATN(input, startState); + assertEquals(expected, result); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java b/tool/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java new file mode 100644 index 000000000..b1dbaf588 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java @@ -0,0 +1,325 @@ +/* + * [The "BSD license"] + * 
Copyright (c) 2012 Terence Parr
+ * Copyright (c) 2012 Sam Harwell
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.v4.test.tool;
+
+import org.antlr.v4.runtime.ANTLRInputStream;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.atn.ATN;
+import org.antlr.v4.runtime.atn.ATNState;
+import org.antlr.v4.runtime.misc.Utils;
+import org.antlr.v4.tool.DOTGenerator;
+import org.antlr.v4.tool.LexerGrammar;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.junit.Assert.*;
+
+/**
+ * Lexer rules are a little quirky when it comes to wildcards. The problem
+ * stems from the fact that we want the longest match to win among
+ * several rules and even within a rule. However, that conflicts
+ * with the notion of non-greedy, which by definition tries to match
+ * the fewest possible. During ATN construction, non-greedy loops
+ * have their entry and exit branches reversed so that the ATN
+ * simulator will see the exit branch first, giving it priority. The
+ * first path to the stop state kills any other paths for that rule
+ * that begin with the wildcard. In general, this does everything we
+ * want, but occasionally there are some quirks, as you'll see from
+ * the tests below.
+ */ +public class TestATNLexerInterpreter extends BaseTest { + @Test public void testLexerTwoRules() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' ;\n" + + "B : 'b' ;\n"); + String expecting = "A, B, A, B, EOF"; + checkLexerMatches(lg, "abab", expecting); + } + + @Test public void testShortLongRule() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'xy'\n" + + " | 'xyz'\n" + // this alt is preferred since there are no non-greedy configs + " ;\n" + + "Z : 'z'\n" + + " ;\n"); + checkLexerMatches(lg, "xy", "A, EOF"); + checkLexerMatches(lg, "xyz", "A, EOF"); + } + + @Test public void testShortLongRule2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'xyz'\n" + // make sure nongreedy mech cut off doesn't kill this alt + " | 'xy'\n" + + " ;\n"); + checkLexerMatches(lg, "xy", "A, EOF"); + checkLexerMatches(lg, "xyz", "A, EOF"); + } + + @Test public void testWildOnEndFirstAlt() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'xy' .\n" + // should pursue '.' since xyz hits stop first, before 2nd alt + " | 'xy'\n" + + " ;\n" + + "Z : 'z'\n" + + " ;\n"); + checkLexerMatches(lg, "xy", "A, EOF"); + checkLexerMatches(lg, "xyz", "A, EOF"); + } + + @Test public void testWildOnEndLastAlt() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'xy'\n" + + " | 'xy' .\n" + // this alt is preferred since there are no non-greedy configs + " ;\n" + + "Z : 'z'\n" + + " ;\n"); + checkLexerMatches(lg, "xy", "A, EOF"); + checkLexerMatches(lg, "xyz", "A, EOF"); + } + + @Test public void testWildcardNonQuirkWhenSplitBetweenTwoRules() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'xy' ;\n" + + "B : 'xy' . 
'z' ;\n"); + checkLexerMatches(lg, "xy", "A, EOF"); + checkLexerMatches(lg, "xyqz", "B, EOF"); + } + + @Test public void testLexerLoops() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : '0'..'9'+ ;\n" + + "ID : 'a'..'z'+ ;\n"); + String expecting = "ID, INT, ID, INT, EOF"; + checkLexerMatches(lg, "a34bde3", expecting); + } + + @Test public void testLexerNotSet() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b')\n ;"); + String expecting = "ID, EOF"; + checkLexerMatches(lg, "c", expecting); + } + + @Test public void testLexerKeywordIDAmbiguity() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "KEND : 'end' ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n')+ ;"); + String expecting = "ID, EOF"; + //checkLexerMatches(lg, "e", expecting); + expecting = "KEND, EOF"; + checkLexerMatches(lg, "end", expecting); + expecting = "ID, EOF"; + checkLexerMatches(lg, "ending", expecting); + expecting = "ID, WS, KEND, WS, ID, EOF"; + checkLexerMatches(lg, "a end bcd", expecting); + } + + @Test public void testLexerRuleRef() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : DIGIT+ ;\n" + + "fragment DIGIT : '0'..'9' ;\n" + + "WS : (' '|'\\n')+ ;"); + String expecting = "INT, WS, INT, EOF"; + checkLexerMatches(lg, "32 99", expecting); + } + + @Test public void testRecursiveLexerRuleRef() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "CMT : '/*' (CMT | ~'*')+ '*/' ;\n" + + "WS : (' '|'\\n')+ ;"); + String expecting = "CMT, WS, CMT, EOF"; + checkLexerMatches(lg, "/* ick */\n/* /*nested*/ */", expecting); + } + + @Test public void testRecursiveLexerRuleRefWithWildcard() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "CMT : '/*' (CMT | .)*? '*/' ;\n" + + "WS : (' '|'\\n')+ ;"); + + String expecting = "CMT, WS, CMT, WS, EOF"; + checkLexerMatches(lg, + "/* ick */\n" + + "/* /* */\n" + + "/* /*nested*/ */\n", + expecting); + } + + @Test public void testLexerWildcardGreedyLoopByDefault() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "CMT : '//' .* '\\n' ;\n"); + String expecting = "CMT, EOF"; + checkLexerMatches(lg, "//x\n//y\n", expecting); + } + + @Test public void testLexerWildcardLoopExplicitNonGreedy() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "CMT : '//' .*? '\\n' ;\n"); + String expecting = "CMT, CMT, EOF"; + checkLexerMatches(lg, "//x\n//y\n", expecting); + } + + @Test public void testLexerEscapeInString() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "STR : '[' ('~' ']' | .)* ']' ;\n"); + checkLexerMatches(lg, "[a~]b]", "STR, EOF"); + checkLexerMatches(lg, "[a]", "STR, EOF"); + } + + @Test public void testLexerWildcardGreedyPlusLoopByDefault() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "CMT : '//' .+ '\\n' ;\n"); + String expecting = "CMT, EOF"; + checkLexerMatches(lg, "//x\n//y\n", expecting); + } + + @Test public void testLexerWildcardExplicitNonGreedyPlusLoop() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "CMT : '//' .+? '\\n' ;\n"); + String expecting = "CMT, CMT, EOF"; + checkLexerMatches(lg, "//x\n//y\n", expecting); + } + + // does not fail since ('*/')? 
can't match and have rule succeed
+	@Test public void testLexerGreedyOptionalShouldWorkAsWeExpect() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"CMT : '/*' ('*/')? '*/' ;\n");
+		String expecting = "CMT, EOF";
+		checkLexerMatches(lg, "/**/", expecting);
+	}
+
+	@Test public void testGreedyBetweenRules() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"A : '<a>' ;\n" +
+			"B : '<' .+ '>' ;\n");
+		String expecting = "B, EOF";
+		checkLexerMatches(lg, "<a><x>", expecting);
+	}
+
+	@Test public void testNonGreedyBetweenRules() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"A : '<a>' ;\n" +
+			"B : '<' .+? '>' ;\n");
+		String expecting = "A, B, EOF";
+		checkLexerMatches(lg, "<a><x>", expecting);
+	}
+
+	@Test public void testEOFAtEndOfLineComment() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"CMT : '//' ~('\\n')* ;\n");
+		String expecting = "CMT, EOF";
+		checkLexerMatches(lg, "//x", expecting);
+	}
+
+	@Test public void testEOFAtEndOfLineComment2() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"CMT : '//' ~('\\n'|'\\r')* ;\n");
+		String expecting = "CMT, EOF";
+		checkLexerMatches(lg, "//x", expecting);
+	}
+
+	/** only positive sets like (EOF|'\n') can match EOF and not in wildcard or ~foo sets
+	 *  EOF matches but does not advance cursor.
+	 */
+	@Test public void testEOFInSetAtEndOfLineComment() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"CMT : '//' .* (EOF|'\\n') ;\n");
+		String expecting = "CMT, EOF";
+		checkLexerMatches(lg, "//", expecting);
+	}
+
+	@Test public void testEOFSuffixInSecondRule() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"A : 'a' ;\n"+ // shorter than 'a' EOF, despite EOF being 0 width
+			"B : 'a' EOF ;\n");
+		String expecting = "B, EOF";
+		checkLexerMatches(lg, "a", expecting);
+	}
+
+	@Test public void testEOFSuffixInFirstRule() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"A : 'a' EOF ;\n"+
+			"B : 'a';\n");
+		String expecting = "A, EOF";
+		checkLexerMatches(lg, "a", expecting);
+	}
+
+	@Test public void testEOFByItself() throws Exception {
+		LexerGrammar lg = new LexerGrammar(
+			"lexer grammar L;\n"+
+			"DONE : EOF ;\n"+
+			"A : 'a';\n");
+		String expecting = "A, DONE, EOF";
+		checkLexerMatches(lg, "a", expecting);
+	}
+
+	protected void checkLexerMatches(LexerGrammar lg, String inputString, String expecting) {
+		ATN atn = createATN(lg, true);
+		CharStream input = new ANTLRInputStream(inputString);
+		ATNState startState = atn.modeNameToStartState.get("DEFAULT_MODE");
+		DOTGenerator dot = new DOTGenerator(lg);
+		System.out.println(dot.getDOT(startState, true));
+
+		List<String> tokenTypes = getTokenTypes(lg, atn, input);
+
+		String result = Utils.join(tokenTypes.iterator(), ", ");
+		System.out.println(tokenTypes);
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/test/org/antlr/v4/test/tool/TestATNParserPrediction.java b/tool/test/org/antlr/v4/test/tool/TestATNParserPrediction.java
new file mode 100644
index 000000000..56671add1
--- /dev/null
+++ b/tool/test/org/antlr/v4/test/tool/TestATNParserPrediction.java
@@ -0,0 +1,531 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2012 Terence Parr
+ * Copyright (c) 2012 Sam Harwell
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.tool.DOTGenerator; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + + // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH + // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH + // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH + +public class TestATNParserPrediction extends BaseTest { + @Test public void testAorB() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A{;} | B ;"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "a", 1); + checkPredictedAlt(lg, g, decision, "b", 2); + + // After matching these inputs for decision, what is DFA after each prediction? 
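+		// DFA strings below use DFASerializer notation: ":sN=>k" is an accept state predicting alt k;
+		// a trailing "^" on a state means full-context (LL) prediction was required to resolve it.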
+ String[] inputs = { + "a", + "b", + "a" + }; + String[] dfa = { + "s0-'a'->:s1=>1\n", + + "s0-'a'->:s1=>1\n" + + "s0-'b'->:s2=>2\n", + + "s0-'a'->:s1=>1\n" + // don't change after it works + "s0-'b'->:s2=>2\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testEmptyInput() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | ;"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "a", 1); + checkPredictedAlt(lg, g, decision, "", 2); + + // After matching these inputs for decision, what is DFA after each prediction? + String[] inputs = { + "a", + "", + }; + String[] dfa = { + "s0-'a'->:s1=>1\n", + + "s0-EOF->:s2=>2\n" + + "s0-'a'->:s1=>1\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testPEGAchillesHeel() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B ;"); + checkPredictedAlt(lg, g, 0, "a", 1); + checkPredictedAlt(lg, g, 0, "ab", 2); + checkPredictedAlt(lg, g, 0, "abc", 2); + + String[] inputs = { + "a", + "ab", + "abc" + }; + String[] dfa = { + "s0-'a'->s1\n" + + "s1-EOF->:s2=>1\n", + + "s0-'a'->s1\n" + + "s1-EOF->:s2=>1\n" + + "s1-'b'->:s3=>2\n", + + "s0-'a'->s1\n" + + "s1-EOF->:s2=>1\n" + + "s1-'b'->:s3=>2\n" + }; + checkDFAConstruction(lg, g, 0, inputs, dfa); + } + + @Test public void testRuleRefxory() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : x | y ;\n" + + "x : A ;\n" + + "y : B ;\n"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "a", 1); + checkPredictedAlt(lg, g, decision, "b", 2); + + // After matching these inputs for decision, what is DFA after each prediction? + String[] inputs = { + "a", + "b", + "a" + }; + String[] dfa = { + "s0-'a'->:s1=>1\n", + + "s0-'a'->:s1=>1\n" + + "s0-'b'->:s2=>2\n", + + "s0-'a'->:s1=>1\n" + // don't change after it works + "s0-'b'->:s2=>2\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testOptionalRuleChasesGlobalFollow() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A,B,C}\n" + + "a : x B ;\n" + + "b : x C ;\n" + + "x : A | ;\n"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "a", 1); + checkPredictedAlt(lg, g, decision, "b", 2); + checkPredictedAlt(lg, g, decision, "c", 2); + + // After matching these inputs for decision, what is DFA after each prediction? 
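+		// Predicting the empty alt of x needs the global FOLLOW of x, which includes B (from rule a) and C (from rule b),
+		// so input starting with 'c' also predicts alt 2 even though only rule b can follow x with C.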
+ String[] inputs = { + "a", + "b", + "c", + "c", + }; + String[] dfa = { + "s0-'a'->:s1=>1\n", + + "s0-'a'->:s1=>1\n" + + "s0-'b'->:s2=>2\n", + + "s0-'a'->:s1=>1\n" + + "s0-'b'->:s2=>2\n" + + "s0-'c'->:s3=>2\n", + + "s0-'a'->:s1=>1\n" + + "s0-'b'->:s2=>2\n" + + "s0-'c'->:s3=>2\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testLL1Ambig() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A | A B ;"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "a", 1); + checkPredictedAlt(lg, g, decision, "ab", 3); + + // After matching these inputs for decision, what is DFA after each prediction? + String[] inputs = { + "a", + "ab", + "ab" + }; + String[] dfa = { + "s0-'a'->s1\n" + + "s1-EOF->:s2^=>1\n", + + "s0-'a'->s1\n" + + "s1-EOF->:s2^=>1\n" + + "s1-'b'->:s3=>3\n", + + "s0-'a'->s1\n" + + "s1-EOF->:s2^=>1\n" + + "s1-'b'->:s3=>3\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testLL2Ambig() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A B | A B | A B C ;"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "ab", 1); + checkPredictedAlt(lg, g, decision, "abc", 3); + + // After matching these inputs for decision, what is DFA after each prediction? + String[] inputs = { + "ab", + "abc", + "ab" + }; + String[] dfa = { + "s0-'a'->s1\n" + + "s1-'b'->s2\n" + + "s2-EOF->:s3^=>1\n", + + "s0-'a'->s1\n" + + "s1-'b'->s2\n" + + "s2-EOF->:s3^=>1\n" + + "s2-'c'->:s4=>3\n", + + "s0-'a'->s1\n" + + "s1-'b'->s2\n" + + "s2-EOF->:s3^=>1\n" + + "s2-'c'->:s4=>3\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testRecursiveLeftPrefix() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "LP : '(' ;\n" + + "RP : ')' ;\n" + + "INT : '0'..'9'+ ;\n" + ); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A,B,C,LP,RP,INT}\n" + + "a : e B | e C ;\n" + + "e : LP e RP\n" + + " | INT\n" + + " ;"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "34b", 1); + checkPredictedAlt(lg, g, decision, "34c", 2); + checkPredictedAlt(lg, g, decision, "((34))b", 1); + checkPredictedAlt(lg, g, decision, "((34))c", 2); + + // After matching these inputs for decision, what is DFA after each prediction? 
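+		// Both alts of rule a start with e, so each DFA path first consumes a complete e invocation
+		// (a bare INT or nested '(' ... ')') before the trailing 'b' or 'c' selects the alt.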
+ String[] inputs = { + "34b", + "34c", + "((34))b", + "((34))c" + }; + String[] dfa = { + "s0-INT->s1\n" + + "s1-'b'->:s2=>1\n", + + "s0-INT->s1\n" + + "s1-'b'->:s2=>1\n" + + "s1-'c'->:s3=>2\n", + + "s0-'('->s4\n" + + "s0-INT->s1\n" + + "s1-'b'->:s2=>1\n" + + "s1-'c'->:s3=>2\n" + + "s4-'('->s5\n" + + "s5-INT->s6\n" + + "s6-')'->s7\n" + + "s7-')'->s1\n", + + "s0-'('->s4\n" + + "s0-INT->s1\n" + + "s1-'b'->:s2=>1\n" + + "s1-'c'->:s3=>2\n" + + "s4-'('->s5\n" + + "s5-INT->s6\n" + + "s6-')'->s7\n" + + "s7-')'->s1\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testRecursiveLeftPrefixWithAorABIssue() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "LP : '(' ;\n" + + "RP : ')' ;\n" + + "INT : '0'..'9'+ ;\n" + ); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A,B,C,LP,RP,INT}\n" + + "a : e A | e A B ;\n" + + "e : LP e RP\n" + + " | INT\n" + + " ;"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "34a", 1); + checkPredictedAlt(lg, g, decision, "34ab", 2); // PEG would miss this one! + checkPredictedAlt(lg, g, decision, "((34))a", 1); + checkPredictedAlt(lg, g, decision, "((34))ab", 2); + + // After matching these inputs for decision, what is DFA after each prediction? + String[] inputs = { + "34a", + "34ab", + "((34))a", + "((34))ab", + }; + String[] dfa = { + "s0-INT->s1\n" + + "s1-'a'->s2\n" + + "s2-EOF->:s3=>1\n", + + "s0-INT->s1\n" + + "s1-'a'->s2\n" + + "s2-EOF->:s3=>1\n" + + "s2-'b'->:s4=>2\n", + + "s0-'('->s5\n" + + "s0-INT->s1\n" + + "s1-'a'->s2\n" + + "s2-EOF->:s3=>1\n" + + "s2-'b'->:s4=>2\n" + + "s5-'('->s6\n" + + "s6-INT->s7\n" + + "s7-')'->s8\n" + + "s8-')'->s1\n", + + "s0-'('->s5\n" + + "s0-INT->s1\n" + + "s1-'a'->s2\n" + + "s2-EOF->:s3=>1\n" + + "s2-'b'->:s4=>2\n" + + "s5-'('->s6\n" + + "s6-INT->s7\n" + + "s7-')'->s8\n" + + "s8-')'->s1\n", + }; + checkDFAConstruction(lg, g, decision, inputs, dfa); + } + + @Test public void testContinuePrediction() throws Exception { + // Sam found prev def of ambiguity was too restrictive. + // E.g., (13, 1, []), (13, 2, []), (12, 2, []) should not + // be declared ambig since (12, 2, []) can take us to + // unambig state maybe. keep going. + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "ID : 'a'..'z' ;\n" + // one char + "SEMI : ';' ;\n"+ + "INT : '0'..'9'+ ;\n" + ); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {ID,SEMI,INT}\n" + + "a : (ID | ID ID?) SEMI ;"); + int decision = 1; + checkPredictedAlt(lg, g, decision, "a;", 1); + checkPredictedAlt(lg, g, decision, "ab;", 2); + } + + @Test public void testContinuePrediction2() throws Exception { + // ID is ambig for first two alts, but ID SEMI lets us move forward with alt 3 + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "ID : 'a'..'z' ;\n" + // one char + "SEMI : ';' ;\n"+ + "INT : '0'..'9'+ ;\n" + ); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {ID,SEMI,INT}\n" + + "a : ID | ID | ID SEMI ;\n"); + int decision = 0; + checkPredictedAlt(lg, g, decision, "a", 1); + checkPredictedAlt(lg, g, decision, "a;", 3); + } + + /** first check that the ATN predicts right alt. + * Then check adaptive prediction. 
+ */ + public void checkPredictedAlt(LexerGrammar lg, Grammar g, int decision, + String inputString, int expectedAlt) + { + Tool.internalOption_ShowATNConfigsInDFA = true; + ATN lexatn = createATN(lg, true); + LexerATNSimulator lexInterp = + new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) },new PredictionContextCache()); + IntegerList types = getTokenTypesViaATN(inputString, lexInterp); + System.out.println(types); + + semanticProcess(lg); + g.importVocab(lg); + semanticProcess(g); + + ParserATNFactory f = new ParserATNFactory(g); + ATN atn = f.createATN(); + + DOTGenerator dot = new DOTGenerator(g); + + Rule r = g.getRule("a"); + if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); + r = g.getRule("b"); + if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); + r = g.getRule("e"); + if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); + r = g.getRule("ifstat"); + if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); + r = g.getRule("block"); + if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); + + // Check ATN prediction +// ParserATNSimulator interp = new ParserATNSimulator(atn); + TokenStream input = new IntTokenStream(types); + ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input); + int alt = interp.adaptivePredict(input, decision, ParserRuleContext.EMPTY); + + assertEquals(expectedAlt, alt); + + // Check adaptive prediction + input.seek(0); + alt = interp.adaptivePredict(input, decision, null); + assertEquals(expectedAlt, alt); + // run 2x; first time creates DFA in atn + input.seek(0); + alt = interp.adaptivePredict(input, decision, null); + assertEquals(expectedAlt, alt); + } + + public void checkDFAConstruction(LexerGrammar lg, Grammar g, int decision, + String[] inputString, String[] dfaString) + { +// Tool.internalOption_ShowATNConfigsInDFA = true; + ATN lexatn = createATN(lg, true); + LexerATNSimulator lexInterp = + new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.getDecisionState(Lexer.DEFAULT_MODE)) }, new PredictionContextCache()); + + semanticProcess(lg); + g.importVocab(lg); + semanticProcess(g); + + ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, null); + for (int i=0; i2 EPSILON 0,0,0\n" + + "2->3 ATOM 1,0,0\n" + + "3->4 ATOM 2,0,0\n" + + "4->1 EPSILON 0,0,0\n"; + ATN atn = createATN(g, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testEOF() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A EOF ;"); + String expecting = + "max type 1\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "5:BASIC 0\n" + + "rule 0:0\n" + + "0->2 EPSILON 0,0,0\n" + + "2->3 ATOM 1,0,0\n" + + "3->4 ATOM 0,0,1\n" + + "4->1 EPSILON 0,0,0\n"; + ATN atn = createATN(g, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testEOFInSet() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : (A|EOF) ;"); + String expecting = + "max type 1\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:0\n" + + "0:EOF, A..A\n" + + "0->2 EPSILON 0,0,0\n" + + "2->3 SET 0,0,0\n" + + "3->1 EPSILON 0,0,0\n"; + ATN atn = createATN(g, 
true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testNot() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A, B, C}\n" + + "a : ~A ;"); + String expecting = + "max type 3\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:0\n" + + "0:A..A\n" + + "0->2 EPSILON 0,0,0\n" + + "2->3 NOT_SET 0,0,0\n" + + "3->1 EPSILON 0,0,0\n"; + ATN atn = createATN(g, true); + DOTGenerator gen = new DOTGenerator(g); + System.out.println(gen.getDOT(atn.ruleToStartState[0])); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testWildcard() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "tokens {A, B, C}\n" + + "a : . ;"); + String expecting = + "max type 3\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:0\n" + + "0->2 EPSILON 0,0,0\n" + + "2->3 WILDCARD 0,0,0\n" + + "3->1 EPSILON 0,0,0\n"; + ATN atn = createATN(g, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testPEGAchillesHeel() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B ;"); + String expecting = + "max type 2\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "5:BLOCK_START 0 6\n" + + "6:BLOCK_END 0\n" + + "7:BASIC 0\n" + + "rule 0:0\n" + + "0->5 EPSILON 0,0,0\n" + + "2->6 ATOM 1,0,0\n" + + "3->4 ATOM 1,0,0\n" + + "4->6 ATOM 2,0,0\n" + + "5->2 EPSILON 0,0,0\n" + + "5->3 EPSILON 0,0,0\n" + + "6->1 EPSILON 0,0,0\n" + + "0:5\n"; + ATN atn = createATN(g, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void test3Alts() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A | A B | A B C ;"); + String expecting = + "max type 3\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "5:BASIC 0\n" + + "6:BASIC 0\n" + + "7:BASIC 0\n" + + "8:BLOCK_START 0 9\n" + + "9:BLOCK_END 0\n" + + "10:BASIC 0\n" + + "rule 0:0\n" + + "0->8 EPSILON 0,0,0\n" + + "2->9 ATOM 1,0,0\n" + + "3->4 ATOM 1,0,0\n" + + "4->9 ATOM 2,0,0\n" + + "5->6 ATOM 1,0,0\n" + + "6->7 ATOM 2,0,0\n" + + "7->9 ATOM 3,0,0\n" + + "8->2 EPSILON 0,0,0\n" + + "8->3 EPSILON 0,0,0\n" + + "8->5 EPSILON 0,0,0\n" + + "9->1 EPSILON 0,0,0\n" + + "0:8\n"; + ATN atn = createATN(g, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testSimpleLoop() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : A+ B ;"); + String expecting = + "max type 2\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:BASIC 0\n" + + "3:PLUS_BLOCK_START 0 4\n" + + "4:BLOCK_END 0\n" + + "5:PLUS_LOOP_BACK 0\n" + + "6:LOOP_END 0 5\n" + + "7:BASIC 0\n" + + "8:BASIC 0\n" + + "9:BASIC 0\n" + + "rule 0:0\n" + + "0->3 EPSILON 0,0,0\n" + + "2->4 ATOM 1,0,0\n" + + "3->2 EPSILON 0,0,0\n" + + "4->5 EPSILON 0,0,0\n" + + "5->3 EPSILON 0,0,0\n" + + "5->6 EPSILON 0,0,0\n" + + "6->7 EPSILON 0,0,0\n" + + "7->8 ATOM 2,0,0\n" + + "8->1 EPSILON 0,0,0\n" + + "0:5\n"; + ATN atn = createATN(g, 
true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testRuleRef() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : e ;\n" + + "e : E ;\n"); + String expecting = + "max type 1\n" + + "0:RULE_START 0\n" + + "1:RULE_STOP 0\n" + + "2:RULE_START 1\n" + + "3:RULE_STOP 1\n" + + "4:BASIC 0\n" + + "5:BASIC 0\n" + + "6:BASIC 1\n" + + "7:BASIC 1\n" + + "8:BASIC 1\n" + + "rule 0:0\n" + + "rule 1:2\n" + + "0->4 EPSILON 0,0,0\n" + + "2->6 EPSILON 0,0,0\n" + + "4->5 RULE 2,1,0\n" + + "5->1 EPSILON 0,0,0\n" + + "6->7 ATOM 1,0,0\n" + + "7->3 EPSILON 0,0,0\n"; + ATN atn = createATN(g, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerTwoRules() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' ;\n" + + "B : 'b' ;\n"); + String expecting = + "max type 2\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:RULE_START 1\n" + + "4:RULE_STOP 1\n" + + "5:BASIC 0\n" + + "6:BASIC 0\n" + + "7:BASIC 1\n" + + "8:BASIC 1\n" + + "rule 0:1 1\n" + + "rule 1:3 2\n" + + "mode 0:0\n" + + "0->1 EPSILON 0,0,0\n" + + "0->3 EPSILON 0,0,0\n" + + "1->5 EPSILON 0,0,0\n" + + "3->7 EPSILON 0,0,0\n" + + "5->6 ATOM 97,0,0\n" + + "6->2 EPSILON 0,0,0\n" + + "7->8 ATOM 98,0,0\n" + + "8->4 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerRange() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : '0'..'9' ;\n"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->4 RANGE 48,57,0\n" + + "4->2 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerEOF() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : 'a' EOF ;\n"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "5:BASIC 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->4 ATOM 97,0,0\n" + + "4->5 ATOM 0,0,1\n" + + "5->2 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerEOFInSet() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : 'a' (EOF|'\\n') ;\n"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "5:BLOCK_START 0 6\n" + + "6:BLOCK_END 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0:EOF, '\\n'..'\\n'\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->5 ATOM 97,0,0\n" + + "4->6 SET 0,0,0\n" + + "5->4 EPSILON 0,0,0\n" + + "6->2 EPSILON 0,0,0\n" + + "0:0\n" + + "1:5\n"; + ATN atn = createATN(lg, true); + String result = 
ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerLoops() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "INT : '0'..'9'+ ;\n"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:PLUS_BLOCK_START 0 5\n" + + "5:BLOCK_END 0\n" + + "6:PLUS_LOOP_BACK 0\n" + + "7:LOOP_END 0 6\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0->1 EPSILON 0,0,0\n" + + "1->4 EPSILON 0,0,0\n" + + "3->5 RANGE 48,57,0\n" + + "4->3 EPSILON 0,0,0\n" + + "5->6 EPSILON 0,0,0\n" + + "6->4 EPSILON 0,0,0\n" + + "6->7 EPSILON 0,0,0\n" + + "7->2 EPSILON 0,0,0\n" + + "0:0\n" + + "1:6\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerAction() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a' {a} ;\n" + + "B : 'b' ;\n" + + "C : 'c' {c} ;\n"); + String expecting = + "max type 3\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:RULE_START 1\n" + + "4:RULE_STOP 1\n" + + "5:RULE_START 2\n" + + "6:RULE_STOP 2\n" + + "7:BASIC 0\n" + + "8:BASIC 0\n" + + "9:BASIC 0\n" + + "10:BASIC 1\n" + + "11:BASIC 1\n" + + "12:BASIC 2\n" + + "13:BASIC 2\n" + + "14:BASIC 2\n" + + "rule 0:1 1\n" + + "rule 1:3 2\n" + + "rule 2:5 3\n" + + "mode 0:0\n" + + "0->1 EPSILON 0,0,0\n" + + "0->3 EPSILON 0,0,0\n" + + "0->5 EPSILON 0,0,0\n" + + "1->7 EPSILON 0,0,0\n" + + "3->10 EPSILON 0,0,0\n" + + "5->12 EPSILON 0,0,0\n" + + "7->8 ATOM 97,0,0\n" + + "8->9 ACTION 0,0,0\n" + + "9->2 EPSILON 0,0,0\n" + + "10->11 ATOM 98,0,0\n" + + "11->4 EPSILON 0,0,0\n" + + "12->13 ATOM 99,0,0\n" + + "13->14 ACTION 2,1,0\n" + + "14->6 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerNotSet() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b')\n ;"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0:'a'..'b'\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->4 NOT_SET 0,0,0\n" + + "4->2 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerSetWithRange() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ('a'|'b'|'e'|'p'..'t')\n ;"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0:'a'..'b', 'e'..'e', 'p'..'t'\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->4 SET 0,0,0\n" + + "4->2 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerNotSetWithRange() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b'|'e'|'p'..'t')\n ;"); + String expecting = + "max type 1\n" + 
+ "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0:'a'..'b', 'e'..'e', 'p'..'t'\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->4 NOT_SET 0,0,0\n" + + "4->2 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerWildcardWithMode() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : 'a'..'z'+ ;\n"+ + "mode CMT;" + + "COMMENT : '*/' {skip(); popMode();} ;\n" + + "JUNK : . {more();} ;\n"); + String expecting = + "max type 3\n" + + "0:TOKEN_START -1\n" + + "1:TOKEN_START -1\n" + + "2:RULE_START 0\n" + + "3:RULE_STOP 0\n" + + "4:RULE_START 1\n" + + "5:RULE_STOP 1\n" + + "6:RULE_START 2\n" + + "7:RULE_STOP 2\n" + + "8:BASIC 0\n" + + "9:PLUS_BLOCK_START 0 10\n" + + "10:BLOCK_END 0\n" + + "11:PLUS_LOOP_BACK 0\n" + + "12:LOOP_END 0 11\n" + + "13:BASIC 1\n" + + "14:BASIC 1\n" + + "15:BASIC 1\n" + + "16:BASIC 1\n" + + "17:BASIC 1\n" + + "18:BASIC 2\n" + + "19:BASIC 2\n" + + "20:BASIC 2\n" + + "rule 0:2 1\n" + + "rule 1:4 2\n" + + "rule 2:6 3\n" + + "mode 0:0\n" + + "mode 1:1\n" + + "0->2 EPSILON 0,0,0\n" + + "1->4 EPSILON 0,0,0\n" + + "1->6 EPSILON 0,0,0\n" + + "2->9 EPSILON 0,0,0\n" + + "4->13 EPSILON 0,0,0\n" + + "6->18 EPSILON 0,0,0\n" + + "8->10 RANGE 97,122,0\n" + + "9->8 EPSILON 0,0,0\n" + + "10->11 EPSILON 0,0,0\n" + + "11->9 EPSILON 0,0,0\n" + + "11->12 EPSILON 0,0,0\n" + + "12->3 EPSILON 0,0,0\n" + + "13->14 ATOM 42,0,0\n" + + "14->15 ATOM 47,0,0\n" + + "15->16 EPSILON 0,0,0\n" + + "16->17 ACTION 1,0,0\n" + + "17->5 EPSILON 0,0,0\n" + + "18->19 WILDCARD 0,0,0\n" + + "19->20 ACTION 2,1,0\n" + + "20->7 EPSILON 0,0,0\n" + + "0:0\n" + + "1:1\n" + + "2:11\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testLexerNotSetWithRange2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "ID : ~('a'|'b') ~('e'|'p'..'t')\n ;"); + String expecting = + "max type 1\n" + + "0:TOKEN_START -1\n" + + "1:RULE_START 0\n" + + "2:RULE_STOP 0\n" + + "3:BASIC 0\n" + + "4:BASIC 0\n" + + "5:BASIC 0\n" + + "rule 0:1 1\n" + + "mode 0:0\n" + + "0:'a'..'b'\n" + + "1:'e'..'e', 'p'..'t'\n" + + "0->1 EPSILON 0,0,0\n" + + "1->3 EPSILON 0,0,0\n" + + "3->4 NOT_SET 0,0,0\n" + + "4->5 NOT_SET 1,0,0\n" + + "5->2 EPSILON 0,0,0\n" + + "0:0\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void testModeInLexer() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a'\n ;\n" + + "B : 'b';\n" + + "mode A;\n" + + "C : 'c';\n"+ + "D : 'd';\n"); + String expecting = + "max type 4\n" + + "0:TOKEN_START -1\n" + + "1:TOKEN_START -1\n" + + "2:RULE_START 0\n" + + "3:RULE_STOP 0\n" + + "4:RULE_START 1\n" + + "5:RULE_STOP 1\n" + + "6:RULE_START 2\n" + + "7:RULE_STOP 2\n" + + "8:RULE_START 3\n" + + "9:RULE_STOP 3\n" + + "10:BASIC 0\n" + + "11:BASIC 0\n" + + "12:BASIC 1\n" + + "13:BASIC 1\n" + + "14:BASIC 2\n" + + "15:BASIC 2\n" + + "16:BASIC 3\n" + + "17:BASIC 3\n" + + "rule 0:2 1\n" + + "rule 1:4 2\n" + + "rule 2:6 3\n" + + "rule 3:8 4\n" + + "mode 0:0\n" + + "mode 1:1\n" + + "0->2 EPSILON 0,0,0\n" + + "0->4 EPSILON 
0,0,0\n" + + "1->6 EPSILON 0,0,0\n" + + "1->8 EPSILON 0,0,0\n" + + "2->10 EPSILON 0,0,0\n" + + "4->12 EPSILON 0,0,0\n" + + "6->14 EPSILON 0,0,0\n" + + "8->16 EPSILON 0,0,0\n" + + "10->11 ATOM 97,0,0\n" + + "11->3 EPSILON 0,0,0\n" + + "12->13 ATOM 98,0,0\n" + + "13->5 EPSILON 0,0,0\n" + + "14->15 ATOM 99,0,0\n" + + "15->7 EPSILON 0,0,0\n" + + "16->17 ATOM 100,0,0\n" + + "17->9 EPSILON 0,0,0\n" + + "0:0\n" + + "1:1\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + + @Test public void test2ModesInLexer() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n"+ + "A : 'a'\n ;\n" + + "mode M;\n" + + "B : 'b';\n" + + "mode M2;\n" + + "C : 'c';\n"); + String expecting = + "max type 3\n" + + "0:TOKEN_START -1\n" + + "1:TOKEN_START -1\n" + + "2:TOKEN_START -1\n" + + "3:RULE_START 0\n" + + "4:RULE_STOP 0\n" + + "5:RULE_START 1\n" + + "6:RULE_STOP 1\n" + + "7:RULE_START 2\n" + + "8:RULE_STOP 2\n" + + "9:BASIC 0\n" + + "10:BASIC 0\n" + + "11:BASIC 1\n" + + "12:BASIC 1\n" + + "13:BASIC 2\n" + + "14:BASIC 2\n" + + "rule 0:3 1\n" + + "rule 1:5 2\n" + + "rule 2:7 3\n" + + "mode 0:0\n" + + "mode 1:1\n" + + "mode 2:2\n" + + "0->3 EPSILON 0,0,0\n" + + "1->5 EPSILON 0,0,0\n" + + "2->7 EPSILON 0,0,0\n" + + "3->9 EPSILON 0,0,0\n" + + "5->11 EPSILON 0,0,0\n" + + "7->13 EPSILON 0,0,0\n" + + "9->10 ATOM 97,0,0\n" + + "10->4 EPSILON 0,0,0\n" + + "11->12 ATOM 98,0,0\n" + + "12->6 EPSILON 0,0,0\n" + + "13->14 ATOM 99,0,0\n" + + "14->8 EPSILON 0,0,0\n" + + "0:0\n" + + "1:1\n" + + "2:2\n"; + ATN atn = createATN(lg, true); + String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); + assertEquals(expecting, result); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestActionSplitter.java b/tool/test/org/antlr/v4/test/tool/TestActionSplitter.java new file mode 100644 index 000000000..e9d6139e9 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestActionSplitter.java @@ -0,0 +1,82 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.runtime.ANTLRStringStream; +import org.antlr.runtime.Token; +import org.antlr.v4.parse.ActionSplitter; +import org.antlr.v4.semantics.BlankActionSplitterListener; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.*; + +public class TestActionSplitter extends BaseTest { + static String[] exprs = { + "foo", "['foo'<" + ActionSplitter.TEXT + ">]", + "$x", "['$x'<" + ActionSplitter.ATTR + ">]", + "\\$x", "['\\$x'<" + ActionSplitter.TEXT + ">]", + "$x.y", "['$x.y'<" + ActionSplitter.QUALIFIED_ATTR + ">]", + "$ID.text", "['$ID.text'<" + ActionSplitter.QUALIFIED_ATTR + ">]", + "$ID", "['$ID'<" + ActionSplitter.ATTR + ">]", + "$ID.getText()", "['$ID'<" + ActionSplitter.ATTR + ">, '.getText()'<" + ActionSplitter.TEXT + ">]", + "$ID.text = \"test\";", "['$ID.text'<" + ActionSplitter.QUALIFIED_ATTR + ">, ' = \"test\";'<" + ActionSplitter.TEXT + ">]", + "$a.line == $b.line", "['$a.line'<" + ActionSplitter.QUALIFIED_ATTR + ">, ' == '<" + ActionSplitter.TEXT + ">, '$b.line'<" + ActionSplitter.QUALIFIED_ATTR + ">]", + "$r.tree", "['$r.tree'<" + ActionSplitter.QUALIFIED_ATTR + ">]", + "foo $a::n bar", "['foo '<" + ActionSplitter.TEXT + ">, '$a::n'<" + ActionSplitter.NONLOCAL_ATTR + ">, ' bar'<" + ActionSplitter.TEXT + ">]", + "$rule::x;", "['$rule::x'<" + ActionSplitter.NONLOCAL_ATTR + ">, ';'<" + ActionSplitter.TEXT + ">]", + "$field::x = $field.st;", "['$field::x = $field.st;'<" + ActionSplitter.SET_NONLOCAL_ATTR + ">]", + "$foo.get(\"ick\");", "['$foo'<" + ActionSplitter.ATTR + ">, '.get(\"ick\");'<" + ActionSplitter.TEXT + ">]", + }; + + @Test public void testExprs() { + for (int i = 0; i < exprs.length; i+=2) { + String input = exprs[i]; + String expect = exprs[i+1]; + List chunks = getActionChunks(input); + assertEquals("input: "+input, expect, chunks.toString()); + } + } + + public static List getActionChunks(String a) { + List chunks = new ArrayList(); + ActionSplitter splitter = new ActionSplitter(new ANTLRStringStream(a), + new BlankActionSplitterListener()); + Token t = splitter.nextToken(); + while ( t.getType()!=Token.EOF ) { + chunks.add("'"+t.getText()+"'<"+t.getType()+">"); + t = splitter.nextToken(); + } + return chunks; + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java b/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java new file mode 100644 index 000000000..ed7542706 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java @@ -0,0 +1,424 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +/** */ +public class TestActionTranslation extends BaseTest { + String attributeTemplate = + "attributeTemplate(members,init,inline,finally,inline2) ::= <<\n" + + "parser grammar A;\n"+ + "@members {#members#<members>#end-members#}\n" + + "a[int x, int x1] returns [int y]\n" + + "@init {#init#<init>#end-init#}\n" + + " : id=ID ids+=ID lab=b[34] c d {\n" + + " #inline#<inline>#end-inline#\n" + + " }\n" + + " c\n" + + " ;\n" + + " finally {#finally#<finally>#end-finally#}\n" + + "b[int d] returns [int e]\n" + + " : {#inline2#<inline2>#end-inline2#}\n" + + " ;\n" + + "c returns [int x, int y] : ;\n" + + "d : ;\n" + + ">>"; + + @Test public void testEscapedLessThanInAction() throws Exception { + String action = "i<3; '<xmltag>'"; + String expected = "i<3; '<xmltag>'"; + testActions(attributeTemplate, "members", action, expected); + testActions(attributeTemplate, "init", action, expected); + testActions(attributeTemplate, "inline", action, expected); + testActions(attributeTemplate, "finally", action, expected); + testActions(attributeTemplate, "inline2", action, expected); + } + + @Test public void testEscaped$InAction() throws Exception { + String action = "int \\$n; \"\\$in string\\$\""; + String expected = "int $n; \"$in string$\""; + testActions(attributeTemplate, "members", action, expected); + testActions(attributeTemplate, "init", action, expected); + testActions(attributeTemplate, "inline", action, expected); + testActions(attributeTemplate, "finally", action, expected); + testActions(attributeTemplate, "inline2", action, expected); + } + + /** + * Regression test for "in antlr v4 lexer, $ translation issue in action".
+ * https://github.com/antlr/antlr4/issues/176 + */ + @Test public void testUnescaped$InAction() throws Exception { + String action = "\\$string$"; + String expected = "$string$"; + testActions(attributeTemplate, "members", action, expected); + testActions(attributeTemplate, "init", action, expected); + testActions(attributeTemplate, "inline", action, expected); + testActions(attributeTemplate, "finally", action, expected); + testActions(attributeTemplate, "inline2", action, expected); + } + + @Test public void testEscapedSlash() throws Exception { + String action = "x = '\\n';"; // x = '\n'; -> x = '\n'; + String expected = "x = '\\n';"; + testActions(attributeTemplate, "members", action, expected); + testActions(attributeTemplate, "init", action, expected); + testActions(attributeTemplate, "inline", action, expected); + testActions(attributeTemplate, "finally", action, expected); + testActions(attributeTemplate, "inline2", action, expected); + } + + @Test public void testComplicatedArgParsing() throws Exception { + String action = "x, (*a).foo(21,33), 3.2+1, '\\n', "+ + "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; + String expected = "x, (*a).foo(21,33), 3.2+1, '\\n', "+ + "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; + testActions(attributeTemplate, "members", action, expected); + testActions(attributeTemplate, "init", action, expected); + testActions(attributeTemplate, "inline", action, expected); + testActions(attributeTemplate, "finally", action, expected); + testActions(attributeTemplate, "inline2", action, expected); + } + + @Test public void testComplicatedArgParsingWithTranslation() throws Exception { + String action = "x, $ID.text+\"3242\", (*$ID).foo(21,33), 3.2+1, '\\n', "+ + "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; + String expected = + "x, (((AContext)_localctx).ID!=null?((AContext)_localctx).ID.getText():null)+\"3242\", " + + "(*((AContext)_localctx).ID).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testArguments() throws Exception { + String action = "$x; $ctx.x"; + String expected = "_localctx.x; _localctx.x"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testReturnValue() throws Exception { + String action = "$y; $ctx.y"; + String expected = "_localctx.y; _localctx.y"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testReturnValueWithNumber() throws Exception { + String action = "$ctx.x1"; + String expected = "_localctx.x1"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testReturnValuesCurrentRule() throws Exception { + String action = "$y; $ctx.y;"; + String expected = "_localctx.y; _localctx.y;"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testReturnValues() throws Exception { + String action = "$lab.e; $b.e; $y.e = \"\";"; + String expected = "((AContext)_localctx).lab.e; ((AContext)_localctx).b.e; _localctx.y.e = \"\";"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testReturnWithMultipleRuleRefs() throws Exception { + String action = "$c.x; $c.y;"; + String expected = "((AContext)_localctx).c.x; ((AContext)_localctx).c.y;"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testTokenRefs() throws Exception { + String action = "$id; $ID; $id.text; $id.getText(); $id.line;"; + String expected = "((AContext)_localctx).id; 
((AContext)_localctx).ID; (((AContext)_localctx).id!=null?((AContext)_localctx).id.getText():null); ((AContext)_localctx).id.getText(); (((AContext)_localctx).id!=null?((AContext)_localctx).id.getLine():0);"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testRuleRefs() throws Exception { + String action = "$lab.start; $c.text;"; + String expected = "(((AContext)_localctx).lab!=null?(((AContext)_localctx).lab.start):null); (((AContext)_localctx).c!=null?_input.getText(((AContext)_localctx).c.start,((AContext)_localctx).c.stop):null);"; + testActions(attributeTemplate, "inline", action, expected); + } + + @Test public void testRefToTextAttributeForCurrentRule() throws Exception { + String action = "$ctx.text; $text"; + + // this is the expected translation for all cases + String expected = + "_localctx.text; _input.getText(_localctx.start, _input.LT(-1))"; + + testActions(attributeTemplate, "init", action, expected); + testActions(attributeTemplate, "inline", action, expected); + testActions(attributeTemplate, "finally", action, expected); + } + + @Test public void testDynamicRuleScopeRefInSubrule() throws Exception { + String action = "$a::n;"; + } + @Test public void testRuleScopeFromAnotherRule() throws Exception { + String action = "$a::n;"; // must be qualified + } + @Test public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception { + String action = "$a.i;"; + } + @Test public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception { + String action = "$a.i;"; + } + @Test public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception { + String action = "$a.i = 1;"; + } + @Test public void testIsolatedRefToCurrentRule() throws Exception { + String action = "$a;"; + } + @Test public void testIsolatedRefToRule() throws Exception { + String action = "$x;"; + } + @Test public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception { + String action = "$a.x;"; + } + @Test public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception { + String action = "$a.x;"; // must be qualified + } + @Test public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception { + String action = "$a.st;"; // can be qualified + } + @Test public void testRuleRefWhenRuleHasScope() throws Exception { + String action = "$b.start;"; + } + @Test public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception { + String action = "$b::n;"; + } + @Test public void testRefToTemplateAttributeForCurrentRule() throws Exception { + String action = "$st=null;"; + } + + @Test public void testRefToStartAttributeForCurrentRule() throws Exception { + String action = "$start;"; + } + + @Test public void testTokenLabelFromMultipleAlts() throws Exception { + String action = "$ID.text;"; // must be qualified + } + @Test public void testRuleLabelFromMultipleAlts() throws Exception { + String action = "$b.text;"; // must be qualified + } + @Test public void testUnqualifiedRuleScopeAttribute() throws Exception { + String action = "$n;"; // must be qualified + } + @Test public void testRuleAndTokenLabelTypeMismatch() throws Exception { + } + @Test public void testListAndTokenLabelTypeMismatch() throws Exception { + } + @Test public void testListAndRuleLabelTypeMismatch() throws Exception { + } + @Test public void testArgReturnValueMismatch() throws Exception { + } + @Test public void testSimplePlusEqualLabel() throws Exception { + String action = "$ids.size();"; // must be qualified + } + @Test public 
void testPlusEqualStringLabel() throws Exception { + String action = "$ids.size();"; // must be qualified + } + @Test public void testPlusEqualSetLabel() throws Exception { + String action = "$ids.size();"; // must be qualified + } + @Test public void testPlusEqualWildcardLabel() throws Exception { + String action = "$ids.size();"; // must be qualified + } + @Test public void testImplicitTokenLabel() throws Exception { + String action = "$ID; $ID.text; $ID.getText()"; + } + + @Test public void testImplicitRuleLabel() throws Exception { + String action = "$r.start;"; + } + + @Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception { + String action = "$r.start;"; + } + + @Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception { + String action = "$r.start;"; + } + + @Test public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception { + String action = "$ID.text;"; + } + + @Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception { + String action = "$ID.text;"; + } + + @Test public void testRuleLabelWithoutOutputOption() throws Exception { + } + @Test public void testMissingArgs() throws Exception { + } + @Test public void testArgsWhenNoneDefined() throws Exception { + } + @Test public void testReturnInitValue() throws Exception { + } + @Test public void testMultipleReturnInitValue() throws Exception { + } + @Test public void testCStyleReturnInitValue() throws Exception { + } + @Test public void testArgsWithInitValues() throws Exception { + } + @Test public void testArgsOnToken() throws Exception { + } + @Test public void testArgsOnTokenInLexer() throws Exception { + } + @Test public void testLabelOnRuleRefInLexer() throws Exception { + String action = "$i.text"; + } + + @Test public void testRefToRuleRefInLexer() throws Exception { + String action = "$ID.text"; + } + + @Test public void testRefToRuleRefInLexerNoAttribute() throws Exception { + String action = "$ID"; + } + + @Test public void testCharLabelInLexer() throws Exception { + } + @Test public void testCharListLabelInLexer() throws Exception { + } + @Test public void testWildcardCharLabelInLexer() throws Exception { + } + @Test public void testWildcardCharListLabelInLexer() throws Exception { + } + @Test public void testMissingArgsInLexer() throws Exception { + } + @Test public void testLexerRulePropertyRefs() throws Exception { + String action = "$text $type $line $pos $channel $index $start $stop"; + } + + @Test public void testLexerLabelRefs() throws Exception { + String action = "$a $b.text $c $d.text"; + } + + @Test public void testSettingLexerRulePropertyRefs() throws Exception { + String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index"; + } + + @Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception { + } + @Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception { + } + @Test public void testTokenLabelTreeProperty() throws Exception { + String action = "$id.tree;"; + } + + @Test public void testTokenRefTreeProperty() throws Exception { + String action = "$ID.tree;"; + } + + @Test public void testAmbiguousTokenRef() throws Exception { + String action = "$ID;"; + } + + @Test public void testAmbiguousTokenRefWithProp() throws Exception { + String action = "$ID.text;"; + } + + @Test public void testRuleRefWithDynamicScope() throws Exception { + String action = "$field::x = $field.st;"; + } + + @Test public void testAssignToOwnRulenameAttr() throws Exception { + String action = 
"$rule.tree = null;"; + } + + @Test public void testAssignToOwnParamAttr() throws Exception { + String action = "$rule.i = 42; $i = 23;"; + } + + @Test public void testIllegalAssignToOwnRulenameAttr() throws Exception { + String action = "$rule.stop = 0;"; + } + + @Test public void testIllegalAssignToLocalAttr() throws Exception { + String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;"; + } + + @Test public void testIllegalAssignRuleRefAttr() throws Exception { + String action = "$other.tree = null;"; + } + + @Test public void testIllegalAssignTokenRefAttr() throws Exception { + String action = "$ID.text = \"test\";"; + } + + @Test public void testAssignToTreeNodeAttribute() throws Exception { + String action = "$tree.scope = localScope;"; + } + + @Test public void testDoNotTranslateAttributeCompare() throws Exception { + String action = "$a.line == $b.line"; + } + + @Test public void testDoNotTranslateScopeAttributeCompare() throws Exception { + String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }"; + } + + @Test public void testTreeRuleStopAttributeIsInvalid() throws Exception { + String action = "$r.x; $r.start; $r.stop"; + } + + @Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception { + String action = "$text"; + } + + @Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception { + String action = "int x = $b::n;"; + } + + @Test public void testBracketArgParsing() throws Exception { + } + + @Test public void testStringArgParsing() throws Exception { + String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19"; + } + @Test public void testComplicatedSingleArgParsing() throws Exception { + String action = "(*a).foo(21,33,\",\")"; + } + @Test public void testArgWithLT() throws Exception { + String action = "34<50"; + } + @Test public void testGenericsAsArgumentDefinition() throws Exception { + String action = "$foo.get(\"ick\");"; + } + @Test public void testGenericsAsArgumentDefinition2() throws Exception { + String action = "$foo.get(\"ick\"); x=3;"; + } + @Test public void testGenericsAsReturnValue() throws Exception { + } + + // TODO: nonlocal $rule::x +} diff --git a/tool/test/org/antlr/v4/test/tool/TestAttributeChecks.java b/tool/test/org/antlr/v4/test/tool/TestAttributeChecks.java new file mode 100644 index 000000000..c2b02c05b --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestAttributeChecks.java @@ -0,0 +1,273 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.runtime.RecognitionException; +import org.antlr.v4.tool.ErrorType; +import org.junit.Test; +import org.stringtemplate.v4.ST; + +/** */ +public class TestAttributeChecks extends BaseTest { + String attributeTemplate = + "parser grammar A;\n"+ + "@members {}\n" + + "tokens{ID}\n" + + "a[int x] returns [int y]\n" + + "@init {}\n" + + " : id=ID ids+=ID lab=b[34] labs+=b[34] {\n" + + " \n" + + " }\n" + + " c\n" + + " ;\n" + + " finally {}\n" + + "b[int d] returns [int e]\n" + + " : {}\n" + + " ;\n" + + "c : ;\n"; + + String[] membersChecks = { + "$a", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:2:11: unknown attribute reference a in $a\n", + "$a.y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:2:11: unknown attribute reference a in $a.y\n", + }; + + String[] initChecks = { + "$text", "", + "$start", "", + "$x = $y", "", + "$y = $x", "", + "$lab.e", "", + "$ids", "", + "$labs", "", + + "$c", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:5:8: unknown attribute reference c in $c\n", + "$a.q", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:5:10: unknown attribute q for rule a in $a.q\n", + }; + + String[] inlineChecks = { + "$text", "", + "$start", "", + "$x = $y", "", + "$y = $x", "", + "$y.b = 3;", "", + "$ctx.x = $ctx.y", "", + "$lab.e", "", + "$lab.text", "", + "$b.e", "", + "$c.text", "", + "$ID", "", + "$ID.text", "", + "$id", "", + "$id.text", "", + "$ids", "", + "$labs", "", + }; + + String[] bad_inlineChecks = { + "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference lab in $lab\n", + "$q", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q\n", + "$q.y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q.y\n", + "$q = 3", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q\n", + "$q = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q = 3;\n", + "$q.y = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q.y\n", + "$q = $blort;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q = $blort;\n" + + "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:9: unknown attribute reference blort in $blort\n", + "$a.ick", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n", + "$a.ick = 3;", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n", + "$b.d", "error(" + ErrorType.INVALID_RULE_PARAMETER_REF.code + "): A.g4:7:6: parameter d of rule b is not accessible in this scope: $b.d\n", // cant see rule refs arg + "$d.text", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): 
A.g4:7:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref + "$lab.d", "error(" + ErrorType.INVALID_RULE_PARAMETER_REF.code + "): A.g4:7:8: parameter d of rule b is not accessible in this scope: $lab.d\n", + "$ids = null;", "error(" + ErrorType.ASSIGNMENT_TO_LIST_LABEL.code + "): A.g4:7:4: cannot assign a value to list label ids\n", + "$labs = null;","error(" + ErrorType.ASSIGNMENT_TO_LIST_LABEL.code + "): A.g4:7:4: cannot assign a value to list label labs\n", + }; + + String[] finallyChecks = { + "$text", "", + "$start", "", + "$x = $y", "", + "$y = $x", "", + "$lab.e", "", + "$lab.text", "", + "$id", "", + "$id.text", "", + "$ids", "", + "$labs", "", + + "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:10:14: missing attribute access on rule reference lab in $lab\n", + "$q", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q\n", + "$q.y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q.y\n", + "$q = 3", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q\n", + "$q = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q = 3;\n", + "$q.y = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q.y\n", + "$q = $blort;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q = $blort;\n" + + "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:19: unknown attribute reference blort in $blort\n", + "$a.ick", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n", + "$a.ick = 3;", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n", + "$b.e", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference b in $b.e\n", // cant see rule refs outside alts + "$b.d", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference b in $b.d\n", + "$c.text", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference c in $c.text\n", + "$lab.d", "error(" + ErrorType.INVALID_RULE_PARAMETER_REF.code + "): A.g4:10:18: parameter d of rule b is not accessible in this scope: $lab.d\n", + }; + + String[] dynMembersChecks = { + "$S", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:2:11: unknown attribute reference S in $S\n", + "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::i\n", + "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::i\n" + + "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:17: reference to undefined rule S in non-local ref $S::i\n", + + "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:2:14: unknown attribute f for rule b in $b::f\n", + "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::j\n", + "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::j = 3;\n", + "$S::j = $S::k;", "error(" + 
ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::j = $S::k;\n", + }; + + String[] dynInitChecks = { + "$a", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:5:8: missing attribute access on rule reference a in $a\n", + "$b", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:5:8: unknown attribute reference b in $b\n", + "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:5:8: missing attribute access on rule reference lab in $lab\n", + "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:5:11: unknown attribute f for rule b in $b::f\n", + "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::i\n", + "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::i\n" + + "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:14: reference to undefined rule S in non-local ref $S::i\n", + "$a::z", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:5:11: unknown attribute z for rule a in $a::z\n", + "$S", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:5:8: unknown attribute reference S in $S\n", + + "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::j\n", + "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::j = 3;\n", + "$S::j = $S::k;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::j = $S::k;\n", + }; + + String[] dynInlineChecks = { + "$a", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference a in $a\n", + "$b", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference b in $b\n", + "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference lab in $lab\n", + "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:7: unknown attribute f for rule b in $b::f\n", + "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::i\n", + "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::i\n" + + "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:10: reference to undefined rule S in non-local ref $S::i\n", + "$a::z", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:7: unknown attribute z for rule a in $a::z\n", + + "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::j\n", + "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::j = 3;\n", + "$S::j = $S::k;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::j = $S::k;\n", + "$Q[-1]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[-i]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[i]::y", 
"error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[0]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[-1]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[-i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$Q[0]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", + "$S[-1]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[-i]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[i]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[0]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[-1]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[-i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[0]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", + "$S[$S::y]::i", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n" + + "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:7: reference to undefined rule S in non-local ref $S::y\n" + }; + + String[] dynFinallyChecks = { + "$a", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:10:14: missing attribute access on rule reference a in $a\n", + "$b", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference b in $b\n", + "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:10:14: missing attribute access on rule reference lab in $lab\n", + "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:17: unknown attribute f for rule b in $b::f\n", + "$S", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference S in $S\n", + "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::i\n", + "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::i\n" + + "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:20: reference to undefined rule S in non-local ref $S::i\n", + "$a::z", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:17: unknown attribute z for rule a in $a::z\n", + + "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::j\n", + "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::j = 3;\n", + "$S::j = $S::k;", "error(" + 
ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::j = $S::k;\n", + }; + + @Test public void testMembersActions() throws RecognitionException { + testActions("members", membersChecks, attributeTemplate); + } + + @Test public void testDynamicMembersActions() throws RecognitionException { + testActions("members", dynMembersChecks, attributeTemplate); + } + + @Test public void testInitActions() throws RecognitionException { + testActions("init", initChecks, attributeTemplate); + } + + @Test public void testDynamicInitActions() throws RecognitionException { + testActions("init", dynInitChecks, attributeTemplate); + } + + @Test public void testInlineActions() throws RecognitionException { + testActions("inline", inlineChecks, attributeTemplate); + } + + @Test public void testDynamicInlineActions() throws RecognitionException { + testActions("inline", dynInlineChecks, attributeTemplate); + } + + @Test public void testBadInlineActions() throws RecognitionException { + testActions("inline", bad_inlineChecks, attributeTemplate); + } + + @Test public void testFinallyActions() throws RecognitionException { + testActions("finally", finallyChecks, attributeTemplate); + } + + @Test public void testDynamicFinallyActions() throws RecognitionException { + testActions("finally", dynFinallyChecks, attributeTemplate); + } + + @Test public void testTokenRef() throws RecognitionException { + String grammar = + "parser grammar S;\n" + + "tokens{ID}\n" + + "a : x=ID {Token t = $x; t = $ID;} ;\n"; + String expected = + ""; + testErrors(new String[] {grammar, expected}, false); + } + + public void testActions(String location, String[] pairs, String template) { + for (int i = 0; i < pairs.length; i+=2) { + String action = pairs[i]; + String expected = pairs[i+1]; + ST st = new ST(template); + st.add(location, action); + String grammar = st.render(); + testErrors(new String[] {grammar, expected}, false); + } + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java b/tool/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java new file mode 100644 index 000000000..4d4b85d2c --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java @@ -0,0 +1,117 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.tool.ErrorType; +import org.junit.Test; +import org.stringtemplate.v4.ST; + +public class TestBasicSemanticErrors extends BaseTest { + static String[] U = { + // INPUT + "parser grammar U;\n" + + "options { foo=bar; k=3;}\n" + + "tokens {\n" + + " ID,\n" + + " f,\n" + + " S\n" + + "}\n" + + "tokens { A }\n" + + "options { x=y; }\n" + + "\n" + + "a\n" + + "options { blech=bar; greedy=true; }\n" + + " : ID\n" + + " ;\n" + + "b : ( options { ick=bar; greedy=true; } : ID )+ ;\n" + + "c : ID ID ;", + // YIELDS + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:2:10: unsupported option foo\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:2:19: unsupported option k\n" + + "error(" + ErrorType.TOKEN_NAMES_MUST_START_UPPER.code + "): U.g4:5:8: token names must start with an uppercase letter: f\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:9:10: unsupported option x\n" + + "error(" + ErrorType.REPEATED_PREQUEL.code + "): U.g4:9:0: repeated grammar prequel spec (options, tokens, or import); please merge\n" + + "error(" + ErrorType.REPEATED_PREQUEL.code + "): U.g4:8:0: repeated grammar prequel spec (options, tokens, or import); please merge\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:12:10: unsupported option blech\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:12:21: unsupported option greedy\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:15:16: unsupported option ick\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:15:25: unsupported option greedy\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:16:16: unsupported option x\n", + }; + + @Test public void testU() { super.testErrors(U, false); } + + /** + * Regression test for #25 "Don't allow labels on not token set subrules". 
+ * https://github.com/antlr/antlr4/issues/25 + */ + @Test + public void testIllegalNonSetLabel() throws Exception { + String grammar = + "grammar T;\n" + + "ss : op=('=' | '+=' | expr) EOF;\n" + + "expr : '=' '=';\n" + + ""; + + String expected = + "error(" + ErrorType.LABEL_BLOCK_NOT_A_SET.code + "): T.g4:2:5: label op assigned to a block which is not a set\n"; + + testErrors(new String[] { grammar, expected }, false); + } + + @Test + public void testArgumentRetvalLocalConflicts() throws Exception { + String grammarTemplate = + "grammar T;\n" + + "ss[] returns []\n" + + "locals []\n" + + " : EOF;\n" + + "expr : '=';\n"; + + String expected = + "error(" + ErrorType.ARG_CONFLICTS_WITH_RULE.code + "): T.g4:2:7: parameter expr conflicts with rule with same name\n" + + "error(" + ErrorType.RETVAL_CONFLICTS_WITH_RULE.code + "): T.g4:2:26: return value expr conflicts with rule with same name\n" + + "error(" + ErrorType.LOCAL_CONFLICTS_WITH_RULE.code + "): T.g4:3:12: local expr conflicts with rule with same name\n" + + "error(" + ErrorType.RETVAL_CONFLICTS_WITH_ARG.code + "): T.g4:2:26: return value expr conflicts with parameter with same name\n" + + "error(" + ErrorType.LOCAL_CONFLICTS_WITH_ARG.code + "): T.g4:3:12: local expr conflicts with parameter with same name\n" + + "error(" + ErrorType.LOCAL_CONFLICTS_WITH_RETVAL.code + "): T.g4:3:12: local expr conflicts with return value with same name\n" + + "error(" + ErrorType.LABEL_CONFLICTS_WITH_RULE.code + "): T.g4:4:4: label expr conflicts with rule with same name\n" + + "error(" + ErrorType.LABEL_CONFLICTS_WITH_ARG.code + "): T.g4:4:4: label expr conflicts with parameter with same name\n" + + "error(" + ErrorType.LABEL_CONFLICTS_WITH_RETVAL.code + "): T.g4:4:4: label expr conflicts with return value with same name\n" + + "error(" + ErrorType.LABEL_CONFLICTS_WITH_LOCAL.code + "): T.g4:4:4: label expr conflicts with local with same name\n"; + ST grammarST = new ST(grammarTemplate); + grammarST.add("args", "int expr"); + grammarST.add("retvals", "int expr"); + grammarST.add("locals", "int expr"); + grammarST.add("body", "expr=expr"); + testErrors(new String[] { grammarST.render(), expected }, false); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java b/tool/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java new file mode 100644 index 000000000..70c29f9e5 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java @@ -0,0 +1,180 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.BufferedTokenStream; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.LexerInterpreter; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestBufferedTokenStream extends BaseTest { + + protected TokenStream createTokenStream(TokenSource src) { + return new BufferedTokenStream(src); + } + + @Test public void testFirstToken() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TokenStream tokens = createTokenStream(lexEngine); + + String result = tokens.LT(1).getText(); + String expecting = "x"; + assertEquals(expecting, result); + } + + @Test public void test2ndToken() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TokenStream tokens = createTokenStream(lexEngine); + + String result = tokens.LT(2).getText(); + String expecting = " "; + assertEquals(expecting, result); + } + + @Test public void testCompleteBuffer() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TokenStream tokens = createTokenStream(lexEngine); + + int i = 1; + Token t = tokens.LT(i); + while ( t.getType()!=Token.EOF ) { + i++; + t = tokens.LT(i); + } + tokens.LT(i++); // push it past end + tokens.LT(i++); + + String result = tokens.getText(); + String expecting = "x = 3 * 0 + 2 * 0;"; + assertEquals(expecting, result); + } + + @Test public void testCompleteBufferAfterConsuming() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + CharStream 
input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TokenStream tokens = createTokenStream(lexEngine); + + Token t = tokens.LT(1); + while ( t.getType()!=Token.EOF ) { + tokens.consume(); + t = tokens.LT(1); + } + + String result = tokens.getText(); + String expecting = "x = 3 * 0 + 2 * 0;"; + assertEquals(expecting, result); + } + + @Test public void testLookback() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TokenStream tokens = createTokenStream(lexEngine); + + tokens.consume(); // get x into buffer + Token t = tokens.LT(-1); + assertEquals("x", t.getText()); + + tokens.consume(); + tokens.consume(); // consume '=' + t = tokens.LT(-3); + assertEquals("x", t.getText()); + t = tokens.LT(-2); + assertEquals(" ", t.getText()); + t = tokens.LT(-1); + assertEquals("=", t.getText()); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java b/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java new file mode 100644 index 000000000..32f76266c --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java @@ -0,0 +1,162 @@ +/* + * [The "BSD license"] + * Copyright (c) 2014 Terence Parr + * Copyright (c) 2014 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +package org.antlr.v4.test.tool; + +import org.antlr.runtime.RecognitionException; +import org.antlr.v4.automata.ATNFactory; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.codegen.CodeGenerator; +import org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; +import org.stringtemplate.v4.AutoIndentWriter; +import org.stringtemplate.v4.InstanceScope; +import org.stringtemplate.v4.Interpreter; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.STWriter; +import org.stringtemplate.v4.misc.ErrorManager; +import org.stringtemplate.v4.misc.ErrorType; + +import java.io.IOException; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertFalse; + +public class TestCodeGeneration extends BaseTest { + @Test public void testArgDecl() throws Exception { // should use template not string + ErrorQueue equeue = new ErrorQueue(); + String g = + "grammar T;\n" + + "a[int xyz] : 'a' ;\n"; + List<String> evals = getEvalInfoForString(g, "int xyz"); + System.out.println(evals); + for (int i = 0; i < evals.size(); i++) { + String eval = evals.get(i); + assertFalse("eval should not be POJO: "+eval, eval.startsWith("<pojo>")); + } + } + + public static class DebugInterpreter extends Interpreter { + List<String> evals = new ArrayList<String>(); + ErrorManager myErrMgrCopy; + int tab = 0; + public DebugInterpreter(STGroup group, ErrorManager errMgr, boolean debug) { + super(group, errMgr, debug); + myErrMgrCopy = errMgr; + } + + @Override + protected int writeObject(STWriter out, InstanceScope scope, Object o, String[] options) { + if ( o instanceof ST ) { + String name = ((ST)o).getName(); + name = name.substring(1); + if ( !name.startsWith("_sub") ) { + try { + out.write("<ST:" + name + ">"); + evals.add("<ST:" + name + ">"); + int r = super.writeObject(out, scope, o, options); + out.write("</ST:" + name + ">"); + evals.add("</ST:" + name + ">"); + return r; + } catch (IOException ioe) { + myErrMgrCopy.IOError(scope.st, ErrorType.WRITE_IO_ERROR, ioe); + } + } + } + return super.writeObject(out, scope, o, options); + } + + @Override + protected int writePOJO(STWriter out, InstanceScope scope, Object o, String[] options) throws IOException { + Class<?> type = o.getClass(); + String name = type.getSimpleName(); + out.write("<pojo:"+name+">"+o.toString()+"</pojo:"+name+">"); + evals.add("<pojo:"+name+">" + o.toString() + "</pojo:"+name+">"); + return super.writePOJO(out, scope, o, options); + } + + public void indent(STWriter out) throws IOException { + for (int i=1; i<=tab; i++) { + out.write("\t"); + } + } + } + + public List<String> getEvalInfoForString(String grammarString, String pattern) throws RecognitionException { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(grammarString); + List<String> evals = new ArrayList<String>(); + if ( g.ast!=null && !g.ast.hasErrors ) { + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + + ATNFactory factory = new ParserATNFactory(g); + if (g.isLexer()) factory = new LexerATNFactory((LexerGrammar) g); + g.atn = factory.createATN(); + + CodeGenerator gen = new CodeGenerator(g); + ST outputFileST = gen.generateParser(); + +// STViz viz = outputFileST.inspect(); +// try { +// viz.waitForClose(); +// } +// catch (Exception e) { +// e.printStackTrace(); +// } + + boolean debug = false; + DebugInterpreter interp = + new DebugInterpreter(outputFileST.groupThatCreatedThisInstance, + outputFileST.impl.nativeGroup.errMgr, + debug); + InstanceScope scope = new InstanceScope(null, outputFileST); + StringWriter sw = new StringWriter(); + 
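// interpret the generated parser template with the debug interpreter; its evals + // list records an <ST:...> or <pojo:...> marker for each template or value written + 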
AutoIndentWriter out = new AutoIndentWriter(sw); + interp.exec(out, scope); + + for (String e : interp.evals) { + if (e.contains(pattern)) { + evals.add(e); + } + } + } + if ( equeue.size()>0 ) { + System.err.println(equeue.toString()); + } + return evals; + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java b/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java new file mode 100644 index 000000000..02f3688ee --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java @@ -0,0 +1,309 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonToken; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.IntStream; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenFactory; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.WritableToken; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestCommonTokenStream extends TestBufferedTokenStream { + + @Override + protected TokenStream createTokenStream(TokenSource src) { + return new CommonTokenStream(src); + } + + @Test public void testOffChannel() throws Exception { + TokenSource lexer = // simulate input " x =34 ;\n" + new TokenSource() { + int i = 0; + WritableToken[] tokens = { + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, + new CommonToken(1,"x"), + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, + new CommonToken(1,"="), + new CommonToken(1,"34"), + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, + new CommonToken(1,";"), + new CommonToken(1,"\n") {{channel = Lexer.HIDDEN;}}, + new CommonToken(Token.EOF,"") + }; + @Override + public Token nextToken() { + return tokens[i++]; + } + @Override + public String getSourceName() { return "test"; } + @Override + public int getCharPositionInLine() { + return 0; + } + @Override + public int getLine() { + return 0; + } + @Override + public CharStream getInputStream() { + return null; + } + + @Override + public void setTokenFactory(TokenFactory factory) { + } + + @Override + public TokenFactory getTokenFactory() { + return null; + } + }; + + CommonTokenStream tokens = new CommonTokenStream(lexer); + + assertEquals("x", tokens.LT(1).getText()); // must skip first off channel token + tokens.consume(); + assertEquals("=", tokens.LT(1).getText()); + assertEquals("x", tokens.LT(-1).getText()); + + tokens.consume(); + assertEquals("34", tokens.LT(1).getText()); + assertEquals("=", tokens.LT(-1).getText()); + + tokens.consume(); + assertEquals(";", tokens.LT(1).getText()); + assertEquals("34", tokens.LT(-1).getText()); + + tokens.consume(); + assertEquals(Token.EOF, tokens.LA(1)); + assertEquals(";", tokens.LT(-1).getText()); + + assertEquals("34", tokens.LT(-2).getText()); + assertEquals("=", tokens.LT(-3).getText()); + assertEquals("x", tokens.LT(-4).getText()); + } + + @Test public void testFetchOffChannel() throws Exception { + TokenSource lexer = // simulate input " x =34 ; \n" + // token indexes 01234 56789 + new TokenSource() { + int i = 0; + WritableToken[] tokens = { + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 0 + new CommonToken(1,"x"), // 1 + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 2 + new CommonToken(1,"="), // 3 + new CommonToken(1,"34"), // 4 + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 5 + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 6 + new CommonToken(1,";"), // 7 + new CommonToken(1," ") {{channel = Lexer.HIDDEN;}},// 8 + new CommonToken(1,"\n") {{channel = Lexer.HIDDEN;}},// 9 + new CommonToken(Token.EOF,"") // 10 + }; + @Override + public Token nextToken() { + return tokens[i++]; + } + @Override + public String getSourceName() { return "test"; } + @Override + public int getCharPositionInLine() { + return 0; + } + @Override + public int getLine() { + return 0; + } + @Override + public CharStream getInputStream() { + return null; + 
} + + @Override + public void setTokenFactory(TokenFactory factory) { + } + + @Override + public TokenFactory getTokenFactory() { + return null; + } + }; + + CommonTokenStream tokens = new CommonTokenStream(lexer); + tokens.fill(); + assertEquals(null, tokens.getHiddenTokensToLeft(0)); + assertEquals(null, tokens.getHiddenTokensToRight(0)); + + assertEquals("[[@0,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToLeft(1).toString()); + assertEquals("[[@2,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToRight(1).toString()); + + assertEquals(null, tokens.getHiddenTokensToLeft(2)); + assertEquals(null, tokens.getHiddenTokensToRight(2)); + + assertEquals("[[@2,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToLeft(3).toString()); + assertEquals(null, tokens.getHiddenTokensToRight(3)); + + assertEquals(null, tokens.getHiddenTokensToLeft(4)); + assertEquals("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToRight(4).toString()); + + assertEquals(null, tokens.getHiddenTokensToLeft(5)); + assertEquals("[[@6,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToRight(5).toString()); + + assertEquals("[[@5,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToLeft(6).toString()); + assertEquals(null, tokens.getHiddenTokensToRight(6)); + + assertEquals("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToLeft(7).toString()); + assertEquals("[[@8,0:0=' ',<1>,channel=1,0:-1], [@9,0:0='\\n',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToRight(7).toString()); + + assertEquals(null, tokens.getHiddenTokensToLeft(8)); + assertEquals("[[@9,0:0='\\n',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToRight(8).toString()); + + assertEquals("[[@8,0:0=' ',<1>,channel=1,0:-1]]", + tokens.getHiddenTokensToLeft(9).toString()); + assertEquals(null, tokens.getHiddenTokensToRight(9)); + } + + @Test + public void testSingleEOF() throws Exception { + TokenSource lexer = new TokenSource() { + + @Override + public Token nextToken() { + return new CommonToken(Token.EOF); + } + + @Override + public int getLine() { + return 0; + } + + @Override + public int getCharPositionInLine() { + return 0; + } + + @Override + public CharStream getInputStream() { + return null; + } + + @Override + public String getSourceName() { + return IntStream.UNKNOWN_SOURCE_NAME; + } + + @Override + public TokenFactory getTokenFactory() { + throw new UnsupportedOperationException("Not supported yet."); + } + + @Override + public void setTokenFactory(TokenFactory factory) { + throw new UnsupportedOperationException("Not supported yet."); + } + }; + + CommonTokenStream tokens = new CommonTokenStream(lexer); + tokens.fill(); + + assertEquals(Token.EOF, tokens.LA(1)); + assertEquals(0, tokens.index()); + assertEquals(1, tokens.size()); + } + + @Test(expected = IllegalStateException.class) + public void testCannotConsumeEOF() throws Exception { + TokenSource lexer = new TokenSource() { + + @Override + public Token nextToken() { + return new CommonToken(Token.EOF); + } + + @Override + public int getLine() { + return 0; + } + + @Override + public int getCharPositionInLine() { + return 0; + } + + @Override + public CharStream getInputStream() { + return null; + } + + @Override + public String getSourceName() { + return IntStream.UNKNOWN_SOURCE_NAME; + } + + @Override + public TokenFactory getTokenFactory() { + throw new UnsupportedOperationException("Not supported yet."); + } + + @Override + public void setTokenFactory(TokenFactory factory) { 
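+ // stub source produces only EOF tokens, so installing a token factory is unsupported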
+ throw new UnsupportedOperationException("Not supported yet."); + } + }; + + CommonTokenStream tokens = new CommonTokenStream(lexer); + tokens.fill(); + + assertEquals(Token.EOF, tokens.LA(1)); + assertEquals(0, tokens.index()); + assertEquals(1, tokens.size()); + tokens.consume(); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java b/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java new file mode 100644 index 000000000..f703336f3 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java @@ -0,0 +1,820 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.tool.ErrorType; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.GrammarSemanticsMessage; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class TestCompositeGrammars extends BaseTest { + protected boolean debug = false; + + @Test public void testImportFileLocationInSubdir() throws Exception { + String slave = + "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");} ;\n"; + mkdir(tmpdir); + String subdir = tmpdir + "/sub"; + mkdir(subdir); + writeFile(subdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "M.g4", master); + ErrorQueue equeue = antlr("M.g4", false, "-lib", subdir); + assertEquals(equeue.size(), 0); + } + + @Test public void testImportFileNotSearchedForInOutputDir() throws Exception { + String slave = + "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");} ;\n"; + mkdir(tmpdir); + String outdir = tmpdir + "/out"; + mkdir(outdir); + writeFile(outdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "M.g4", master); + ErrorQueue equeue = antlr("M.g4", false, "-o", outdir); + assertEquals(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, equeue.errors.get(0).getErrorType()); + } + + @Test public void testOutputDirShouldNotEffectImports() throws Exception { + String slave = + "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");} ;\n"; + mkdir(tmpdir); + String subdir = tmpdir + "/sub"; + mkdir(subdir); + writeFile(subdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "M.g4", master); + String outdir = tmpdir + "/out"; + mkdir(outdir); + ErrorQueue equeue = antlr("M.g4", false, "-o", outdir, "-lib", subdir); + assertEquals(0, equeue.size()); + } + + @Test public void testTokensFileInOutputDirAndImportFileInSubdir() throws Exception { + String slave = + "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");} ;\n"; + mkdir(tmpdir); + String subdir = tmpdir + "/sub"; + mkdir(subdir); + writeFile(subdir, "S.g4", slave); + String parser = + "parser grammar MParser;\n" + + "import S;\n" + + "options {tokenVocab=MLexer;}\n" + + "s : a ;\n"; + writeFile(tmpdir, "MParser.g4", parser); + String lexer = + "lexer grammar MLexer;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "MLexer.g4", lexer); + String outdir = tmpdir + "/out"; + mkdir(outdir); + ErrorQueue equeue = antlr("MLexer.g4", false, "-o", outdir); + assertEquals(0, equeue.size()); + equeue = antlr("MParser.g4", false, "-o", outdir, "-lib", subdir); + assertEquals(0, equeue.size()); + } + + @Test public void testDelegatorInvokesDelegateRule() throws Exception { + String slave = + "parser grammar S;\n" + + "a : B {System.out.println(\"S.a\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + 
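// rule s in M delegates to rule a inherited from S, whose action prints "S.a" + 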
assertEquals("S.a\n", found); + } + + @Test public void testBringInLiteralsFromDelegate() throws Exception { + String slave = + "parser grammar S;\n" + + "a : '=' 'a' {System.out.println(\"S.a\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "=a", debug); + assertEquals("S.a\n", found); + } + + @Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception { + // must generate something like: + // public int a(int x) throws RecognitionException { return gS.a(x); } + // in M. + String slave = + "parser grammar S;\n" + + "a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : label=a[3] {System.out.println($label.y);} ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + assertEquals("S.a1000\n", found); + } + + @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { + // must generate something like: + // public int a(int x) throws RecognitionException { return gS.a(x); } + // in M. + String slave = + "parser grammar S;\n" + + "a : B {System.out.print(\"S.a\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a {System.out.println($a.text);} ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + assertEquals("S.ab\n", found); + } + + @Test public void testDelegatorAccessesDelegateMembers() throws Exception { + String slave = + "parser grammar S;\n" + + "@parser::members {\n" + + " public void foo() {System.out.println(\"foo\");}\n" + + "}\n" + + "a : B ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + // uses no rules from the import + "import S;\n" + + "s : 'b' {foo();} ;\n" + // gS is import pointer + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + assertEquals("foo\n", found); + } + + @Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception { + String slave = + "parser grammar S;\n" + + "a : b {System.out.println(\"S.a\");} ;\n" + + "b : B ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String slave2 = + "parser grammar T;\n" + + "a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a + writeFile(tmpdir, "T.g4", slave2); + String master = + "grammar M;\n" + + "import S,T;\n" + + "s : a ;\n" + + "B : 'b' ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + assertEquals("S.a\n", found); + } + + @Test public void testDelegatesSeeSameTokenType() throws Exception { + String slave = + "parser grammar S;\n" + // A, B, C token type order + "tokens { A, B, C }\n" + + "x : A {System.out.println(\"S.x\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String slave2 = + "parser grammar T;\n" + + "tokens { C, B, A }\n" + // reverse order + "y : A {System.out.println(\"T.y\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave2); + // The lexer will 
create rules to match letters a, b, c. + // The associated token types A, B, C must have the same value + // and all import'd parsers. Since ANTLR regenerates all imports + // for use with the delegator M, it can generate the same token type + // mapping in each parser: + // public static final int C=6; + // public static final int EOF=-1; + // public static final int B=5; + // public static final int WS=7; + // public static final int A=4; + + String master = + "grammar M;\n" + + "import S,T;\n" + + "s : x y ;\n" + // matches AA, which should be "aa" + "B : 'b' ;\n" + // another order: B, A, C + "A : 'a' ;\n" + + "C : 'c' ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "aa", debug); + assertEquals("S.x\n" + + "T.y\n", found); + } + + @Test public void testDelegatesSeeSameTokenType2() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "parser grammar S;\n" + // A, B, C token type order + "tokens { A, B, C }\n" + + "x : A {System.out.println(\"S.x\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String slave2 = + "parser grammar T;\n" + + "tokens { C, B, A }\n" + // reverse order + "y : A {System.out.println(\"T.y\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave2); + + String master = + "grammar M;\n" + + "import S,T;\n" + + "s : x y ;\n" + // matches AA, which should be "aa" + "B : 'b' ;\n" + // another order: B, A, C + "A : 'a' ;\n" + + "C : 'c' ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + + String expectedTokenIDToTypeMap = "{EOF=-1, B=1, A=2, C=3, WS=4}"; + String expectedStringLiteralToTypeMap = "{'a'=2, 'b'=1, 'c'=3}"; + String expectedTypeToTokenList = "[B, A, C, WS]"; + + assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString()); + assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "aa", debug); + assertEquals("S.x\n" + + "T.y\n", found); + } + + @Test public void testCombinedImportsCombined() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "grammar S;\n" + // A, B, C token type order + "tokens { A, B, C }\n" + + "x : 'x' INT {System.out.println(\"S.x\");} ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + + String master = + "grammar M;\n" + + "import S;\n" + + "s : x INT ;\n"; + writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "x 34 9", debug); + assertEquals("S.x\n", found); + } + + @Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "parser grammar S;\n" + + "options {tokenVocab=whatever;}\n" + + "tokens { A }\n" + + "x : A {System.out.println(\"S.x\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + + String master = + "grammar M;\n" + + "import S;\n" + + "s : x ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + 
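+ // the tokenVocab option inside the imported grammar S is ignored and reported as an OPTIONS_IN_DELEGATE warning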
+ Object expectedArg = "S"; + ErrorType expectedMsgID = ErrorType.OPTIONS_IN_DELEGATE; + GrammarSemanticsMessage expectedMessage = + new GrammarSemanticsMessage(expectedMsgID, g.fileName, null, expectedArg); + checkGrammarSemanticsWarning(equeue, expectedMessage); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size()); + } + + @Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "parser grammar S;\n" + + "options {toke\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + + String master = + "grammar M;\n" + + "import S;\n" + + "s : x ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + + assertEquals(ErrorType.SYNTAX_ERROR, equeue.errors.get(0).getErrorType()); + } + + @Test public void testDelegatorRuleOverridesDelegate() throws Exception { + String slave = + "parser grammar S;\n" + + "a : b {System.out.println(\"S.a\");} ;\n" + + "b : B ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "b : 'b'|'c' ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "a", "c", debug); + assertEquals("S.a\n", found); + } + + @Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception { + String slave = + "parser grammar JavaDecl;\n" + + "type : 'int' ;\n" + + "decl : type ID ';'\n" + + " | type ID init ';' {System.out.println(\"JavaDecl: \"+$text);}\n" + + " ;\n" + + "init : '=' INT ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "JavaDecl.g4", slave); + String master = + "grammar Java;\n" + + "import JavaDecl;\n" + + "prog : decl ;\n" + + "type : 'int' | 'float' ;\n" + + "\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + // for float to work in decl, type must be overridden + String found = execParser("Java.g4", master, "JavaParser", "JavaLexer", + "prog", "float x = 3;", debug); + assertEquals("JavaDecl: floatx=3;\n", found); + } + + @Test public void testDelegatorRuleOverridesDelegates() throws Exception { + String slave = + "parser grammar S;\n" + + "a : b {System.out.println(\"S.a\");} ;\n" + + "b : 'b' ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + + String slave2 = + "parser grammar T;\n" + + "tokens { A }\n" + + "b : 'b' {System.out.println(\"T.b\");} ;\n"; + writeFile(tmpdir, "T.g4", slave2); + + String master = + "grammar M;\n" + + "import S, T;\n" + + "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "a", "c", debug); + assertEquals("M.b\n" + + "S.a\n", found); + } + // LEXER INHERITANCE + + @Test public void testLexerDelegatorInvokesDelegateRule() throws Exception { + String slave = + "lexer grammar S;\n" + + "A : 'a' {System.out.println(\"S.A\");} ;\n" + + "C : 'c' ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "lexer grammar M;\n" + + "import S;\n" + + "B : 'b' ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String expecting = + "S.A\n" + + "[@0,0:0='a',<3>,1:0]\n" + + "[@1,1:1='b',<1>,1:1]\n" + + "[@2,2:2='c',<4>,1:2]\n" + + "[@3,3:2='',<-1>,1:3]\n"; + String found = execLexer("M.g4", master, "M", "abc", debug); + assertEquals(expecting, found); + } + + @Test public void 
testLexerDelegatorRuleOverridesDelegate() throws Exception { + String slave = + "lexer grammar S;\n" + + "A : 'a' {System.out.println(\"S.A\");} ;\n" + + "B : 'b' {System.out.println(\"S.B\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "lexer grammar M;\n" + + "import S;\n" + + "A : 'a' B {System.out.println(\"M.A\");} ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execLexer("M.g4", master, "M", "ab", debug); + assertEquals("M.A\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n", found); + } + + @Test public void testKeywordVSIDOrder() throws Exception { + // rules in lexer are imported at END so rules in master override + // *and* get priority over imported rules. So importing ID doesn't + // mess up keywords in master grammar + ErrorQueue equeue = new ErrorQueue(); + String slave = + "lexer grammar S;\n" + + "ID : 'a'..'z'+ ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "a : A {System.out.println(\"M.a: \"+$A);} ;\n" + + "A : 'abc' {System.out.println(\"M.A\");} ;\n" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "a", "abc", debug); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size()); + + assertEquals("M.A\n" + + "M.a: [@0,0:2='abc',<1>,1:0]\n", found); + } + + // Make sure that M can import S that imports T. + @Test public void test3LevelImport() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "parser grammar T;\n" + + "a : T ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave); + String slave2 = + "parser grammar S;\n" + + "import T;\n" + + "a : S ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave2); + + String master = + "grammar M;\n" + + "import S;\n" + + "a : M ;\n" ; + writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + + String expectedTokenIDToTypeMap = "{EOF=-1, M=1}"; // S and T aren't imported; overridden + String expectedStringLiteralToTypeMap = "{}"; + String expectedTypeToTokenList = "[M]"; + + assertEquals(expectedTokenIDToTypeMap, + g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); + assertEquals(expectedTypeToTokenList, + realElements(g.typeToTokenList).toString()); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + + boolean ok = + rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null); + boolean expecting = true; // should be ok + assertEquals(expecting, ok); + } + + @Test public void testBigTreeOfImports() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "parser grammar T;\n" + + "tokens{T}\n" + + "x : T ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave); + slave = + "parser grammar S;\n" + + "import T;\n" + + "tokens{S}\n" + + "y : S ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + + slave = + "parser grammar C;\n" + + "tokens{C}\n" + + "i : C ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "C.g4", slave); + slave = + "parser grammar B;\n" + + "tokens{B}\n" + + "j : B ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "B.g4", slave); + slave = + "parser grammar A;\n" + + "import B,C;\n" + + "tokens{A}\n" + + "k : A ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "A.g4", slave); + + String master = + "grammar M;\n" + + "import S,A;\n" + + "tokens{M}\n" + + "a : M ;\n" ; + 
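// import tree: M imports S and A, S imports T, and A imports B and C + 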
writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + + assertEquals("[]", equeue.errors.toString()); + assertEquals("[]", equeue.warnings.toString()); + String expectedTokenIDToTypeMap = "{EOF=-1, M=1, S=2, T=3, A=4, B=5, C=6}"; + String expectedStringLiteralToTypeMap = "{}"; + String expectedTypeToTokenList = "[M, S, T, A, B, C]"; + + assertEquals(expectedTokenIDToTypeMap, + g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); + assertEquals(expectedTypeToTokenList, + realElements(g.typeToTokenList).toString()); + + boolean ok = + rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null); + boolean expecting = true; // should be ok + assertEquals(expecting, ok); + } + + @Test public void testRulesVisibleThroughMultilevelImport() throws Exception { + ErrorQueue equeue = new ErrorQueue(); + String slave = + "parser grammar T;\n" + + "x : T ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "T.g4", slave); + String slave2 = + "parser grammar S;\n" + // A, B, C token type order + "import T;\n" + + "a : S ;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave2); + + String master = + "grammar M;\n" + + "import S;\n" + + "a : M x ;\n" ; // x MUST BE VISIBLE TO M + writeFile(tmpdir, "M.g4", master); + Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + + String expectedTokenIDToTypeMap = "{EOF=-1, M=1, T=2}"; + String expectedStringLiteralToTypeMap = "{}"; + String expectedTypeToTokenList = "[M, T]"; + + assertEquals(expectedTokenIDToTypeMap, + g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); + assertEquals(expectedTypeToTokenList, + realElements(g.typeToTokenList).toString()); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + } + + @Test public void testNestedComposite() throws Exception { + // Wasn't compiling. 
http://www.antlr.org/jira/browse/ANTLR-438 + ErrorQueue equeue = new ErrorQueue(); + String gstr = + "lexer grammar L;\n" + + "T1: '1';\n" + + "T2: '2';\n" + + "T3: '3';\n" + + "T4: '4';\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "L.g4", gstr); + gstr = + "parser grammar G1;\n" + + "s: a | b;\n" + + "a: T1;\n" + + "b: T2;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "G1.g4", gstr); + + gstr = + "parser grammar G2;\n" + + "import G1;\n" + + "a: T3;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "G2.g4", gstr); + String G3str = + "grammar G3;\n" + + "import G2;\n" + + "b: T4;\n" ; + mkdir(tmpdir); + writeFile(tmpdir, "G3.g4", G3str); + + Grammar g = new Grammar(tmpdir+"/G3.g4", G3str, equeue); + + String expectedTokenIDToTypeMap = "{EOF=-1, T4=1, T3=2}"; + String expectedStringLiteralToTypeMap = "{}"; + String expectedTypeToTokenList = "[T4, T3]"; + + assertEquals(expectedTokenIDToTypeMap, + g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); + assertEquals(expectedTypeToTokenList, + realElements(g.typeToTokenList).toString()); + + assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + + boolean ok = + rawGenerateAndBuildRecognizer("G3.g4", G3str, "G3Parser", null); + boolean expecting = true; // should be ok + assertEquals(expecting, ok); + } + + @Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception { + String slave = + "parser grammar S;\n" + + "a : B {System.out.print(\"S.a\");} ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "@header{package mypackage;}\n" + + "s : a ;\n" + + "B : 'b' ;" + // defines B from inherited token space + "WS : (' '|'\\n') -> skip ;\n" ; + ErrorQueue equeue = antlr("M.g4", master, false); + int expecting = 0; // should be ok + assertEquals(expecting, equeue.errors.size()); + } + + @Test public void testImportedRuleWithAction() throws Exception { + // wasn't terminating. @after was injected into M as if it were @members + String slave = + "parser grammar S;\n" + + "a @after {int x;} : B ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ;" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + assertEquals("", found); + } + + @Test public void testImportedGrammarWithEmptyOptions() throws Exception { + String slave = + "parser grammar S;\n" + + "options {}\n" + + "a : B ;\n"; + mkdir(tmpdir); + writeFile(tmpdir, "S.g4", slave); + String master = + "grammar M;\n" + + "import S;\n" + + "s : a ;\n" + + "B : 'b' ;" + + "WS : (' '|'\\n') -> skip ;\n" ; + String found = execParser("M.g4", master, "MParser", "MLexer", + "s", "b", debug); + assertEquals("", found); + } + + /** + * This is a regression test for antlr/antlr4#248 "Including grammar with + * only fragments breaks generated lexer". 
+ * https://github.com/antlr/antlr4/issues/248 + */ + @Test public void testImportLexerWithOnlyFragmentRules() { + String slave = + "lexer grammar Unicode;\n" + + "\n" + + "fragment\n" + + "UNICODE_CLASS_Zs : '\\u0020' | '\\u00A0' | '\\u1680' | '\\u180E'\n" + + " | '\\u2000'..'\\u200A'\n" + + " | '\\u202F' | '\\u205F' | '\\u3000'\n" + + " ;\n"; + String master = + "grammar Test;\n" + + "import Unicode;\n" + + "\n" + + "program : 'test' 'test' ;\n" + + "\n" + + "WS : (UNICODE_CLASS_Zs)+ -> skip;\n"; + + mkdir(tmpdir); + writeFile(tmpdir, "Unicode.g4", slave); + String found = execParser("Test.g4", master, "TestParser", "TestLexer", "program", "test test", debug); + assertEquals("", found); + assertNull(stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#670 "exception when importing + * grammar". + * https://github.com/antlr/antlr4/issues/670 + */ + @Test + public void testImportLargeGrammar() throws Exception { + String slave = load("Java.g4", "UTF-8"); + String master = + "grammar NewJava;\n" + + "import Java;\n"; + + System.out.println("dir "+tmpdir); + mkdir(tmpdir); + writeFile(tmpdir, "Java.g4", slave); + String found = execParser("NewJava.g4", master, "NewJavaParser", "NewJavaLexer", "compilationUnit", "package Foo;", debug); + assertEquals("", found); + assertNull(stderrDuringParse); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestFastQueue.java b/tool/test/org/antlr/v4/test/tool/TestFastQueue.java new file mode 100644 index 000000000..c28c8c7d0 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestFastQueue.java @@ -0,0 +1,134 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +package org.antlr.v4.test.tool; + +import org.antlr.runtime.misc.FastQueue; +import org.junit.Test; + +import java.util.NoSuchElementException; + +import static org.junit.Assert.assertEquals; + +public class TestFastQueue { + @Test public void testQueueNoRemove() throws Exception { + FastQueue q = new FastQueue(); + q.add("a"); + q.add("b"); + q.add("c"); + q.add("d"); + q.add("e"); + String expecting = "a b c d e"; + String found = q.toString(); + assertEquals(expecting, found); + } + + @Test public void testQueueThenRemoveAll() throws Exception { + FastQueue q = new FastQueue(); + q.add("a"); + q.add("b"); + q.add("c"); + q.add("d"); + q.add("e"); + StringBuilder buf = new StringBuilder(); + while ( q.size()>0 ) { + String o = q.remove(); + buf.append(o); + if ( q.size()>0 ) buf.append(" "); + } + assertEquals("queue should be empty", 0, q.size()); + String expecting = "a b c d e"; + String found = buf.toString(); + assertEquals(expecting, found); + } + + @Test public void testQueueThenRemoveOneByOne() throws Exception { + StringBuilder buf = new StringBuilder(); + FastQueue q = new FastQueue(); + q.add("a"); + buf.append(q.remove()); + q.add("b"); + buf.append(q.remove()); + q.add("c"); + buf.append(q.remove()); + q.add("d"); + buf.append(q.remove()); + q.add("e"); + buf.append(q.remove()); + assertEquals("queue should be empty", 0, q.size()); + String expecting = "abcde"; + String found = buf.toString(); + assertEquals(expecting, found); + } + + // E r r o r s + + @Test public void testGetFromEmptyQueue() throws Exception { + FastQueue q = new FastQueue(); + String msg = null; + try { q.remove(); } + catch (NoSuchElementException nsee) { + msg = nsee.getMessage(); + } + String expecting = "queue index 0 > last index -1"; + String found = msg; + assertEquals(expecting, found); + } + + @Test public void testGetFromEmptyQueueAfterSomeAdds() throws Exception { + FastQueue q = new FastQueue(); + q.add("a"); + q.add("b"); + q.remove(); + q.remove(); + String msg = null; + try { q.remove(); } + catch (NoSuchElementException nsee) { + msg = nsee.getMessage(); + } + String expecting = "queue index 0 > last index -1"; + String found = msg; + assertEquals(expecting, found); + } + + @Test public void testGetFromEmptyQueueAfterClear() throws Exception { + FastQueue q = new FastQueue(); + q.add("a"); + q.add("b"); + q.clear(); + String msg = null; + try { q.remove(); } + catch (NoSuchElementException nsee) { + msg = nsee.getMessage(); + } + String expecting = "queue index 0 > last index -1"; + String found = msg; + assertEquals(expecting, found); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java b/tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java new file mode 100644 index 000000000..817032b21 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java @@ -0,0 +1,356 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/* + cover these cases: + dead end + single alt + single alt + preds + conflict + conflict + preds + + */ +public class TestFullContextParsing extends BaseTest { + @Test public void testAmbigYieldsCtxSensitiveDFA() { + String grammar = + "grammar T;\n"+ + "s" + + "@after {dumpDFA();}\n" + + " : ID | ID {;} ;\n" + + "ID : 'a'..'z'+ ;\n"+ + "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "abc", true); + String expecting = + "Decision 0:\n" + + "s0-ID->:s1^=>1\n"; // ctx sensitive + assertEquals(expecting, result); + assertEquals("line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", + this.stderrDuringParse); + } + + public String testCtxSensitiveDFA(String input) { + String grammar = + "grammar T;\n"+ + "s @after {dumpDFA();}\n" + + " : '$' a | '@' b ;\n" + + "a : e ID ;\n" + + "b : e INT ID ;\n" + + "e : INT | ;\n" + + "ID : 'a'..'z'+ ;\n"+ + "INT : '0'..'9'+ ;\n"+ + "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true); + } + + @Test + public void testCtxSensitiveDFA1() { + String result = testCtxSensitiveDFA("$ 34 abc"); + String expecting = + "Decision 1:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + + "line 1:2 reportContextSensitivity d=1 (e), input='34'\n", + this.stderrDuringParse); + } + + @Test + public void testCtxSensitiveDFA2() { + String result = testCtxSensitiveDFA("@ 34 abc"); + String expecting = + "Decision 1:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + + "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", + this.stderrDuringParse); + } + + @Test public void testCtxSensitiveDFATwoDiffInput() { + String grammar = + "grammar T;\n"+ + "s @after {dumpDFA();}\n" + + " : ('$' a | '@' b)+ ;\n" + + "a : e ID ;\n" + + "b : e INT ID ;\n" + + "e : INT | ;\n" + + "ID : 'a'..'z'+ ;\n"+ + "INT : '0'..'9'+ ;\n"+ + "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "$ 34 abc @ 34 abc", true); + String expecting = + "Decision 2:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" + + "line 1:2 reportContextSensitivity d=2 (e), input='34'\n" + + "line 1:14 
reportAttemptingFullContext d=2 (e), input='34abc'\n" + + "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", + this.stderrDuringParse); + } + + @Test + public void testSLLSeesEOFInLLGrammar() { + String grammar = + "grammar T;\n"+ + "s @after {dumpDFA();}\n" + + " : a ;\n" + + "a : e ID ;\n" + + "b : e INT ID ;\n" + + "e : INT | ;\n" + + "ID : 'a'..'z'+ ;\n"+ + "INT : '0'..'9'+ ;\n"+ + "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "34 abc", true); + String expecting = + "Decision 0:\n" + + "s0-INT->s1\n" + + "s1-ID->:s2^=>1\n"; // Must point at accept state + assertEquals(expecting, result); + assertEquals("line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" + + "line 1:0 reportContextSensitivity d=0 (e), input='34'\n", + this.stderrDuringParse); + } + + @Test public void testFullContextIF_THEN_ELSEParse() { + String grammar = + "grammar T;\n"+ + "s" + + "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + + "@after {dumpDFA();}\n" + + " : '{' stat* '}'" + + " ;\n" + + "stat: 'if' ID 'then' stat ('else' ID)?\n" + + " | 'return'\n" + + " ;" + + "ID : 'a'..'z'+ ;\n"+ + "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; + String input = "{ if x then return }"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + input, true); + String expecting = + "Decision 1:\n" + + "s0-'}'->:s1=>2\n"; + assertEquals(expecting, result); + assertEquals(null, this.stderrDuringParse); + + input = "{ if x then return else foo }"; + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + input, true); + expecting = + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n"; + assertEquals(expecting, result); + // Technically, this input sequence is not ambiguous because else + // uniquely predicts going into the optional subrule. else cannot + // be matched by exiting stat since that would only match '}' or + // the start of a stat. But, we are using the theory that + // SLL(1)=LL(1) and so we are avoiding full context parsing + // by declaring all else clause parsing to be ambiguous. + assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", + this.stderrDuringParse); + + input = + "{ if x then if y then return else foo }"; + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + input, true); + expecting = + "Decision 1:\n" + + "s0-'}'->:s2=>2\n" + + "s0-'else'->:s1^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", + this.stderrDuringParse); + + // should not be ambiguous because the second 'else bar' clearly + // indicates that the first else should match to the innermost if. 
+ // LL_EXACT_AMBIG_DETECTION makes us keep going to resolve + + input = + "{ if x then if y then return else foo else bar }"; + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + input, true); + expecting = + "Decision 1:\n" + + "s0-'else'->:s1^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" + + "line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", + this.stderrDuringParse); + + input = + "{ if x then return else foo\n" + + "if x then if y then return else foo }"; + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + input, true); + expecting = + "Decision 1:\n" + + "s0-'}'->:s2=>2\n" + + "s0-'else'->:s1^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", + this.stderrDuringParse); + + input = + "{ if x then return else foo\n" + + "if x then if y then return else foo }"; + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + input, true); + expecting = + "Decision 1:\n" + + "s0-'}'->:s2=>2\n" + + "s0-'else'->:s1^=>1\n"; + assertEquals(expecting, result); + assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", + this.stderrDuringParse); + } + + /** + * Tests predictions for the following case involving closures. + * http://www.antlr.org/wiki/display/~admin/2011/12/29/Flaw+in+ANTLR+v3+LL(*)+analysis+algorithm + */ + @Test + public void testLoopsSimulateTailRecursion() throws Exception { + String grammar = + "grammar T;\n" + + "prog\n" + + "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + + " : expr_or_assign*;\n" + + "expr_or_assign\n" + + " : expr '++' {System.out.println(\"fail.\");}\n" + + " | expr {System.out.println(\"pass: \"+$expr.text);}\n" + + " ;\n" + + "expr: expr_primary ('<-' ID)? 
;\n" + + "expr_primary\n" + + " : '(' ID ')'\n" + + " | ID '(' ID ')'\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + + ""; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", true); + assertEquals("pass: a(i)<-x\n", found); + + String expecting = + "line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" + + "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n"; + assertEquals(expecting, this.stderrDuringParse); + } + + @Test + public void testAmbiguityNoLoop() throws Exception { + // simpler version of testLoopsSimulateTailRecursion, no loops + String grammar = + "grammar T;\n" + + "prog\n" + + "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + + " : expr expr {System.out.println(\"alt 1\");}\n" + + " | expr\n" + + " ;\n" + + "expr: '@'\n" + + " | ID '@'\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a@", true); + assertEquals("alt 1\n", found); + + String expecting = + "line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" + + "line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" + + "line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n"; + assertEquals(expecting, this.stderrDuringParse); + } + + @Test + public void testExprAmbiguity() throws Exception { + // translated left-recursive expr rule to test ambig detection + String grammar = + "grammar T;\n" + + "s\n" + + "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + + " : expr[0] {System.out.println($expr.ctx.toStringTree(this));} ;\n" + + "\n" + + "expr[int _p]\n" + + " : ID\n" + + " ( {5 >= $_p}? '*' expr[6]\n" + + " | {4 >= $_p}? '+' expr[5]\n" + + " )*\n" + + " ;\n" + + "\n" + + "ID : [a-zA-Z]+ ; // match identifiers\n" + + "WS : [ \\t\\r\\n]+ -> skip ; // toss out whitespace\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a+b", true); + assertEquals("(expr a + (expr b))\n", found); + + String expecting = + "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n"; + assertEquals(expecting, this.stderrDuringParse); + + found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a+b*c", true); + assertEquals("(expr a + (expr b * (expr c)))\n", found); + + expecting = + "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + + "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + + "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + + "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n"; + assertEquals(expecting, this.stderrDuringParse); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestGraphNodes.java b/tool/test/org/antlr/v4/test/tool/TestGraphNodes.java new file mode 100644 index 000000000..8081afc54 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestGraphNodes.java @@ -0,0 +1,906 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.atn.ArrayPredictionContext; +import org.antlr.v4.runtime.atn.PredictionContext; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.atn.SingletonPredictionContext; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.IdentityHashMap; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + +public class TestGraphNodes { + PredictionContextCache contextCache; + + @Before + public void setUp() { + PredictionContext.globalNodeCount = 1; + contextCache = new PredictionContextCache(); + } + + public boolean rootIsWildcard() { return true; } + public boolean fullCtx() { return false; } + + @Test public void test_$_$() { + PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, + PredictionContext.EMPTY, + rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"*\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_$_$_fullctx() { + PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, + PredictionContext.EMPTY, + fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"$\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Test public void test_x_$() { + PredictionContext r = PredictionContext.merge(x(), PredictionContext.EMPTY, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"*\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_x_$_fullctx() { + PredictionContext r = PredictionContext.merge(x(), PredictionContext.EMPTY, fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|$\"];\n" + + " s1[label=\"$\"];\n" + + " s0:p0->s1[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Test public void test_$_x() { + PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, x(), 
rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"*\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_$_x_fullctx() { + PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, x(), fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|$\"];\n" + + " s1[label=\"$\"];\n" + + " s0:p0->s1[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Test public void test_a_a() { + PredictionContext r = PredictionContext.merge(a(), a(), rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_a$_ax() { + PredictionContext a1 = a(); + PredictionContext x = x(); + PredictionContext a2 = createSingleton(x, 1); + PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_a$_ax_fullctx() { + PredictionContext a1 = a(); + PredictionContext x = x(); + PredictionContext a2 = createSingleton(x, 1); + PredictionContext r = PredictionContext.merge(a1, a2, fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[shape=record, label=\"|$\"];\n" + + " s2[label=\"$\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1:p0->s2[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Test public void test_ax$_a$() { + PredictionContext x = x(); + PredictionContext a1 = createSingleton(x, 1); + PredictionContext a2 = a(); + PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_aa$_a$_$_fullCtx() { + PredictionContext empty = PredictionContext.EMPTY; + PredictionContext child1 = createSingleton(empty, 8); + PredictionContext right = PredictionContext.merge(empty, child1, false, null); + PredictionContext left = createSingleton(right, 8); + PredictionContext merged = PredictionContext.merge(left, right, false, null); + String actual = toDOTString(merged, false); + System.out.println(actual); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|$\"];\n" + + " s1[shape=record, label=\"|$\"];\n" + + " s2[label=\"$\"];\n" + + " s0:p0->s1[label=\"8\"];\n" + + " s1:p0->s2[label=\"8\"];\n" + + "}\n"; + assertEquals(expecting, actual); + } + + @Test public void test_ax$_a$_fullctx() { + PredictionContext x = x(); + PredictionContext a1 = createSingleton(x, 1); + PredictionContext a2 = a(); + PredictionContext r = PredictionContext.merge(a1, a2, 
fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[shape=record, label=\"|$\"];\n" + + " s2[label=\"$\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1:p0->s2[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Test public void test_a_b() { + PredictionContext r = PredictionContext.merge(a(), b(), rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_ax_ax_same() { + PredictionContext x = x(); + PredictionContext a1 = createSingleton(x, 1); + PredictionContext a2 = createSingleton(x, 1); + PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"1\"];\n" + + " s2[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1->s2[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_ax_ax() { + PredictionContext x1 = x(); + PredictionContext x2 = x(); + PredictionContext a1 = createSingleton(x1, 1); + PredictionContext a2 = createSingleton(x2, 1); + PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"1\"];\n" + + " s2[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1->s2[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_abx_abx() { + PredictionContext x1 = x(); + PredictionContext x2 = x(); + PredictionContext b1 = createSingleton(x1, 2); + PredictionContext b2 = createSingleton(x2, 2); + PredictionContext a1 = createSingleton(b1, 1); + PredictionContext a2 = createSingleton(b2, 1); + PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"1\"];\n" + + " s2[label=\"2\"];\n" + + " s3[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1->s2[label=\"2\"];\n" + + " s2->s3[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_abx_acx() { + PredictionContext x1 = x(); + PredictionContext x2 = x(); + PredictionContext b = createSingleton(x1, 2); + PredictionContext c = createSingleton(x2, 3); + PredictionContext a1 = createSingleton(b, 1); + PredictionContext a2 = createSingleton(c, 1); + PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[shape=record, label=\"|\"];\n" + + " s2[label=\"2\"];\n" + + " s3[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1:p0->s2[label=\"2\"];\n" + + " s1:p1->s2[label=\"3\"];\n" + + " s2->s3[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test 
public void test_ax_bx_same() { + PredictionContext x = x(); + PredictionContext a = createSingleton(x, 1); + PredictionContext b = createSingleton(x, 2); + PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s1[label=\"1\"];\n" + + " s2[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + " s1->s2[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_ax_bx() { + PredictionContext x1 = x(); + PredictionContext x2 = x(); + PredictionContext a = createSingleton(x1, 1); + PredictionContext b = createSingleton(x2, 2); + PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s1[label=\"1\"];\n" + + " s2[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + " s1->s2[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_ax_by() { + PredictionContext a = createSingleton(x(), 1); + PredictionContext b = createSingleton(y(), 2); + PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s2[label=\"2\"];\n" + + " s3[label=\"*\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s2->s3[label=\"10\"];\n" + + " s1->s3[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_a$_bx() { + PredictionContext x2 = x(); + PredictionContext a = a(); + PredictionContext b = createSingleton(x2, 2); + PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s2[label=\"2\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s2->s1[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_a$_bx_fullctx() { + PredictionContext x2 = x(); + PredictionContext a = a(); + PredictionContext b = createSingleton(x2, 2); + PredictionContext r = PredictionContext.merge(a, b, fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s2[label=\"2\"];\n" + + " s1[label=\"$\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s2->s1[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Ignore("Known inefficiency but deferring resolving the issue for now") + @Test public void test_aex_bfx() { + // TJP: this is inefficient as it leaves the top x nodes unmerged. 
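+ // That is, the expected graph below encodes the fully shared result, with a single x node reachable from both e and f; the merge as currently implemented leaves the two x parents unmerged, which is why the test is ignored above.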
+ PredictionContext x1 = x(); + PredictionContext x2 = x(); + PredictionContext e = createSingleton(x1, 5); + PredictionContext f = createSingleton(x2, 6); + PredictionContext a = createSingleton(e, 1); + PredictionContext b = createSingleton(f, 2); + PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s2[label=\"2\"];\n" + + " s3[label=\"3\"];\n" + + " s4[label=\"*\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s2->s3[label=\"6\"];\n" + + " s3->s4[label=\"9\"];\n" + + " s1->s3[label=\"5\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + // Array merges + + @Test public void test_A$_A$_fullctx() { + ArrayPredictionContext A1 = array(PredictionContext.EMPTY); + ArrayPredictionContext A2 = array(PredictionContext.EMPTY); + PredictionContext r = PredictionContext.merge(A1, A2, fullCtx(), null); + System.out.println(toDOTString(r, fullCtx())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"$\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, fullCtx())); + } + + @Test public void test_Aab_Ac() { // a,b + c + SingletonPredictionContext a = a(); + SingletonPredictionContext b = b(); + SingletonPredictionContext c = c(); + ArrayPredictionContext A1 = array(a, b); + ArrayPredictionContext A2 = array(c); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + " s0:p2->s1[label=\"3\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aa_Aa() { + SingletonPredictionContext a1 = a(); + SingletonPredictionContext a2 = a(); + ArrayPredictionContext A1 = array(a1); + ArrayPredictionContext A2 = array(a2); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aa_Abc() { // a + b,c + SingletonPredictionContext a = a(); + SingletonPredictionContext b = b(); + SingletonPredictionContext c = c(); + ArrayPredictionContext A1 = array(a); + ArrayPredictionContext A2 = array(b, c); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + " s0:p2->s1[label=\"3\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aac_Ab() { // a,c + b + SingletonPredictionContext a = a(); + SingletonPredictionContext b = b(); + SingletonPredictionContext c = c(); + ArrayPredictionContext A1 = array(a, c); + ArrayPredictionContext A2 = array(b); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + 
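// Merging the arrays {a, c} and {b} collapses everything into one array node whose return states come out in sorted order (1, 2, 3), matching the three record ports in the expected DOT output below. +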
System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + " s0:p2->s1[label=\"3\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aab_Aa() { // a,b + a + ArrayPredictionContext A1 = array(a(), b()); + ArrayPredictionContext A2 = array(a()); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aab_Ab() { // a,b + b + ArrayPredictionContext A1 = array(a(), b()); + ArrayPredictionContext A2 = array(b()); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s1[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aax_Aby() { // ax + by but in arrays + SingletonPredictionContext a = createSingleton(x(), 1); + SingletonPredictionContext b = createSingleton(y(), 2); + ArrayPredictionContext A1 = array(a); + ArrayPredictionContext A2 = array(b); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|\"];\n" + + " s2[label=\"2\"];\n" + + " s3[label=\"*\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s2->s3[label=\"10\"];\n" + + " s1->s3[label=\"9\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aax_Aay() { // ax + ay -> merged singleton a, array parent + SingletonPredictionContext a1 = createSingleton(x(), 1); + SingletonPredictionContext a2 = createSingleton(y(), 1); + ArrayPredictionContext A1 = array(a1); + ArrayPredictionContext A2 = array(a2); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[label=\"0\"];\n" + + " s1[shape=record, label=\"|\"];\n" + + " s2[label=\"*\"];\n" + + " s0->s1[label=\"1\"];\n" + + " s1:p0->s2[label=\"9\"];\n" + + " s1:p1->s2[label=\"10\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aaxc_Aayd() { // ax,c + ay,d -> merged a, array parent + SingletonPredictionContext a1 = createSingleton(x(), 1); + SingletonPredictionContext a2 = createSingleton(y(), 1); + ArrayPredictionContext A1 = array(a1, c()); + ArrayPredictionContext A2 = array(a2, d()); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s2[label=\"*\"];\n" + + " s1[shape=record, 
label=\"|\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"3\"];\n" + + " s0:p2->s2[label=\"4\"];\n" + + " s1:p0->s2[label=\"9\"];\n" + + " s1:p1->s2[label=\"10\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aaubv_Acwdx() { // au,bv + cw,dx -> [a,b,c,d]->[u,v,w,x] + SingletonPredictionContext a = createSingleton(u(), 1); + SingletonPredictionContext b = createSingleton(v(), 2); + SingletonPredictionContext c = createSingleton(w(), 3); + SingletonPredictionContext d = createSingleton(x(), 4); + ArrayPredictionContext A1 = array(a, b); + ArrayPredictionContext A2 = array(c, d); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|||\"];\n" + + " s4[label=\"4\"];\n" + + " s5[label=\"*\"];\n" + + " s3[label=\"3\"];\n" + + " s2[label=\"2\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s0:p2->s3[label=\"3\"];\n" + + " s0:p3->s4[label=\"4\"];\n" + + " s4->s5[label=\"9\"];\n" + + " s3->s5[label=\"8\"];\n" + + " s2->s5[label=\"7\"];\n" + + " s1->s5[label=\"6\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aaubv_Abvdx() { // au,bv + bv,dx -> [a,b,d]->[u,v,x] + SingletonPredictionContext a = createSingleton(u(), 1); + SingletonPredictionContext b1 = createSingleton(v(), 2); + SingletonPredictionContext b2 = createSingleton(v(), 2); + SingletonPredictionContext d = createSingleton(x(), 4); + ArrayPredictionContext A1 = array(a, b1); + ArrayPredictionContext A2 = array(b2, d); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s3[label=\"3\"];\n" + + " s4[label=\"*\"];\n" + + " s2[label=\"2\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s0:p2->s3[label=\"4\"];\n" + + " s3->s4[label=\"9\"];\n" + + " s2->s4[label=\"7\"];\n" + + " s1->s4[label=\"6\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aaubv_Abwdx() { // au,bv + bw,dx -> [a,b,d]->[u,[v,w],x] + SingletonPredictionContext a = createSingleton(u(), 1); + SingletonPredictionContext b1 = createSingleton(v(), 2); + SingletonPredictionContext b2 = createSingleton(w(), 2); + SingletonPredictionContext d = createSingleton(x(), 4); + ArrayPredictionContext A1 = array(a, b1); + ArrayPredictionContext A2 = array(b2, d); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s3[label=\"3\"];\n" + + " s4[label=\"*\"];\n" + + " s2[shape=record, label=\"|\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s0:p2->s3[label=\"4\"];\n" + + " s3->s4[label=\"9\"];\n" + + " s2:p0->s4[label=\"7\"];\n" + + " s2:p1->s4[label=\"8\"];\n" + + " s1->s4[label=\"6\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aaubv_Abvdu() { // au,bv + bv,du -> [a,b,d]->[u,v,u]; u,v shared + SingletonPredictionContext a = 
createSingleton(u(), 1); + SingletonPredictionContext b1 = createSingleton(v(), 2); + SingletonPredictionContext b2 = createSingleton(v(), 2); + SingletonPredictionContext d = createSingleton(u(), 4); + ArrayPredictionContext A1 = array(a, b1); + ArrayPredictionContext A2 = array(b2, d); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"||\"];\n" + + " s2[label=\"2\"];\n" + + " s3[label=\"*\"];\n" + + " s1[label=\"1\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s2[label=\"2\"];\n" + + " s0:p2->s1[label=\"4\"];\n" + + " s2->s3[label=\"7\"];\n" + + " s1->s3[label=\"6\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + @Test public void test_Aaubu_Acudu() { // au,bu + cu,du -> [a,b,c,d]->[u,u,u,u] + SingletonPredictionContext a = createSingleton(u(), 1); + SingletonPredictionContext b = createSingleton(u(), 2); + SingletonPredictionContext c = createSingleton(u(), 3); + SingletonPredictionContext d = createSingleton(u(), 4); + ArrayPredictionContext A1 = array(a, b); + ArrayPredictionContext A2 = array(c, d); + PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); + System.out.println(toDOTString(r, rootIsWildcard())); + String expecting = + "digraph G {\n" + + "rankdir=LR;\n" + + " s0[shape=record, label=\"|||\"];\n" + + " s1[label=\"1\"];\n" + + " s2[label=\"*\"];\n" + + " s0:p0->s1[label=\"1\"];\n" + + " s0:p1->s1[label=\"2\"];\n" + + " s0:p2->s1[label=\"3\"];\n" + + " s0:p3->s1[label=\"4\"];\n" + + " s1->s2[label=\"6\"];\n" + + "}\n"; + assertEquals(expecting, toDOTString(r, rootIsWildcard())); + } + + + // ------------ SUPPORT ------------------------- + + protected SingletonPredictionContext a() { + return createSingleton(PredictionContext.EMPTY, 1); + } + + private SingletonPredictionContext b() { + return createSingleton(PredictionContext.EMPTY, 2); + } + + private SingletonPredictionContext c() { + return createSingleton(PredictionContext.EMPTY, 3); + } + + private SingletonPredictionContext d() { + return createSingleton(PredictionContext.EMPTY, 4); + } + + private SingletonPredictionContext u() { + return createSingleton(PredictionContext.EMPTY, 6); + } + + private SingletonPredictionContext v() { + return createSingleton(PredictionContext.EMPTY, 7); + } + + private SingletonPredictionContext w() { + return createSingleton(PredictionContext.EMPTY, 8); + } + + private SingletonPredictionContext x() { + return createSingleton(PredictionContext.EMPTY, 9); + } + + private SingletonPredictionContext y() { + return createSingleton(PredictionContext.EMPTY, 10); + } + + public SingletonPredictionContext createSingleton(PredictionContext parent, int payload) { + SingletonPredictionContext a = SingletonPredictionContext.create(parent, payload); + return a; + } + + public ArrayPredictionContext array(SingletonPredictionContext... 
nodes) { + PredictionContext[] parents = new PredictionContext[nodes.length]; + int[] invokingStates = new int[nodes.length]; + for (int i=0; i<nodes.length; i++) { + parents[i] = nodes[i].parent; + invokingStates[i] = nodes[i].returnState; + } + + return new ArrayPredictionContext(parents, invokingStates); + } + + private static String toDOTString(PredictionContext context, boolean rootIsWildcard) { + StringBuilder nodes = new StringBuilder(); + StringBuilder edges = new StringBuilder(); + Map<PredictionContext, PredictionContext> visited = new IdentityHashMap<PredictionContext, PredictionContext>(); + Map<PredictionContext, Integer> contextIds = new IdentityHashMap<PredictionContext, Integer>(); + Deque<PredictionContext> workList = new ArrayDeque<PredictionContext>(); + visited.put(context, context); + contextIds.put(context, contextIds.size()); + workList.add(context); + while (!workList.isEmpty()) { + PredictionContext current = workList.pop(); + nodes.append(" s").append(contextIds.get(current)).append('['); + + if (current.size() > 1) { + nodes.append("shape=record, "); + } + + nodes.append("label=\""); + + if (current.isEmpty()) { + nodes.append(rootIsWildcard ? '*' : '$'); + } else if (current.size() > 1) { + for (int i = 0; i < current.size(); i++) { + if (i > 0) { + nodes.append('|'); + } + + nodes.append("<p").append(i).append('>'); + if (current.getReturnState(i) == PredictionContext.EMPTY_RETURN_STATE) { + nodes.append(rootIsWildcard ? '*' : '$'); + } + } + } else { + nodes.append(contextIds.get(current)); + } + + nodes.append("\"];\n"); + + if (current.isEmpty()) { + continue; + } + + for (int i = 0; i < current.size(); i++) { + if (current.getReturnState(i) == PredictionContext.EMPTY_RETURN_STATE) { + continue; + } + + if (visited.put(current.getParent(i), current.getParent(i)) == null) { + contextIds.put(current.getParent(i), contextIds.size()); + workList.push(current.getParent(i)); + } + + edges.append(" s").append(contextIds.get(current)); + if (current.size() > 1) { + edges.append(":p").append(i); + } + + edges.append("->"); + edges.append('s').append(contextIds.get(current.getParent(i))); + edges.append("[label=\"").append(current.getReturnState(i)).append("\"]"); + edges.append(";\n"); + } + } + + StringBuilder builder = new StringBuilder(); + builder.append("digraph G {\n"); + builder.append("rankdir=LR;\n"); + builder.append(nodes); + builder.append(edges); + builder.append("}\n"); + return builder.toString(); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestIntervalSet.java b/tool/test/org/antlr/v4/test/tool/TestIntervalSet.java new file mode 100644 index 000000000..d6979e34a --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestIntervalSet.java @@ -0,0 +1,453 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.misc.IntervalSet; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestIntervalSet extends BaseTest { + + /** Public default constructor used by TestRig */ + public TestIntervalSet() { + } + + @Test public void testSingleElement() throws Exception { + IntervalSet s = IntervalSet.of(99); + String expecting = "99"; + assertEquals(s.toString(), expecting); + } + + @Test public void testMin() throws Exception { + assertEquals(0, IntervalSet.COMPLETE_CHAR_SET.getMinElement()); + assertEquals(Token.EPSILON, IntervalSet.COMPLETE_CHAR_SET.or(IntervalSet.of(Token.EPSILON)).getMinElement()); + assertEquals(Token.EOF, IntervalSet.COMPLETE_CHAR_SET.or(IntervalSet.of(Token.EOF)).getMinElement()); + } + + @Test public void testIsolatedElements() throws Exception { + IntervalSet s = new IntervalSet(); + s.add(1); + s.add('z'); + s.add('\uFFF0'); + String expecting = "{1, 122, 65520}"; + assertEquals(s.toString(), expecting); + } + + @Test public void testMixedRangesAndElements() throws Exception { + IntervalSet s = new IntervalSet(); + s.add(1); + s.add('a','z'); + s.add('0','9'); + String expecting = "{1, 48..57, 97..122}"; + assertEquals(s.toString(), expecting); + } + + @Test public void testSimpleAnd() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(13,15); + String expecting = "{13..15}"; + String result = (s.and(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testRangeAndIsolatedElement() throws Exception { + IntervalSet s = IntervalSet.of('a','z'); + IntervalSet s2 = IntervalSet.of('d'); + String expecting = "100"; + String result = (s.and(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testEmptyIntersection() throws Exception { + IntervalSet s = IntervalSet.of('a','z'); + IntervalSet s2 = IntervalSet.of('0','9'); + String expecting = "{}"; + String result = (s.and(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testEmptyIntersectionSingleElements() throws Exception { + IntervalSet s = IntervalSet.of('a'); + IntervalSet s2 = IntervalSet.of('d'); + String expecting = "{}"; + String result = (s.and(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testNotSingleElement() throws Exception { + IntervalSet vocabulary = IntervalSet.of(1,1000); + vocabulary.add(2000,3000); + IntervalSet s = IntervalSet.of(50,50); + String expecting = "{1..49, 51..1000, 2000..3000}"; + String result = (s.complement(vocabulary)).toString(); + assertEquals(expecting, result); + } + + @Test public void testNotSet() throws Exception { + IntervalSet vocabulary = IntervalSet.of(1,1000); + IntervalSet s = IntervalSet.of(50,60); + s.add(5); + s.add(250,300); + String expecting = "{1..4, 6..49, 
61..249, 301..1000}"; + String result = (s.complement(vocabulary)).toString(); + assertEquals(expecting, result); + } + + @Test public void testNotEqualSet() throws Exception { + IntervalSet vocabulary = IntervalSet.of(1,1000); + IntervalSet s = IntervalSet.of(1,1000); + String expecting = "{}"; + String result = (s.complement(vocabulary)).toString(); + assertEquals(expecting, result); + } + + @Test public void testNotSetEdgeElement() throws Exception { + IntervalSet vocabulary = IntervalSet.of(1,2); + IntervalSet s = IntervalSet.of(1); + String expecting = "2"; + String result = (s.complement(vocabulary)).toString(); + assertEquals(expecting, result); + } + + @Test public void testNotSetFragmentedVocabulary() throws Exception { + IntervalSet vocabulary = IntervalSet.of(1,255); + vocabulary.add(1000,2000); + vocabulary.add(9999); + IntervalSet s = IntervalSet.of(50, 60); + s.add(3); + s.add(250,300); + s.add(10000); // this is outside range of vocab and should be ignored + String expecting = "{1..2, 4..49, 61..249, 1000..2000, 9999}"; + String result = (s.complement(vocabulary)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSubtractOfCompletelyContainedRange() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(12,15); + String expecting = "{10..11, 16..20}"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSubtractFromSetWithEOF() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + s.add(Token.EOF); + IntervalSet s2 = IntervalSet.of(12,15); + String expecting = "{, 10..11, 16..20}"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSubtractOfOverlappingRangeFromLeft() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(5,11); + String expecting = "{12..20}"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + + IntervalSet s3 = IntervalSet.of(5,10); + expecting = "{11..20}"; + result = (s.subtract(s3)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSubtractOfOverlappingRangeFromRight() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(15,25); + String expecting = "{10..14}"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + + IntervalSet s3 = IntervalSet.of(20,25); + expecting = "{10..19}"; + result = (s.subtract(s3)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSubtractOfCompletelyCoveredRange() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(1,25); + String expecting = "{}"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSubtractOfRangeSpanningMultipleRanges() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + s.add(30,40); + s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60 + IntervalSet s2 = IntervalSet.of(5,55); // covers one and touches 2nd range + String expecting = "{56..60}"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + + IntervalSet s3 = IntervalSet.of(15,55); // touches both + expecting = "{10..14, 56..60}"; + result = (s.subtract(s3)).toString(); + assertEquals(expecting, result); + } + + /** The following was broken: + {0..113, 115..65534}-{0..115, 117..65534}=116..65534 + */ + @Test public 
void testSubtractOfWackyRange() throws Exception { + IntervalSet s = IntervalSet.of(0,113); + s.add(115,200); + IntervalSet s2 = IntervalSet.of(0,115); + s2.add(117,200); + String expecting = "116"; + String result = (s.subtract(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testSimpleEquals() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(10,20); + assertEquals(s, s2); + + IntervalSet s3 = IntervalSet.of(15,55); + assertFalse(s.equals(s3)); + } + + @Test public void testEquals() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + s.add(2); + s.add(499,501); + IntervalSet s2 = IntervalSet.of(10,20); + s2.add(2); + s2.add(499,501); + assertEquals(s, s2); + + IntervalSet s3 = IntervalSet.of(10,20); + s3.add(2); + assertFalse(s.equals(s3)); + } + + @Test public void testSingleElementMinusDisjointSet() throws Exception { + IntervalSet s = IntervalSet.of(15,15); + IntervalSet s2 = IntervalSet.of(1,5); + s2.add(10,20); + String expecting = "{}"; // 15 - {1..5, 10..20} = {} + String result = s.subtract(s2).toString(); + assertEquals(expecting, result); + } + + @Test public void testMembership() throws Exception { + IntervalSet s = IntervalSet.of(15,15); + s.add(50,60); + assertTrue(!s.contains(0)); + assertTrue(!s.contains(20)); + assertTrue(!s.contains(100)); + assertTrue(s.contains(15)); + assertTrue(s.contains(55)); + assertTrue(s.contains(50)); + assertTrue(s.contains(60)); + } + + // {2,15,18} & 10..20 + @Test public void testIntersectionWithTwoContainedElements() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(2,2); + s2.add(15); + s2.add(18); + String expecting = "{15, 18}"; + String result = (s.and(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testIntersectionWithTwoContainedElementsReversed() throws Exception { + IntervalSet s = IntervalSet.of(10,20); + IntervalSet s2 = IntervalSet.of(2,2); + s2.add(15); + s2.add(18); + String expecting = "{15, 18}"; + String result = (s2.and(s)).toString(); + assertEquals(expecting, result); + } + + @Test public void testComplement() throws Exception { + IntervalSet s = IntervalSet.of(100,100); + s.add(101,101); + IntervalSet s2 = IntervalSet.of(100,102); + String expecting = "102"; + String result = (s.complement(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testComplement2() throws Exception { + IntervalSet s = IntervalSet.of(100,101); + IntervalSet s2 = IntervalSet.of(100,102); + String expecting = "102"; + String result = (s.complement(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testComplement3() throws Exception { + IntervalSet s = IntervalSet.of(1,96); + s.add(99, Lexer.MAX_CHAR_VALUE); + String expecting = "{97..98}"; + String result = (s.complement(1, Lexer.MAX_CHAR_VALUE)).toString(); + assertEquals(expecting, result); + } + + @Test public void testMergeOfRangesAndSingleValues() throws Exception { + // {0..41, 42, 43..65534} + IntervalSet s = IntervalSet.of(0,41); + s.add(42); + s.add(43,65534); + String expecting = "{0..65534}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void testMergeOfRangesAndSingleValuesReverse() throws Exception { + IntervalSet s = IntervalSet.of(43,65534); + s.add(42); + s.add(0,41); + String expecting = "{0..65534}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void 
testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception { + // 42, 10, {0..9, 11..41, 43..65534} + IntervalSet s = IntervalSet.of(42); + s.add(10); + s.add(0,9); + s.add(43,65534); + s.add(11,41); + String expecting = "{0..65534}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + /** + * This case is responsible for antlr/antlr4#153. + * https://github.com/antlr/antlr4/issues/153 + */ + @Test public void testMergeWhereAdditionMergesThreeExistingIntervals() throws Exception { + IntervalSet s = new IntervalSet(); + s.add(0); + s.add(3); + s.add(5); + s.add(0, 7); + String expecting = "{0..7}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void testMergeWithDoubleOverlap() throws Exception { + IntervalSet s = IntervalSet.of(1,10); + s.add(20,30); + s.add(5,25); // overlaps two! + String expecting = "{1..30}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void testSize() throws Exception { + IntervalSet s = IntervalSet.of(20,30); + s.add(50,55); + s.add(5,19); + String expecting = "32"; + String result = String.valueOf(s.size()); + assertEquals(expecting, result); + } + + @Test public void testToList() throws Exception { + IntervalSet s = IntervalSet.of(20,25); + s.add(50,55); + s.add(5,5); + String expecting = "[5, 20, 21, 22, 23, 24, 25, 50, 51, 52, 53, 54, 55]"; + String result = String.valueOf(s.toList()); + assertEquals(expecting, result); + } + + /** The following was broken: + {'\u0000'..'s', 'u'..'\uFFFE'} & {'\u0000'..'q', 's'..'\uFFFE'}= + {'\u0000'..'q', 's'}!!!! broken... + 'q' is 113 ascii + 'u' is 117 + */ + @Test public void testNotRIntersectionNotT() throws Exception { + IntervalSet s = IntervalSet.of(0,'s'); + s.add('u',200); + IntervalSet s2 = IntervalSet.of(0,'q'); + s2.add('s',200); + String expecting = "{0..113, 115, 117..200}"; + String result = (s.and(s2)).toString(); + assertEquals(expecting, result); + } + + @Test public void testRmSingleElement() throws Exception { + IntervalSet s = IntervalSet.of(1,10); + s.add(-3,-3); + s.remove(-3); + String expecting = "{1..10}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void testRmLeftSide() throws Exception { + IntervalSet s = IntervalSet.of(1,10); + s.add(-3,-3); + s.remove(1); + String expecting = "{-3, 2..10}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void testRmRightSide() throws Exception { + IntervalSet s = IntervalSet.of(1,10); + s.add(-3,-3); + s.remove(10); + String expecting = "{-3, 1..9}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + @Test public void testRmMiddleRange() throws Exception { + IntervalSet s = IntervalSet.of(1,10); + s.add(-3,-3); + s.remove(5); + String expecting = "{-3, 1..4, 6..10}"; + String result = s.toString(); + assertEquals(expecting, result); + } + + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java b/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java new file mode 100644 index 000000000..a14c0f8a3 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java @@ -0,0 +1,732 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.tool.ErrorType; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** */ +public class TestLeftRecursion extends BaseTest { + protected boolean debug = false; + + @Test public void testSimple() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + + "a : a ID\n" + + " | ID" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "s", "x", debug); + String expecting = "(s (a x))\n"; + assertEquals(expecting, found); + + found = execParser("T.g4", grammar, "TParser", "TLexer", + "s", "x y", debug); + expecting = "(s (a (a x) y))\n"; + assertEquals(expecting, found); + + found = execParser("T.g4", grammar, "TParser", "TLexer", + "s", "x y z", debug); + expecting = "(s (a (a (a x) y) z))\n"; + assertEquals(expecting, found); + } + + /** + * This is a regression test for "Support direct calls to left-recursive + * rules". + * https://github.com/antlr/antlr4/issues/161 + */ + @Test public void testDirectCallToLeftRecursiveRule() throws Exception { + String grammar = + "grammar T;\n" + + "a @after {System.out.println($ctx.toStringTree(this));} : a ID\n" + + " | ID" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x", debug); + String expecting = "(a x)\n"; + assertEquals(expecting, found); + + found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x y", debug); + expecting = "(a (a x) y)\n"; + assertEquals(expecting, found); + + found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x y z", debug); + expecting = "(a (a (a x) y) z)\n"; + assertEquals(expecting, found); + } + + @Test public void testSemPred() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + + "a : a {true}? 
ID\n" + + " | ID" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "s", "x y z", debug); + String expecting = "(s (a (a (a x) y) z))\n"; + assertEquals(expecting, found); + } + + @Test + public void testSemPredFailOption() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + + "a : a ID {false}?\n" + + " | ID" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "s", "x y z", debug); + String expecting = "(s (a (a x) y z))\n"; + assertEquals(expecting, found); + assertEquals("line 1:4 rule a custom message\n", stderrDuringParse); + } + + @Test public void testTernaryExpr() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow or 'a' won't match + "e : e '*' e" + + " | e '+' e" + + " | e '?' e ':' e" + + " | e '=' e" + + " | ID" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "a", "(s (e a) )", + "a+b", "(s (e (e a) + (e b)) )", + "a*b", "(s (e (e a) * (e b)) )", + "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", + "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", + "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", + "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", + "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", + }; + runTests(grammar, tests, "s"); + } + + /** + * This is a regression test for antlr/antlr4#542 "First alternative cannot + * be right-associative". + * https://github.com/antlr/antlr4/issues/542 + */ + @Test public void testTernaryExprExplicitAssociativity() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow or 'a' won't match + "e : e '*' e" + + " | e '+' e" + + " | e '?' e ':' e" + + " | e '=' e" + + " | ID" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "a", "(s (e a) )", + "a+b", "(s (e (e a) + (e b)) )", + "a*b", "(s (e (e a) * (e b)) )", + "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", + "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", + "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", + "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", + "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testExpressions() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow + "e : e '.' ID\n" + + " | e '.' 'this'\n" + + " | '-' e\n" + + " | e '*' e\n" + + " | e ('+'|'-') e\n" + + " | INT\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "a", "(s (e a) )", + "1", "(s (e 1) )", + "a-1", "(s (e (e a) - (e 1)) )", + "a.b", "(s (e (e a) . b) )", + "a.this", "(s (e (e a) . 
this) )", + "-a", "(s (e - (e a)) )", + "-a+b", "(s (e (e - (e a)) + (e b)) )", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testJavaExpressions() throws Exception { + // Generates about 7k in bytecodes for generated e_ rule; + // Well within the 64k method limit. e_primary compiles + // to about 2k in bytecodes. + // this is simplified from real java + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow + "expressionList\n" + + " : e (',' e)*\n" + + " ;\n" + + "e : '(' e ')'\n" + + " | 'this' \n" + + " | 'super'\n" + + " | INT\n" + + " | ID\n" + + " | type '.' 'class'\n" + + " | e '.' ID\n" + + " | e '.' 'this'\n" + + " | e '.' 'super' '(' expressionList? ')'\n" + + " | e '.' 'new' ID '(' expressionList? ')'\n" + + " | 'new' type ( '(' expressionList? ')' | ('[' e ']')+)\n" + + " | e '[' e ']'\n" + + " | '(' type ')' e\n" + + " | e ('++' | '--')\n" + + " | e '(' expressionList? ')'\n" + + " | ('+'|'-'|'++'|'--') e\n" + + " | ('~'|'!') e\n" + + " | e ('*'|'/'|'%') e\n" + + " | e ('+'|'-') e\n" + + " | e ('<<' | '>>>' | '>>') e\n" + + " | e ('<=' | '>=' | '>' | '<') e\n" + + " | e 'instanceof' e\n" + + " | e ('==' | '!=') e\n" + + " | e '&' e\n" + + " | e '^' e\n" + + " | e '|' e\n" + + " | e '&&' e\n" + + " | e '||' e\n" + + " | e '?' e ':' e\n" + + " |" + + " e ('='\n" + + " |'+='\n" + + " |'-='\n" + + " |'*='\n" + + " |'/='\n" + + " |'&='\n" + + " |'|='\n" + + " |'^='\n" + + " |'>>='\n" + + " |'>>>='\n" + + " |'<<='\n" + + " |'%=') e\n" + + " ;\n" + + "type: ID \n" + + " | ID '[' ']'\n" + + " | 'int'\n" + + " | 'int' '[' ']' \n" + + " ;\n" + + "ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "a|b&c", "(s (e (e a) | (e (e b) & (e c))) )", + "(a|b)&c", "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", + "a > b", "(s (e (e a) > (e b)) )", + "a >> b", "(s (e (e a) >> (e b)) )", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", + "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )", + "(T)x", "(s (e ( (type T) ) (e x)) )", + "new A().b", "(s (e (e new (type A) ( )) . b) )", + "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", + "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", + "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . 
g) ( (expressionList (e x) , (e 1)) )) )", + "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testDeclarations() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : declarator EOF ;\n" + // must indicate EOF can follow + "declarator\n" + + " : declarator '[' e ']'\n" + + " | declarator '[' ']'\n" + + " | declarator '(' ')'\n" + + " | '*' declarator\n" + // binds less tight than suffixes + " | '(' declarator ')'\n" + + " | ID\n" + + " ;\n" + + "e : INT ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "a", "(s (declarator a) )", + "*a", "(s (declarator * (declarator a)) )", + "**a", "(s (declarator * (declarator * (declarator a))) )", + "a[3]", "(s (declarator (declarator a) [ (e 3) ]) )", + "b[]", "(s (declarator (declarator b) [ ]) )", + "(a)", "(s (declarator ( (declarator a) )) )", + "a[]()", "(s (declarator (declarator (declarator a) [ ]) ( )) )", + "a[][]", "(s (declarator (declarator (declarator a) [ ]) [ ]) )", + "*a[]", "(s (declarator * (declarator (declarator a) [ ])) )", + "(*a)[]", "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testReturnValueAndActions() throws Exception { + String grammar = + "grammar T;\n" + + "s : e {System.out.println($e.v);} ;\n" + + "e returns [int v, List ignored]\n" + + " : a=e '*' b=e {$v = $a.v * $b.v;}\n" + + " | a=e '+' b=e {$v = $a.v + $b.v;}\n" + + " | INT {$v = $INT.int;}\n" + + " | '(' x=e ')' {$v = $x.v;}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "4", "4", + "1+2", "3", + "1+2*3", "7", + "(1+2)*3", "9", + }; + runTests(grammar, tests, "s"); + } + + /** + * This is a regression test for antlr/antlr4#677 "labels not working in + * grammar file". + * https://github.com/antlr/antlr4/issues/677 + * + *

<p>This test treats {@code ,} and {@code >>} as part of a single compound + * operator (similar to a ternary operator).</p>
+ */ + @Test public void testReturnValueAndActionsList1() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + + "expr:\n" + + " a=expr '*' a=expr #Factor\n" + + " | b+=expr (',' b+=expr)* '>>' c=expr #Send\n" + + " | ID #JustId //semantic check on modifiers\n" + + ";\n" + + "\n" + + "ID : ('a'..'z'|'A'..'Z'|'_')\n" + + " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + + ";\n" + + "\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String[] tests = { + "a*b", "(s (expr (expr a) * (expr b)) )", + "a,c>>x", "(s (expr (expr a) , (expr c) >> (expr x)) )", + "x", "(s (expr x) )", + "a*b,c,x*y>>r", "(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) )", + }; + runTests(grammar, tests, "s"); + } + + /** + * This is a regression test for antlr/antlr4#677 "labels not working in + * grammar file". + * https://github.com/antlr/antlr4/issues/677 + * + *

<p>This test treats the {@code ,} and {@code >>} operators separately.</p>
+ */ + @Test public void testReturnValueAndActionsList2() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + + "expr:\n" + + " a=expr '*' a=expr #Factor\n" + + " | b+=expr ',' b+=expr #Comma\n" + + " | b+=expr '>>' c=expr #Send\n" + + " | ID #JustId //semantic check on modifiers\n" + + ";\n" + + "\n" + + "ID : ('a'..'z'|'A'..'Z'|'_')\n" + + " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + + ";\n" + + "\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String[] tests = { + "a*b", "(s (expr (expr a) * (expr b)) )", + "a,c>>x", "(s (expr (expr (expr a) , (expr c)) >> (expr x)) )", + "x", "(s (expr x) )", + "a*b,c,x*y>>r", "(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) )", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testLabelsOnOpSubrule() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + + "e : a=e op=('*'|'/') b=e {}\n" + + " | INT {}\n" + + " | '(' x=e ')' {}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "4", "(s (e 4))", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testReturnValueAndActionsAndLabels() throws Exception { + String grammar = + "grammar T;\n" + + "s : q=e {System.out.println($e.v);} ;\n" + + "\n" + + "e returns [int v]\n" + + " : a=e op='*' b=e {$v = $a.v * $b.v;} # mult\n" + + " | a=e '+' b=e {$v = $a.v + $b.v;} # add\n" + + " | INT {$v = $INT.int;} # anInt\n" + + " | '(' x=e ')' {$v = $x.v;} # parens\n" + + " | x=e '++' {$v = $x.v+1;} # inc\n" + + " | e '--' # dec\n" + + " | ID {$v = 3;} # anID\n" + + " ; \n" + + "\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "4", "4", + "1+2", "3", + "1+2*3", "7", + "i++*3", "12", + }; + runTests(grammar, tests, "s"); + } + + /** + * This is a regression test for antlr/antlr4#433 "Not all context accessor + * methods are generated when an alternative rule label is used for multiple + * alternatives". 
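+ * (Since both binary alternatives share the label {@code binary} and both unary alternatives share {@code unary}, the generated BinaryContext and UnaryContext classes must expose the {@code e(int)}, {@code INC()} and {@code DEC()} accessors that the embedded actions below rely on.)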
+ * https://github.com/antlr/antlr4/issues/433 + */ + @Test public void testMultipleAlternativesWithCommonLabel() throws Exception { + String grammar = + "grammar T;\n" + + "s : e {System.out.println($e.v);} ;\n" + + "\n" + + "e returns [int v]\n" + + " : e '*' e {$v = ((BinaryContext)$ctx).e(0).v * ((BinaryContext)$ctx).e(1).v;} # binary\n" + + " | e '+' e {$v = ((BinaryContext)$ctx).e(0).v + ((BinaryContext)$ctx).e(1).v;} # binary\n" + + " | INT {$v = $INT.int;} # anInt\n" + + " | '(' e ')' {$v = $e.v;} # parens\n" + + " | left=e INC {assert(((UnaryContext)$ctx).INC() != null); $v = $left.v + 1;} # unary\n" + + " | left=e DEC {assert(((UnaryContext)$ctx).DEC() != null); $v = $left.v - 1;} # unary\n" + + " | ID {$v = 3;} # anID\n" + + " ; \n" + + "\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "INC : '++' ;\n" + + "DEC : '--' ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "4", "4", + "1+2", "3", + "1+2*3", "7", + "i++*3", "12", + }; + runTests(grammar, tests, "s"); + } + + @Test public void testPrefixOpWithActionAndLabel() throws Exception { + String grammar = + "grammar T;\n" + + "s : e {System.out.println($e.result);} ;\n" + + "\n" + + "e returns [String result]\n" + + " : ID '=' e1=e { $result = \"(\" + $ID.getText() + \"=\" + $e1.result + \")\"; }\n" + + " | ID { $result = $ID.getText(); }\n" + + " | e1=e '+' e2=e { $result = \"(\" + $e1.result + \"+\" + $e2.result + \")\"; }\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "a", "a", + "a+b", "(a+b)", + "a=b+c", "((a=b)+c)", + }; + runTests(grammar, tests, "s"); + } + + @Test + public void testAmbigLR() throws Exception { + String grammar = + "grammar Expr;\n" + + "prog: stat ;\n" + + "stat: expr NEWLINE # printExpr\n" + + " | ID '=' expr NEWLINE # assign\n" + + " | NEWLINE # blank\n" + + " ;\n" + + "expr: expr ('*'|'/') expr # MulDiv\n" + + " | expr ('+'|'-') expr # AddSub\n" + + " | INT # int\n" + + " | ID # id\n" + + " | '(' expr ')' # parens\n" + + " ;\n" + + "\n" + + "MUL : '*' ; // assigns token name to '*' used above in grammar\n" + + "DIV : '/' ;\n" + + "ADD : '+' ;\n" + + "SUB : '-' ;\n" + + "ID : [a-zA-Z]+ ; // match identifiers\n" + + "INT : [0-9]+ ; // match integers\n" + + "NEWLINE:'\\r'? 
'\\n' ; // return newlines to parser (is end-statement signal)\n" + + "WS : [ \\t]+ -> skip ; // toss out whitespace\n"; + String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "1\n", true); + assertNull(stderrDuringParse); + + result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a = 5\n", true); + assertNull(stderrDuringParse); + + result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "b = 6\n", true); + assertNull(stderrDuringParse); + + result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a+b*2\n", true); + assertNull(stderrDuringParse); + + result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "(1+2)*3\n", true); + assertNull(stderrDuringParse); + } + + @Test public void testCheckForNonLeftRecursiveRule() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + + "a : a ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String expected = + "error(" + ErrorType.NO_NON_LR_ALTS.code + "): T.g4:3:0: left recursive rule a must contain an alternative which is not left recursive\n"; + testErrors(new String[] { grammar, expected }, false); + } + + @Test public void testCheckForLeftRecursiveEmptyFollow() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + + "a : a ID?\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String expected = + "error(" + ErrorType.EPSILON_LR_FOLLOW.code + "): T.g4:3:0: left recursive rule a contains a left recursive alternative which can be followed by the empty string\n"; + testErrors(new String[] { grammar, expected }, false); + } + + /** + * This is a regression test for #239 "recoursive parser using implicit + * tokens ignore white space lexer rule". + * https://github.com/antlr/antlr4/issues/239 + */ + @Test public void testWhitespaceInfluence() { + String grammar = + "grammar Expr;\n" + + "prog : expression EOF;\n" + + "expression\n" + + " : ID '(' expression (',' expression)* ')' # doFunction\n" + + " | '(' expression ')' # doParenthesis\n" + + " | '!' expression # doNot\n" + + " | '-' expression # doNegate\n" + + " | '+' expression # doPositiv\n" + + " | expression '^' expression # doPower\n" + + " | expression '*' expression # doMultipy\n" + + " | expression '/' expression # doDivide\n" + + " | expression '%' expression # doModulo\n" + + " | expression '-' expression # doMinus\n" + + " | expression '+' expression # doPlus\n" + + " | expression '=' expression # doEqual\n" + + " | expression '!=' expression # doNotEqual\n" + + " | expression '>' expression # doGreather\n" + + " | expression '>=' expression # doGreatherEqual\n" + + " | expression '<' expression # doLesser\n" + + " | expression '<=' expression # doLesserEqual\n" + + " | expression K_IN '(' expression (',' expression)* ')' # doIn\n" + + " | expression ( '&' | K_AND) expression # doAnd\n" + + " | expression ( '|' | K_OR) expression # doOr\n" + + " | '[' expression (',' expression)* ']' # newArray\n" + + " | K_TRUE # newTrueBoolean\n" + + " | K_FALSE # newFalseBoolean\n" + + " | NUMBER # newNumber\n" + + " | DATE # newDateTime\n" + + " | ID # newIdentifier\n" + + " | SQ_STRING # newString\n" + + " | K_NULL # newNull\n" + + " ;\n" + + "\n" + + "// Fragments\n" + + "fragment DIGIT : '0' .. '9'; \n" + + "fragment UPPER : 'A' .. 'Z';\n" + + "fragment LOWER : 'a' .. 
'z';\n" + + "fragment LETTER : LOWER | UPPER;\n" + + "fragment WORD : LETTER | '_' | '$' | '#' | '.';\n" + + "fragment ALPHANUM : WORD | DIGIT; \n" + + "\n" + + "// Tokens\n" + + "ID : LETTER ALPHANUM*;\n" + + "NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?;\n" + + "DATE : '\\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\\'';\n" + + "SQ_STRING : '\\'' ('\\'\\'' | ~'\\'')* '\\'';\n" + + "DQ_STRING : '\"' ('\\\\\"' | ~'\"')* '\"';\n" + + "WS : [ \\t\\n\\r]+ -> skip ;\n" + + "COMMENTS : ('/*' .*? '*/' | '//' ~'\\n'* '\\n' ) -> skip;\n"; + + String expected = + ""; + String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "Test(1,3)", false); + assertEquals(expected, result); + assertNull(stderrDuringParse); + + expected = + ""; + result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "Test(1, 3)", false); + assertEquals(expected, result); + assertNull(stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#509 "Incorrect rule chosen in + * unambiguous grammar". + * https://github.com/antlr/antlr4/issues/509 + */ + @Test public void testPrecedenceFilterConsidersContext() throws Exception { + String grammar = + "grammar T;\n" + + "prog\n" + + "@after {System.out.println($ctx.toStringTree(this));}\n" + + ": statement* EOF {};\n" + + "statement: letterA | statement letterA 'b' ;\n" + + "letterA: 'a';\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", + "aa", false); + assertEquals("(prog (statement (letterA a)) (statement (letterA a)) <EOF>)\n", found); + } + + /** + * This is a regression test for antlr/antlr4#625 "Duplicate action breaks + * operator precedence" + * https://github.com/antlr/antlr4/issues/625 + */ + @Test public void testMultipleActions() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + + "e : a=e op=('*'|'/') b=e {}{}\n" + + " | INT {}{}\n" + + " | '(' x=e ')' {}{}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "4", "(s (e 4))", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", + }; + runTests(grammar, tests, "s"); + } + + /** + * This is a regression test for antlr/antlr4#625 "Duplicate action breaks + * operator precedence" + * https://github.com/antlr/antlr4/issues/625 + */ + @Test public void testMultipleActionsPredicatesOptions() throws Exception { + String grammar = + "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + + "e : a=e op=('*'|'/') b=e {}{true}?\n" + + " | a=e op=('+'|'-') b=e {}{true}?\n" + + " | INT {}{}\n" + + " | '(' x=e ')' {}{}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + String[] tests = { + "4", "(s (e 4))", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", + }; + runTests(grammar, tests, "s"); + } + + public void runTests(String grammar, String[] tests, String startRule) { + boolean success = rawGenerateAndBuildRecognizer("T.g4", grammar, "TParser", "TLexer"); + assertTrue(success); + writeRecognizerAndCompile("TParser", + "TLexer", + startRule, + debug, + false); + + for (int i=0; i<tests.length; i+=2) { + String test = tests[i]; + String expecting = tests[i+1]+"\n"; + writeFile(tmpdir, "input", test); + String found = execRecognizer(); + System.out.print(test+" -> "+found); + assertEquals(expecting, found); + } + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestLexerActions.java b/tool/test/org/antlr/v4/test/tool/TestLexerActions.java new 
file mode 100644 index 000000000..4b0caf218 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestLexerActions.java @@ -0,0 +1,283 @@ +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestLexerActions extends BaseTest { + // ----- ACTIONS -------------------------------------------------------- + + @Test public void testActionExecutedInDFA() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 34"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,3:4='34',<1>,1:3]\n" + + "[@2,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } + + @Test public void testActionEvalsAtCorrectIndex() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : [0-9] {System.out.println(\"2nd char: \"+(char)_input.LA(1));} [0-9]+ ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "123 45"); + String expecting = + "2nd char: 2\n" + + "2nd char: 5\n" + + "[@0,0:2='123',<1>,1:0]\n" + + "[@1,4:5='45',<1>,1:4]\n" + + "[@2,6:5='',<-1>,1:6]\n"; + assertEquals(expecting, found); + } + + /** + * This is a regressing test for antlr/antlr4#469 "Not all internal lexer + * rule actions are executed". + * https://github.com/antlr/antlr4/issues/469 + */ + @Test public void testEvalMultipleActions() throws Exception { + String grammar = + "lexer grammar L;\n" + + "\n" + + "@lexer::members\n" + + "{\n" + + "class Marker\n" + + "{\n" + + " Marker (Lexer lexer) { this.lexer = lexer; }\n" + + "\n" + + " public String getText ()\n" + + " {\n" + + " return lexer._input.getText (new Interval (start_index, stop_index));\n" + + " }\n" + + "\n" + + " public void start () { start_index = lexer._input.index (); System.out.println (\"Start:\" + start_index);}\n" + + " public void stop () { stop_index = lexer._input.index (); System.out.println (\"Stop:\" + stop_index);}\n" + + "\n" + + " private int start_index = 0;\n" + + " private int stop_index = 0;\n" + + " private Lexer lexer;\n" + + "}\n" + + "\n" + + "Marker m_name = new Marker (this);\n" + + "}\n" + + "\n" + + "HELLO: 'hello' WS { m_name.start (); } NAME { m_name.stop (); } '\\n' { System.out.println (\"Hello: \" + m_name.getText ()); };\n" + + "NAME: ('a'..'z' | 'A'..'Z')+ ('\\n')?;\n" + + "\n" + + "fragment WS: [ \\r\\t\\n]+ ;\n"; + String found = execLexer("L.g4", grammar, "L", "hello Steve\n"); + String expecting = + "Start:6\n" + + "Stop:11\n" + + "Hello: Steve\n" + + "\n" + + "[@0,0:11='hello Steve\\n',<1>,1:0]\n" + + "[@1,12:11='',<-1>,2:12]\n"; + assertEquals(expecting, found); + } + + @Test public void test2ActionsIn1Rule() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : [0-9] {System.out.println(\"x\");} [0-9]+ {System.out.println(\"y\");} ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "123 45"); + String expecting = + "x\n" + + "y\n" + + "x\n" + + "y\n" + + "[@0,0:2='123',<1>,1:0]\n" + + "[@1,4:5='45',<1>,1:4]\n" + + "[@2,6:5='',<-1>,1:6]\n"; + assertEquals(expecting, found); + } + + @Test public void testAltActionsIn1Rule() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : ( [0-9]+ {System.out.print(\"int\");}\n" + + " | [a-z]+ {System.out.print(\"id\");}\n" + + " )\n" + + " {System.out.println(\" last\");}\n" + + " ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", 
grammar, "L", "123 ab"); + String expecting = + "int last\n" + + "id last\n" + + "[@0,0:2='123',<1>,1:0]\n" + + "[@1,4:5='ab',<1>,1:4]\n" + + "[@2,6:5='',<-1>,1:6]\n"; + assertEquals(expecting, found); + } + + @Test public void testActionPlusCommand() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} -> skip ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 34"); + String expecting = + "I\n" + + "I\n" + + "[@0,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } + + // ----- COMMANDS -------------------------------------------------------- + + @Test public void testSkipCommand() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 34"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,3:4='34',<1>,1:3]\n" + + "[@2,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } + + @Test public void testMoreCommand() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "WS : '#' -> more ;"; + String found = execLexer("L.g4", grammar, "L", "34#10"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,2:4='#10',<1>,1:2]\n" + + "[@2,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } + + @Test public void testTypeCommand() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "HASH : '#' -> type(HASH) ;"; + String found = execLexer("L.g4", grammar, "L", "34#"); + String expecting = + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,2:2='#',<2>,1:2]\n" + + "[@2,3:2='',<-1>,1:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCombinedCommand() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "HASH : '#' -> type(100), skip, more ;"; + String found = execLexer("L.g4", grammar, "L", "34#11"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,2:4='#11',<1>,1:2]\n" + + "[@2,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } + + @Test public void testLexerMode() throws Exception { + String grammar = + "lexer grammar L;\n" + + "STRING_START : '\"' -> pushMode(STRING_MODE), more;\n" + + "WS : (' '|'\\n') -> skip ;\n"+ + "mode STRING_MODE;\n"+ + "STRING : '\"' -> popMode;\n"+ + "ANY : . -> more;\n"; + String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); + String expecting = + "[@0,0:4='\"abc\"',<2>,1:0]\n" + + "[@1,6:9='\"ab\"',<2>,1:6]\n" + + "[@2,10:9='',<-1>,1:10]\n"; + assertEquals(expecting, found); + } + + @Test public void testLexerPushPopModeAction() throws Exception { + String grammar = + "lexer grammar L;\n" + + "STRING_START : '\"' -> pushMode(STRING_MODE), more ;\n" + + "WS : (' '|'\\n') -> skip ;\n"+ + "mode STRING_MODE;\n"+ + "STRING : '\"' -> popMode ;\n"+ // token type 2 + "ANY : . 
-> more ;\n"; + String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); + String expecting = + "[@0,0:4='\"abc\"',<2>,1:0]\n" + + "[@1,6:9='\"ab\"',<2>,1:6]\n" + + "[@2,10:9='',<-1>,1:10]\n"; + assertEquals(expecting, found); + } + + @Test public void testLexerModeAction() throws Exception { + String grammar = + "lexer grammar L;\n" + + "STRING_START : '\"' -> mode(STRING_MODE), more ;\n" + + "WS : (' '|'\\n') -> skip ;\n"+ + "mode STRING_MODE;\n"+ + "STRING : '\"' -> mode(DEFAULT_MODE) ;\n"+ // ttype 2 since '"' ambiguity + "ANY : . -> more ;\n"; + String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); + String expecting = + "[@0,0:4='\"abc\"',<2>,1:0]\n" + + "[@1,6:9='\"ab\"',<2>,1:6]\n" + + "[@2,10:9='',<-1>,1:10]\n"; + assertEquals(expecting, found); + } + + // ----- PREDICATES -------------------------------------------------------- + + /** + * This is a regression test for antlr/antlr4#398 "Lexer: literal matches + * while negated char set fail to match" + * https://github.com/antlr/antlr4/issues/398 + */ + @Test + public void testFailingPredicateEvalIsNotCached() { + String grammar = + "lexer grammar TestLexer;\n" + + "\n" + + "fragment WS: [ \\t]+;\n" + + "fragment EOL: '\\r'? '\\n';\n" + + "\n" + + "LINE: WS? ~[\\r\\n]* EOL { !getText().trim().startsWith(\"Item:\") }?;\n" + + "ITEM: WS? 'Item:' -> pushMode(ITEM_HEADING_MODE);\n" + + "\n" + + "mode ITEM_HEADING_MODE;\n" + + "\n" + + "NAME: ~[\\r\\n]+;\n" + + "SECTION_HEADING_END: EOL -> popMode;\n"; + String input = + "A line here.\n" + + "Item: name of item\n" + + "Another line.\n" + + "More line.\n"; + String found = execLexer("TestLexer.g4", grammar, "TestLexer", input); + String expecting = + "[@0,0:12='A line here.\\n',<1>,1:0]\n" + + "[@1,13:17='Item:',<2>,2:0]\n" + + "[@2,18:30=' name of item',<3>,2:5]\n" + + "[@3,31:31='\\n',<4>,2:18]\n" + + "[@4,32:45='Another line.\\n',<1>,3:0]\n" + + "[@5,46:56='More line.\\n',<1>,4:0]\n" + + "[@6,57:56='',<-1>,5:11]\n"; + assertEquals(expecting, found); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestLexerErrors.java b/tool/test/org/antlr/v4/test/tool/TestLexerErrors.java new file mode 100644 index 000000000..c288cf5e2 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestLexerErrors.java @@ -0,0 +1,213 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestLexerErrors extends BaseTest { + // TEST DETECTION + @Test public void testInvalidCharAtStart() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'a' 'b' ;\n"; + String tokens = execLexer("L.g4", grammar, "L", "x"); + String expectingTokens = + "[@0,1:0='',<-1>,1:1]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:0 token recognition error at: 'x'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + @Test + public void testStringsEmbeddedInActions() { + String grammar = + "lexer grammar Actions;\n" + + "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" + + "STRING : '\"' ('\\\"' | .)*? '\"';\n" + + "WS : [ \\t\\r\\n]+ -> skip;\n"; + String tokens = execLexer("Actions.g4", grammar, "Actions", "[\"foo\"]"); + String expectingTokens = + "[@0,0:6='[\"foo\"]',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n"; + assertEquals(expectingTokens, tokens); + assertNull(stderrDuringParse); + + tokens = execLexer("Actions.g4", grammar, "Actions", "[\"foo]"); + expectingTokens = + "[@0,6:5='',<-1>,1:6]\n"; + assertEquals(expectingTokens, tokens); + assertEquals("line 1:0 token recognition error at: '[\"foo]'\n", stderrDuringParse); + } + + @Test public void testEnforcedGreedyNestedBrances() { + String grammar = + "lexer grammar R;\n" + + "ACTION : '{' (ACTION | ~[{}])* '}';\n" + + "WS : [ \\r\\n\\t]+ -> skip;\n"; + String tokens = execLexer("R.g4", grammar, "R", "{ { } }"); + String expectingTokens = + "[@0,0:6='{ { } }',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n"; + assertEquals(expectingTokens, tokens); + assertEquals(null, stderrDuringParse); + + tokens = execLexer("R.g4", grammar, "R", "{ { }"); + expectingTokens = + "[@0,5:4='',<-1>,1:5]\n"; + assertEquals(expectingTokens, tokens); + assertEquals("line 1:0 token recognition error at: '{ { }'\n", stderrDuringParse); + } + + @Test public void testInvalidCharAtStartAfterDFACache() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'a' 'b' ;\n"; + String tokens = execLexer("L.g4", grammar, "L", "abx"); + String expectingTokens = + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:2 token recognition error at: 'x'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + @Test public void testInvalidCharInToken() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'a' 'b' ;\n"; + String tokens = execLexer("L.g4", grammar, "L", "ax"); + String expectingTokens = + "[@0,2:1='',<-1>,1:2]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:0 token recognition error at: 'ax'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + @Test public void testInvalidCharInTokenAfterDFACache() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'a' 'b' ;\n"; + 
String tokens = execLexer("L.g4", grammar, "L", "abax"); + String expectingTokens = + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,4:3='',<-1>,1:4]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:2 token recognition error at: 'ax'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + @Test public void testDFAToATNThatFailsBackToDFA() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'ab' ;\n"+ + "B : 'abc' ;\n"; + // The first ab caches the DFA then abx goes through the DFA but + // into the ATN for the x, which fails. Must go back into DFA + // and return to previous dfa accept state + String tokens = execLexer("L.g4", grammar, "L", "ababx"); + String expectingTokens = + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:3='ab',<1>,1:2]\n" + + "[@2,5:4='',<-1>,1:5]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:4 token recognition error at: 'x'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + @Test public void testDFAToATNThatMatchesThenFailsInATN() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'ab' ;\n"+ + "B : 'abc' ;\n"+ + "C : 'abcd' ;\n"; + // The first ab caches the DFA then abx goes through the DFA but + // into the ATN for the c. It marks that hasn't except state + // and then keeps going in the ATN. It fails on the x, but + // uses the previous accepted in the ATN not DFA + String tokens = execLexer("L.g4", grammar, "L", "ababcx"); + String expectingTokens = + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:4='abc',<2>,1:2]\n" + + "[@2,6:5='',<-1>,1:6]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:5 token recognition error at: 'x'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + @Test public void testErrorInMiddle() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'abc' ;\n"; + String tokens = execLexer("L.g4", grammar, "L", "abx"); + String expectingTokens = + "[@0,3:2='',<-1>,1:3]\n"; + assertEquals(expectingTokens, tokens); + String expectingError = "line 1:0 token recognition error at: 'abx'\n"; + String error = stderrDuringParse; + assertEquals(expectingError, error); + } + + // TEST RECOVERY + + /** + * This is a regression test for #45 "NullPointerException in LexerATNSimulator.execDFA". + * https://github.com/antlr/antlr4/issues/46 + */ + @Test + public void testLexerExecDFA() throws Exception { + String grammar = + "grammar T;\n" + + "start : ID ':' expr;\n" + + "expr : primary expr? {} | expr '->' ID;\n" + + "primary : ID;\n" + + "ID : [a-z]+;\n" + + "\n"; + String result = execLexer("T.g4", grammar, "TLexer", "x : x", false); + String expecting = + "[@0,0:0='x',<3>,1:0]\n" + + "[@1,2:2=':',<1>,1:2]\n" + + "[@2,4:4='x',<3>,1:4]\n" + + "[@3,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, result); + assertEquals("line 1:1 token recognition error at: ' '\n" + + "line 1:3 token recognition error at: ' '\n", + this.stderrDuringParse); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestLexerExec.java b/tool/test/org/antlr/v4/test/tool/TestLexerExec.java new file mode 100644 index 000000000..6cbc0209a --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestLexerExec.java @@ -0,0 +1,690 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.misc.Nullable; +import org.junit.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class TestLexerExec extends BaseTest { + @Test public void testQuoteTranslation() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "QUOTE : '\"' ;\n"; // make sure this compiles + String found = execLexer("L.g4", grammar, "L", "\""); + String expecting = + "[@0,0:0='\"',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n"; + assertEquals(expecting, found); + } + + @Test public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "A : '-' I ;\n" + + "I : '0'..'9'+ ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 -21 3"); + String expecting = + "[@0,0:1='34',<2>,1:0]\n" + + "[@1,3:5='-21',<1>,1:3]\n" + + "[@2,7:7='3',<2>,1:7]\n" + + "[@3,8:7='',<-1>,1:8]\n"; // EOF has no length so range is 8:7 not 8:8 + assertEquals(expecting, found); + } + + @Test public void testSlashes() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "Backslash : '\\\\';\n" + + "Slash : '/';\n" + + "Vee : '\\\\/';\n" + + "Wedge : '/\\\\';\n"+ + "WS : [ \\t] -> skip;"; + String found = execLexer("L.g4", grammar, "L", "\\ / \\/ /\\"); + String expecting = + "[@0,0:0='\\',<1>,1:0]\n" + + "[@1,2:2='/',<2>,1:2]\n" + + "[@2,4:5='\\/',<3>,1:4]\n" + + "[@3,7:8='/\\',<4>,1:7]\n" + + "[@4,9:8='',<-1>,1:9]\n"; + assertEquals(expecting, found); + } + + /** + * This is a regression test for antlr/antlr4#224: "Parentheses without + * quantifier in lexer rules have unclear effect". 
+ * https://github.com/antlr/antlr4/issues/224 + */ + @Test public void testParentheses() { + String grammar = + "lexer grammar Demo;\n" + + "\n" + + "START_BLOCK: '-.-.-';\n" + + "\n" + + "ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;\n" + + "fragment LETTER: L_A|L_K;\n" + + "fragment L_A: '.-';\n" + + "fragment L_K: '-.-';\n" + + "\n" + + "SEPARATOR: '!';\n"; + String found = execLexer("Demo.g4", grammar, "Demo", "-.-.-!"); + String expecting = + "[@0,0:4='-.-.-',<1>,1:0]\n" + + "[@1,5:5='!',<3>,1:5]\n" + + "[@2,6:5='',<-1>,1:6]\n"; + assertEquals(expecting, found); + } + + @Test + public void testNonGreedyTermination() throws Exception { + String grammar = + "lexer grammar L;\n" + + "STRING : '\"' ('\"\"' | .)*? '\"';"; + + String found = execLexer("L.g4", grammar, "L", "\"hi\"\"mom\""); + assertEquals( + "[@0,0:3='\"hi\"',<1>,1:0]\n" + + "[@1,4:8='\"mom\"',<1>,1:4]\n" + + "[@2,9:8='',<-1>,1:9]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testNonGreedyTermination2() throws Exception { + String grammar = + "lexer grammar L;\n" + + "STRING : '\"' ('\"\"' | .)+? '\"';"; + + String found = execLexer("L.g4", grammar, "L", "\"\"\"mom\""); + assertEquals( + "[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" + + "[@1,7:6='',<-1>,1:7]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testGreedyOptional() throws Exception { + String grammar = + "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT?;\n" + + "WS : (' '|'\\t')+;"; + + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals( + "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testNonGreedyOptional() throws Exception { + String grammar = + "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT??;\n" + + "WS : (' '|'\\t')+;"; + + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals( + "[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testGreedyClosure() throws Exception { + String grammar = + "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT*;\n" + + "WS : (' '|'\\t')+;"; + + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals( + "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testNonGreedyClosure() throws Exception { + String grammar = + "lexer grammar L;\n" + + "CMT : '//' .*? '\\n' CMT*?;\n" + + "WS : (' '|'\\t')+;"; + + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals( + "[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testGreedyPositiveClosure() throws Exception { + String grammar = + "lexer grammar L;\n" + + "CMT : ('//' .*? '\\n')+;\n" + + "WS : (' '|'\\t')+;"; + + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals( + "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + + "[@1,14:13='',<-1>,3:14]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testNonGreedyPositiveClosure() throws Exception { + String grammar = + "lexer grammar L;\n" + + "CMT : ('//' .*? 
'\\n')+?;\n" + + "WS : (' '|'\\t')+;"; + + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + assertEquals( + "[@0,0:6='//blah\\n',<1>,1:0]\n" + + "[@1,7:13='//blah\\n',<1>,2:0]\n" + + "[@2,14:13='',<-1>,3:7]\n", found); + assertNull(stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardStar1() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "CMT : '/*' (CMT | .)*? '*/' ;\n" + + "WS : (' '|'\\n')+ ;\n" + /*+ "ANY : .;"*/; + + String expecting = + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,9:9='\\n',<2>,1:9]\n" + + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,35:35='\\n',<2>,3:16]\n" + + "[@4,36:35='',<-1>,4:17]\n"; + + // stuff on end of comment matches another rule + String found = execLexer("L.g4", grammar, "L", + "/* ick */\n" + + "/* /* */\n" + + "/* /*nested*/ */\n"); + assertEquals(expecting, found); + assertNull(stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardStar2() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "CMT : '/*' (CMT | .)*? '*/' ;\n" + + "WS : (' '|'\\n')+ ;\n" + /*+ "ANY : .;"*/; + + // stuff on end of comment doesn't match another rule + String expecting = + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,10:10='\\n',<2>,1:10]\n" + + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,38:38='\\n',<2>,3:17]\n" + + "[@4,39:38='',<-1>,4:18]\n"; + String found = execLexer("L.g4", grammar, "L", + "/* ick */x\n" + + "/* /* */x\n" + + "/* /*nested*/ */x\n"); + assertEquals(expecting, found); + assertEquals( + "line 1:9 token recognition error at: 'x'\n" + + "line 3:16 token recognition error at: 'x'\n", stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardPlus1() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "CMT : '/*' (CMT | .)+? '*/' ;\n" + + "WS : (' '|'\\n')+ ;\n" + /*+ "ANY : .;"*/; + + String expecting = + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,9:9='\\n',<2>,1:9]\n" + + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,35:35='\\n',<2>,3:16]\n" + + "[@4,36:35='',<-1>,4:17]\n"; + + // stuff on end of comment matches another rule + String found = execLexer("L.g4", grammar, "L", + "/* ick */\n" + + "/* /* */\n" + + "/* /*nested*/ */\n"); + assertEquals(expecting, found); + assertNull(stderrDuringParse); + } + + @Test + public void testRecursiveLexerRuleRefWithWildcardPlus2() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "CMT : '/*' (CMT | .)+? 
'*/' ;\n" + + "WS : (' '|'\\n')+ ;\n" + /*+ "ANY : .;"*/; + + // stuff on end of comment doesn't match another rule + String expecting = + "[@0,0:8='/* ick */',<1>,1:0]\n" + + "[@1,10:10='\\n',<2>,1:10]\n" + + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + + "[@3,38:38='\\n',<2>,3:17]\n" + + "[@4,39:38='',<-1>,4:18]\n"; + String found = execLexer("L.g4", grammar, "L", + "/* ick */x\n" + + "/* /* */x\n" + + "/* /*nested*/ */x\n"); + assertEquals(expecting, found); + assertEquals( + "line 1:9 token recognition error at: 'x'\n" + + "line 3:16 token recognition error at: 'x'\n", stderrDuringParse); + } + + @Test public void testActionPlacement() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : ({System.out.println(\"stuff fail: \" + getText());} 'a' | {System.out.println(\"stuff0: \" + getText());} 'a' {System.out.println(\"stuff1: \" + getText());} 'b' {System.out.println(\"stuff2: \" + getText());}) {System.out.println(getText());} ;\n"+ + "WS : (' '|'\\n') -> skip ;\n" + + "J : .;\n"; + String found = execLexer("L.g4", grammar, "L", "ab"); + String expecting = + "stuff0: \n" + + "stuff1: a\n" + + "stuff2: ab\n" + + "ab\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n"; + assertEquals(expecting, found); + } + + @Test public void testGreedyConfigs() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : ('a' | 'ab') {System.out.println(getText());} ;\n"+ + "WS : (' '|'\\n') -> skip ;\n" + + "J : .;\n"; + String found = execLexer("L.g4", grammar, "L", "ab"); + String expecting = + "ab\n" + + "[@0,0:1='ab',<1>,1:0]\n" + + "[@1,2:1='',<-1>,1:2]\n"; + assertEquals(expecting, found); + } + + @Test public void testNonGreedyConfigs() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : .*? ('a' | 'ab') {System.out.println(getText());} ;\n"+ + "WS : (' '|'\\n') -> skip ;\n" + + "J : . {System.out.println(getText());};\n"; + String found = execLexer("L.g4", grammar, "L", "ab"); + String expecting = + "a\n" + + "b\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,1:1='b',<3>,1:1]\n" + + "[@2,2:1='',<-1>,1:2]\n"; + assertEquals(expecting, found); + } + + @Test public void testKeywordID() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "KEND : 'end' ;\n" + // has priority + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n')+ ;"; + String found = execLexer("L.g4", grammar, "L", "end eend ending a"); + String expecting = + "[@0,0:2='end',<1>,1:0]\n" + + "[@1,3:3=' ',<3>,1:3]\n" + + "[@2,4:7='eend',<2>,1:4]\n" + + "[@3,8:8=' ',<3>,1:8]\n" + + "[@4,9:14='ending',<2>,1:9]\n" + + "[@5,15:15=' ',<3>,1:15]\n" + + "[@6,16:16='a',<2>,1:16]\n" + + "[@7,17:16='',<-1>,1:17]\n"; + assertEquals(expecting, found); + } + + @Test public void testHexVsID() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "HexLiteral : '0' ('x'|'X') HexDigit+ ;\n"+ + "DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;\n" + + "FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;\n" + + "DOT : '.' 
;\n" + + "ID : 'a'..'z'+ ;\n" + + "fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;\n" + + "WS : (' '|'\\n')+ ;"; + String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l"); + String expecting = + "[@0,0:0='x',<5>,1:0]\n" + + "[@1,1:1=' ',<6>,1:1]\n" + + "[@2,2:2='0',<2>,1:2]\n" + + "[@3,3:3=' ',<6>,1:3]\n" + + "[@4,4:4='1',<2>,1:4]\n" + + "[@5,5:5=' ',<6>,1:5]\n" + + "[@6,6:6='a',<5>,1:6]\n" + + "[@7,7:7='.',<4>,1:7]\n" + + "[@8,8:8='b',<5>,1:8]\n" + + "[@9,9:9=' ',<6>,1:9]\n" + + "[@10,10:10='a',<5>,1:10]\n" + + "[@11,11:11='.',<4>,1:11]\n" + + "[@12,12:12='l',<5>,1:12]\n" + + "[@13,13:12='',<-1>,1:13]\n"; + assertEquals(expecting, found); + } + + // must get DONE EOF + @Test public void testEOFByItself() throws Exception { + String grammar = + "lexer grammar L;\n" + + "DONE : EOF ;\n" + + "A : 'a';\n"; + String found = execLexer("L.g4", grammar, "L", ""); + String expecting = + "[@0,0:-1='',<1>,1:0]\n" + + "[@1,0:-1='',<-1>,1:0]\n"; + assertEquals(expecting, found); + } + + @Test public void testEOFSuffixInFirstRule() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "A : 'a' EOF ;\n"+ + "B : 'a';\n"+ + "C : 'c';\n"; + String found = execLexer("L.g4", grammar, "L", ""); + String expecting = + "[@0,0:-1='',<-1>,1:0]\n"; + assertEquals(expecting, found); + + found = execLexer("L.g4", grammar, "L", "a"); + expecting = + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSet() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "WS : [ \\n\\u000D] -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,5:6='34',<1>,2:1]\n" + + "[@2,7:6='',<-1>,2:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetPlus() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,5:6='34',<1>,2:1]\n" + + "[@2,7:6='',<-1>,2:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetNot() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : ~[ab \\n] ~[ \\ncd]* {System.out.println(\"I\");} ;\n"+ + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "xaf"); + String expecting = + "I\n" + + "[@0,0:2='xaf',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetInSet() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : (~[ab \\n]|'a') {System.out.println(\"I\");} ;\n"+ + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "a x"); + String expecting = + "I\n" + + "I\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,2:2='x',<1>,1:2]\n" + + "[@2,3:2='',<-1>,1:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetRange() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : [0-9]+ {System.out.println(\"I\");} ;\n"+ + "ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n"+ + "WS : [ \\n\\u0009\\r]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n "); + String expecting = + "I\n" + + "I\n" + + "ID\n" + + "ID\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,4:5='34',<1>,1:4]\n" + + 
"[@2,7:8='a2',<2>,1:7]\n" + + "[@3,10:12='abc',<2>,1:10]\n" + + "[@4,18:17='',<-1>,2:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetWithMissingEndRange() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : [0-]+ {System.out.println(\"I\");} ;\n"+ + "WS : [ \\n\\u000D]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "00\r\n"); + String expecting = + "I\n" + + "[@0,0:1='00',<1>,1:0]\n" + + "[@1,4:3='',<-1>,2:0]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetWithMissingEscapeChar() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "I : [0-9]+ {System.out.println(\"I\");} ;\n"+ + "WS : [ \\u]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "34 "); + String expecting = + "I\n" + + "[@0,0:1='34',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetWithEscapedChar() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "DASHBRACK : [\\-\\]]+ {System.out.println(\"DASHBRACK\");} ;\n"+ + "WS : [ \\u]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "- ] "); + String expecting = + "DASHBRACK\n" + + "DASHBRACK\n" + + "[@0,0:0='-',<1>,1:0]\n" + + "[@1,2:2=']',<1>,1:2]\n" + + "[@2,4:3='',<-1>,1:4]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetWithReversedRange() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "A : [z-a9]+ {System.out.println(\"A\");} ;\n"+ + "WS : [ \\u]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "9"); + String expecting = + "A\n" + + "[@0,0:0='9',<1>,1:0]\n" + + "[@1,1:0='',<-1>,1:1]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetWithQuote() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "A : [\"a-z]+ {System.out.println(\"A\");} ;\n"+ + "WS : [ \\n\\t]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "b\"a"); + String expecting = + "A\n" + + "[@0,0:2='b\"a',<1>,1:0]\n" + + "[@1,3:2='',<-1>,1:3]\n"; + assertEquals(expecting, found); + } + + @Test public void testCharSetWithQuote2() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "A : [\"\\\\ab]+ {System.out.println(\"A\");} ;\n"+ + "WS : [ \\n\\t]+ -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "b\"\\a"); + String expecting = + "A\n" + + "[@0,0:3='b\"\\a',<1>,1:0]\n" + + "[@1,4:3='',<-1>,1:4]\n"; + assertEquals(expecting, found); + } + + @Test + public void testPositionAdjustingLexer() throws Exception { + String grammar = load("PositionAdjustingLexer.g4", null); + String input = + "tokens\n" + + "tokens {\n" + + "notLabel\n" + + "label1 =\n" + + "label2 +=\n" + + "notLabel\n"; + String found = execLexer("PositionAdjustingLexer.g4", grammar, "PositionAdjustingLexer", input); + + final int TOKENS = 4; + final int LABEL = 5; + final int IDENTIFIER = 6; + String expecting = + "[@0,0:5='tokens',<" + IDENTIFIER + ">,1:0]\n" + + "[@1,7:12='tokens',<" + TOKENS + ">,2:0]\n" + + "[@2,14:14='{',<3>,2:7]\n" + + "[@3,16:23='notLabel',<" + IDENTIFIER + ">,3:0]\n" + + "[@4,25:30='label1',<" + LABEL + ">,4:0]\n" + + "[@5,32:32='=',<1>,4:7]\n" + + "[@6,34:39='label2',<" + LABEL + ">,5:0]\n" + + "[@7,41:42='+=',<2>,5:7]\n" + + "[@8,44:51='notLabel',<" + IDENTIFIER + ">,6:0]\n" + + "[@9,53:52='',<-1>,7:0]\n"; + + assertEquals(expecting, found); + } + + /** + * This is a regression test for antlr/antlr4#76 "Serialized ATN strings + * should be split when longer than 2^16 bytes (class file limitation)" + * 
https://github.com/antlr/antlr4/issues/76 + */ + @Test + public void testLargeLexer() throws Exception { + StringBuilder grammar = new StringBuilder(); + grammar.append("lexer grammar L;\n"); + grammar.append("WS : [ \\t\\r\\n]+ -> skip;\n"); + for (int i = 0; i < 4000; i++) { + grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n"); + } + + String input = "KW400"; + String found = execLexer("L.g4", grammar.toString(), "L", input); + String expecting = + "[@0,0:4='KW400',<402>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } + + /** + * This is a regression test for antlr/antlr4#687 "Empty zero-length tokens + * cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match + * zero-length tokens" + * https://github.com/antlr/antlr4/issues/687 + * https://github.com/antlr/antlr4/issues/688 + */ + @Test public void testZeroLengthToken() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "\n" + + "BeginString\n" + + " : '\\'' -> more, pushMode(StringMode)\n" + + " ;\n" + + "\n" + + "mode StringMode;\n" + + "\n" + + " StringMode_X : 'x' -> more;\n" + + " StringMode_Done : -> more, mode(EndStringMode);\n" + + "\n" + + "mode EndStringMode; \n" + + "\n" + + " EndString : '\\'' -> popMode;\n"; + String found = execLexer("L.g4", grammar, "L", "'xxx'"); + String expecting = + "[@0,0:4=''xxx'',<1>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n"; + assertEquals(expecting, found); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestListeners.java b/tool/test/org/antlr/v4/test/tool/TestListeners.java new file mode 100644 index 000000000..94dd6793a --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestListeners.java @@ -0,0 +1,226 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestListeners extends BaseTest { + @Test public void testBasic() throws Exception { + String grammar = + "grammar T;\n" + + "@header {import org.antlr.v4.runtime.tree.*;}\n"+ + "@parser::members {\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void visitTerminal(TerminalNode node) {\n" + + " System.out.println(node.getSymbol().getText());\n" + + " }\n" + + " }}\n" + + "s\n" + + "@after {" + + " System.out.println($r.ctx.toStringTree(this));" + + " ParseTreeWalker walker = new ParseTreeWalker();\n" + + " walker.walk(new LeafListener(), $r.ctx);" + + "}\n" + + " : r=a ;\n" + + "a : INT INT" + + " | ID" + + " ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); + String expecting = "(a 1 2)\n" + + "1\n" + + "2\n"; + assertEquals(expecting, result); + } + + @Test public void testTokenGetters() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitA(TParser.AContext ctx) {\n" + + " if (ctx.getChildCount()==2) System.out.printf(\"%s %s %s\",ctx.INT(0).getSymbol().getText(),ctx.INT(1).getSymbol().getText(),ctx.INT());\n" + + " else System.out.println(ctx.ID().getSymbol());\n" + + " }\n" + + " }}\n" + + "s\n" + + "@after {" + + " System.out.println($r.ctx.toStringTree(this));" + + " ParseTreeWalker walker = new ParseTreeWalker();\n" + + " walker.walk(new LeafListener(), $r.ctx);" + + "}\n" + + " : r=a ;\n" + + "a : INT INT" + + " | ID" + + " ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); + String expecting = + "(a 1 2)\n" + + "1 2 [1, 2]\n"; + assertEquals(expecting, result); + + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); + expecting = "(a abc)\n" + + "[@0,0:2='abc',<4>,1:0]\n"; + assertEquals(expecting, result); + } + + @Test public void testRuleGetters() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitA(TParser.AContext ctx) {\n" + + " if (ctx.getChildCount()==2) {\n" + + " System.out.printf(\"%s %s %s\",ctx.b(0).start.getText(),\n" + + " ctx.b(1).start.getText(),ctx.b().get(0).start.getText());\n" + + " }\n" + + " else System.out.println(ctx.b(0).start.getText());\n" + + " }\n" + + " }}\n" + + "s\n" + + "@after {" + + " System.out.println($r.ctx.toStringTree(this));" + + " ParseTreeWalker walker = new ParseTreeWalker();\n" + + " walker.walk(new LeafListener(), $r.ctx);" + + "}\n" + + " : r=a ;\n" + + "a : b b" + // forces list + " | b" + // a list still + " ;\n" + + "b : ID | INT ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); + String expecting = "(a (b 1) (b 2))\n" + + "1 2 1\n"; + assertEquals(expecting, result); + + result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); + expecting = "(a (b abc))\n" + + "abc\n"; + assertEquals(expecting, result); + } + + @Test public void 
testLR() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitE(TParser.EContext ctx) {\n" + + " if (ctx.getChildCount()==3) {\n" + + " System.out.printf(\"%s %s %s\\n\",ctx.e(0).start.getText(),\n" + + " ctx.e(1).start.getText()," + + " ctx.e().get(0).start.getText());\n" + + " }\n" + + " else System.out.println(ctx.INT().getSymbol().getText());\n" + + " }\n" + + " }" + + "}\n" + + "s\n" + + "@after {" + + " System.out.println($r.ctx.toStringTree(this));" + + " ParseTreeWalker walker = new ParseTreeWalker();\n" + + " walker.walk(new LeafListener(), $r.ctx);" + + "}\n" + + " : r=e ;\n" + + "e : e op='*' e\n" + + " | e op='+' e\n" + + " | INT\n" + + " ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1+2*3", false); + String expecting = + "(e (e 1) + (e (e 2) * (e 3)))\n" + + "1\n" + + "2\n" + + "3\n" + + "2 3 2\n" + + "1 2 1\n"; + assertEquals(expecting, result); + } + + @Test public void testLRWithLabels() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {\n" + + " public static class LeafListener extends TBaseListener {\n" + + " public void exitCall(TParser.CallContext ctx) {\n" + + " System.out.printf(\"%s %s\",ctx.e().start.getText(),\n" + + " ctx.eList());\n" + + " }\n" + + " public void exitInt(TParser.IntContext ctx) {\n" + + " System.out.println(ctx.INT().getSymbol().getText());\n" + + " }\n" + + " }\n" + + "}\n" + + "s\n" + + "@after {" + + " System.out.println($r.ctx.toStringTree(this));" + + " ParseTreeWalker walker = new ParseTreeWalker();\n" + + " walker.walk(new LeafListener(), $r.ctx);" + + "}\n" + + " : r=e ;\n" + + "e : e '(' eList ')' # Call\n" + + " | INT # Int\n" + + " ; \n" + + "eList : e (',' e)* ;\n" + + "MULT: '*' ;\n" + + "ADD : '+' ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\t\\n]+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1(2,3)", false); + String expecting = + "(e (e 1) ( (eList (e 2) , (e 3)) ))\n" + + "1\n" + + "2\n" + + "3\n" + + "1 [13 6]\n"; + assertEquals(expecting, result); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestParseErrors.java b/tool/test/org/antlr/v4/test/tool/TestParseErrors.java new file mode 100644 index 000000000..523969b16 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestParseErrors.java @@ -0,0 +1,376 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** test runtime parse errors */ +public class TestParseErrors extends BaseTest { + @Test public void testTokenMismatch() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aa", false); + String expecting = "line 1:1 mismatched input 'a' expecting 'b'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testSingleTokenDeletion() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); + String expecting = "line 1:1 extraneous input 'a' expecting 'b'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testSingleTokenDeletionExpectingSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' ('b'|'c') ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); + String expecting = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testSingleTokenInsertion() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b' 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); + String expecting = "line 1:1 missing 'b' at 'c'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testConjuringUpToken() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); + String expecting = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; + assertEquals(expecting, result); + } + + @Test public void testSingleSetInsertion() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' ('b'|'c') 'd' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); + String expecting = "line 1:1 missing {'b', 'c'} at 'd'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testConjuringUpTokenFromSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); + String expecting = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; + assertEquals(expecting, result); + } + + @Test public void testLL2() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b'" + + " | 'a' 'c'" + + ";\n" + + "q : 'e' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ae", false); + String expecting = "line 1:1 no viable alternative at input 
'ae'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testLL3() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b'* 'c'" + + " | 'a' 'b' 'd'" + + " ;\n" + + "q : 'e' ;\n"; + System.out.println(grammar); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abe", false); + String expecting = "line 1:2 no viable alternative at input 'abe'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testLLStar() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a'+ 'b'" + + " | 'a'+ 'c'" + + ";\n" + + "q : 'e' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aaae", false); + String expecting = "line 1:3 no viable alternative at input 'aaae'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testSingleTokenDeletionBeforeLoop() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b'*;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); + String expecting = "line 1:1 extraneous input 'a' expecting {, 'b'}\n" + + "line 1:3 token recognition error at: 'c'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testMultiTokenDeletionBeforeLoop() throws Exception { + // can only delete 1 before loop + String grammar = + "grammar T;\n" + + "a : 'a' 'b'* 'c';"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); + String expecting = + "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testSingleTokenDeletionDuringLoop() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b'* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); + String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testMultiTokenDeletionDuringLoop() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' 'b'* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); + String expecting = + "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n" + + "line 1:6 extraneous input 'a' expecting {'b', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + // ------ + + @Test public void testSingleTokenDeletionBeforeLoop2() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' ('b'|'z'{;})*;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); + String expecting = "line 1:1 extraneous input 'a' expecting {, 'b', 'z'}\n" + + "line 1:3 token recognition error at: 'c'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testMultiTokenDeletionBeforeLoop2() throws Exception { + // can only delete 1 before loop + String grammar = + "grammar T;\n" + + "a : 'a' ('b'|'z'{;})* 'c';"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); + String expecting = + "line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testSingleTokenDeletionDuringLoop2() throws Exception { + String grammar = + "grammar 
T;\n" + + "a : 'a' ('b'|'z'{;})* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); + String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testMultiTokenDeletionDuringLoop2() throws Exception { + String grammar = + "grammar T;\n" + + "a : 'a' ('b'|'z'{;})* 'c' ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); + String expecting = + "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n" + + "line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testLL1ErrorInfo() throws Exception { + String grammar = + "grammar T;\n" + + "start : animal (AND acClass)? service EOF;\n" + + "animal : (DOG | CAT );\n" + + "service : (HARDWARE | SOFTWARE) ;\n" + + "AND : 'and';\n" + + "DOG : 'dog';\n" + + "CAT : 'cat';\n" + + "HARDWARE: 'hardware';\n" + + "SOFTWARE: 'software';\n" + + "WS : ' ' -> skip ;" + + "acClass\n" + + "@init\n" + + "{ System.out.println(getExpectedTokens().toString(tokenNames)); }\n" + + " : ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "dog and software", false); + String expecting = "{'hardware', 'software'}\n"; + assertEquals(expecting, result); + } + + /** + * This is a regression test for #6 "NullPointerException in getMissingSymbol". + * https://github.com/antlr/antlr4/issues/6 + */ + @Test + public void testInvalidEmptyInput() throws Exception { + String grammar = + "grammar T;\n" + + "start : ID+;\n" + + "ID : [a-z]+;\n" + + "\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "", true); + String expecting = ""; + assertEquals(expecting, result); + assertEquals("line 1:0 missing ID at ''\n", this.stderrDuringParse); + } + + /** + * Regression test for "Getter for context is not a list when it should be". + * https://github.com/antlr/antlr4/issues/19 + */ + @Test + public void testContextListGetters() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members{\n" + + " void foo() {\n" + + " SContext s = null;\n" + + " List a = s.a();\n" + + " List b = s.b();\n" + + " }\n" + + "}\n" + + "s : (a | b)+;\n" + + "a : 'a' {System.out.print('a');};\n" + + "b : 'b' {System.out.print('b');};\n" + + ""; + String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abab", true); + String expecting = "abab\n"; + assertEquals(expecting, result); + assertNull(this.stderrDuringParse); + } + + /** + * This is a regression test for #26 "an exception upon simple rule with double recursion in an alternative". 
+ * https://github.com/antlr/antlr4/issues/26 + */ + @Test + public void testDuplicatedLeftRecursiveCall() throws Exception { + String grammar = + "grammar T;\n" + + "start : expr EOF;\n" + + "expr : 'x'\n" + + " | expr expr\n" + + " ;\n" + + "\n"; + + String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x", true); + assertEquals("", result); + assertNull(this.stderrDuringParse); + + result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xx", true); + assertEquals("", result); + assertNull(this.stderrDuringParse); + + result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxx", true); + assertEquals("", result); + assertNull(this.stderrDuringParse); + + result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxxx", true); + assertEquals("", result); + assertNull(this.stderrDuringParse); + } + + /** + * This is a regression test for #45 "NullPointerException in ATNConfig.hashCode". + * https://github.com/antlr/antlr4/issues/45 + *

+ * The original cause of this issue was an error in the tool's ATN state optimization, + * which is now detected early in {@link ATNSerializer} by ensuring that all + * serialized transitions point to states which were not removed. + */ + @Test + public void testInvalidATNStateRemoval() throws Exception { + String grammar = + "grammar T;\n" + + "start : ID ':' expr;\n" + + "expr : primary expr? {} | expr '->' ID;\n" + + "primary : ID;\n" + + "ID : [a-z]+;\n" + + "\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x:x", true); + String expecting = ""; + assertEquals(expecting, result); + assertNull(this.stderrDuringParse); + } + + @Test public void testNoViableAltAvoidance() throws Exception { + // "a." matches 'a' to rule e but then realizes '.' won't match. + // previously would cause noviablealt. now prediction pretends to + // have "a' predict 2nd alt of e. Will get syntax error later so + // let it get farther. + String grammar = + "grammar T;\n" + + "s : e '!' ;\n" + + "e : 'a' 'b'\n" + + " | 'a'\n" + + " ;\n" + + "DOT : '.' ;\n" + + "WS : [ \\t\\r\\n]+ -> skip;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a.", false); + String expecting = + "line 1:1 mismatched input '.' expecting '!'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java b/tool/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java new file mode 100644 index 000000000..9a664c66b --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java @@ -0,0 +1,464 @@ +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.InputMismatchException; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.pattern.ParseTreeMatch; +import org.antlr.v4.runtime.tree.pattern.ParseTreePattern; +import org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher; +import org.junit.Test; + +import java.lang.reflect.Constructor; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class TestParseTreeMatcher extends BaseTest { + @Test public void testChunking() throws Exception { + ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); + assertEquals("[ID, ' = ', expr, ' ;']", m.split(" = ;").toString()); + assertEquals("[' ', ID, ' = ', expr]", m.split(" = ").toString()); + assertEquals("[ID, ' = ', expr]", m.split(" = ").toString()); + assertEquals("[expr]", m.split("").toString()); + assertEquals("[' foo']", m.split("\\ foo").toString()); + assertEquals("['foo bar ', tag]", m.split("foo \\ bar ").toString()); + } + + @Test public void testDelimiters() throws Exception { + ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); + m.setDelimiters("<<", ">>", "$"); + String result = m.split("<> = <> ;$<< ick $>>").toString(); + assertEquals("[ID, ' = ', expr, ' ;<< ick >>']", result); + } + + @Test public void testInvertedTags() throws Exception { + ParseTreePatternMatcher m= new ParseTreePatternMatcher(null, null); + 
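+        // ">expr<" has its tag delimiters out of order, so split() is expected to
+        // reject it with an IllegalArgumentException; the message is checked below.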
String result = null; + try { + m.split(">expr<"); + } + catch (IllegalArgumentException iae) { + result = iae.getMessage(); + } + String expected = "tag delimiters out of order in pattern: >expr<"; + assertEquals(expected, result); + } + + @Test public void testUnclosedTag() throws Exception { + ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); + String result = null; + try { + m.split(" >"); + } + catch (IllegalArgumentException iae) { + result = iae.getMessage(); + } + String expected = "missing start tag in pattern: >"; + assertEquals(expected, result); + } + + @Test public void testTokenizingPattern() throws Exception { + String grammar = + "grammar X1;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X1.g4", grammar, "X1Parser", "X1Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X1"); + + List tokens = m.tokenize(" = ;"); + String results = tokens.toString(); + String expected = "[ID:3, [@-1,1:1='=',<1>,1:1], expr:7, [@-1,1:1=';',<2>,1:1]]"; + assertEquals(expected, results); + } + + @Test + public void testCompilingPattern() throws Exception { + String grammar = + "grammar X2;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X2"); + + ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); + String results = t.getPatternTree().toStringTree(m.getParser()); + String expected = "(s = (expr ) ;)"; + assertEquals(expected, results); + } + + @Test + public void testCompilingPatternConsumesAllTokens() throws Exception { + String grammar = + "grammar X2;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X2"); + + boolean failed = false; + try { + m.compile(" = ; extra", m.getParser().getRuleIndex("s")); + } + catch (ParseTreePatternMatcher.StartRuleDoesNotConsumeFullPattern e) { + failed = true; + } + assertTrue(failed); + } + + @Test + public void testPatternMatchesStartRule() throws Exception { + String grammar = + "grammar X2;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X2"); + + boolean failed = false; + try { + m.compile(" ;", m.getParser().getRuleIndex("s")); + } + catch (InputMismatchException e) { + failed = true; + } + assertTrue(failed); + } + + @Test + public void testPatternMatchesStartRule2() throws Exception { + String grammar = + "grammar X2;\n" + + "s : ID '=' expr ';' | expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X2"); + + boolean failed = false; + try { + 
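+            // This pattern is not derivable from rule "s", so compiling it should
+            // fail while parsing; the NoViableAltException is caught just below.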
m.compile(" ;", m.getParser().getRuleIndex("s")); + } + catch (NoViableAltException e) { + failed = true; + } + assertTrue(failed); + } + + @Test + public void testHiddenTokensNotSeenByTreePatternParser() throws Exception { + String grammar = + "grammar X2;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> channel(HIDDEN) ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X2"); + + ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); + String results = t.getPatternTree().toStringTree(m.getParser()); + String expected = "(s = (expr ) ;)"; + assertEquals(expected, results); + } + + @Test + public void testCompilingMultipleTokens() throws Exception { + String grammar = + "grammar X2;\n" + + "s : ID '=' ID ';' ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + boolean ok = + rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); + assertTrue(ok); + + ParseTreePatternMatcher m = getPatternMatcher("X2"); + + ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); + String results = t.getPatternTree().toStringTree(m.getParser()); + String expected = "(s = ;)"; + assertEquals(expected, results); + } + + @Test public void testIDNodeMatches() throws Exception { + String grammar = + "grammar X3;\n" + + "s : ID ';' ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x ;"; + String pattern = ";"; + checkPatternMatch(grammar, "s", input, pattern, "X3"); + } + + @Test public void testIDNodeWithLabelMatches() throws Exception { + String grammar = + "grammar X8;\n" + + "s : ID ';' ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x ;"; + String pattern = ";"; + ParseTreeMatch m = checkPatternMatch(grammar, "s", input, pattern, "X8"); + assertEquals("{ID=[x], id=[x]}", m.getLabels().toString()); + assertNotNull(m.get("id")); + assertNotNull(m.get("ID")); + assertEquals("x", m.get("id").getText()); + assertEquals("x", m.get("ID").getText()); + assertEquals("[x]", m.getAll("id").toString()); + assertEquals("[x]", m.getAll("ID").toString()); + + assertNull(m.get("undefined")); + assertEquals("[]", m.getAll("undefined").toString()); + } + + @Test public void testLabelGetsLastIDNode() throws Exception { + String grammar = + "grammar X9;\n" + + "s : ID ID ';' ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x y;"; + String pattern = " ;"; + ParseTreeMatch m = checkPatternMatch(grammar, "s", input, pattern, "X9"); + assertEquals("{ID=[x, y], id=[x, y]}", m.getLabels().toString()); + assertNotNull(m.get("id")); + assertNotNull(m.get("ID")); + assertEquals("y", m.get("id").getText()); + assertEquals("y", m.get("ID").getText()); + assertEquals("[x, y]", m.getAll("id").toString()); + assertEquals("[x, y]", m.getAll("ID").toString()); + + assertNull(m.get("undefined")); + assertEquals("[]", m.getAll("undefined").toString()); + } + + @Test public void testIDNodeWithMultipleLabelMatches() throws Exception { + String grammar = + "grammar X7;\n" + + "s : ID ID ID ';' ;\n" + + "ID : [a-z]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x y z;"; + String pattern = " ;"; + ParseTreeMatch m = checkPatternMatch(grammar, "s", input, pattern, "X7"); + assertEquals("{ID=[x, y, z], a=[x, z], b=[y]}", m.getLabels().toString()); + 
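+        // For a label bound to several nodes, get() yields the last node matched
+        // (the "z" checks below), while getAll() returns every match in order.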
assertNotNull(m.get("a")); // get first + assertNotNull(m.get("b")); + assertNotNull(m.get("ID")); + assertEquals("z", m.get("a").getText()); + assertEquals("y", m.get("b").getText()); + assertEquals("z", m.get("ID").getText()); // get last + assertEquals("[x, z]", m.getAll("a").toString()); + assertEquals("[y]", m.getAll("b").toString()); + assertEquals("[x, y, z]", m.getAll("ID").toString()); // ordered + + assertEquals("xyz;", m.getTree().getText()); // whitespace stripped by lexer + + assertNull(m.get("undefined")); + assertEquals("[]", m.getAll("undefined").toString()); + } + + @Test public void testTokenAndRuleMatch() throws Exception { + String grammar = + "grammar X4;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x = 99;"; + String pattern = " = ;"; + checkPatternMatch(grammar, "s", input, pattern, "X4"); + } + + @Test public void testTokenTextMatch() throws Exception { + String grammar = + "grammar X4;\n" + + "s : ID '=' expr ';' ;\n" + + "expr : ID | INT ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x = 0;"; + String pattern = " = 1;"; + boolean invertMatch = true; // 0!=1 + checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); + + input = "x = 0;"; + pattern = " = 0;"; + invertMatch = false; + checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); + + input = "x = 0;"; + pattern = "x = 0;"; + invertMatch = false; + checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); + + input = "x = 0;"; + pattern = "y = 0;"; + invertMatch = true; + checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); + } + + @Test public void testAssign() throws Exception { + String grammar = + "grammar X5;\n" + + "s : expr ';'\n" + + //" | 'return' expr ';'\n" + + " ;\n" + + "expr: expr '.' ID\n" + + " | expr '*' expr\n" + + " | expr '=' expr\n" + + " | ID\n" + + " | INT\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "x = 99;"; + String pattern = " = ;"; + checkPatternMatch(grammar, "s", input, pattern, "X5"); + } + + @Test public void testLRecursiveExpr() throws Exception { + String grammar = + "grammar X6;\n" + + "s : expr ';'\n" + + " ;\n" + + "expr: expr '.' 
ID\n" + + " | expr '*' expr\n" + + " | expr '=' expr\n" + + " | ID\n" + + " | INT\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "WS : [ \\r\\n\\t]+ -> skip ;\n"; + + String input = "3*4*5"; + String pattern = " * * "; + checkPatternMatch(grammar, "expr", input, pattern, "X6"); + } + + public ParseTreeMatch checkPatternMatch(String grammar, String startRule, + String input, String pattern, + String grammarName) + throws Exception + { + return checkPatternMatch(grammar, startRule, input, pattern, grammarName, false); + } + + public ParseTreeMatch checkPatternMatch(String grammar, String startRule, + String input, String pattern, + String grammarName, boolean invertMatch) + throws Exception + { + String grammarFileName = grammarName+".g4"; + String parserName = grammarName+"Parser"; + String lexerName = grammarName+"Lexer"; + boolean ok = + rawGenerateAndBuildRecognizer(grammarFileName, grammar, parserName, lexerName, false); + assertTrue(ok); + + ParseTree result = execParser(startRule, input, parserName, lexerName); + + ParseTreePattern p = getPattern(grammarName, pattern, startRule); + ParseTreeMatch match = p.match(result); + boolean matched = match.succeeded(); + if ( invertMatch ) assertFalse(matched); + else assertTrue(matched); + return match; + } + + public ParseTreePattern getPattern(String grammarName, String pattern, String ruleName) + throws Exception + { + Class lexerClass = loadLexerClassFromTempDir(grammarName + "Lexer"); + Constructor ctor = lexerClass.getConstructor(CharStream.class); + Lexer lexer = ctor.newInstance((CharStream) null); + + Class parserClass = loadParserClassFromTempDir(grammarName + "Parser"); + Constructor pctor = parserClass.getConstructor(TokenStream.class); + Parser parser = pctor.newInstance(new CommonTokenStream(lexer)); + + return parser.compileParseTreePattern(pattern, parser.getRuleIndex(ruleName)); + } + + public ParseTreePatternMatcher getPatternMatcher(String grammarName) + throws Exception + { + Class lexerClass = loadLexerClassFromTempDir(grammarName + "Lexer"); + Constructor ctor = lexerClass.getConstructor(CharStream.class); + Lexer lexer = ctor.newInstance((CharStream) null); + + Class parserClass = loadParserClassFromTempDir(grammarName + "Parser"); + Constructor pctor = parserClass.getConstructor(TokenStream.class); + Parser parser = pctor.newInstance(new CommonTokenStream(lexer)); + + return new ParseTreePatternMatcher(lexer, parser); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestParseTrees.java b/tool/test/org/antlr/v4/test/tool/TestParseTrees.java new file mode 100644 index 000000000..d9fd5b9ac --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestParseTrees.java @@ -0,0 +1,154 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestParseTrees extends BaseTest { + @Test public void testTokenAndRuleContextString() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " :r=a ;\n" + + "a : 'x' {System.out.println(getRuleInvocationStack());} ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); + String expecting = "[a, s]\n(a x)\n"; + assertEquals(expecting, result); + } + + @Test public void testToken2() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " :r=a ;\n" + + "a : 'x' 'y'\n" + + " ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xy", false); + String expecting = "(a x y)\n"; + assertEquals(expecting, result); + } + + @Test public void test2Alts() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " :r=a ;\n" + + "a : 'x' | 'y'\n" + + " ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y", false); + String expecting = "(a y)\n"; + assertEquals(expecting, result); + } + + @Test public void test2AltLoop() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " :r=a ;\n" + + "a : ('x' | 'y')* 'z'\n" + + " ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xyyxyxz", false); + String expecting = "(a x y y x y x z)\n"; + assertEquals(expecting, result); + } + + @Test public void testRuleRef() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " : r=a ;\n" + + "a : b 'x'\n" + + " ;\n" + + "b : 'y' ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "yx", false); + String expecting = "(a (b y) x)\n"; + assertEquals(expecting, result); + } + + // ERRORS + + @Test public void testExtraToken() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " : r=a ;\n" + + "a : 'x' 'y'\n" + + " ;\n" + + "Z : 'z'; \n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzy", false); + String expecting = "(a x z y)\n"; // ERRORs not shown. 
z is colored red in tree view + assertEquals(expecting, result); + } + + @Test public void testNoViableAlt() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " : r=a ;\n" + + "a : 'x' | 'y'\n" + + " ;\n" + + "Z : 'z'; \n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "z", false); + String expecting = "(a z)\n"; + assertEquals(expecting, result); + } + + @Test public void testSync() throws Exception { + String grammar = + "grammar T;\n" + + "s\n" + + "@init {setBuildParseTree(true);}\n" + + "@after {System.out.println($r.ctx.toStringTree(this));}\n" + + " : r=a ;\n" + + "a : 'x' 'y'* '!'\n" + + " ;\n" + + "Z : 'z'; \n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzyy!", false); + String expecting = "(a x z y y !)\n"; + assertEquals(expecting, result); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestParserExec.java b/tool/test/org/antlr/v4/test/tool/TestParserExec.java new file mode 100644 index 000000000..a935486d8 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestParserExec.java @@ -0,0 +1,597 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Ignore; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** Test parser execution. + * + * For the non-greedy stuff, the rule is that .* or any other non-greedy loop + * (any + or * loop that has an alternative with '.' in it is automatically + * non-greedy) never sees past the end of the rule containing that loop. + * There is no automatic way to detect when the exit branch of a non-greedy + * loop has seen enough input to determine how much the loop should consume + * yet still allow matching the entire input. 
Of course, this is extremely
+ * inefficient, particularly for things like
+ *
+ * block : '{' (block|.)* '}' ;
+ *
+ * that need only see one symbol to know when it hits a '}'. So, I
+ * came up with a practical solution. During prediction, the ATN
+ * simulator never falls off the end of a rule to compute the global
+ * FOLLOW. Instead, we terminate the loop, choosing the exit branch.
+ * Otherwise, we predict to reenter the loop. For example, input
+ * "{ foo }" will allow the loop to match foo, but that's it. During
+ * prediction, the ATN simulator will see that '}' reaches the end of a
+ * rule that contains a non-greedy loop and stop prediction. It will choose
+ * the exit branch of the inner loop. So, the way in which you construct
+ * the rule containing a non-greedy loop dictates how far it will scan ahead.
+ * Include everything after the non-greedy loop that you know it must scan
+ * in order to properly make a prediction decision. These beasts are tricky,
+ * so be careful. Don't liberally sprinkle them around your code.
+ *
+ * To simulate filter mode, use ( .* (pattern1|pattern2|...) )*
+ *
+ * Non-greedy loops match as much input as possible while still allowing
+ * the remaining input to match.
+ */
+public class TestParserExec extends BaseTest {
+    @Test public void testLabels() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "a : b1=b b2+=b* b3+=';' ;\n" +
+            "b : id=ID val+=INT*;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') -> skip ;\n";
+
+        String found = execParser("T.g4", grammar, "TParser", "TLexer", "a",
+                                  "abc 34;", false);
+        assertEquals("", found);
+        assertEquals(null, stderrDuringParse);
+    }
+
+    /**
+     * This is a regression test for #270 "Fix operator += applied to a set of
+     * tokens".
+     * https://github.com/antlr/antlr4/issues/270
+     */
+    @Test public void testListLabelOnSet() {
+        String grammar =
+            "grammar T;\n" +
+            "a : b b* ';' ;\n" +
+            "b : ID val+=(INT | FLOAT)*;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "FLOAT : [0-9]+ '.'
[0-9]+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "abc 34;", false); + assertEquals("", found); + assertEquals(null, stderrDuringParse); + } + + @Test public void testBasic() throws Exception { + String grammar = + "grammar T;\n" + + "a : ID INT {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "abc 34", false); + assertEquals("abc34\n", found); + } + + @Test public void testAorB() throws Exception { + String grammar = + "grammar T;\n" + + "a : ID {System.out.println(\" alt 1\");}" + + " | INT {System.out.println(\"alt 2\");}" + + ";\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "34", false); + assertEquals("alt 2\n", found); + } + + @Test public void testAPlus() throws Exception { + String grammar = + "grammar T;\n" + + "a : ID+ {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a b c", false); + assertEquals("abc\n", found); + } + + // force complex decision + @Test public void testAorAPlus() throws Exception { + String grammar = + "grammar T;\n" + + "a : (ID|ID)+ {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a b c", false); + assertEquals("abc\n", found); + } + + private static final String ifIfElseGrammarFormat = + "grammar T;\n" + + "start : statement+ ;\n" + + "statement : 'x' | ifStatement;\n" + + "ifStatement : 'if' 'y' statement %s {System.out.println($text);};\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> channel(HIDDEN);\n"; + + @Test public void testIfIfElseGreedyBinding1() throws Exception { + final String input = "if y if y x else x"; + final String expectedInnerBound = "if y x else x\nif y if y x else x\n"; + + String grammar = String.format(ifIfElseGrammarFormat, "('else' statement)?"); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); + assertEquals(expectedInnerBound, found); + + } + + @Test public void testIfIfElseGreedyBinding2() throws Exception { + final String input = "if y if y x else x"; + final String expectedInnerBound = "if y x else x\nif y if y x else x\n"; + + String grammar = String.format(ifIfElseGrammarFormat, "('else' statement|)"); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); + assertEquals(expectedInnerBound, found); + } + + @Test public void testIfIfElseNonGreedyBinding() throws Exception { + final String input = "if y if y x else x"; + final String expectedOuterBound = "if y x\nif y if y x else x\n"; + + String grammar = String.format(ifIfElseGrammarFormat, "('else' statement)??"); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); + assertEquals(expectedOuterBound, found); + + grammar = String.format(ifIfElseGrammarFormat, "(|'else' statement)"); + found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); + assertEquals(expectedOuterBound, found); + } + + @Test public void testAStar() throws Exception { + String grammar = + "grammar T;\n" + + "a : ID* {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') 
-> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "", false); + assertEquals("\n", found); + found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a b c", false); + assertEquals("abc\n", found); + } + + @Test public void testLL1OptionalBlock() throws Exception { + String grammar = + "grammar T;\n" + + "a : (ID|{}INT)? {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "", false); + assertEquals("\n", found); + found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a", false); + assertEquals("a\n", found); + } + + // force complex decision + @Test public void testAorAStar() throws Exception { + String grammar = + "grammar T;\n" + + "a : (ID|ID)* {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "", false); + assertEquals("\n", found); + found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a b c", false); + assertEquals("abc\n", found); + } + + @Test public void testAorBPlus() throws Exception { + String grammar = + "grammar T;\n" + + "a : (ID|INT{;})+ {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a 34 c", false); + assertEquals("a34c\n", found); + } + + @Test public void testAorBStar() throws Exception { + String grammar = + "grammar T;\n" + + "a : (ID|INT{;})* {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "", false); + assertEquals("\n", found); + found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a 34 c", false); + assertEquals("a34c\n", found); + } + + + /** + * This test is meant to detect regressions of bug antlr/antlr4#41. 
+ * https://github.com/antlr/antlr4/issues/41 + */ + @Test + public void testOptional1() throws Exception { + String grammar = + "grammar T;\n" + + "stat : ifstat | 'x';\n" + + "ifstat : 'if' stat ('else' stat)?;\n" + + "WS : [ \\n\\t]+ -> skip ;" + ; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptional2() throws Exception { + String grammar = + "grammar T;\n" + + "stat : ifstat | 'x';\n" + + "ifstat : 'if' stat ('else' stat)?;\n" + + "WS : [ \\n\\t]+ -> skip ;" + ; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if x else x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptional3() throws Exception { + String grammar = + "grammar T;\n" + + "stat : ifstat | 'x';\n" + + "ifstat : 'if' stat ('else' stat)?;\n" + + "WS : [ \\n\\t]+ -> skip ;" + ; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testOptional4() throws Exception { + String grammar = + "grammar T;\n" + + "stat : ifstat | 'x';\n" + + "ifstat : 'if' stat ('else' stat)?;\n" + + "WS : [ \\n\\t]+ -> skip ;" + ; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if if x else x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + /** + * This test is meant to test the expected solution to antlr/antlr4#42. + * https://github.com/antlr/antlr4/issues/42 + */ + @Test + public void testPredicatedIfIfElse() throws Exception { + String grammar = + "grammar T;\n" + + "s : stmt EOF ;\n" + + "stmt : ifStmt | ID;\n" + + "ifStmt : 'if' ID stmt ('else' stmt | {_input.LA(1) != ELSE}?);\n" + + "ELSE : 'else';\n" + + "ID : [a-zA-Z]+;\n" + + "WS : [ \\n\\t]+ -> skip;\n" + ; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "if x if x a else b", true); + String expecting = ""; + assertEquals(expecting, found); + assertNull(this.stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#118. 
+ * https://github.com/antlr/antlr4/issues/118 + */ + @Ignore("Performance impact of passing this test may not be worthwhile") + @Test public void testStartRuleWithoutEOF() { + String grammar = + "grammar T;\n"+ + "s @after {dumpDFA();}\n" + + " : ID | ID INT ID ;\n" + + "ID : 'a'..'z'+ ;\n"+ + "INT : '0'..'9'+ ;\n"+ + "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; + String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "abc 34", true); + String expecting = + "Decision 0:\n" + + "s0-ID->s1\n" + + "s1-INT->s2\n" + + "s2-EOF->:s3=>1\n"; // Must point at accept state + assertEquals(expecting, result); + assertNull(this.stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#195 "label 'label' type + * mismatch with previous definition: TOKEN_LABEL!=RULE_LABEL" + * https://github.com/antlr/antlr4/issues/195 + */ + @Test public void testLabelAliasingAcrossLabeledAlternatives() throws Exception { + String grammar = + "grammar T;\n" + + "start : a* EOF;\n" + + "a\n" + + " : label=subrule {System.out.println($label.text);} #One\n" + + " | label='y' {System.out.println($label.text);} #Two\n" + + " ;\n" + + "subrule : 'x';\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", + "xy", false); + assertEquals("x\ny\n", found); + } + + /** + * This is a regression test for antlr/antlr4#334 "BailErrorStrategy: bails + * out on proper input". + * https://github.com/antlr/antlr4/issues/334 + */ + @Test public void testPredictionIssue334() { + String grammar = + "grammar T;\n" + + "\n" + + "file @init{setErrorHandler(new BailErrorStrategy());} \n" + + "@after {System.out.println($ctx.toStringTree(this));}\n" + + " : item (SEMICOLON item)* SEMICOLON? EOF ;\n" + + "item : A B?;\n" + + "\n" + + "\n" + + "\n" + + "SEMICOLON: ';';\n" + + "\n" + + "A : 'a'|'A';\n" + + "B : 'b'|'B';\n" + + "\n" + + "WS : [ \\r\\t\\n]+ -> skip;\n"; + + String input = "a"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "file", input, false); + assertEquals("(file (item a) )\n", found); + assertNull(stderrDuringParse); + } + + /** + * This is a regressino test for antlr/antlr4#299 "Repeating subtree not + * accessible in visitor". + * https://github.com/antlr/antlr4/issues/299 + */ + @Test public void testListLabelForClosureContext() throws Exception { + String grammar = + "grammar T;\n" + + "ifStatement\n" + + "@after { List items = $ctx.elseIfStatement(); }\n" + + " : 'if' expression\n" + + " ( ( 'then'\n" + + " executableStatement*\n" + + " elseIfStatement* // <--- problem is here\n" + + " elseStatement?\n" + + " 'end' 'if'\n" + + " ) | executableStatement )\n" + + " ;\n" + + "\n" + + "elseIfStatement\n" + + " : 'else' 'if' expression 'then' executableStatement*\n" + + " ;\n" + + "expression : 'a' ;\n" + + "executableStatement : 'a' ;\n" + + "elseStatement : 'a' ;\n"; + String input = "a"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "expression", input, false); + assertEquals("", found); + assertNull(stderrDuringParse); + } + + /** + * This test ensures that {@link ParserATNSimulator} produces a correct + * result when the grammar contains multiple explicit references to + * {@code EOF} inside of parser rules. 
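+     * <p/>
+     * A minimal, illustrative driver for the grammar below (not what this test
+     * runs, which goes through {@code execParser}; {@code TLexer} and
+     * {@code TParser} are the classes ANTLR generates for grammar {@code T}):
+     * <pre>{@code
+     * TLexer lexer = new TLexer(new ANTLRInputStream("x"));
+     * TParser parser = new TParser(new CommonTokenStream(lexer));
+     * parser.prog(); // parses "x"; both EOF references match end of input
+     * }</pre>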
+ */ + @Test + public void testMultipleEOFHandling() throws Exception { + String grammar = + "grammar T;\n" + + "prog : ('x' | 'x' 'y') EOF EOF;\n"; + String input = "x"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", input, false); + assertEquals("", found); + assertNull(stderrDuringParse); + } + + /** + * This test ensures that {@link ParserATNSimulator} does not produce a + * {@link StackOverflowError} when it encounters an {@code EOF} transition + * inside a closure. + */ + @Test + public void testEOFInClosure() throws Exception { + String grammar = + "grammar T;\n" + + "prog : stat EOF;\n" + + "stat : 'x' ('y' | EOF)*?;\n"; + String input = "x"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", input, false); + assertEquals("", found); + assertNull(stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#561 "Issue with parser + * generation in 4.2.2" + * https://github.com/antlr/antlr4/issues/561 + */ + @Test public void testReferenceToATN() throws Exception { + String grammar = + "grammar T;\n" + + "a : (ID|ATN)* ATN? {System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "ATN : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "", false); + assertEquals("\n", found); + found = execParser("T.g4", grammar, "TParser", "TLexer", "a", + "a 34 c", false); + assertEquals("a34c\n", found); + } + + /** + * This is a regression test for antlr/antlr4#588 "ClassCastException during + * semantic predicate handling". + * https://github.com/antlr/antlr4/issues/588 + */ + @Test public void testFailedPredicateExceptionState() throws Exception { + String grammar = load("Psl.g4", "UTF-8"); + String found = execParser("Psl.g4", grammar, "PslParser", "PslLexer", "floating_constant", " . 234", false); + assertEquals("", found); + assertEquals("line 1:6 rule floating_constant DEC:A floating-point constant cannot have internal white space\n", stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#563 "Inconsistent token + * handling in ANTLR4". + * https://github.com/antlr/antlr4/issues/563 + */ + @Test public void testAlternateQuotes() throws Exception { + String lexerGrammar = + "lexer grammar ModeTagsLexer;\n" + + "\n" + + "// Default mode rules (the SEA)\n" + + "OPEN : '«' -> mode(ISLAND) ; // switch to ISLAND mode\n" + + "TEXT : ~'«'+ ; // clump all text together\n" + + "\n" + + "mode ISLAND;\n" + + "CLOSE : '»' -> mode(DEFAULT_MODE) ; // back to SEA mode \n" + + "SLASH : '/' ;\n" + + "ID : [a-zA-Z]+ ; // match/send ID in tag to parser\n"; + String parserGrammar = + "parser grammar ModeTagsParser;\n" + + "\n" + + "options { tokenVocab=ModeTagsLexer; } // use tokens from ModeTagsLexer.g4\n" + + "\n" + + "file: (tag | TEXT)* ;\n" + + "\n" + + "tag : '«' ID '»'\n" + + " | '«' '/' ID '»'\n" + + " ;"; + + boolean success = rawGenerateAndBuildRecognizer("ModeTagsLexer.g4", + lexerGrammar, + null, + "ModeTagsLexer"); + assertTrue(success); + + String found = execParser("ModeTagsParser.g4", parserGrammar, "ModeTagsParser", "ModeTagsLexer", "file", "", false); + assertEquals("", found); + assertNull(stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#672 "Initialization failed in + * locals". 
+ * https://github.com/antlr/antlr4/issues/672 + */ + @Test public void testAttributeValueInitialization() throws Exception { + String grammar = + "grammar Data; \n" + + "\n" + + "file : group+ EOF; \n" + + "\n" + + "group: INT sequence {System.out.println($sequence.values.size());} ; \n" + + "\n" + + "sequence returns [List values = new ArrayList()] \n" + + " locals[List localValues = new ArrayList()]\n" + + " : (INT {$localValues.add($INT.int);})* {$values.addAll($localValues);}\n" + + "; \n" + + "\n" + + "INT : [0-9]+ ; // match integers \n" + + "WS : [ \\t\\n\\r]+ -> skip ; // toss out all whitespace\n"; + + String input = "2 9 10 3 1 2 3"; + String found = execParser("Data.g4", grammar, "DataParser", "DataLexer", "file", input, false); + assertEquals("6\n", found); + assertNull(stderrDuringParse); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestParserInterpreter.java b/tool/test/org/antlr/v4/test/tool/TestParserInterpreter.java new file mode 100644 index 000000000..10b45c2d0 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestParserInterpreter.java @@ -0,0 +1,235 @@ +/* + * [The "BSD license"] + * Copyright (c) 2013 Terence Parr + * Copyright (c) 2013 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.LexerInterpreter; +import org.antlr.v4.runtime.ParserInterpreter; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + + +public class TestParserInterpreter extends BaseTest { + @Test public void testA() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : A ;", + lg); + + testInterp(lg, g, "s", "a", "(s a)"); + } + + @Test public void testAorB() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "s : A{;} | B ;", + lg); + testInterp(lg, g, "s", "a", "(s a)"); + testInterp(lg, g, "s", "b", "(s b)"); + } + + @Test public void testCall() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "s : t C ;\n" + + "t : A{;} | B ;\n", + lg); + + testInterp(lg, g, "s", "ac", "(s (t a) c)"); + testInterp(lg, g, "s", "bc", "(s (t b) c)"); + } + + @Test public void testCall2() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n"+ + "s : t C ;\n" + + "t : u ;\n" + + "u : A{;} | B ;\n", + lg); + + testInterp(lg, g, "s", "ac", "(s (t (u a)) c)"); + testInterp(lg, g, "s", "bc", "(s (t (u b)) c)"); + } + + @Test public void testOptionalA() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : A? B ;\n", + lg); + + testInterp(lg, g, "s", "b", "(s b)"); + testInterp(lg, g, "s", "ab", "(s a b)"); + } + + @Test public void testOptionalAorB() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : (A{;}|B)? 
C ;\n", + lg); + + testInterp(lg, g, "s", "c", "(s c)"); + testInterp(lg, g, "s", "ac", "(s a c)"); + testInterp(lg, g, "s", "bc", "(s b c)"); + } + + @Test public void testStarA() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : A* B ;\n", + lg); + + testInterp(lg, g, "s", "b", "(s b)"); + testInterp(lg, g, "s", "ab", "(s a b)"); + testInterp(lg, g, "s", "aaaaaab", "(s a a a a a a b)"); + } + + @Test public void testStarAorB() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : (A{;}|B)* C ;\n", + lg); + + testInterp(lg, g, "s", "c", "(s c)"); + testInterp(lg, g, "s", "ac", "(s a c)"); + testInterp(lg, g, "s", "bc", "(s b c)"); + testInterp(lg, g, "s", "abaaabc", "(s a b a a a b c)"); + testInterp(lg, g, "s", "babac", "(s b a b a c)"); + } + + @Test public void testLeftRecursion() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "PLUS : '+' ;\n" + + "MULT : '*' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : e ;\n" + + "e : e MULT e\n" + + " | e PLUS e\n" + + " | A\n" + + " ;\n", + lg); + + testInterp(lg, g, "s", "a", "(s (e a))"); + testInterp(lg, g, "s", "a+a", "(s (e (e a) + (e a)))"); + testInterp(lg, g, "s", "a*a", "(s (e (e a) * (e a)))"); + testInterp(lg, g, "s", "a+a+a", "(s (e (e (e a) + (e a)) + (e a)))"); + testInterp(lg, g, "s", "a*a+a", "(s (e (e (e a) * (e a)) + (e a)))"); + testInterp(lg, g, "s", "a+a*a", "(s (e (e a) + (e (e a) * (e a))))"); + } + + /** + * This is a regression test for antlr/antlr4#461. + * https://github.com/antlr/antlr4/issues/461 + */ + @Test public void testLeftRecursiveStartRule() throws Exception { + LexerGrammar lg = new LexerGrammar( + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n" + + "C : 'c' ;\n" + + "PLUS : '+' ;\n" + + "MULT : '*' ;\n"); + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : e ;\n" + + "e : e MULT e\n" + + " | e PLUS e\n" + + " | A\n" + + " ;\n", + lg); + + testInterp(lg, g, "e", "a", "(e a)"); + testInterp(lg, g, "e", "a+a", "(e (e a) + (e a))"); + testInterp(lg, g, "e", "a*a", "(e (e a) * (e a))"); + testInterp(lg, g, "e", "a+a+a", "(e (e (e a) + (e a)) + (e a))"); + testInterp(lg, g, "e", "a*a+a", "(e (e (e a) * (e a)) + (e a))"); + testInterp(lg, g, "e", "a+a*a", "(e (e a) + (e (e a) * (e a)))"); + } + + void testInterp(LexerGrammar lg, Grammar g, + String startRule, String input, + String parseTree) + { + LexerInterpreter lexEngine = lg.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream tokens = new CommonTokenStream(lexEngine); + ParserInterpreter parser = g.createParserInterpreter(tokens); + ParseTree t = parser.parse(g.rules.get(startRule).index); + System.out.println("parse tree: "+t.toStringTree(parser)); + assertEquals(parseTree, t.toStringTree(parser)); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java b/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java new file mode 100644 index 000000000..09d9e4a12 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java @@ -0,0 +1,280 @@ +/* + * [The "BSD license"] + * Copyright (c) 2014 Terence Parr + * Copyright (c) 2014 Sam Harwell + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.LexerInterpreter; +import org.antlr.v4.runtime.ParserInterpreter; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.atn.DecisionInfo; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.Rule; +import org.junit.Ignore; +import org.junit.Test; + +import java.util.Arrays; + +import static org.junit.Assert.assertEquals; + +public class TestParserProfiler extends BaseTest { + LexerGrammar lg; + + @Override + public void setUp() throws Exception { + super.setUp(); + lg = new LexerGrammar( + "lexer grammar L;\n" + + "WS : [ \\r\\t\\n]+ -> channel(HIDDEN) ;\n" + + "SEMI : ';' ;\n" + + "DOT : '.' 
;\n" + + "ID : [a-zA-Z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "PLUS : '+' ;\n" + + "MULT : '*' ;\n"); + } + + @Test public void testLL1() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : ';'{}\n" + + " | '.'\n" + + " ;\n", + lg); + + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", ";"); + assertEquals(1, info.length); + String expecting = + "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=1, " + + "SLL_ATNTransitions=1, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; + assertEquals(expecting, info[0].toString()); + } + + @Test public void testLL2() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : ID ';'{}\n" + + " | ID '.'\n" + + " ;\n", + lg); + + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "xyz;"); + assertEquals(1, info.length); + String expecting = + "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, " + + "SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; + assertEquals(expecting, info[0].toString()); + } + + @Test public void testRepeatedLL2() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : ID ';'{}\n" + + " | ID '.'\n" + + " ;\n", + lg); + + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "xyz;", "abc;"); + assertEquals(1, info.length); + String expecting = + "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=4, " + + "SLL_ATNTransitions=2, SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; + assertEquals(expecting, info[0].toString()); + } + + @Test public void test3xLL2() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : ID ';'{}\n" + + " | ID '.'\n" + + " ;\n", + lg); + + // The '.' vs ';' causes another ATN transition + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "xyz;", "abc;", "z."); + assertEquals(1, info.length); + String expecting = + "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + + "SLL_ATNTransitions=3, SLL_DFATransitions=3, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; + assertEquals(expecting, info[0].toString()); + } + + @Test public void testOptional() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : ID ('.' ID)? ';'\n" + + " | ID INT \n" + + " ;\n", + lg); + + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "a.b;"); + assertEquals(2, info.length); + String expecting = + "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=1, " + + "SLL_ATNTransitions=1, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}, " + + "{decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, " + + "SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]"; + assertEquals(expecting, Arrays.toString(info)); + } + + @Test public void test2xOptional() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : ID ('.' ID)? 
';'\n" + + " | ID INT \n" + + " ;\n", + lg); + + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "a.b;", "a.b;"); + assertEquals(2, info.length); + String expecting = + "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, " + + "SLL_ATNTransitions=1, SLL_DFATransitions=1, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}, " + + "{decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=4, " + + "SLL_ATNTransitions=2, SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]"; + assertEquals(expecting, Arrays.toString(info)); + } + + @Test public void testContextSensitivity() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n"+ + "a : '.' e ID \n" + + " | ';' e INT ID ;\n" + + "e : INT | ;\n", + lg); + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "a", "; 1 x"); + assertEquals(2, info.length); + String expecting = + "{decision=1, contextSensitivities=1, errors=0, ambiguities=0, SLL_lookahead=3, SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=1, LL_lookahead=3, LL_ATNTransitions=2}"; + assertEquals(expecting, info[1].toString()); + } + + @Ignore + @Test public void testSimpleLanguage() throws Exception { + Grammar g = new Grammar(TestXPath.grammar); + String input = + "def f(x,y) { x = 3+4*1*1/5*1*1+1*1+1; y; ; }\n" + + "def g(x,a,b,c,d,e) { return 1+2*x; }\n"+ + "def h(x) { a=3; x=0+1; return a*x; }\n"; + DecisionInfo[] info = interpAndGetDecisionInfo(g.getImplicitLexer(), g, "prog", input); + String expecting = + "[{decision=0, contextSensitivities=1, errors=0, ambiguities=0, SLL_lookahead=3, " + + "SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=1, LL_ATNTransitions=1}]"; + + + assertEquals(expecting, Arrays.toString(info)); + assertEquals(1, info.length); + } + + @Ignore + @Test public void testDeepLookahead() throws Exception { + Grammar g = new Grammar( + "parser grammar T;\n" + + "s : e ';'\n" + + " | e '.' \n" + + " ;\n" + + "e : (ID|INT) ({true}? '+' e)*\n" + // d=1 entry, d=2 bypass + " ;\n", + lg); + + // pred forces to + // ambig and ('+' e)* tail recursion forces lookahead to fall out of e + // any non-precedence predicates are always evaluated as true by the interpreter + DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "a+b+c;"); + // at "+b" it uses k=1 and enters loop then calls e for b... + // e matches and d=2 uses "+c;" for k=3 + assertEquals(2, info.length); + String expecting = + "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + + "SLL_ATNTransitions=6, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}, " + + "{decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=4, " + + "SLL_ATNTransitions=2, SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]"; + assertEquals(expecting, Arrays.toString(info)); + } + + @Test public void testProfilerGeneratedCode() throws Exception { + String grammar = + "grammar T;\n" + + "s : a+ ID EOF ;\n" + + "a : ID ';'{}\n" + + " | ID '.'\n" + + " ;\n"+ + "WS : [ \\r\\t\\n]+ -> channel(HIDDEN) ;\n" + + "SEMI : ';' ;\n" + + "DOT : '.' 
;\n" + + "ID : [a-zA-Z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "PLUS : '+' ;\n" + + "MULT : '*' ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "xyz;abc;z.q", false, true); + String expecting = + "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, SLL_ATNTransitions=4, " + + "SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}," + + " {decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + + "SLL_ATNTransitions=3, SLL_DFATransitions=3, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]\n"; + assertEquals(expecting, found); + assertEquals(null, stderrDuringParse); + } + + public DecisionInfo[] interpAndGetDecisionInfo( + LexerGrammar lg, Grammar g, + String startRule, String... input) + { + + LexerInterpreter lexEngine = lg.createLexerInterpreter(null); + ParserInterpreter parser = g.createParserInterpreter(null); + parser.setProfile(true); + for (String s : input) { + lexEngine.reset(); + parser.reset(); + lexEngine.setInputStream(new ANTLRInputStream(s)); + CommonTokenStream tokens = new CommonTokenStream(lexEngine); + parser.setInputStream(tokens); + Rule r = g.rules.get(startRule); + if ( r==null ) { + return parser.getParseInfo().getDecisionInfo(); + } + ParserRuleContext t = parser.parse(r.index); +// try { +// Utils.waitForClose(t.inspect(parser).get()); +// } +// catch (Exception e) { +// e.printStackTrace(); +// } +// +// System.out.println(t.toStringTree(parser)); + } + return parser.getParseInfo().getDecisionInfo(); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestPerformance.java b/tool/test/org/antlr/v4/test/tool/TestPerformance.java new file mode 100644 index 000000000..d87df1095 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestPerformance.java @@ -0,0 +1,2031 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
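interpAndGetDecisionInfo above gathers per-decision profiling data through the interpreters by calling setProfile(true) before parsing and reading getParseInfo().getDecisionInfo() afterwards. The same two calls work on a generated parser, which is what testProfilerGeneratedCode checks through the test rig; a minimal sketch, assuming TLexer and TParser have been generated from the grammar T shown in that test (ProfileDemo itself is an invented name):

    import java.util.Arrays;

    import org.antlr.v4.runtime.ANTLRInputStream;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.atn.DecisionInfo;

    public class ProfileDemo {
        public static void main(String[] args) {
            TLexer lexer = new TLexer(new ANTLRInputStream("xyz;abc;z.q"));
            TParser parser = new TParser(new CommonTokenStream(lexer));
            parser.setProfile(true);   // collect per-decision statistics while parsing
            parser.s();                // invoke the start rule
            DecisionInfo[] decisions = parser.getParseInfo().getDecisionInfo();
            System.out.println(Arrays.toString(decisions));
        }
    }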
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.ANTLRFileStream; +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.BailErrorStrategy; +import org.antlr.v4.runtime.BaseErrorListener; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.DefaultErrorStrategy; +import org.antlr.v4.runtime.DiagnosticErrorListener; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserInterpreter; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.Recognizer; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNConfig; +import org.antlr.v4.runtime.atn.ATNConfigSet; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.atn.PredictionMode; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.dfa.DFAState; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.runtime.misc.NotNull; +import org.antlr.v4.runtime.misc.Nullable; +import org.antlr.v4.runtime.misc.ParseCancellationException; +import org.antlr.v4.runtime.misc.Utils; +import org.antlr.v4.runtime.tree.ErrorNode; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.ParseTreeListener; +import org.antlr.v4.runtime.tree.ParseTreeWalker; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.junit.Assert; +import org.junit.Test; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.lang.ref.Reference; +import java.lang.ref.SoftReference; +import java.lang.ref.WeakReference; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerArray; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.zip.CRC32; +import java.util.zip.Checksum; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + +public class TestPerformance extends BaseTest { + /** + * Parse all java files under this package within the JDK_SOURCE_ROOT + * (environment variable or property defined on the Java command line). + */ + private static final String TOP_PACKAGE = "java.lang"; + /** + * {@code true} to load java files from sub-packages of + * {@link #TOP_PACKAGE}. + */ + private static final boolean RECURSIVE = true; + /** + * {@code true} to read all source files from disk into memory before + * starting the parse. 
The default value is {@code true} to help prevent + * drive speed from affecting the performance results. This value may be set + * to {@code false} to support parsing large input sets which would not + * otherwise fit into memory. + */ + private static final boolean PRELOAD_SOURCES = true; + /** + * The encoding to use when reading source files. + */ + private static final String ENCODING = "UTF-8"; + /** + * The maximum number of files to parse in a single iteration. + */ + private static final int MAX_FILES_PER_PARSE_ITERATION = Integer.MAX_VALUE; + + /** + * {@code true} to call {@link Collections#shuffle} on the list of input + * files before the first parse iteration. + */ + private static final boolean SHUFFLE_FILES_AT_START = false; + /** + * {@code true} to call {@link Collections#shuffle} before each parse + * iteration after the first. + */ + private static final boolean SHUFFLE_FILES_AFTER_ITERATIONS = false; + /** + * The instance of {@link Random} passed when calling + * {@link Collections#shuffle}. + */ + private static final Random RANDOM = new Random(); + + /** + * {@code true} to use the Java grammar with expressions in the v4 + * left-recursive syntax (Java-LR.g4). {@code false} to use the standard + * grammar (Java.g4). In either case, the grammar is renamed in the + * temporary directory to Java.g4 before compiling. + */ + private static final boolean USE_LR_GRAMMAR = true; + /** + * {@code true} to specify the {@code -Xforce-atn} option when generating + * the grammar, forcing all decisions in {@code JavaParser} to be handled by + * {@link ParserATNSimulator#adaptivePredict}. + */ + private static final boolean FORCE_ATN = false; + /** + * {@code true} to specify the {@code -atn} option when generating the + * grammar. This will cause ANTLR to export the ATN for each decision as a + * DOT (GraphViz) file. + */ + private static final boolean EXPORT_ATN_GRAPHS = true; + /** + * {@code true} to specify the {@code -XdbgST} option when generating the + * grammar. + */ + private static final boolean DEBUG_TEMPLATES = false; + /** + * {@code true} to specify the {@code -XdbgSTWait} option when generating the + * grammar. + */ + private static final boolean DEBUG_TEMPLATES_WAIT = DEBUG_TEMPLATES; + /** + * {@code true} to delete temporary (generated and compiled) files when the + * test completes. + */ + private static final boolean DELETE_TEMP_FILES = true; + /** + * {@code true} to use a {@link ParserInterpreter} for parsing instead of + * generated parser. + */ + private static final boolean USE_PARSER_INTERPRETER = false; + + /** + * {@code true} to call {@link System#gc} and then wait for 5 seconds at the + * end of the test to make it easier for a profiler to grab a heap dump at + * the end of the test run. + */ + private static final boolean PAUSE_FOR_HEAP_DUMP = false; + + /** + * Parse each file with {@code JavaParser.compilationUnit}. + */ + private static final boolean RUN_PARSER = true; + /** + * {@code true} to use {@link BailErrorStrategy}, {@code false} to use + * {@link DefaultErrorStrategy}. + */ + private static final boolean BAIL_ON_ERROR = false; + /** + * {@code true} to compute a checksum for verifying consistency across + * optimizations and multiple passes. + */ + private static final boolean COMPUTE_CHECKSUM = true; + /** + * This value is passed to {@link Parser#setBuildParseTree}. 
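The COMPUTE_CHECKSUM option documented above works by folding a handful of fields from every token into a CRC32, so that passes run with different options can be checked for identical lexing/parsing behavior. The sketch below mirrors the updateChecksum helpers defined further down in this file; the wrapper class TokenChecksum is invented for illustration.

    import java.util.zip.CRC32;
    import java.util.zip.Checksum;

    import org.antlr.v4.runtime.Token;

    final class TokenChecksum {
        private final Checksum checksum = new CRC32();

        // Fold one int into the checksum, least-significant byte first.
        private void update(int value) {
            checksum.update(value & 0xFF);
            checksum.update((value >>> 8) & 0xFF);
            checksum.update((value >>> 16) & 0xFF);
            checksum.update((value >>> 24) & 0xFF);
        }

        // Fold the token fields that must match across passes.
        void update(Token token) {
            if (token == null) {
                checksum.update(0);
                return;
            }
            update(token.getStartIndex());
            update(token.getStopIndex());
            update(token.getLine());
            update(token.getCharPositionInLine());
            update(token.getType());
            update(token.getChannel());
        }

        long value() {
            return checksum.getValue();
        }
    }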
+ */ + private static final boolean BUILD_PARSE_TREES = false; + /** + * Use + * {@link ParseTreeWalker#DEFAULT}{@code .}{@link ParseTreeWalker#walk walk} + * with the {@code JavaParserBaseListener} to show parse tree walking + * overhead. If {@link #BUILD_PARSE_TREES} is {@code false}, the listener + * will instead be called during the parsing process via + * {@link Parser#addParseListener}. + */ + private static final boolean BLANK_LISTENER = false; + + /** + * Shows the number of {@link DFAState} and {@link ATNConfig} instances in + * the DFA cache at the end of each pass. If {@link #REUSE_LEXER_DFA} and/or + * {@link #REUSE_PARSER_DFA} are false, the corresponding instance numbers + * will only apply to one file (the last file if {@link #NUMBER_OF_THREADS} + * is 0, otherwise the last file which was parsed on the first thread). + */ + private static final boolean SHOW_DFA_STATE_STATS = true; + /** + * If {@code true}, the DFA state statistics report includes a breakdown of + * the number of DFA states contained in each decision (with rule names). + */ + private static final boolean DETAILED_DFA_STATE_STATS = true; + + /** + * Specify the {@link PredictionMode} used by the + * {@link ParserATNSimulator}. If {@link #TWO_STAGE_PARSING} is + * {@code true}, this value only applies to the second stage, as the first + * stage will always use {@link PredictionMode#SLL}. + */ + private static final PredictionMode PREDICTION_MODE = PredictionMode.LL; + + private static final boolean TWO_STAGE_PARSING = true; + + private static final boolean SHOW_CONFIG_STATS = false; + + /** + * If {@code true}, detailed statistics for the number of DFA edges were + * taken while parsing each file, as well as the number of DFA edges which + * required on-the-fly computation. + */ + private static final boolean COMPUTE_TRANSITION_STATS = false; + private static final boolean SHOW_TRANSITION_STATS_PER_FILE = false; + /** + * If {@code true}, the transition statistics will be adjusted to a running + * total before reporting the final results. + */ + private static final boolean TRANSITION_RUNNING_AVERAGE = false; + /** + * If {@code true}, transition statistics will be weighted according to the + * total number of transitions taken during the parsing of each file. + */ + private static final boolean TRANSITION_WEIGHTED_AVERAGE = false; + + /** + * If {@code true}, after each pass a summary of the time required to parse + * each file will be printed. + */ + private static final boolean COMPUTE_TIMING_STATS = false; + /** + * If {@code true}, the timing statistics for {@link #COMPUTE_TIMING_STATS} + * will be cumulative (i.e. the time reported for the nth file will + * be the total time required to parse the first n files). + */ + private static final boolean TIMING_CUMULATIVE = false; + /** + * If {@code true}, the timing statistics will include the parser only. This + * flag allows for targeted measurements, and helps eliminate variance when + * {@link #PRELOAD_SOURCES} is {@code false}. + *

+ * This flag has no impact when {@link #RUN_PARSER} is {@code false}. + */ + private static final boolean TIME_PARSE_ONLY = false; + + /** + * When {@code true}, messages will be printed to {@link System#err} when + * the first stage (SLL) parsing resulted in a syntax error. This option is + * ignored when {@link #TWO_STAGE_PARSING} is {@code false}. + */ + private static final boolean REPORT_SECOND_STAGE_RETRY = true; + private static final boolean REPORT_SYNTAX_ERRORS = true; + private static final boolean REPORT_AMBIGUITIES = false; + private static final boolean REPORT_FULL_CONTEXT = false; + private static final boolean REPORT_CONTEXT_SENSITIVITY = REPORT_FULL_CONTEXT; + + /** + * If {@code true}, a single {@code JavaLexer} will be used, and + * {@link Lexer#setInputStream} will be called to initialize it for each + * source file. Otherwise, a new instance will be created for each file. + */ + private static final boolean REUSE_LEXER = false; + /** + * If {@code true}, a single DFA will be used for lexing which is shared + * across all threads and files. Otherwise, each file will be lexed with its + * own DFA which is accomplished by creating one ATN instance per thread and + * clearing its DFA cache before lexing each file. + */ + private static final boolean REUSE_LEXER_DFA = true; + /** + * If {@code true}, a single {@code JavaParser} will be used, and + * {@link Parser#setInputStream} will be called to initialize it for each + * source file. Otherwise, a new instance will be created for each file. + */ + private static final boolean REUSE_PARSER = false; + /** + * If {@code true}, a single DFA will be used for parsing which is shared + * across all threads and files. Otherwise, each file will be parsed with + * its own DFA which is accomplished by creating one ATN instance per thread + * and clearing its DFA cache before parsing each file. + */ + private static final boolean REUSE_PARSER_DFA = true; + /** + * If {@code true}, the shared lexer and parser are reset after each pass. + * If {@code false}, all passes after the first will be fully "warmed up", + * which makes them faster and can compare them to the first warm-up pass, + * but it will not distinguish bytecode load/JIT time from warm-up time + * during the first pass. + */ + private static final boolean CLEAR_DFA = false; + /** + * Total number of passes to make over the source. + */ + private static final int PASSES = 4; + + /** + * This option controls the granularity of multi-threaded parse operations. + * If {@code true}, the parsing operation will be parallelized across files; + * otherwise the parsing will be parallelized across multiple iterations. + */ + private static final boolean FILE_GRANULARITY = true; + + /** + * Number of parser threads to use. 
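PREDICTION_MODE, TWO_STAGE_PARSING and REPORT_SECOND_STAGE_RETRY above all refer to ANTLR's two-stage parsing strategy: parse with SLL prediction and a bailing error strategy first, and re-parse with full LL prediction only when that first attempt throws. Stripped of the statistics and instance-reuse plumbing used by the retry logic further down in this file, the pattern looks roughly like this (JavaLexer/JavaParser stand for the generated classes; TwoStageParseDemo is an invented name):

    import org.antlr.v4.runtime.BailErrorStrategy;
    import org.antlr.v4.runtime.CharStream;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.ConsoleErrorListener;
    import org.antlr.v4.runtime.DefaultErrorStrategy;
    import org.antlr.v4.runtime.atn.PredictionMode;
    import org.antlr.v4.runtime.misc.ParseCancellationException;
    import org.antlr.v4.runtime.tree.ParseTree;

    public class TwoStageParseDemo {
        public static ParseTree parse(CharStream input) {
            JavaLexer lexer = new JavaLexer(input);        // generated lexer (assumed)
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            JavaParser parser = new JavaParser(tokens);    // generated parser (assumed)

            // Stage 1: fast SLL prediction, bail out on the first syntax error.
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
            parser.removeErrorListeners();
            parser.setErrorHandler(new BailErrorStrategy());
            try {
                return parser.compilationUnit();
            }
            catch (ParseCancellationException ex) {
                // Stage 2: rewind and re-parse with full LL prediction and normal error reporting.
                tokens.seek(0);
                parser.reset();
                parser.addErrorListener(ConsoleErrorListener.INSTANCE);
                parser.setErrorHandler(new DefaultErrorStrategy());
                parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                return parser.compilationUnit();
            }
        }
    }

In practice the first stage succeeds for most input, which is why a second-stage retry is treated as a noteworthy event (see REPORT_SECOND_STAGE_RETRY above).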
+ */ + private static final int NUMBER_OF_THREADS = 1; + + private static final Lexer[] sharedLexers = new Lexer[NUMBER_OF_THREADS]; + + private static final Parser[] sharedParsers = new Parser[NUMBER_OF_THREADS]; + + private static final ParseTreeListener[] sharedListeners = new ParseTreeListener[NUMBER_OF_THREADS]; + + private static final long[][] totalTransitionsPerFile; + private static final long[][] computedTransitionsPerFile; + static { + if (COMPUTE_TRANSITION_STATS) { + totalTransitionsPerFile = new long[PASSES][]; + computedTransitionsPerFile = new long[PASSES][]; + } else { + totalTransitionsPerFile = null; + computedTransitionsPerFile = null; + } + } + + private static final long[][][] decisionInvocationsPerFile; + private static final long[][][] fullContextFallbackPerFile; + private static final long[][][] nonSllPerFile; + private static final long[][][] totalTransitionsPerDecisionPerFile; + private static final long[][][] computedTransitionsPerDecisionPerFile; + private static final long[][][] fullContextTransitionsPerDecisionPerFile; + static { + if (COMPUTE_TRANSITION_STATS && DETAILED_DFA_STATE_STATS) { + decisionInvocationsPerFile = new long[PASSES][][]; + fullContextFallbackPerFile = new long[PASSES][][]; + nonSllPerFile = new long[PASSES][][]; + totalTransitionsPerDecisionPerFile = new long[PASSES][][]; + computedTransitionsPerDecisionPerFile = new long[PASSES][][]; + fullContextTransitionsPerDecisionPerFile = new long[PASSES][][]; + } else { + decisionInvocationsPerFile = null; + fullContextFallbackPerFile = null; + nonSllPerFile = null; + totalTransitionsPerDecisionPerFile = null; + computedTransitionsPerDecisionPerFile = null; + fullContextTransitionsPerDecisionPerFile = null; + } + } + + private static final long[][] timePerFile; + private static final int[][] tokensPerFile; + static { + if (COMPUTE_TIMING_STATS) { + timePerFile = new long[PASSES][]; + tokensPerFile = new int[PASSES][]; + } else { + timePerFile = null; + tokensPerFile = null; + } + } + + private final AtomicIntegerArray tokenCount = new AtomicIntegerArray(PASSES); + + @Test + //@org.junit.Ignore + public void compileJdk() throws IOException, InterruptedException, ExecutionException { + String jdkSourceRoot = getSourceRoot("JDK"); + assertTrue("The JDK_SOURCE_ROOT environment variable must be set for performance testing.", jdkSourceRoot != null && !jdkSourceRoot.isEmpty()); + + compileJavaParser(USE_LR_GRAMMAR); + final String lexerName = "JavaLexer"; + final String parserName = "JavaParser"; + final String listenerName = "JavaBaseListener"; + final String entryPoint = "compilationUnit"; + final ParserFactory factory = getParserFactory(lexerName, parserName, listenerName, entryPoint); + + if (!TOP_PACKAGE.isEmpty()) { + jdkSourceRoot = jdkSourceRoot + '/' + TOP_PACKAGE.replace('.', '/'); + } + + File directory = new File(jdkSourceRoot); + assertTrue(directory.isDirectory()); + + FilenameFilter filesFilter = FilenameFilters.extension(".java", false); + FilenameFilter directoriesFilter = FilenameFilters.ALL_FILES; + final List sources = loadSources(directory, filesFilter, directoriesFilter, RECURSIVE); + + for (int i = 0; i < PASSES; i++) { + if (COMPUTE_TRANSITION_STATS) { + totalTransitionsPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; + computedTransitionsPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; + + if (DETAILED_DFA_STATE_STATS) { + decisionInvocationsPerFile[i] = new long[Math.min(sources.size(), 
MAX_FILES_PER_PARSE_ITERATION)][]; + fullContextFallbackPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; + nonSllPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; + totalTransitionsPerDecisionPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; + computedTransitionsPerDecisionPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; + fullContextTransitionsPerDecisionPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; + } + } + + if (COMPUTE_TIMING_STATS) { + timePerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; + tokensPerFile[i] = new int[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; + } + } + + System.out.format("Located %d source files.%n", sources.size()); + System.out.print(getOptionsDescription(TOP_PACKAGE)); + + ExecutorService executorService = Executors.newFixedThreadPool(FILE_GRANULARITY ? 1 : NUMBER_OF_THREADS, new NumberedThreadFactory()); + + List> passResults = new ArrayList>(); + passResults.add(executorService.submit(new Runnable() { + @Override + public void run() { + try { + parse1(0, factory, sources, SHUFFLE_FILES_AT_START); + } catch (InterruptedException ex) { + Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); + } + } + })); + for (int i = 0; i < PASSES - 1; i++) { + final int currentPass = i + 1; + passResults.add(executorService.submit(new Runnable() { + @Override + public void run() { + if (CLEAR_DFA) { + int index = FILE_GRANULARITY ? 0 : ((NumberedThread)Thread.currentThread()).getThreadNumber(); + if (sharedLexers.length > 0 && sharedLexers[index] != null) { + ATN atn = sharedLexers[index].getATN(); + for (int j = 0; j < sharedLexers[index].getInterpreter().decisionToDFA.length; j++) { + sharedLexers[index].getInterpreter().decisionToDFA[j] = new DFA(atn.getDecisionState(j), j); + } + } + + if (sharedParsers.length > 0 && sharedParsers[index] != null) { + ATN atn = sharedParsers[index].getATN(); + for (int j = 0; j < sharedParsers[index].getInterpreter().decisionToDFA.length; j++) { + sharedParsers[index].getInterpreter().decisionToDFA[j] = new DFA(atn.getDecisionState(j), j); + } + } + + if (FILE_GRANULARITY) { + Arrays.fill(sharedLexers, null); + Arrays.fill(sharedParsers, null); + } + } + + try { + parse2(currentPass, factory, sources, SHUFFLE_FILES_AFTER_ITERATIONS); + } catch (InterruptedException ex) { + Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); + } + } + })); + } + + for (Future passResult : passResults) { + passResult.get(); + } + + executorService.shutdown(); + executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + + if (COMPUTE_TRANSITION_STATS && SHOW_TRANSITION_STATS_PER_FILE) { + computeTransitionStatistics(); + } + + if (COMPUTE_TIMING_STATS) { + computeTimingStatistics(); + } + + sources.clear(); + if (PAUSE_FOR_HEAP_DUMP) { + System.gc(); + System.out.println("Pausing before application exit."); + try { + Thread.sleep(4000); + } catch (InterruptedException ex) { + Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); + } + } + } + + /** + * Compute and print ATN/DFA transition statistics. 
+ */ + private void computeTransitionStatistics() { + if (TRANSITION_RUNNING_AVERAGE) { + for (int i = 0; i < PASSES; i++) { + long[] data = computedTransitionsPerFile[i]; + for (int j = 0; j < data.length - 1; j++) { + data[j + 1] += data[j]; + } + + data = totalTransitionsPerFile[i]; + for (int j = 0; j < data.length - 1; j++) { + data[j + 1] += data[j]; + } + } + } + + long[] sumNum = new long[totalTransitionsPerFile[0].length]; + long[] sumDen = new long[totalTransitionsPerFile[0].length]; + double[] sumNormalized = new double[totalTransitionsPerFile[0].length]; + for (int i = 0; i < PASSES; i++) { + long[] num = computedTransitionsPerFile[i]; + long[] den = totalTransitionsPerFile[i]; + for (int j = 0; j < den.length; j++) { + sumNum[j] += num[j]; + sumDen[j] += den[j]; + if (den[j] > 0) { + sumNormalized[j] += (double)num[j] / (double)den[j]; + } + } + } + + double[] weightedAverage = new double[totalTransitionsPerFile[0].length]; + double[] average = new double[totalTransitionsPerFile[0].length]; + for (int i = 0; i < average.length; i++) { + if (sumDen[i] > 0) { + weightedAverage[i] = (double)sumNum[i] / (double)sumDen[i]; + } + else { + weightedAverage[i] = 0; + } + + average[i] = sumNormalized[i] / PASSES; + } + + double[] low95 = new double[totalTransitionsPerFile[0].length]; + double[] high95 = new double[totalTransitionsPerFile[0].length]; + double[] low67 = new double[totalTransitionsPerFile[0].length]; + double[] high67 = new double[totalTransitionsPerFile[0].length]; + double[] stddev = new double[totalTransitionsPerFile[0].length]; + for (int i = 0; i < stddev.length; i++) { + double[] points = new double[PASSES]; + for (int j = 0; j < PASSES; j++) { + long totalTransitions = totalTransitionsPerFile[j][i]; + if (totalTransitions > 0) { + points[j] = ((double)computedTransitionsPerFile[j][i] / (double)totalTransitionsPerFile[j][i]); + } + else { + points[j] = 0; + } + } + + Arrays.sort(points); + + final double averageValue = TRANSITION_WEIGHTED_AVERAGE ? weightedAverage[i] : average[i]; + double value = 0; + for (int j = 0; j < PASSES; j++) { + double diff = points[j] - averageValue; + value += diff * diff; + } + + int ignoreCount95 = (int)Math.round(PASSES * (1 - 0.95) / 2.0); + int ignoreCount67 = (int)Math.round(PASSES * (1 - 0.667) / 2.0); + low95[i] = points[ignoreCount95]; + high95[i] = points[points.length - 1 - ignoreCount95]; + low67[i] = points[ignoreCount67]; + high67[i] = points[points.length - 1 - ignoreCount67]; + stddev[i] = Math.sqrt(value / PASSES); + } + + System.out.format("File\tAverage\tStd. Dev.\t95%% Low\t95%% High\t66.7%% Low\t66.7%% High%n"); + for (int i = 0; i < stddev.length; i++) { + final double averageValue = TRANSITION_WEIGHTED_AVERAGE ? weightedAverage[i] : average[i]; + System.out.format("%d\t%e\t%e\t%e\t%e\t%e\t%e%n", i + 1, averageValue, stddev[i], averageValue - low95[i], high95[i] - averageValue, averageValue - low67[i], high67[i] - averageValue); + } + } + + /** + * Compute and print timing statistics. 
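The bound computation at the end of computeTransitionStatistics above (and repeated in computeTimingStatistics below) does not assume any distribution: it sorts the per-pass points and drops a symmetric number of points from each tail. With the default PASSES = 4 the rounding works out as follows; a small worked example, not part of the test itself:

    public class BoundIndexDemo {
        public static void main(String[] args) {
            final int PASSES = 4;
            // 95% bounds: drop round(4 * 0.05 / 2) = round(0.1) = 0 points per tail,
            // so low95/high95 are simply the minimum and maximum of the sorted points.
            int ignoreCount95 = (int) Math.round(PASSES * (1 - 0.95) / 2.0);
            // 66.7% bounds: drop round(4 * 0.333 / 2) = round(0.666) = 1 point per tail,
            // so low67/high67 are the two middle points of the sorted array.
            int ignoreCount67 = (int) Math.round(PASSES * (1 - 0.667) / 2.0);
            System.out.println(ignoreCount95 + " " + ignoreCount67);  // prints "0 1"
        }
    }

Note also that the reported columns are offsets from the (optionally weighted) average, and that the standard deviation divides by PASSES, i.e. it is the population rather than the sample deviation.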
+ */ + private void computeTimingStatistics() { + if (TIMING_CUMULATIVE) { + for (int i = 0; i < PASSES; i++) { + long[] data = timePerFile[i]; + for (int j = 0; j < data.length - 1; j++) { + data[j + 1] += data[j]; + } + + int[] data2 = tokensPerFile[i]; + for (int j = 0; j < data2.length - 1; j++) { + data2[j + 1] += data2[j]; + } + } + } + + final int fileCount = timePerFile[0].length; + double[] sum = new double[fileCount]; + for (int i = 0; i < PASSES; i++) { + long[] data = timePerFile[i]; + int[] tokenData = tokensPerFile[i]; + for (int j = 0; j < data.length; j++) { + sum[j] += (double)data[j] / (double)tokenData[j]; + } + } + + double[] average = new double[fileCount]; + for (int i = 0; i < average.length; i++) { + average[i] = sum[i] / PASSES; + } + + double[] low95 = new double[fileCount]; + double[] high95 = new double[fileCount]; + double[] low67 = new double[fileCount]; + double[] high67 = new double[fileCount]; + double[] stddev = new double[fileCount]; + for (int i = 0; i < stddev.length; i++) { + double[] points = new double[PASSES]; + for (int j = 0; j < PASSES; j++) { + points[j] = (double)timePerFile[j][i] / (double)tokensPerFile[j][i]; + } + + Arrays.sort(points); + + final double averageValue = average[i]; + double value = 0; + for (int j = 0; j < PASSES; j++) { + double diff = points[j] - averageValue; + value += diff * diff; + } + + int ignoreCount95 = (int)Math.round(PASSES * (1 - 0.95) / 2.0); + int ignoreCount67 = (int)Math.round(PASSES * (1 - 0.667) / 2.0); + low95[i] = points[ignoreCount95]; + high95[i] = points[points.length - 1 - ignoreCount95]; + low67[i] = points[ignoreCount67]; + high67[i] = points[points.length - 1 - ignoreCount67]; + stddev[i] = Math.sqrt(value / PASSES); + } + + System.out.format("File\tAverage\tStd. Dev.\t95%% Low\t95%% High\t66.7%% Low\t66.7%% High%n"); + for (int i = 0; i < stddev.length; i++) { + final double averageValue = average[i]; + System.out.format("%d\t%e\t%e\t%e\t%e\t%e\t%e%n", i + 1, averageValue, stddev[i], averageValue - low95[i], high95[i] - averageValue, averageValue - low67[i], high67[i] - averageValue); + } + } + + private String getSourceRoot(String prefix) { + String sourceRoot = System.getenv(prefix+"_SOURCE_ROOT"); + if (sourceRoot == null) { + sourceRoot = System.getProperty(prefix+"_SOURCE_ROOT"); + } + + return sourceRoot; + } + + @Override + protected void eraseTempDir() { + if (DELETE_TEMP_FILES) { + super.eraseTempDir(); + } + } + + public static String getOptionsDescription(String topPackage) { + StringBuilder builder = new StringBuilder(); + builder.append("Input="); + if (topPackage.isEmpty()) { + builder.append("*"); + } + else { + builder.append(topPackage).append(".*"); + } + + builder.append(", Grammar=").append(USE_LR_GRAMMAR ? "LR" : "Standard"); + builder.append(", ForceAtn=").append(FORCE_ATN); + + builder.append(newline); + + builder.append("Op=Lex").append(RUN_PARSER ? "+Parse" : " only"); + builder.append(", Strategy=").append(BAIL_ON_ERROR ? BailErrorStrategy.class.getSimpleName() : DefaultErrorStrategy.class.getSimpleName()); + builder.append(", BuildParseTree=").append(BUILD_PARSE_TREES); + builder.append(", WalkBlankListener=").append(BLANK_LISTENER); + + builder.append(newline); + + builder.append("Lexer=").append(REUSE_LEXER ? "setInputStream" : "newInstance"); + builder.append(", Parser=").append(REUSE_PARSER ? "setInputStream" : "newInstance"); + builder.append(", AfterPass=").append(CLEAR_DFA ? 
"newInstance" : "setInputStream"); + + builder.append(newline); + + return builder.toString(); + } + + /** + * This method is separate from {@link #parse2} so the first pass can be distinguished when analyzing + * profiler results. + */ + protected void parse1(int currentPass, ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { + if (FILE_GRANULARITY) { + System.gc(); + } + + parseSources(currentPass, factory, sources, shuffleSources); + } + + /** + * This method is separate from {@link #parse1} so the first pass can be distinguished when analyzing + * profiler results. + */ + protected void parse2(int currentPass, ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { + if (FILE_GRANULARITY) { + System.gc(); + } + + parseSources(currentPass, factory, sources, shuffleSources); + } + + protected List loadSources(File directory, FilenameFilter filesFilter, FilenameFilter directoriesFilter, boolean recursive) { + List result = new ArrayList(); + loadSources(directory, filesFilter, directoriesFilter, recursive, result); + return result; + } + + protected void loadSources(File directory, FilenameFilter filesFilter, FilenameFilter directoriesFilter, boolean recursive, Collection result) { + assert directory.isDirectory(); + + File[] sources = directory.listFiles(filesFilter); + for (File file : sources) { + if (!file.isFile()) { + continue; + } + + result.add(new InputDescriptor(file.getAbsolutePath())); + } + + if (recursive) { + File[] children = directory.listFiles(directoriesFilter); + for (File child : children) { + if (child.isDirectory()) { + loadSources(child, filesFilter, directoriesFilter, true, result); + } + } + } + } + + int configOutputSize = 0; + + @SuppressWarnings("unused") + protected void parseSources(final int currentPass, final ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { + if (shuffleSources) { + List sourcesList = new ArrayList(sources); + synchronized (RANDOM) { + Collections.shuffle(sourcesList, RANDOM); + } + + sources = sourcesList; + } + + long startTime = System.nanoTime(); + tokenCount.set(currentPass, 0); + int inputSize = 0; + int inputCount = 0; + + Collection> results = new ArrayList>(); + ExecutorService executorService; + if (FILE_GRANULARITY) { + executorService = Executors.newFixedThreadPool(FILE_GRANULARITY ? NUMBER_OF_THREADS : 1, new NumberedThreadFactory()); + } else { + executorService = Executors.newSingleThreadExecutor(new FixedThreadNumberFactory(((NumberedThread)Thread.currentThread()).getThreadNumber())); + } + + for (InputDescriptor inputDescriptor : sources) { + if (inputCount >= MAX_FILES_PER_PARSE_ITERATION) { + break; + } + + final CharStream input = inputDescriptor.getInputStream(); + input.seek(0); + inputSize += input.size(); + inputCount++; + Future futureChecksum = executorService.submit(new Callable() { + @Override + public FileParseResult call() { + // this incurred a great deal of overhead and was causing significant variations in performance results. 
+ //System.out.format("Parsing file %s\n", input.getSourceName()); + try { + return factory.parseFile(input, currentPass, ((NumberedThread)Thread.currentThread()).getThreadNumber()); + } catch (IllegalStateException ex) { + ex.printStackTrace(System.err); + } catch (Throwable t) { + t.printStackTrace(System.err); + } + + return null; + } + }); + + results.add(futureChecksum); + } + + Checksum checksum = new CRC32(); + int currentIndex = -1; + for (Future future : results) { + currentIndex++; + int fileChecksum = 0; + try { + FileParseResult fileResult = future.get(); + if (COMPUTE_TRANSITION_STATS) { + totalTransitionsPerFile[currentPass][currentIndex] = sum(fileResult.parserTotalTransitions); + computedTransitionsPerFile[currentPass][currentIndex] = sum(fileResult.parserComputedTransitions); + + if (DETAILED_DFA_STATE_STATS) { + decisionInvocationsPerFile[currentPass][currentIndex] = fileResult.decisionInvocations; + fullContextFallbackPerFile[currentPass][currentIndex] = fileResult.fullContextFallback; + nonSllPerFile[currentPass][currentIndex] = fileResult.nonSll; + totalTransitionsPerDecisionPerFile[currentPass][currentIndex] = fileResult.parserTotalTransitions; + computedTransitionsPerDecisionPerFile[currentPass][currentIndex] = fileResult.parserComputedTransitions; + fullContextTransitionsPerDecisionPerFile[currentPass][currentIndex] = fileResult.parserFullContextTransitions; + } + } + + if (COMPUTE_TIMING_STATS) { + timePerFile[currentPass][currentIndex] = fileResult.endTime - fileResult.startTime; + tokensPerFile[currentPass][currentIndex] = fileResult.tokenCount; + } + + fileChecksum = fileResult.checksum; + } catch (ExecutionException ex) { + Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); + } + + if (COMPUTE_CHECKSUM) { + updateChecksum(checksum, fileChecksum); + } + } + + executorService.shutdown(); + executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + + System.out.format("%d. Total parse time for %d files (%d KB, %d tokens%s): %.0fms%n", + currentPass + 1, + inputCount, + inputSize / 1024, + tokenCount.get(currentPass), + COMPUTE_CHECKSUM ? String.format(", checksum 0x%8X", checksum.getValue()) : "", + (double)(System.nanoTime() - startTime) / 1000000.0); + + if (sharedLexers.length > 0) { + int index = FILE_GRANULARITY ? 
0 : ((NumberedThread)Thread.currentThread()).getThreadNumber(); + Lexer lexer = sharedLexers[index]; + final LexerATNSimulator lexerInterpreter = lexer.getInterpreter(); + final DFA[] modeToDFA = lexerInterpreter.decisionToDFA; + if (SHOW_DFA_STATE_STATS) { + int states = 0; + int configs = 0; + Set uniqueConfigs = new HashSet(); + + for (int i = 0; i < modeToDFA.length; i++) { + DFA dfa = modeToDFA[i]; + if (dfa == null) { + continue; + } + + states += dfa.states.size(); + for (DFAState state : dfa.states.values()) { + configs += state.configs.size(); + uniqueConfigs.addAll(state.configs); + } + } + + System.out.format("There are %d lexer DFAState instances, %d configs (%d unique).%n", states, configs, uniqueConfigs.size()); + + if (DETAILED_DFA_STATE_STATS) { + System.out.format("\tMode\tStates\tConfigs\tMode%n"); + for (int i = 0; i < modeToDFA.length; i++) { + DFA dfa = modeToDFA[i]; + if (dfa == null || dfa.states.isEmpty()) { + continue; + } + + int modeConfigs = 0; + for (DFAState state : dfa.states.values()) { + modeConfigs += state.configs.size(); + } + + String modeName = lexer.getModeNames()[i]; + System.out.format("\t%d\t%d\t%d\t%s%n", dfa.decision, dfa.states.size(), modeConfigs, modeName); + } + } + } + } + + if (RUN_PARSER && sharedParsers.length > 0) { + int index = FILE_GRANULARITY ? 0 : ((NumberedThread)Thread.currentThread()).getThreadNumber(); + Parser parser = sharedParsers[index]; + // make sure the individual DFAState objects actually have unique ATNConfig arrays + final ParserATNSimulator interpreter = parser.getInterpreter(); + final DFA[] decisionToDFA = interpreter.decisionToDFA; + + if (SHOW_DFA_STATE_STATS) { + int states = 0; + int configs = 0; + Set uniqueConfigs = new HashSet(); + + for (int i = 0; i < decisionToDFA.length; i++) { + DFA dfa = decisionToDFA[i]; + if (dfa == null) { + continue; + } + + states += dfa.states.size(); + for (DFAState state : dfa.states.values()) { + configs += state.configs.size(); + uniqueConfigs.addAll(state.configs); + } + } + + System.out.format("There are %d parser DFAState instances, %d configs (%d unique).%n", states, configs, uniqueConfigs.size()); + + if (DETAILED_DFA_STATE_STATS) { + if (COMPUTE_TRANSITION_STATS) { + System.out.format("\tDecision\tStates\tConfigs\tPredict (ALL)\tPredict (LL)\tNon-SLL\tTransitions\tTransitions (ATN)\tTransitions (LL)\tLA (SLL)\tLA (LL)\tRule%n"); + } + else { + System.out.format("\tDecision\tStates\tConfigs\tRule%n"); + } + + for (int i = 0; i < decisionToDFA.length; i++) { + DFA dfa = decisionToDFA[i]; + if (dfa == null || dfa.states.isEmpty()) { + continue; + } + + int decisionConfigs = 0; + for (DFAState state : dfa.states.values()) { + decisionConfigs += state.configs.size(); + } + + String ruleName = parser.getRuleNames()[parser.getATN().decisionToState.get(dfa.decision).ruleIndex]; + + long calls = 0; + long fullContextCalls = 0; + long nonSllCalls = 0; + long transitions = 0; + long computedTransitions = 0; + long fullContextTransitions = 0; + double lookahead = 0; + double fullContextLookahead = 0; + String formatString; + if (COMPUTE_TRANSITION_STATS) { + for (long[] data : decisionInvocationsPerFile[currentPass]) { + calls += data[i]; + } + + for (long[] data : fullContextFallbackPerFile[currentPass]) { + fullContextCalls += data[i]; + } + + for (long[] data : nonSllPerFile[currentPass]) { + nonSllCalls += data[i]; + } + + for (long[] data : totalTransitionsPerDecisionPerFile[currentPass]) { + transitions += data[i]; + } + + for (long[] data : 
computedTransitionsPerDecisionPerFile[currentPass]) { + computedTransitions += data[i]; + } + + for (long[] data : fullContextTransitionsPerDecisionPerFile[currentPass]) { + fullContextTransitions += data[i]; + } + + if (calls > 0) { + lookahead = (double)(transitions - fullContextTransitions) / (double)calls; + } + + if (fullContextCalls > 0) { + fullContextLookahead = (double)fullContextTransitions / (double)fullContextCalls; + } + + formatString = "\t%1$d\t%2$d\t%3$d\t%4$d\t%5$d\t%6$d\t%7$d\t%8$d\t%9$d\t%10$f\t%11$f\t%12$s%n"; + } + else { + calls = 0; + formatString = "\t%1$d\t%2$d\t%3$d\t%12$s%n"; + } + + System.out.format(formatString, dfa.decision, dfa.states.size(), decisionConfigs, calls, fullContextCalls, nonSllCalls, transitions, computedTransitions, fullContextTransitions, lookahead, fullContextLookahead, ruleName); + } + } + } + + int localDfaCount = 0; + int globalDfaCount = 0; + int localConfigCount = 0; + int globalConfigCount = 0; + int[] contextsInDFAState = new int[0]; + + for (int i = 0; i < decisionToDFA.length; i++) { + DFA dfa = decisionToDFA[i]; + if (dfa == null) { + continue; + } + + if (SHOW_CONFIG_STATS) { + for (DFAState state : dfa.states.keySet()) { + if (state.configs.size() >= contextsInDFAState.length) { + contextsInDFAState = Arrays.copyOf(contextsInDFAState, state.configs.size() + 1); + } + + if (state.isAcceptState) { + boolean hasGlobal = false; + for (ATNConfig config : state.configs) { + if (config.reachesIntoOuterContext > 0) { + globalConfigCount++; + hasGlobal = true; + } else { + localConfigCount++; + } + } + + if (hasGlobal) { + globalDfaCount++; + } else { + localDfaCount++; + } + } + + contextsInDFAState[state.configs.size()]++; + } + } + } + + if (SHOW_CONFIG_STATS && currentPass == 0) { + System.out.format(" DFA accept states: %d total, %d with only local context, %d with a global context%n", localDfaCount + globalDfaCount, localDfaCount, globalDfaCount); + System.out.format(" Config stats: %d total, %d local, %d global%n", localConfigCount + globalConfigCount, localConfigCount, globalConfigCount); + if (SHOW_DFA_STATE_STATS) { + for (int i = 0; i < contextsInDFAState.length; i++) { + if (contextsInDFAState[i] != 0) { + System.out.format(" %d configs = %d%n", i, contextsInDFAState[i]); + } + } + } + } + } + + if (COMPUTE_TIMING_STATS) { + System.out.format("File\tTokens\tTime%n"); + for (int i = 0; i< timePerFile[currentPass].length; i++) { + System.out.format("%d\t%d\t%d%n", i + 1, tokensPerFile[currentPass][i], timePerFile[currentPass][i]); + } + } + } + + private static long sum(long[] array) { + long result = 0; + for (int i = 0; i < array.length; i++) { + result += array[i]; + } + + return result; + } + + protected void compileJavaParser(boolean leftRecursive) throws IOException { + String grammarFileName = "Java.g4"; + String sourceName = leftRecursive ? 
"Java-LR.g4" : "Java.g4"; + String body = load(sourceName, null); + List extraOptions = new ArrayList(); + extraOptions.add("-Werror"); + if (FORCE_ATN) { + extraOptions.add("-Xforce-atn"); + } + if (EXPORT_ATN_GRAPHS) { + extraOptions.add("-atn"); + } + if (DEBUG_TEMPLATES) { + extraOptions.add("-XdbgST"); + if (DEBUG_TEMPLATES_WAIT) { + extraOptions.add("-XdbgSTWait"); + } + } + extraOptions.add("-visitor"); + String[] extraOptionsArray = extraOptions.toArray(new String[extraOptions.size()]); + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", true, extraOptionsArray); + assertTrue(success); + } + + private static void updateChecksum(Checksum checksum, int value) { + checksum.update((value) & 0xFF); + checksum.update((value >>> 8) & 0xFF); + checksum.update((value >>> 16) & 0xFF); + checksum.update((value >>> 24) & 0xFF); + } + + private static void updateChecksum(Checksum checksum, Token token) { + if (token == null) { + checksum.update(0); + return; + } + + updateChecksum(checksum, token.getStartIndex()); + updateChecksum(checksum, token.getStopIndex()); + updateChecksum(checksum, token.getLine()); + updateChecksum(checksum, token.getCharPositionInLine()); + updateChecksum(checksum, token.getType()); + updateChecksum(checksum, token.getChannel()); + } + + protected ParserFactory getParserFactory(String lexerName, String parserName, String listenerName, final String entryPoint) { + try { + ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader()); + final Class lexerClass = loader.loadClass(lexerName).asSubclass(Lexer.class); + final Class parserClass = loader.loadClass(parserName).asSubclass(Parser.class); + final Class listenerClass = loader.loadClass(listenerName).asSubclass(ParseTreeListener.class); + + final Constructor lexerCtor = lexerClass.getConstructor(CharStream.class); + final Constructor parserCtor = parserClass.getConstructor(TokenStream.class); + + // construct initial instances of the lexer and parser to deserialize their ATNs + TokenSource tokenSource = lexerCtor.newInstance(new ANTLRInputStream("")); + parserCtor.newInstance(new CommonTokenStream(tokenSource)); + + return new ParserFactory() { + @Override + public FileParseResult parseFile(CharStream input, int currentPass, int thread) { + final Checksum checksum = new CRC32(); + + final long startTime = System.nanoTime(); + assert thread >= 0 && thread < NUMBER_OF_THREADS; + + try { + ParseTreeListener listener = sharedListeners[thread]; + if (listener == null) { + listener = listenerClass.newInstance(); + sharedListeners[thread] = listener; + } + + Lexer lexer = sharedLexers[thread]; + if (REUSE_LEXER && lexer != null) { + lexer.setInputStream(input); + } else { + Lexer previousLexer = lexer; + lexer = lexerCtor.newInstance(input); + DFA[] decisionToDFA = (FILE_GRANULARITY || previousLexer == null ? 
lexer : previousLexer).getInterpreter().decisionToDFA; + if (!REUSE_LEXER_DFA || (!FILE_GRANULARITY && previousLexer == null)) { + decisionToDFA = new DFA[decisionToDFA.length]; + } + + if (COMPUTE_TRANSITION_STATS) { + lexer.setInterpreter(new StatisticsLexerATNSimulator(lexer, lexer.getATN(), decisionToDFA, lexer.getInterpreter().getSharedContextCache())); + } else if (!REUSE_LEXER_DFA) { + lexer.setInterpreter(new LexerATNSimulator(lexer, lexer.getATN(), decisionToDFA, lexer.getInterpreter().getSharedContextCache())); + } + + sharedLexers[thread] = lexer; + } + + lexer.removeErrorListeners(); + lexer.addErrorListener(DescriptiveErrorListener.INSTANCE); + + if (lexer.getInterpreter().decisionToDFA[0] == null) { + ATN atn = lexer.getATN(); + for (int i = 0; i < lexer.getInterpreter().decisionToDFA.length; i++) { + lexer.getInterpreter().decisionToDFA[i] = new DFA(atn.getDecisionState(i), i); + } + } + + CommonTokenStream tokens = new CommonTokenStream(lexer); + tokens.fill(); + tokenCount.addAndGet(currentPass, tokens.size()); + + if (COMPUTE_CHECKSUM) { + for (Token token : tokens.getTokens()) { + updateChecksum(checksum, token); + } + } + + if (!RUN_PARSER) { + return new FileParseResult(input.getSourceName(), (int)checksum.getValue(), null, tokens.size(), startTime, lexer, null); + } + + final long parseStartTime = System.nanoTime(); + Parser parser = sharedParsers[thread]; + if (REUSE_PARSER && parser != null) { + parser.setInputStream(tokens); + } else { + Parser previousParser = parser; + + if (USE_PARSER_INTERPRETER) { + Parser referenceParser = parserCtor.newInstance(tokens); + parser = new ParserInterpreter(referenceParser.getGrammarFileName(), referenceParser.getVocabulary(), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens); + } + else { + parser = parserCtor.newInstance(tokens); + } + + DFA[] decisionToDFA = (FILE_GRANULARITY || previousParser == null ? parser : previousParser).getInterpreter().decisionToDFA; + if (!REUSE_PARSER_DFA || (!FILE_GRANULARITY && previousParser == null)) { + decisionToDFA = new DFA[decisionToDFA.length]; + } + + if (COMPUTE_TRANSITION_STATS) { + parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); + } else if (!REUSE_PARSER_DFA) { + parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); + } + + sharedParsers[thread] = parser; + } + + parser.removeParseListeners(); + parser.removeErrorListeners(); + if (!TWO_STAGE_PARSING) { + parser.addErrorListener(DescriptiveErrorListener.INSTANCE); + parser.addErrorListener(new SummarizingDiagnosticErrorListener()); + } + + if (parser.getInterpreter().decisionToDFA[0] == null) { + ATN atn = parser.getATN(); + for (int i = 0; i < parser.getInterpreter().decisionToDFA.length; i++) { + parser.getInterpreter().decisionToDFA[i] = new DFA(atn.getDecisionState(i), i); + } + } + + parser.getInterpreter().setPredictionMode(TWO_STAGE_PARSING ? 
PredictionMode.SLL : PREDICTION_MODE); + parser.setBuildParseTree(BUILD_PARSE_TREES); + if (!BUILD_PARSE_TREES && BLANK_LISTENER) { + parser.addParseListener(listener); + } + if (BAIL_ON_ERROR || TWO_STAGE_PARSING) { + parser.setErrorHandler(new BailErrorStrategy()); + } + + Method parseMethod = parserClass.getMethod(entryPoint); + Object parseResult; + + try { + if (COMPUTE_CHECKSUM && !BUILD_PARSE_TREES) { + parser.addParseListener(new ChecksumParseTreeListener(checksum)); + } + + if (USE_PARSER_INTERPRETER) { + ParserInterpreter parserInterpreter = (ParserInterpreter)parser; + parseResult = parserInterpreter.parse(Collections.lastIndexOfSubList(Arrays.asList(parser.getRuleNames()), Collections.singletonList(entryPoint))); + } + else { + parseResult = parseMethod.invoke(parser); + } + } catch (InvocationTargetException ex) { + if (!TWO_STAGE_PARSING) { + throw ex; + } + + String sourceName = tokens.getSourceName(); + sourceName = sourceName != null && !sourceName.isEmpty() ? sourceName+": " : ""; + if (REPORT_SECOND_STAGE_RETRY) { + System.err.println(sourceName+"Forced to retry with full context."); + } + + if (!(ex.getCause() instanceof ParseCancellationException)) { + throw ex; + } + + tokens.reset(); + if (REUSE_PARSER && parser != null) { + parser.setInputStream(tokens); + } else { + Parser previousParser = parser; + + if (USE_PARSER_INTERPRETER) { + Parser referenceParser = parserCtor.newInstance(tokens); + parser = new ParserInterpreter(referenceParser.getGrammarFileName(), referenceParser.getVocabulary(), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens); + } + else { + parser = parserCtor.newInstance(tokens); + } + + DFA[] decisionToDFA = previousParser.getInterpreter().decisionToDFA; + if (COMPUTE_TRANSITION_STATS) { + parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); + } else if (!REUSE_PARSER_DFA) { + parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); + } + + sharedParsers[thread] = parser; + } + + parser.removeParseListeners(); + parser.removeErrorListeners(); + parser.addErrorListener(DescriptiveErrorListener.INSTANCE); + parser.addErrorListener(new SummarizingDiagnosticErrorListener()); + parser.getInterpreter().setPredictionMode(PredictionMode.LL); + parser.setBuildParseTree(BUILD_PARSE_TREES); + if (COMPUTE_CHECKSUM && !BUILD_PARSE_TREES) { + parser.addParseListener(new ChecksumParseTreeListener(checksum)); + } + if (!BUILD_PARSE_TREES && BLANK_LISTENER) { + parser.addParseListener(listener); + } + if (BAIL_ON_ERROR) { + parser.setErrorHandler(new BailErrorStrategy()); + } + + parseResult = parseMethod.invoke(parser); + } + + assertThat(parseResult, instanceOf(ParseTree.class)); + if (COMPUTE_CHECKSUM && BUILD_PARSE_TREES) { + ParseTreeWalker.DEFAULT.walk(new ChecksumParseTreeListener(checksum), (ParseTree)parseResult); + } + if (BUILD_PARSE_TREES && BLANK_LISTENER) { + ParseTreeWalker.DEFAULT.walk(listener, (ParseTree)parseResult); + } + + return new FileParseResult(input.getSourceName(), (int)checksum.getValue(), (ParseTree)parseResult, tokens.size(), TIME_PARSE_ONLY ? 
parseStartTime : startTime, lexer, parser); + } catch (Exception e) { + if (!REPORT_SYNTAX_ERRORS && e instanceof ParseCancellationException) { + return new FileParseResult("unknown", (int)checksum.getValue(), null, 0, startTime, null, null); + } + + e.printStackTrace(System.out); + throw new IllegalStateException(e); + } + } + }; + } catch (Exception e) { + e.printStackTrace(System.out); + Assert.fail(e.getMessage()); + throw new IllegalStateException(e); + } + } + + protected interface ParserFactory { + FileParseResult parseFile(CharStream input, int currentPass, int thread); + } + + protected static class FileParseResult { + public final String sourceName; + public final int checksum; + public final ParseTree parseTree; + public final int tokenCount; + public final long startTime; + public final long endTime; + + public final int lexerDFASize; + public final long lexerTotalTransitions; + public final long lexerComputedTransitions; + + public final int parserDFASize; + public final long[] decisionInvocations; + public final long[] fullContextFallback; + public final long[] nonSll; + public final long[] parserTotalTransitions; + public final long[] parserComputedTransitions; + public final long[] parserFullContextTransitions; + + public FileParseResult(String sourceName, int checksum, @Nullable ParseTree parseTree, int tokenCount, long startTime, Lexer lexer, Parser parser) { + this.sourceName = sourceName; + this.checksum = checksum; + this.parseTree = parseTree; + this.tokenCount = tokenCount; + this.startTime = startTime; + this.endTime = System.nanoTime(); + + if (lexer != null) { + LexerATNSimulator interpreter = lexer.getInterpreter(); + if (interpreter instanceof StatisticsLexerATNSimulator) { + lexerTotalTransitions = ((StatisticsLexerATNSimulator)interpreter).totalTransitions; + lexerComputedTransitions = ((StatisticsLexerATNSimulator)interpreter).computedTransitions; + } else { + lexerTotalTransitions = 0; + lexerComputedTransitions = 0; + } + + int dfaSize = 0; + for (DFA dfa : interpreter.decisionToDFA) { + if (dfa != null) { + dfaSize += dfa.states.size(); + } + } + + lexerDFASize = dfaSize; + } else { + lexerDFASize = 0; + lexerTotalTransitions = 0; + lexerComputedTransitions = 0; + } + + if (parser != null) { + ParserATNSimulator interpreter = parser.getInterpreter(); + if (interpreter instanceof StatisticsParserATNSimulator) { + decisionInvocations = ((StatisticsParserATNSimulator)interpreter).decisionInvocations; + fullContextFallback = ((StatisticsParserATNSimulator)interpreter).fullContextFallback; + nonSll = ((StatisticsParserATNSimulator)interpreter).nonSll; + parserTotalTransitions = ((StatisticsParserATNSimulator)interpreter).totalTransitions; + parserComputedTransitions = ((StatisticsParserATNSimulator)interpreter).computedTransitions; + parserFullContextTransitions = ((StatisticsParserATNSimulator)interpreter).fullContextTransitions; + } else { + decisionInvocations = new long[0]; + fullContextFallback = new long[0]; + nonSll = new long[0]; + parserTotalTransitions = new long[0]; + parserComputedTransitions = new long[0]; + parserFullContextTransitions = new long[0]; + } + + int dfaSize = 0; + for (DFA dfa : interpreter.decisionToDFA) { + if (dfa != null) { + dfaSize += dfa.states.size(); + } + } + + parserDFASize = dfaSize; + } else { + parserDFASize = 0; + decisionInvocations = new long[0]; + fullContextFallback = new long[0]; + nonSll = new long[0]; + parserTotalTransitions = new long[0]; + parserComputedTransitions = new long[0]; + parserFullContextTransitions 
= new long[0]; + } + } + } + + private static class StatisticsLexerATNSimulator extends LexerATNSimulator { + + public long totalTransitions; + public long computedTransitions; + + public StatisticsLexerATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { + super(atn, decisionToDFA, sharedContextCache); + } + + public StatisticsLexerATNSimulator(Lexer recog, ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { + super(recog, atn, decisionToDFA, sharedContextCache); + } + + @Override + protected DFAState getExistingTargetState(DFAState s, int t) { + totalTransitions++; + return super.getExistingTargetState(s, t); + } + + @Override + protected DFAState computeTargetState(CharStream input, DFAState s, int t) { + computedTransitions++; + return super.computeTargetState(input, s, t); + } + } + + private static class StatisticsParserATNSimulator extends ParserATNSimulator { + + public final long[] decisionInvocations; + public final long[] fullContextFallback; + public final long[] nonSll; + public final long[] totalTransitions; + public final long[] computedTransitions; + public final long[] fullContextTransitions; + + private int decision; + + public StatisticsParserATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { + super(atn, decisionToDFA, sharedContextCache); + decisionInvocations = new long[atn.decisionToState.size()]; + fullContextFallback = new long[atn.decisionToState.size()]; + nonSll = new long[atn.decisionToState.size()]; + totalTransitions = new long[atn.decisionToState.size()]; + computedTransitions = new long[atn.decisionToState.size()]; + fullContextTransitions = new long[atn.decisionToState.size()]; + } + + public StatisticsParserATNSimulator(Parser parser, ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { + super(parser, atn, decisionToDFA, sharedContextCache); + decisionInvocations = new long[atn.decisionToState.size()]; + fullContextFallback = new long[atn.decisionToState.size()]; + nonSll = new long[atn.decisionToState.size()]; + totalTransitions = new long[atn.decisionToState.size()]; + computedTransitions = new long[atn.decisionToState.size()]; + fullContextTransitions = new long[atn.decisionToState.size()]; + } + + @Override + public int adaptivePredict(TokenStream input, int decision, ParserRuleContext outerContext) { + try { + this.decision = decision; + decisionInvocations[decision]++; + return super.adaptivePredict(input, decision, outerContext); + } + finally { + this.decision = -1; + } + } + + @Override + protected int execATNWithFullContext(DFA dfa, DFAState D, ATNConfigSet s0, TokenStream input, int startIndex, ParserRuleContext outerContext) { + fullContextFallback[decision]++; + return super.execATNWithFullContext(dfa, D, s0, input, startIndex, outerContext); + } + + @Override + protected DFAState getExistingTargetState(DFAState previousD, int t) { + totalTransitions[decision]++; + return super.getExistingTargetState(previousD, t); + } + + @Override + protected DFAState computeTargetState(DFA dfa, DFAState previousD, int t) { + computedTransitions[decision]++; + return super.computeTargetState(dfa, previousD, t); + } + + @Override + protected ATNConfigSet computeReachSet(ATNConfigSet closure, int t, boolean fullCtx) { + if (fullCtx) { + totalTransitions[decision]++; + computedTransitions[decision]++; + fullContextTransitions[decision]++; + } + + return super.computeReachSet(closure, t, fullCtx); + } + } + + private static class 
DescriptiveErrorListener extends BaseErrorListener { + public static DescriptiveErrorListener INSTANCE = new DescriptiveErrorListener(); + + @Override + public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, + int line, int charPositionInLine, + String msg, RecognitionException e) + { + if (!REPORT_SYNTAX_ERRORS) { + return; + } + + String sourceName = recognizer.getInputStream().getSourceName(); + if (!sourceName.isEmpty()) { + sourceName = String.format("%s:%d:%d: ", sourceName, line, charPositionInLine); + } + + System.err.println(sourceName+"line "+line+":"+charPositionInLine+" "+msg); + } + + } + + private static class SummarizingDiagnosticErrorListener extends DiagnosticErrorListener { + private BitSet _sllConflict; + private ATNConfigSet _sllConfigs; + + @Override + public void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, boolean exact, BitSet ambigAlts, ATNConfigSet configs) { + if (COMPUTE_TRANSITION_STATS && DETAILED_DFA_STATE_STATS) { + BitSet sllPredictions = getConflictingAlts(_sllConflict, _sllConfigs); + int sllPrediction = sllPredictions.nextSetBit(0); + BitSet llPredictions = getConflictingAlts(ambigAlts, configs); + int llPrediction = llPredictions.cardinality() == 0 ? ATN.INVALID_ALT_NUMBER : llPredictions.nextSetBit(0); + if (sllPrediction != llPrediction) { + ((StatisticsParserATNSimulator)recognizer.getInterpreter()).nonSll[dfa.decision]++; + } + } + + if (!REPORT_AMBIGUITIES) { + return; + } + + // show the rule name along with the decision + String format = "reportAmbiguity d=%d (%s): ambigAlts=%s, input='%s'"; + int decision = dfa.decision; + String rule = recognizer.getRuleNames()[dfa.atnStartState.ruleIndex]; + String input = recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)); + recognizer.notifyErrorListeners(String.format(format, decision, rule, ambigAlts, input)); + } + + @Override + public void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) { + _sllConflict = conflictingAlts; + _sllConfigs = configs; + if (!REPORT_FULL_CONTEXT) { + return; + } + + // show the rule name and viable configs along with the base info + String format = "reportAttemptingFullContext d=%d (%s), input='%s', viable=%s"; + int decision = dfa.decision; + String rule = recognizer.getRuleNames()[dfa.atnStartState.ruleIndex]; + String input = recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)); + BitSet representedAlts = getConflictingAlts(conflictingAlts, configs); + recognizer.notifyErrorListeners(String.format(format, decision, rule, input, representedAlts)); + } + + @Override + public void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, ATNConfigSet configs) { + if (COMPUTE_TRANSITION_STATS && DETAILED_DFA_STATE_STATS) { + BitSet sllPredictions = getConflictingAlts(_sllConflict, _sllConfigs); + int sllPrediction = sllPredictions.nextSetBit(0); + if (sllPrediction != prediction) { + ((StatisticsParserATNSimulator)recognizer.getInterpreter()).nonSll[dfa.decision]++; + } + } + + if (!REPORT_CONTEXT_SENSITIVITY) { + return; + } + + // show the rule name and viable configs along with the base info + String format = "reportContextSensitivity d=%d (%s), input='%s', viable={%d}"; + int decision = dfa.decision; + String rule = recognizer.getRuleNames()[dfa.atnStartState.ruleIndex]; + String input = recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)); + 
recognizer.notifyErrorListeners(String.format(format, decision, rule, input, prediction)); + } + + } + + protected static final class FilenameFilters { + public static final FilenameFilter ALL_FILES = new FilenameFilter() { + + @Override + public boolean accept(File dir, String name) { + return true; + } + + }; + + public static FilenameFilter extension(String extension) { + return extension(extension, true); + } + + public static FilenameFilter extension(String extension, boolean caseSensitive) { + return new FileExtensionFilenameFilter(extension, caseSensitive); + } + + public static FilenameFilter name(String filename) { + return name(filename, true); + } + + public static FilenameFilter name(String filename, boolean caseSensitive) { + return new FileNameFilenameFilter(filename, caseSensitive); + } + + public static FilenameFilter all(FilenameFilter... filters) { + return new AllFilenameFilter(filters); + } + + public static FilenameFilter any(FilenameFilter... filters) { + return new AnyFilenameFilter(filters); + } + + public static FilenameFilter none(FilenameFilter... filters) { + return not(any(filters)); + } + + public static FilenameFilter not(FilenameFilter filter) { + return new NotFilenameFilter(filter); + } + + private FilenameFilters() { + } + + protected static class FileExtensionFilenameFilter implements FilenameFilter { + + private final String extension; + private final boolean caseSensitive; + + public FileExtensionFilenameFilter(String extension, boolean caseSensitive) { + if (!extension.startsWith(".")) { + extension = '.' + extension; + } + + this.extension = extension; + this.caseSensitive = caseSensitive; + } + + @Override + public boolean accept(File dir, String name) { + if (caseSensitive) { + return name.endsWith(extension); + } else { + return name.toLowerCase().endsWith(extension); + } + } + } + + protected static class FileNameFilenameFilter implements FilenameFilter { + + private final String filename; + private final boolean caseSensitive; + + public FileNameFilenameFilter(String filename, boolean caseSensitive) { + this.filename = filename; + this.caseSensitive = caseSensitive; + } + + @Override + public boolean accept(File dir, String name) { + if (caseSensitive) { + return name.equals(filename); + } else { + return name.toLowerCase().equals(filename); + } + } + } + + protected static class AllFilenameFilter implements FilenameFilter { + + private final FilenameFilter[] filters; + + public AllFilenameFilter(FilenameFilter[] filters) { + this.filters = filters; + } + + @Override + public boolean accept(File dir, String name) { + for (FilenameFilter filter : filters) { + if (!filter.accept(dir, name)) { + return false; + } + } + + return true; + } + } + + protected static class AnyFilenameFilter implements FilenameFilter { + + private final FilenameFilter[] filters; + + public AnyFilenameFilter(FilenameFilter[] filters) { + this.filters = filters; + } + + @Override + public boolean accept(File dir, String name) { + for (FilenameFilter filter : filters) { + if (filter.accept(dir, name)) { + return true; + } + } + + return false; + } + } + + protected static class NotFilenameFilter implements FilenameFilter { + + private final FilenameFilter filter; + + public NotFilenameFilter(FilenameFilter filter) { + this.filter = filter; + } + + @Override + public boolean accept(File dir, String name) { + return !filter.accept(dir, name); + } + } + } + + protected static class NumberedThread extends Thread { + private final int threadNumber; + + public 
NumberedThread(Runnable target, int threadNumber) { + super(target); + this.threadNumber = threadNumber; + } + + public final int getThreadNumber() { + return threadNumber; + } + + } + + protected static class NumberedThreadFactory implements ThreadFactory { + private final AtomicInteger nextThread = new AtomicInteger(); + + @Override + public Thread newThread(Runnable r) { + int threadNumber = nextThread.getAndIncrement(); + assert threadNumber < NUMBER_OF_THREADS; + return new NumberedThread(r, threadNumber); + } + + } + + protected static class FixedThreadNumberFactory implements ThreadFactory { + private final int threadNumber; + + public FixedThreadNumberFactory(int threadNumber) { + this.threadNumber = threadNumber; + } + + @Override + public Thread newThread(Runnable r) { + assert threadNumber < NUMBER_OF_THREADS; + return new NumberedThread(r, threadNumber); + } + } + + protected static class ChecksumParseTreeListener implements ParseTreeListener { + private static final int VISIT_TERMINAL = 1; + private static final int VISIT_ERROR_NODE = 2; + private static final int ENTER_RULE = 3; + private static final int EXIT_RULE = 4; + + private final Checksum checksum; + + public ChecksumParseTreeListener(Checksum checksum) { + this.checksum = checksum; + } + + @Override + public void visitTerminal(TerminalNode node) { + checksum.update(VISIT_TERMINAL); + updateChecksum(checksum, node.getSymbol()); + } + + @Override + public void visitErrorNode(ErrorNode node) { + checksum.update(VISIT_ERROR_NODE); + updateChecksum(checksum, node.getSymbol()); + } + + @Override + public void enterEveryRule(ParserRuleContext ctx) { + checksum.update(ENTER_RULE); + updateChecksum(checksum, ctx.getRuleIndex()); + updateChecksum(checksum, ctx.getStart()); + } + + @Override + public void exitEveryRule(ParserRuleContext ctx) { + checksum.update(EXIT_RULE); + updateChecksum(checksum, ctx.getRuleIndex()); + updateChecksum(checksum, ctx.getStop()); + } + + } + + protected static final class InputDescriptor { + private final String source; + private Reference<CloneableANTLRFileStream> inputStream; + + public InputDescriptor(@NotNull String source) { + this.source = source; + if (PRELOAD_SOURCES) { + getInputStream(); + } + } + + @NotNull + public synchronized CharStream getInputStream() { + CloneableANTLRFileStream stream = inputStream != null ? inputStream.get() : null; + if (stream == null) { + try { + stream = new CloneableANTLRFileStream(source, ENCODING); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + + if (PRELOAD_SOURCES) { + inputStream = new StrongReference<CloneableANTLRFileStream>(stream); + } else { + inputStream = new SoftReference<CloneableANTLRFileStream>(stream); + } + } + + return new JavaUnicodeInputStream(stream.createCopy()); + } + } + + protected static class CloneableANTLRFileStream extends ANTLRFileStream { + + public CloneableANTLRFileStream(String fileName, String encoding) throws IOException { + super(fileName, encoding); + } + + public ANTLRInputStream createCopy() { + ANTLRInputStream stream = new ANTLRInputStream(this.data, this.n); + stream.name = this.getSourceName(); + return stream; + } + } + + public static class StrongReference<T> extends WeakReference<T> { + public final T referent; + + public StrongReference(T referent) { + super(referent); + this.referent = referent; + } + + @Override + public T get() { + return referent; + } + } + + /** + * This is a regression test for antlr/antlr4#192 "Poor performance of + * expression parsing". 
+ * https://github.com/antlr/antlr4/issues/192 + */ + @Test(timeout = 60000) + public void testExpressionGrammar() { + String grammar = + "grammar Expr;\n" + + "\n" + + "program: expr EOF;\n" + + "\n" + + "expr: ID\n" + + " | 'not' expr\n" + + " | expr 'and' expr\n" + + " | expr 'or' expr\n" + + " ;\n" + + "\n" + + "ID: [a-zA-Z_][a-zA-Z_0-9]*;\n" + + "WS: [ \\t\\n\\r\\f]+ -> skip;\n" + + "ERROR: .;\n"; + String input = + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + " X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" + + "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12\n"; + + String found = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "program", + input, false); + Assert.assertEquals("", found); + Assert.assertEquals(null, stderrDuringParse); + + List inputs = new ArrayList(); + for (int i = 0; i < 10; i++) { + inputs.add(input); + } + + input = Utils.join(inputs.iterator(), " or\n"); + found = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "program", + input, false); + Assert.assertEquals("", found); + Assert.assertEquals(null, stderrDuringParse); + } + + @Test(timeout = 20000) + public void testExponentialInclude() { + String grammarFormat = + "parser grammar Level_%d_%d;\n" + + "\n" + + "%s import Level_%d_1, Level_%d_2;\n" + + "\n" + + "rule_%d_%d : EOF;\n"; + + System.out.println("dir "+tmpdir); + mkdir(tmpdir); + + long startTime = System.nanoTime(); + + int levels = 20; + for (int level = 0; level < levels; level++) { + String leafPrefix = level == levels - 1 ? 
"//" : ""; + String grammar1 = String.format(grammarFormat, level, 1, leafPrefix, level + 1, level + 1, level, 1); + writeFile(tmpdir, "Level_" + level + "_1.g4", grammar1); + if (level > 0) { + String grammar2 = String.format(grammarFormat, level, 2, leafPrefix, level + 1, level + 1, level, 1); + writeFile(tmpdir, "Level_" + level + "_2.g4", grammar2); + } + } + + ErrorQueue equeue = antlr("Level_0_1.g4", false); + Assert.assertTrue(equeue.errors.isEmpty()); + + long endTime = System.nanoTime(); + System.out.format("%s milliseconds.%n", (endTime - startTime) / 1000000.0); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java b/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java new file mode 100644 index 000000000..f284cecfa --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java @@ -0,0 +1,68 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.parse.ScopeParser; +import org.antlr.v4.tool.ErrorManager; +import org.antlr.v4.tool.Grammar; +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestScopeParsing extends BaseTest { + String[] argPairs = { + "", "{}", + " ", "{}", + "int i", "{i=int i}", + "int[] i, int j[]", "{i=int[] i, j=int [] j}", + "Map[] i, int j[]", "{i=Map[] i, j=int [] j}", + "Map>[] i", "{i=Map>[] i}", + "int i = 34+a[3], int j[] = new int[34]", + "{i=int i= 34+a[3], j=int [] j= new int[34]}", + "char *foo32[3] = {1,2,3}", "{3=char *foo32[] 3= {1,2,3}}", + "String[] headers", "{headers=String[] headers}", + + // python/ruby style + "i", "{i=null i}", + "i,j", "{i=null i, j=null j}", + "i,j, k", "{i=null i, j=null j, k=null k}", + }; + + @Test public void testArgs() throws Exception { + for (int i = 0; i < argPairs.length; i+=2) { + String input = argPairs[i]; + String expected = argPairs[i+1]; + Grammar dummy = new Grammar("grammar T; a:'a';"); + String actual = ScopeParser.parseTypedArgList(null, input, dummy).attributes.toString(); + assertEquals(expected, actual); + } + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java b/tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java new file mode 100644 index 000000000..862871c3d --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java @@ -0,0 +1,183 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestSemPredEvalLexer extends BaseTest { + + @Test public void testDisableRule() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "E1 : 'enum' {false}? ;\n" + + "E2 : 'enum' {true}? 
;\n" + // winner not E1 or ID + "ID : 'a'..'z'+ ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "enum abc", true); + String expecting = + "[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<3>,1:5]\n" + + "[@2,8:7='',<-1>,1:8]\n" + + "s0-' '->:s5=>4\n" + + "s0-'a'->:s6=>3\n" + + "s0-'e'->:s1=>3\n" + + ":s1=>3-'n'->:s2=>3\n" + + ":s2=>3-'u'->:s3=>3\n" + + ":s6=>3-'b'->:s6=>3\n" + + ":s6=>3-'c'->:s6=>3\n"; + assertEquals(expecting, found); + } + + @Test public void testIDvsEnum() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "ENUM : 'enum' {false}? ;\n" + + "ID : 'a'..'z'+ ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); + String expecting = + "[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<2>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s5=>3\n" + + "s0-'a'->:s4=>2\n" + + "s0-'e'->:s1=>2\n" + + ":s1=>2-'n'->:s2=>2\n" + + ":s2=>2-'u'->:s3=>2\n" + + ":s4=>2-'b'->:s4=>2\n" + + ":s4=>2-'c'->:s4=>2\n"; // no 'm'-> transition...conflicts with pred + assertEquals(expecting, found); + } + + @Test public void testIDnotEnum() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "ENUM : [a-z]+ {false}? ;\n" + + "ID : [a-z]+ ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); + String expecting = + "[@0,0:3='enum',<2>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<2>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s2=>3\n"; // no edges in DFA for enum/id. all paths lead to pred. + assertEquals(expecting, found); + } + + @Test public void testEnumNotID() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "ENUM : [a-z]+ {getText().equals(\"enum\")}? ;\n" + + "ID : [a-z]+ ;\n"+ + "WS : (' '|'\\n') -> skip ;"; + String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); + String expecting = + "[@0,0:3='enum',<1>,1:0]\n" + + "[@1,5:7='abc',<2>,1:5]\n" + + "[@2,9:12='enum',<1>,1:9]\n" + + "[@3,13:12='',<-1>,1:13]\n" + + "s0-' '->:s3=>3\n"; // no edges in DFA for enum/id. all paths lead to pred. + assertEquals(expecting, found); + } + + @Test public void testIndent() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "ID : [a-z]+ ;\n"+ + "INDENT : [ \\t]+ {_tokenStartCharPositionInLine==0}? \n" + + " {System.out.println(\"INDENT\");} ;"+ + "NL : '\\n' ;"+ + "WS : [ \\t]+ ;"; + String found = execLexer("L.g4", grammar, "L", "abc\n def \n", true); + String expecting = + "INDENT\n" + // action output + "[@0,0:2='abc',<1>,1:0]\n" + // ID + "[@1,3:3='\\n',<3>,1:3]\n" + // NL + "[@2,4:5=' ',<2>,2:0]\n" + // INDENT + "[@3,6:8='def',<1>,2:2]\n" + // ID + "[@4,9:10=' ',<4>,2:5]\n" + // WS + "[@5,11:11='\\n',<3>,2:7]\n" + + "[@6,12:11='',<-1>,3:8]\n" + + "s0-'\n" + + "'->:s2=>3\n" + + "s0-'a'->:s1=>1\n" + + "s0-'d'->:s1=>1\n" + + ":s1=>1-'b'->:s1=>1\n" + + ":s1=>1-'c'->:s1=>1\n" + + ":s1=>1-'e'->:s1=>1\n" + + ":s1=>1-'f'->:s1=>1\n"; + assertEquals(expecting, found); + } + + @Test public void testLexerInputPositionSensitivePredicates() throws Exception { + String grammar = + "lexer grammar L;\n"+ + "WORD1 : ID1+ {System.out.println(getText());} ;\n"+ + "WORD2 : ID2+ {System.out.println(getText());} ;\n"+ + "fragment ID1 : {getCharPositionInLine()<2}? [a-zA-Z];\n"+ + "fragment ID2 : {getCharPositionInLine()>=2}? 
[a-zA-Z];\n"+ + "WS : (' '|'\\n') -> skip;\n"; + String found = execLexer("L.g4", grammar, "L", "a cde\nabcde\n"); + String expecting = + "a\n" + + "cde\n" + + "ab\n" + + "cde\n" + + "[@0,0:0='a',<1>,1:0]\n" + + "[@1,2:4='cde',<2>,1:2]\n" + + "[@2,6:7='ab',<1>,2:0]\n" + + "[@3,8:10='cde',<2>,2:2]\n" + + "[@4,12:11='',<-1>,3:0]\n"; + assertEquals(expecting, found); + } + + @Test public void testPredicatedKeywords() { + String grammar = + "lexer grammar A;" + + "ENUM : [a-z]+ {getText().equals(\"enum\")}? {System.out.println(\"enum!\");} ;\n" + + "ID : [a-z]+ {System.out.println(\"ID \"+getText());} ;\n" + + "WS : [ \\n] -> skip ;"; + String found = execLexer("A.g4", grammar, "A", "enum enu a"); + String expecting = + "enum!\n" + + "ID enu\n" + + "ID a\n" + + "[@0,0:3='enum',<1>,1:0]\n" + + "[@1,5:7='enu',<2>,1:5]\n" + + "[@2,9:9='a',<2>,1:9]\n" + + "[@3,10:9='',<-1>,1:10]\n"; + assertEquals(expecting, found); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java new file mode 100644 index 000000000..1be6d0699 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java @@ -0,0 +1,626 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class TestSemPredEvalParser extends BaseTest { + // TEST VALIDATING PREDS + + @Test public void testSimpleValidate() throws Exception { + String grammar = + "grammar T;\n" + + "s : a ;\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? 
INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + /*String found = */execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x", false); + + String expecting = "line 1:0 no viable alternative at input 'x'\n"; + assertEquals(expecting, stderrDuringParse); + } + + @Test public void testSimpleValidate2() throws Exception { + String grammar = + "grammar T;\n" + + "s : a a a;\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "3 4 x", false); + String expecting = + "alt 2\n" + + "alt 2\n"; + assertEquals(expecting, found); + + expecting = "line 1:4 no viable alternative at input 'x'\n"; + assertEquals(expecting, stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#196 + * "element+ in expression grammar doesn't parse properly" + * https://github.com/antlr/antlr4/issues/196 + */ + @Test public void testAtomWithClosureInTranslatedLRRule() throws Exception { + String grammar = + "grammar T;\n" + + "start : e[0] EOF;\n" + + "e[int _p]\n" + + " : ( 'a'\n" + + " | 'b'+\n" + + " )\n" + + " ( {3 >= $_p}? '+' e[4]\n" + + " )*\n" + + " ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", + "a+b+a", false); + String expecting = ""; + assertEquals(expecting, found); + assertNull(stderrDuringParse); + } + + @Test public void testValidateInDFA() throws Exception { + String grammar = + "grammar T;\n" + + "s : a ';' a;\n" + + // ';' helps us to resynchronize without consuming + // 2nd 'a' reference. We our testing that the DFA also + // throws an exception if the validating predicate fails + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x ; y", false); + String expecting = ""; + assertEquals(expecting, found); + + expecting = + "line 1:0 no viable alternative at input 'x'\n" + + "line 1:4 no viable alternative at input 'y'\n"; + assertEquals(expecting, stderrDuringParse); + } + + // TEST DISAMBIG PREDS + + @Test public void testSimple() throws Exception { + String grammar = + "grammar T;\n" + + "s : a a a;\n" + // do 3x: once in ATN, next in DFA then INT in ATN + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? ID {System.out.println(\"alt 2\");}\n" + + " | INT {System.out.println(\"alt 3\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x y 3", false); + String expecting = + "alt 2\n" + + "alt 2\n" + + "alt 3\n"; + assertEquals(expecting, found); + } + + @Test public void testOrder() throws Exception { + // Under new predicate ordering rules (see antlr/antlr4#29), the first + // alt with an acceptable config (unpredicated, or predicated and evaluates + // to true) is chosen. + String grammar = + "grammar T;\n" + + "s : a {} a;\n" + // do 2x: once in ATN, next in DFA; + // action blocks lookahead from falling off of 'a' + // and looking into 2nd 'a' ref. !ctx dependent pred + "a : ID {System.out.println(\"alt 1\");}\n" + + " | {true}? 
ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x y", false); + String expecting = + "alt 1\n" + + "alt 1\n"; + assertEquals(expecting, found); + } + + @Test public void test2UnpredicatedAlts() throws Exception { + // We have n-2 predicates for n alternatives. pick first alt + String grammar = + "grammar T;\n" + + "@header {" + + "import java.util.*;" + + "}" + + "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + + " a ';' a;\n" + // do 2x: once in ATN, next in DFA + "a : ID {System.out.println(\"alt 1\");}\n" + + " | ID {System.out.println(\"alt 2\");}\n" + + " | {false}? ID {System.out.println(\"alt 3\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x; y", true); + String expecting = + "alt 1\n" + + "alt 1\n"; + assertEquals(expecting, found); + assertEquals("line 1:0 reportAttemptingFullContext d=0 (a), input='x'\n" + + "line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\n" + + "line 1:3 reportAttemptingFullContext d=0 (a), input='y'\n" + + "line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n", + this.stderrDuringParse); + } + + @Test public void test2UnpredicatedAltsAndOneOrthogonalAlt() throws Exception { + String grammar = + "grammar T;\n" + + "@header {" + + "import java.util.*;" + + "}" + + "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + + " a ';' a ';' a;\n" + + "a : INT {System.out.println(\"alt 1\");}\n" + + " | ID {System.out.println(\"alt 2\");}\n" + // must pick this one for ID since pred is false + " | ID {System.out.println(\"alt 3\");}\n" + + " | {false}? ID {System.out.println(\"alt 4\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "34; x; y", true); + String expecting = + "alt 1\n" + + "alt 2\n" + + "alt 2\n"; + assertEquals(expecting, found); + assertEquals("line 1:4 reportAttemptingFullContext d=0 (a), input='x'\n" + + "line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\n" + + "line 1:7 reportAttemptingFullContext d=0 (a), input='y'\n" + + "line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n", + this.stderrDuringParse); + } + + @Test public void testRewindBeforePredEval() throws Exception { + // The parser consumes ID and moves to the 2nd token INT. + // To properly evaluate the predicates after matching ID INT, + // we must correctly see come back to starting index so LT(1) works + String grammar = + "grammar T;\n" + + "s : a a;\n" + + "a : {_input.LT(1).getText().equals(\"x\")}? ID INT {System.out.println(\"alt 1\");}\n" + + " | {_input.LT(1).getText().equals(\"y\")}? ID INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "y 3 x 4", false); + String expecting = + "alt 2\n" + + "alt 1\n"; + assertEquals(expecting, found); + } + + @Test public void testNoTruePredsThrowsNoViableAlt() throws Exception { + // checks that we throw exception if all alts + // are covered with a predicate and none succeeds + String grammar = + "grammar T;\n" + + "s : a a;\n" + + "a : {false}? 
ID INT {System.out.println(\"alt 1\");}\n" + + " | {false}? ID INT {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + execParser("T.g4", grammar, "TParser", "TLexer", "s", + "y 3 x 4", false); + String expecting = "line 1:0 no viable alternative at input 'y'\n"; + String result = stderrDuringParse; + assertEquals(expecting, result); + } + + @Test public void testToLeft() throws Exception { + String grammar = + "grammar T;\n" + + "s : a+ ;\n" + + "a : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x x y", false); + String expecting = + "alt 2\n" + + "alt 2\n" + + "alt 2\n"; + assertEquals(expecting, found); + } + + @Test + public void testUnpredicatedPathsInAlt() throws Exception{ + String grammar = + "grammar T;\n" + + "s : a {System.out.println(\"alt 1\");}\n" + + " | b {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "a : {false}? ID INT\n" + + " | ID INT\n" + + " ;\n" + + "b : ID ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x 4", false); + String expecting = + "alt 1\n"; + assertEquals(expecting, found); + + expecting = null; + assertEquals(expecting, stderrDuringParse); + } + + @Test public void testActionHidesPreds() throws Exception { + // can't see preds, resolves to first alt found (1 in this case) + String grammar = + "grammar T;\n" + + "@parser::members {int i;}\n" + + "s : a+ ;\n" + + "a : {i=1;} ID {i==1}? {System.out.println(\"alt 1\");}\n" + + " | {i=2;} ID {i==2}? {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x x y", false); + String expecting = + "alt 1\n" + + "alt 1\n" + + "alt 1\n"; + assertEquals(expecting, found); + } + + /** In this case, we use predicates that depend on global information + * like we would do for a symbol table. We simply execute + * the predicates assuming that all necessary information is available. + * The i++ action is done outside of the prediction and so it is executed. + */ + @Test public void testToLeftWithVaryingPredicate() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {int i=0;}\n" + + "s : ({i++; System.out.println(\"i=\"+i);} a)+ ;\n" + + "a : {i % 2 == 0}? ID {System.out.println(\"alt 1\");}\n" + + " | {i % 2 != 0}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "x x y", false); + String expecting = + "i=1\n" + + "alt 2\n" + + "i=2\n" + + "alt 1\n" + + "i=3\n" + + "alt 2\n"; + assertEquals(expecting, found); + } + + /** + * In this case, we're passing a parameter into a rule that uses that + * information to predict the alternatives. This is the special case + * where we know exactly which context we are in. The context stack + * is empty and we have not dipped into the outer context to make a decision. 
+ */ + @Test public void testPredicateDependentOnArg() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {int i=0;}\n" + + "s : a[2] a[1];\n" + + "a[int i]" + + " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a b", false); + String expecting = + "alt 2\n" + + "alt 1\n"; + assertEquals(expecting, found); + } + + /** In this case, we have to ensure that the predicates are not + tested during the closure after recognizing the 1st ID. The + closure will fall off the end of 'a' 1st time and reach into the + a[1] rule invocation. It should not execute predicates because it + does not know what the parameter is. The context stack will not + be empty and so they should be ignored. It will not affect + recognition, however. We are really making sure the ATN + simulation doesn't crash with context object issues when it + encounters preds during FOLLOW. + */ + @Test public void testPredicateDependentOnArg2() throws Exception { + String grammar = + "grammar T;\n" + + "s : a[2] a[1];\n" + + "a[int i]" + + " : {$i==1}? ID\n" + + " | {$i==2}? ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a b", false); + String expecting = + ""; + assertEquals(expecting, found); + } + + @Test public void testDependentPredNotInOuterCtxShouldBeIgnored() throws Exception { + // uses ID ';' or ID '.' lookahead to solve s. preds not tested. + String grammar = + "grammar T;\n" + + "s : b[2] ';' | b[2] '.' ;\n" + // decision in s drills down to ctx-dependent pred in a; + "b[int i] : a[i] ;\n" + + "a[int i]" + + " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + + " ;" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a;", false); + String expecting = + "alt 2\n"; + assertEquals(expecting, found); + } + + @Test public void testIndependentPredNotPassedOuterCtxToAvoidCastException() throws Exception { + String grammar = + "grammar T;\n" + + "s : b ';' | b '.' ;\n" + + "b : a ;\n" + + "a" + + " : {false}? ID {System.out.println(\"alt 1\");}\n" + + " | {true}? ID {System.out.println(\"alt 2\");}\n" + + " ;" + + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a;", false); + String expecting = + "alt 2\n"; + assertEquals(expecting, found); + } + + /** During a global follow operation, we still collect semantic + * predicates as long as they are not dependent on local context + */ + @Test public void testPredsInGlobalFOLLOW() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {" + + "void f(Object s) {System.out.println(s);}\n" + + "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + + "}\n" + + "s : e {p(true)}? {f(\"parse\");} '!' ;\n" + + "t : e {p(false)}? 
ID ;\n" + + "e : ID | ;\n" + // non-LL(1) so we use ATN + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a!", false); + String expecting = + "eval=true\n" + // now we are parsing + "parse\n"; + assertEquals(expecting, found); + } + + /** We cannot collect predicates that are dependent on local context if + * we are doing a global follow. They appear as if they were not there at all. + */ + @Test public void testDepedentPredsInGlobalFOLLOW() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {" + + "void f(Object s) {System.out.println(s);}\n" + + "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + + "}\n" + + "s : a[99] ;\n" + + "a[int i] : e {p($i==99)}? {f(\"parse\");} '!' ;\n" + + "b[int i] : e {p($i==99)}? ID ;\n" + + "e : ID | ;\n" + // non-LL(1) so we use ATN + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a!", false); + String expecting = + "eval=true\n" + + "parse\n"; + assertEquals(expecting, found); + } + + /** Regular non-forced actions can create side effects used by semantic + * predicates and so we cannot evaluate any semantic predicate + * encountered after having seen a regular action. This includes + * during global follow operations. + */ + @Test public void testActionsHidePredsInGlobalFOLLOW() throws Exception { + String grammar = + "grammar T;\n" + + "@parser::members {" + + "void f(Object s) {System.out.println(s);}\n" + + "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + + "}\n" + + "s : e {} {p(true)}? {f(\"parse\");} '!' ;\n" + + "t : e {} {p(false)}? ID ;\n" + + "e : ID | ;\n" + // non-LL(1) so we use ATN + "ID : 'a'..'z'+ ;\n" + + "INT : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", + "a!", false); + String expecting = + "eval=true\n" + + "parse\n"; + assertEquals(expecting, found); + } + + @Test public void testPredTestedEvenWhenUnAmbig() throws Exception { + String grammar = + "grammar T;\n" + + "\n" + + "@parser::members {boolean enumKeyword = true;}\n" + + "\n" + + "primary\n" + + " : ID {System.out.println(\"ID \"+$ID.text);}\n" + + " | {!enumKeyword}? 'enum' {System.out.println(\"enum\");}\n" + + " ;\n" + + "\n" + + "ID : [a-z]+ ;\n" + + "\n" + + "WS : [ \\t\\n\\r]+ -> skip ;\n"; + + String found = execParser("T.g4", grammar, "TParser", "TLexer", "primary", + "abc", false); + assertEquals("ID abc\n", found); + + execParser("T.g4", grammar, "TParser", "TLexer", "primary", + "enum", false); + assertEquals("line 1:0 no viable alternative at input 'enum'\n", stderrDuringParse); + } + + /** + * This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". + * https://github.com/antlr/antlr4/issues/218 + */ + @Test public void testDisabledAlternative() { + String grammar = + "grammar AnnotProcessor;\n" + + "\n" + + "cppCompilationUnit : content+ EOF;\n" + + "\n" + + "content: anything | {false}? 
.;\n" + + "\n" + + "anything: ANY_CHAR;\n" + + "\n" + + "ANY_CHAR: [_a-zA-Z0-9];\n"; + + String input = "hello"; + String found = execParser("AnnotProcessor.g4", grammar, "AnnotProcessorParser", "AnnotProcessorLexer", "cppCompilationUnit", + input, false); + assertEquals("", found); + assertNull(stderrDuringParse); + } + + /** Loopback doesn't eval predicate at start of alt */ + @Test public void testPredFromAltTestedInLoopBack() { + String grammar = + "grammar T2;\n" + + "\n" + + "file\n" + + "@after {System.out.println($ctx.toStringTree(this));}\n" + + " : para para EOF ;" + + "para: paraContent NL NL ;\n"+ + "paraContent : ('s'|'x'|{_input.LA(2)!=NL}? NL)+ ;\n"+ + "NL : '\\n' ;\n"+ + "S : 's' ;\n"+ + "X : 'x' ;\n"; + + String input = "s\n\n\nx\n"; + String found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file", + input, true); + assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n", found); + assertEquals(stderrDuringParse, "line 5:2 mismatched input '' expecting '\n'\n"); + + input = "s\n\n\nx\n\n"; + found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file", + input, true); + assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) )\n", found); + + assertNull(stderrDuringParse); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestSets.java b/tool/test/org/antlr/v4/test/tool/TestSets.java new file mode 100644 index 000000000..790d01497 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestSets.java @@ -0,0 +1,283 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.tool.ErrorType; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** Test the set stuff in lexer and parser */ +public class TestSets extends BaseTest { + protected boolean debug = false; + + /** Public default constructor used by TestRig */ + public TestSets() { + } + + @Test public void testSeqDoesNotBecomeSet() throws Exception { + // this must return A not I to the parser; calling a nonfragment rule + // from a nonfragment rule does not set the overall token. + String grammar = + "grammar P;\n" + + "a : C {System.out.println(_input.getText());} ;\n" + + "fragment A : '1' | '2';\n" + + "fragment B : '3' '4';\n" + + "C : A | B;\n"; + String found = execParser("P.g4", grammar, "PParser", "PLexer", + "a", "34", debug); + assertEquals("34\n", found); + } + + @Test public void testParserSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : t=('x'|'y') {System.out.println($t.text);} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x", debug); + assertEquals("x\n", found); + } + + @Test public void testParserNotSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "zz", debug); + assertEquals("z\n", found); + } + + @Test public void testParserNotToken() throws Exception { + String grammar = + "grammar T;\n" + + "a : ~'x' 'z' {System.out.println(_input.getText());} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "zz", debug); + assertEquals("zz\n", found); + } + + @Test public void testParserNotTokenWithLabel() throws Exception { + String grammar = + "grammar T;\n" + + "a : t=~'x' 'z' {System.out.println($t.text);} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "zz", debug); + assertEquals("z\n", found); + } + + @Test public void testRuleAsSet() throws Exception { + String grammar = + "grammar T;\n" + + "a @after {System.out.println(_input.getText());} : 'a' | 'b' |'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "b", debug); + assertEquals("b\n", found); + } + + @Test public void testNotChar() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ~'b' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x", debug); + assertEquals("x\n", found); + } + + @Test public void testOptionalSingleElement() throws Exception { + String grammar = + "grammar T;\n" + + "a : A? 'c' {System.out.println(_input.getText());} ;\n" + + "A : 'b' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "bc", debug); + assertEquals("bc\n", found); + } + + @Test public void testOptionalLexerSingleElement() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println(_input.getText());} ;\n" + + "A : 'b'? 
'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "bc", debug); + assertEquals("bc\n", found); + } + + @Test public void testStarLexerSingleElement() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println(_input.getText());} ;\n" + + "A : 'b'* 'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "bbbbc", debug); + assertEquals("bbbbc\n", found); + found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "c", debug); + assertEquals("c\n", found); + } + + @Test public void testPlusLexerSingleElement() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println(_input.getText());} ;\n" + + "A : 'b'+ 'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "bbbbc", debug); + assertEquals("bbbbc\n", found); + } + + @Test public void testOptionalSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : ('a'|'b')? 'c' {System.out.println(_input.getText());} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "ac", debug); + assertEquals("ac\n", found); + } + + @Test public void testStarSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : ('a'|'b')* 'c' {System.out.println(_input.getText());} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "abaac", debug); + assertEquals("abaac\n", found); + } + + @Test public void testPlusSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : ('a'|'b')+ 'c' {System.out.println(_input.getText());} ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "abaac", debug); + assertEquals("abaac\n", found); + } + + @Test public void testLexerOptionalSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println(_input.getText());} ;\n" + + "A : ('a'|'b')? 
'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "ac", debug); + assertEquals("ac\n", found); + } + + @Test public void testLexerStarSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println(_input.getText());} ;\n" + + "A : ('a'|'b')* 'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "abaac", debug); + assertEquals("abaac\n", found); + } + + @Test public void testLexerPlusSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println(_input.getText());} ;\n" + + "A : ('a'|'b')+ 'c' ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "abaac", debug); + assertEquals("abaac\n", found); + } + + @Test public void testNotCharSet() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ~('b'|'c') ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x", debug); + assertEquals("x\n", found); + } + + @Test public void testNotCharSetWithLabel() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : h=~('b'|'c') ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x", debug); + assertEquals("x\n", found); + } + + @Test public void testNotCharSetWithRuleRef() throws Exception { + // might be a useful feature to add someday + String[] pair = new String[] { + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ~('a'|B) ;\n" + + "B : 'b' ;\n", + "error(" + ErrorType.UNSUPPORTED_REFERENCE_IN_LEXER_SET.code + "): T.g4:3:10: rule reference B is not currently supported in a set\n" + }; + super.testErrors(pair, true); + } + + @Test public void testNotCharSetWithString() throws Exception { + // might be a useful feature to add someday + String[] pair = new String[] { + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ~('a'|'aa') ;\n" + + "B : 'b' ;\n", + "error(" + ErrorType.INVALID_LITERAL_IN_LEXER_SET.code + "): T.g4:3:10: multi-character literals are not allowed in lexer sets: 'aa'\n" + }; + super.testErrors(pair, true); + } + + @Test public void testNotCharSetWithRuleRef3() throws Exception { + String grammar = + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ('a'|B) ;\n" + // this doesn't collapse to set but works + "fragment\n" + + "B : ~('a'|'c') ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "x", debug); + assertEquals("x\n", found); + } + + @Test public void testCharSetLiteral() throws Exception { + String grammar = + "grammar T;\n" + + "a : (A {System.out.println($A.text);})+ ;\n" + + "A : [AaBb] ;\n" + + "WS : (' '|'\\n')+ -> skip ;\n"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", + "a", "A a B b", debug); + assertEquals("A\n" + + "a\n" + + "B\n" + + "b\n", found); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestSymbolIssues.java b/tool/test/org/antlr/v4/test/tool/TestSymbolIssues.java new file mode 100644 index 000000000..a2f5d8227 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestSymbolIssues.java @@ -0,0 +1,171 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.tool.ErrorType; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** */ +public class TestSymbolIssues extends BaseTest { + static String[] A = { + // INPUT + "grammar A;\n" + + "options { opt='sss'; k=3; }\n" + + "\n" + + "@members {foo}\n" + + "@members {bar}\n" + + "@lexer::header {package jj;}\n" + + "@lexer::header {package kk;}\n" + + "\n" + + "a[int i] returns [foo f] : X ID a[3] b[34] c ;\n" + + "b returns [int g] : Y 'y' 'if' a ;\n" + + "c : FJKD ;\n" + + "\n" + + "ID : 'a'..'z'+ ID ;", + // YIELDS + "error(" + ErrorType.ACTION_REDEFINITION.code + "): A.g4:5:1: redefinition of members action\n" + + "error(" + ErrorType.ACTION_REDEFINITION.code + "): A.g4:7:1: redefinition of header action\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): A.g4:2:10: unsupported option opt\n" + + "warning(" + ErrorType.ILLEGAL_OPTION.code + "): A.g4:2:21: unsupported option k\n" + + "error(" + ErrorType.ACTION_REDEFINITION.code + "): A.g4:5:1: redefinition of members action\n" + + "warning(" + ErrorType.IMPLICIT_TOKEN_DEFINITION.code + "): A.g4:9:27: implicit definition of token X in parser\n" + + "warning(" + ErrorType.IMPLICIT_TOKEN_DEFINITION.code + "): A.g4:10:20: implicit definition of token Y in parser\n" + + "warning(" + ErrorType.IMPLICIT_TOKEN_DEFINITION.code + "): A.g4:11:4: implicit definition of token FJKD in parser\n" + + "error(" + ErrorType.RULE_HAS_NO_ARGS.code + "): A.g4:9:37: rule b has no defined parameters\n" + + "error(" + ErrorType.MISSING_RULE_ARGS.code + "): A.g4:10:31: missing arguments(s) on rule reference: a\n" + }; + + static String[] B = { + // INPUT + "parser grammar B;\n" + + "tokens { ID, FOO, X, Y }\n" + + "\n" + + "a : s=ID b+=ID X=ID '.' 
;\n" + + "\n" + + "b : x=ID x+=ID ;\n" + + "\n" + + "s : FOO ;", + // YIELDS + "error(" + ErrorType.LABEL_CONFLICTS_WITH_RULE.code + "): B.g4:4:4: label s conflicts with rule with same name\n" + + "error(" + ErrorType.LABEL_CONFLICTS_WITH_RULE.code + "): B.g4:4:9: label b conflicts with rule with same name\n" + + "error(" + ErrorType.LABEL_CONFLICTS_WITH_TOKEN.code + "): B.g4:4:15: label X conflicts with token with same name\n" + + "error(" + ErrorType.LABEL_TYPE_CONFLICT.code + "): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n" + + "error(" + ErrorType.IMPLICIT_STRING_DEFINITION.code + "): B.g4:4:20: cannot create implicit token for string literal in non-combined grammar: '.'\n" + }; + + static String[] D = { + // INPUT + "parser grammar D;\n" + + "tokens{ID}\n" + + "a[int j] \n" + + " : i=ID j=ID ;\n" + + "\n" + + "b[int i] returns [int i] : ID ;\n" + + "\n" + + "c[int i] returns [String k]\n" + + " : ID ;", + + // YIELDS + "error(" + ErrorType.LABEL_CONFLICTS_WITH_ARG.code + "): D.g4:4:21: label j conflicts with parameter with same name\n" + + "error(" + ErrorType.RETVAL_CONFLICTS_WITH_ARG.code + "): D.g4:6:22: return value i conflicts with parameter with same name\n" + }; + + static String[] E = { + // INPUT + "grammar E;\n" + + "tokens {\n" + + " A, A,\n" + + " B,\n" + + " C\n" + + "}\n" + + "a : A ;\n", + + // YIELDS + "warning(" + ErrorType.TOKEN_NAME_REASSIGNMENT.code + "): E.g4:3:4: token name A is already defined\n" + }; + + @Test public void testA() { super.testErrors(A, false); } + @Test public void testB() { super.testErrors(B, false); } + @Test public void testD() { super.testErrors(D, false); } + @Test public void testE() { super.testErrors(E, false); } + + @Test public void testStringLiteralRedefs() throws Exception { + String grammar = + "lexer grammar L;\n" + + "A : 'a' ;\n" + + "mode X;\n"+ + "B : 'a' ;\n"+ + "mode Y;\n"+ + "C : 'a' ;\n"; + + LexerGrammar g = new LexerGrammar(grammar); + + String expectedTokenIDToTypeMap = "{EOF=-1, A=1, B=2, C=3}"; + String expectedStringLiteralToTypeMap = "{}"; + String expectedTypeToTokenList = "[A, B, C]"; + + assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); + assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); + assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); + } + + @Test public void testEmptyLexerModeDetection() throws Exception { + String[] test = { + "lexer grammar L;\n" + + "A : 'a';\n" + + "mode X;\n" + + "fragment B : 'b';", + + "error(" + ErrorType.MODE_WITHOUT_RULES.code + "): L.g4:3:5: lexer mode X must contain at least one non-fragment rule\n" + }; + + testErrors(test, false); + } + + @Test public void testEmptyLexerRuleDetection() throws Exception { + String[] test = { + "lexer grammar L;\n" + + "A : 'a';\n" + + "WS : [ \t]* -> skip;\n" + + "mode X;\n" + + " B : C;\n" + + " fragment C : A | (A C)?;", + + "warning(" + ErrorType.EPSILON_TOKEN.code + "): L.g4:3:0: non-fragment lexer rule WS can match the empty string\n" + + "warning(" + ErrorType.EPSILON_TOKEN.code + "): L.g4:5:2: non-fragment lexer rule B can match the empty string\n" + }; + + testErrors(test, false); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java b/tool/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java new file mode 100644 index 000000000..888f761b2 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java @@ -0,0 +1,179 @@ +/* + * [The "BSD license"] + * 
Copyright (c) 2014 Terence Parr + * Copyright (c) 2014 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.tool; + +import org.antlr.runtime.Token; +import org.antlr.v4.misc.Utils; +import org.antlr.v4.parse.ANTLRParser; +import org.antlr.v4.runtime.misc.IntervalSet; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.ast.GrammarAST; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class TestTokenPositionOptions extends BaseTest { + @Test public void testLeftRecursionRewrite() throws Exception { + Grammar g = new Grammar( + "grammar T;\n" + + "s : e ';' ;\n" + + "e : e '*' e\n" + + " | e '+' e\n" + + " | e '.' ID\n" + + " | '-' e\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + ); + + String expectedTree = + "(COMBINED_GRAMMAR T (RULES (RULE s (BLOCK (ALT e ';'))) (RULE e (BLOCK (ALT (BLOCK (ALT {} ('-' (ELEMENT_OPTIONS (= tokenIndex 43))) (e (ELEMENT_OPTIONS (= tokenIndex 45) (= p 2)))) (ALT (ID (ELEMENT_OPTIONS (= tokenIndex 49))))) (* (BLOCK (ALT ({precpred(_ctx, 5)}? (ELEMENT_OPTIONS (= p 5))) ('*' (ELEMENT_OPTIONS (= tokenIndex 21))) (e (ELEMENT_OPTIONS (= tokenIndex 23) (= p 6)))) (ALT ({precpred(_ctx, 4)}? (ELEMENT_OPTIONS (= p 4))) ('+' (ELEMENT_OPTIONS (= tokenIndex 29))) (e (ELEMENT_OPTIONS (= tokenIndex 31) (= p 5)))) (ALT ({precpred(_ctx, 3)}? (ELEMENT_OPTIONS (= p 3))) ('.' 
(ELEMENT_OPTIONS (= tokenIndex 37))) (ID (ELEMENT_OPTIONS (= tokenIndex 39)))))))))))"; + assertEquals(expectedTree, g.ast.toStringTree()); + + String expectedElementTokens = + "[@5,11:11='s',<57>,2:0]\n" + + "[@9,15:15='e',<57>,2:4]\n" + + "[@11,17:19='';'',<62>,2:6]\n" + + "[@15,23:23='e',<57>,3:0]\n" + + "[@43,64:66=''-'',<62>,6:4]\n" + + "[@45,68:68='e',<57>,6:8]\n" + + "[@49,74:75='ID',<66>,7:4]\n" + + "[@21,29:31=''*'',<62>,3:6]\n" + + "[@23,33:33='e',<57>,3:10]\n" + + "[@29,41:43=''+'',<62>,4:6]\n" + + "[@31,45:45='e',<57>,4:10]\n" + + "[@37,53:55=''.'',<62>,5:6]\n" + + "[@39,57:58='ID',<66>,5:10]"; + + IntervalSet types = + new IntervalSet(ANTLRParser.TOKEN_REF, + ANTLRParser.STRING_LITERAL, + ANTLRParser.RULE_REF); + List nodes = g.ast.getNodesWithTypePreorderDFS(types); + List tokens = new ArrayList(); + for (GrammarAST node : nodes) { + tokens.add(node.getToken()); + } + assertEquals(expectedElementTokens, Utils.join(tokens.toArray(), "\n")); + } + + @Test public void testLeftRecursionWithLabels() throws Exception { + Grammar g = new Grammar( + "grammar T;\n" + + "s : e ';' ;\n" + + "e : e '*' x=e\n" + + " | e '+' e\n" + + " | e '.' y=ID\n" + + " | '-' e\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + ); + + String expectedTree = + "(COMBINED_GRAMMAR T (RULES (RULE s (BLOCK (ALT e ';'))) (RULE e (BLOCK (ALT (BLOCK (ALT {} ('-' (ELEMENT_OPTIONS (= tokenIndex 47))) (e (ELEMENT_OPTIONS (= tokenIndex 49) (= p 2)))) (ALT (ID (ELEMENT_OPTIONS (= tokenIndex 53))))) (* (BLOCK (ALT ({precpred(_ctx, 5)}? (ELEMENT_OPTIONS (= p 5))) ('*' (ELEMENT_OPTIONS (= tokenIndex 21))) (= x (e (ELEMENT_OPTIONS (= tokenIndex 25) (= p 6))))) (ALT ({precpred(_ctx, 4)}? (ELEMENT_OPTIONS (= p 4))) ('+' (ELEMENT_OPTIONS (= tokenIndex 31))) (e (ELEMENT_OPTIONS (= tokenIndex 33) (= p 5)))) (ALT ({precpred(_ctx, 3)}? (ELEMENT_OPTIONS (= p 3))) ('.' (ELEMENT_OPTIONS (= tokenIndex 39))) (= y (ID (ELEMENT_OPTIONS (= tokenIndex 43))))))))))))"; + assertEquals(expectedTree, g.ast.toStringTree()); + + String expectedElementTokens = + "[@5,11:11='s',<57>,2:0]\n" + + "[@9,15:15='e',<57>,2:4]\n" + + "[@11,17:19='';'',<62>,2:6]\n" + + "[@15,23:23='e',<57>,3:0]\n" + + "[@47,68:70=''-'',<62>,6:4]\n" + + "[@49,72:72='e',<57>,6:8]\n" + + "[@53,78:79='ID',<66>,7:4]\n" + + "[@21,29:31=''*'',<62>,3:6]\n" + + "[@25,35:35='e',<57>,3:12]\n" + + "[@31,43:45=''+'',<62>,4:6]\n" + + "[@33,47:47='e',<57>,4:10]\n" + + "[@39,55:57=''.'',<62>,5:6]\n" + + "[@43,61:62='ID',<66>,5:12]"; + + IntervalSet types = + new IntervalSet(ANTLRParser.TOKEN_REF, + ANTLRParser.STRING_LITERAL, + ANTLRParser.RULE_REF); + List nodes = g.ast.getNodesWithTypePreorderDFS(types); + List tokens = new ArrayList(); + for (GrammarAST node : nodes) { + tokens.add(node.getToken()); + } + assertEquals(expectedElementTokens, Utils.join(tokens.toArray(), "\n")); + } + + @Test public void testLeftRecursionWithSet() throws Exception { + Grammar g = new Grammar( + "grammar T;\n" + + "s : e ';' ;\n" + + "e : e op=('*'|'/') e\n" + + " | e '+' e\n" + + " | e '.' ID\n" + + " | '-' e\n" + + " | ID\n" + + " ;\n" + + "ID : [a-z]+ ;\n" + ); + + String expectedTree = + "(COMBINED_GRAMMAR T (RULES (RULE s (BLOCK (ALT e ';'))) (RULE e (BLOCK (ALT (BLOCK (ALT {} ('-' (ELEMENT_OPTIONS (= tokenIndex 49))) (e (ELEMENT_OPTIONS (= tokenIndex 51) (= p 2)))) (ALT (ID (ELEMENT_OPTIONS (= tokenIndex 55))))) (* (BLOCK (ALT ({precpred(_ctx, 5)}? 
(ELEMENT_OPTIONS (= p 5))) (= op (SET ('*' (ELEMENT_OPTIONS (= tokenIndex 24))) ('/' (ELEMENT_OPTIONS (= tokenIndex 26))))) (e (ELEMENT_OPTIONS (= tokenIndex 29) (= p 6)))) (ALT ({precpred(_ctx, 4)}? (ELEMENT_OPTIONS (= p 4))) ('+' (ELEMENT_OPTIONS (= tokenIndex 35))) (e (ELEMENT_OPTIONS (= tokenIndex 37) (= p 5)))) (ALT ({precpred(_ctx, 3)}? (ELEMENT_OPTIONS (= p 3))) ('.' (ELEMENT_OPTIONS (= tokenIndex 43))) (ID (ELEMENT_OPTIONS (= tokenIndex 45)))))))))))"; + assertEquals(expectedTree, g.ast.toStringTree()); + + String expectedElementTokens = + "[@5,11:11='s',<57>,2:0]\n" + + "[@9,15:15='e',<57>,2:4]\n" + + "[@11,17:19='';'',<62>,2:6]\n" + + "[@15,23:23='e',<57>,3:0]\n" + + "[@49,73:75=''-'',<62>,6:4]\n" + + "[@51,77:77='e',<57>,6:8]\n" + + "[@55,83:84='ID',<66>,7:4]\n" + + "[@24,33:35=''*'',<62>,3:10]\n" + + "[@26,37:39=''/'',<62>,3:14]\n" + + "[@29,42:42='e',<57>,3:19]\n" + + "[@35,50:52=''+'',<62>,4:6]\n" + + "[@37,54:54='e',<57>,4:10]\n" + + "[@43,62:64=''.'',<62>,5:6]\n" + + "[@45,66:67='ID',<66>,5:10]"; + + IntervalSet types = + new IntervalSet(ANTLRParser.TOKEN_REF, + ANTLRParser.STRING_LITERAL, + ANTLRParser.RULE_REF); + List nodes = g.ast.getNodesWithTypePreorderDFS(types); + List tokens = new ArrayList(); + for (GrammarAST node : nodes) { + tokens.add(node.getToken()); + } + assertEquals(expectedElementTokens, Utils.join(tokens.toArray(), "\n")); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestTokenStreamRewriter.java b/tool/test/org/antlr/v4/test/tool/TestTokenStreamRewriter.java new file mode 100644 index 000000000..2d995aa2c --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestTokenStreamRewriter.java @@ -0,0 +1,884 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.LexerInterpreter; +import org.antlr.v4.runtime.TokenStreamRewriter; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +public class TestTokenStreamRewriter extends BaseTest { + + /** Public default constructor used by TestRig */ + public TestTokenStreamRewriter() { + } + + @Test public void testInsertBeforeIndex0() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream("abc")); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(0, "0"); + String result = tokens.getText(); + String expecting = "0abc"; + assertEquals(expecting, result); + } + + @Test public void testInsertAfterLastIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertAfter(2, "x"); + String result = tokens.getText(); + String expecting = "abcx"; + assertEquals(expecting, result); + } + + @Test public void test2InsertBeforeAfterMiddleIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(1, "x"); + tokens.insertAfter(1, "x"); + String result = tokens.getText(); + String expecting = "axbxc"; + assertEquals(expecting, result); + } + + @Test public void testReplaceIndex0() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(0, "x"); + String result = tokens.getText(); + String expecting = "xbc"; + assertEquals(expecting, result); + } + + @Test public void testReplaceLastIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, "x"); + String result = tokens.getText(); + String expecting = "abx"; + assertEquals(expecting, result); + } + + @Test public void testReplaceMiddleIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + 
"A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(1, "x"); + String result = tokens.getText(); + String expecting = "axc"; + assertEquals(expecting, result); + } + + @Test public void testToStringStartStop() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "MUL : '*';\n" + + "ASSIGN : '=';\n" + + "WS : ' '+;\n"); + // Tokens: 0123456789 + // Input: x = 3 * 0; + String input = "x = 3 * 0;"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(4, 8, "0"); + stream.fill(); +// replace 3 * 0 with 0 + + String result = tokens.getTokenStream().getText(); + String expecting = "x = 3 * 0;"; + assertEquals(expecting, result); + + result = tokens.getText(); + expecting = "x = 0;"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(0, 9)); + expecting = "x = 0;"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(4, 8)); + expecting = "0"; + assertEquals(expecting, result); + } + + @Test public void testToStringStartStop2() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + String input = "x = 3 * 0 + 2 * 0;"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + + String result = tokens.getTokenStream().getText(); + String expecting = "x = 3 * 0 + 2 * 0;"; + assertEquals(expecting, result); + + tokens.replace(4, 8, "0"); + stream.fill(); +// replace 3 * 0 with 0 + result = tokens.getText(); + expecting = "x = 0 + 2 * 0;"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(0, 17)); + expecting = "x = 0 + 2 * 0;"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(4, 8)); + expecting = "0"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(0, 8)); + expecting = "x = 0"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(12, 16)); + expecting = "2 * 0"; + assertEquals(expecting, result); + + tokens.insertAfter(17, "// comment"); + result = tokens.getText(Interval.of(12, 18)); + expecting = "2 * 0;// comment"; + assertEquals(expecting, result); + + result = tokens.getText(Interval.of(0, 8)); + stream.fill(); +// try again after insert at end + expecting = "x = 0"; + assertEquals(expecting, result); + } + + + @Test public void test2ReplaceMiddleIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter 
tokens = new TokenStreamRewriter(stream); + tokens.replace(1, "x"); + tokens.replace(1, "y"); + String result = tokens.getText(); + String expecting = "ayc"; + assertEquals(expecting, result); + } + + @Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(0, "_"); + tokens.replace(1, "x"); + tokens.replace(1, "y"); + String result = tokens.getText(); + String expecting = "_ayc"; + assertEquals(expecting, result); + } + + @Test public void testReplaceThenDeleteMiddleIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(1, "x"); + tokens.delete(1); + String result = tokens.getText(); + String expecting = "ac"; + assertEquals(expecting, result); + } + + @Test public void testInsertInPriorReplace() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(0, 2, "x"); + tokens.insertBefore(1, "0"); + Exception exc = null; + try { + tokens.getText(); + } + catch (IllegalArgumentException iae) { + exc = iae; + } + String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<2>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@2,2:2='c',<3>,1:2]:\"x\">"; + assertNotNull(exc); + assertEquals(expecting, exc.getMessage()); + } + + @Test public void testInsertThenReplaceSameIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(0, "0"); + tokens.replace(0, "x"); + stream.fill(); +// supercedes insert at 0 + String result = tokens.getText(); + String expecting = "0xbc"; + assertEquals(expecting, result); + } + + @Test public void test2InsertMiddleIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(1, "x"); + tokens.insertBefore(1, "y"); + String result = tokens.getText(); + String expecting = "ayxbc"; + assertEquals(expecting, result); + } + + @Test public void test2InsertThenReplaceIndex0() throws Exception { + LexerGrammar g = new
LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(0, "x"); + tokens.insertBefore(0, "y"); + tokens.replace(0, "z"); + String result = tokens.getText(); + String expecting = "yxzbc"; + assertEquals(expecting, result); + } + + @Test public void testReplaceThenInsertBeforeLastIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, "x"); + tokens.insertBefore(2, "y"); + String result = tokens.getText(); + String expecting = "abyx"; + assertEquals(expecting, result); + } + + @Test public void testInsertThenReplaceLastIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(2, "y"); + tokens.replace(2, "x"); + String result = tokens.getText(); + String expecting = "abyx"; + assertEquals(expecting, result); + } + + @Test public void testReplaceThenInsertAfterLastIndex() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, "x"); + tokens.insertAfter(2, "y"); + String result = tokens.getText(); + String expecting = "abxy"; + assertEquals(expecting, result); + } + + @Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 4, "x"); + tokens.insertBefore(2, "y"); + String result = tokens.getText(); + String expecting = "abyxba"; + assertEquals(expecting, result); + } + + @Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 4, "x"); + tokens.insertBefore(4, "y"); + stream.fill(); // no effect; within range of a replace + Exception exc = 
null; + try { + tokens.getText(); + } + catch (IllegalArgumentException iae) { + exc = iae; + } + String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">"; + assertNotNull(exc); + assertEquals(expecting, exc.getMessage()); + } + + @Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 4, "x"); + tokens.insertAfter(4, "y"); + String result = tokens.getText(); + String expecting = "abxyba"; + assertEquals(expecting, result); + } + + @Test public void testReplaceAll() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(0, 6, "x"); + String result = tokens.getText(); + String expecting = "x"; + assertEquals(expecting, result); + } + + @Test public void testReplaceSubsetThenFetch() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 4, "xyz"); + String result = tokens.getText(Interval.of(0, 6)); + String expecting = "abxyzba"; + assertEquals(expecting, result); + } + + @Test public void testReplaceThenReplaceSuperset() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 4, "xyz"); + tokens.replace(3, 5, "foo"); + stream.fill(); +// overlaps, error + Exception exc = null; + try { + tokens.getText(); + } + catch (IllegalArgumentException iae) { + exc = iae; + } + String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<3>,1:3]..[@5,5:5='b',<2>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">"; + assertNotNull(exc); + assertEquals(expecting, exc.getMessage()); + } + + @Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcccba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 4, "xyz"); + tokens.replace(1, 3, "foo"); + stream.fill(); +// overlap, error + Exception exc = null; + try { + tokens.getText(); + } + catch
(IllegalArgumentException iae) { + exc = iae; + } + String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@3,3:3='c',<3>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">"; + assertNotNull(exc); + assertEquals(expecting, exc.getMessage()); + } + + @Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcba"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 2, "xyz"); + tokens.replace(0, 3, "foo"); + String result = tokens.getText(); + String expecting = "fooa"; + assertEquals(expecting, result); + } + + @Test public void testCombineInserts() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(0, "x"); + tokens.insertBefore(0, "y"); + String result = tokens.getText(); + String expecting = "yxabc"; + assertEquals(expecting, result); + } + + @Test public void testCombine3Inserts() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(1, "x"); + tokens.insertBefore(0, "y"); + tokens.insertBefore(1, "z"); + String result = tokens.getText(); + String expecting = "yazxbc"; + assertEquals(expecting, result); + } + + @Test public void testCombineInsertOnLeftWithReplace() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(0, 2, "foo"); + tokens.insertBefore(0, "z"); + stream.fill(); +// combine with left edge of rewrite + String result = tokens.getText(); + String expecting = "zfoo"; + assertEquals(expecting, result); + } + + @Test public void testCombineInsertOnLeftWithDelete() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.delete(0, 2); + tokens.insertBefore(0, "z"); + stream.fill(); +// combine with left edge of rewrite + String result = tokens.getText(); + String expecting = "z"; + stream.fill(); +// make sure combo is not znull + assertEquals(expecting, result); + } + + @Test public void testDisjointInserts() throws Exception { +
LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(1, "x"); + tokens.insertBefore(2, "y"); + tokens.insertBefore(0, "z"); + String result = tokens.getText(); + String expecting = "zaxbyc"; + assertEquals(expecting, result); + } + + @Test public void testOverlappingReplace() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(1, 2, "foo"); + tokens.replace(0, 3, "bar"); + stream.fill(); +// wipes prior nested replace + String result = tokens.getText(); + String expecting = "bar"; + assertEquals(expecting, result); + } + + @Test public void testOverlappingReplace2() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(0, 3, "bar"); + tokens.replace(1, 2, "foo"); + stream.fill(); +// cannot split earlier replace + Exception exc = null; + try { + tokens.getText(); + } + catch (IllegalArgumentException iae) { + exc = iae; + } + String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<2>,1:1]..[@2,2:2='c',<3>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@3,3:3='c',<3>,1:3]:\"bar\">"; + assertNotNull(exc); + assertEquals(expecting, exc.getMessage()); + } + + @Test public void testOverlappingReplace3() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(1, 2, "foo"); + tokens.replace(0, 2, "bar"); + stream.fill(); +// wipes prior nested replace + String result = tokens.getText(); + String expecting = "barc"; + assertEquals(expecting, result); + } + + @Test public void testOverlappingReplace4() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(1, 2, "foo"); + tokens.replace(1, 3, "bar"); + stream.fill(); +// wipes prior nested replace + String result = tokens.getText(); + String expecting = "abar"; + assertEquals(expecting, result); + } + + @Test public void testDropIdenticalReplace() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C :
'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(1, 2, "foo"); + tokens.replace(1, 2, "foo"); + stream.fill(); +// drop previous, identical + String result = tokens.getText(); + String expecting = "afooc"; + assertEquals(expecting, result); + } + + @Test public void testDropPrevCoveredInsert() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(1, "foo"); + tokens.replace(1, 2, "foo"); + stream.fill(); +// kill prev insert + String result = tokens.getText(); + String expecting = "afoofoo"; + assertEquals(expecting, result); + } + + @Test public void testLeaveAloneDisjointInsert() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(1, "x"); + tokens.replace(2, 3, "foo"); + String result = tokens.getText(); + String expecting = "axbfoo"; + assertEquals(expecting, result); + } + + @Test public void testLeaveAloneDisjointInsert2() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abcc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.replace(2, 3, "foo"); + tokens.insertBefore(1, "x"); + String result = tokens.getText(); + String expecting = "axbfoo"; + assertEquals(expecting, result); + } + + @Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar T;\n"+ + "A : 'a';\n" + + "B : 'b';\n" + + "C : 'c';\n"); + String input = "abc"; + LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); + CommonTokenStream stream = new CommonTokenStream(lexEngine); + stream.fill(); + TokenStreamRewriter tokens = new TokenStreamRewriter(stream); + tokens.insertBefore(2, "y"); + tokens.delete(2); + String result = tokens.getText(); + String expecting = "aby"; + assertEquals(expecting, result); + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java b/tool/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java new file mode 100644 index 000000000..ea37a1ed9 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java @@ -0,0 +1,214 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.Token; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Set; +import java.util.StringTokenizer; + +import static org.junit.Assert.*; + +public class TestTokenTypeAssignment extends BaseTest { + + @Test + public void testParserSimpleTokens() throws Exception { + Grammar g = new Grammar( + "parser grammar t;\n"+ + "a : A | B;\n" + + "b : C ;"); + String rules = "a, b"; + String tokenNames = "A, B, C"; + checkSymbols(g, rules, tokenNames); + } + + @Test public void testParserTokensSection() throws Exception { + Grammar g = new Grammar( + "parser grammar t;\n" + + "tokens {\n" + + " C,\n" + + " D" + + "}\n"+ + "a : A | B;\n" + + "b : C ;"); + String rules = "a, b"; + String tokenNames = "A, B, C, D"; + checkSymbols(g, rules, tokenNames); + } + + @Test public void testLexerTokensSection() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n" + + "tokens {\n" + + " C,\n" + + " D" + + "}\n"+ + "A : 'a';\n" + + "C : 'c' ;"); + String rules = "A, C"; + String tokenNames = "A, C, D"; + checkSymbols(g, rules, tokenNames); + } + + @Test public void testCombinedGrammarLiterals() throws Exception { + Grammar g = new Grammar( + "grammar t;\n"+ + "a : 'begin' b 'end';\n" + + "b : C ';' ;\n" + + "ID : 'a' ;\n" + + "FOO : 'foo' ;\n" + // "foo" is not a token name + "C : 'c' ;\n"); // nor is 'c' + String rules = "a, b"; + String tokenNames = "C, FOO, ID, 'begin', 'end', ';'"; + checkSymbols(g, rules, tokenNames); + } + + @Test public void testLiteralInParserAndLexer() throws Exception { + // 'x' is token and char in lexer rule + Grammar g = new Grammar( + "grammar t;\n" + + "a : 'x' E ; \n" + + "E: 'x' '0' ;\n"); + + String literals = "['x']"; + String foundLiterals = g.stringLiteralToTypeMap.keySet().toString(); + assertEquals(literals, foundLiterals); + + foundLiterals = g.implicitLexer.stringLiteralToTypeMap.keySet().toString(); + assertEquals("['x']", foundLiterals); // pushed in lexer from parser + + String[] typeToTokenName = g.getTokenDisplayNames(); + Set tokens = new LinkedHashSet(); + for (String t : typeToTokenName) if ( t!=null ) tokens.add(t); + 
assertEquals("[, 'x', E]", tokens.toString()); + } + + @Test public void testPredDoesNotHideNameToLiteralMapInLexer() throws Exception { + // 'x' is token and char in lexer rule + Grammar g = new Grammar( + "grammar t;\n" + + "a : 'x' X ; \n" + + "X: 'x' {true}?;\n"); // must match as alias even with pred + + assertEquals("{'x'=1}", g.stringLiteralToTypeMap.toString()); + assertEquals("{EOF=-1, X=1}", g.tokenNameToTypeMap.toString()); + + // pushed in lexer from parser + assertEquals("{'x'=1}", g.implicitLexer.stringLiteralToTypeMap.toString()); + assertEquals("{EOF=-1, X=1}", g.implicitLexer.tokenNameToTypeMap.toString()); + } + + @Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception { + Grammar g = new Grammar( + "grammar t;\n"+ + "a : 'a' ;\n" + + "A : 'a' ;\n"); + String rules = "a"; + String tokenNames = "A, 'a'"; + checkSymbols(g, rules, tokenNames); + } + + @Test public void testSetDoesNotMissTokenAliases() throws Exception { + Grammar g = new Grammar( + "grammar t;\n"+ + "a : 'a'|'b' ;\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n"); + String rules = "a"; + String tokenNames = "A, 'a', B, 'b'"; + checkSymbols(g, rules, tokenNames); + } + + // T E S T L I T E R A L E S C A P E S + + @Test public void testParserCharLiteralWithEscape() throws Exception { + Grammar g = new Grammar( + "grammar t;\n"+ + "a : '\\n';\n"); + Set literals = g.stringLiteralToTypeMap.keySet(); + // must store literals how they appear in the antlr grammar + assertEquals("'\\n'", literals.toArray()[0]); + } + + protected void checkSymbols(Grammar g, + String rulesStr, + String allValidTokensStr) + throws Exception + { + String[] typeToTokenName = g.getTokenNames(); + Set tokens = new HashSet(); + for (int i = 0; i < typeToTokenName.length; i++) { + String t = typeToTokenName[i]; + if ( t!=null ) { + if (t.startsWith(Grammar.AUTO_GENERATED_TOKEN_NAME_PREFIX)) { + tokens.add(g.getTokenDisplayName(i)); + } + else { + tokens.add(t); + } + } + } + + // make sure expected tokens are there + StringTokenizer st = new StringTokenizer(allValidTokensStr, ", "); + while ( st.hasMoreTokens() ) { + String tokenName = st.nextToken(); + assertTrue("token "+tokenName+" expected, but was undefined", + g.getTokenType(tokenName) != Token.INVALID_TYPE); + tokens.remove(tokenName); + } + // make sure there are not any others (other than etc...) + for (String tokenName : tokens) { + assertTrue("unexpected token name "+tokenName, + g.getTokenType(tokenName) < Token.MIN_USER_TOKEN_TYPE); + } + + // make sure all expected rules are there + st = new StringTokenizer(rulesStr, ", "); + int n = 0; + while ( st.hasMoreTokens() ) { + String ruleName = st.nextToken(); + assertNotNull("rule "+ruleName+" expected", g.getRule(ruleName)); + n++; + } + //System.out.println("rules="+rules); + // make sure there are no extra rules + assertEquals("number of rules mismatch; expecting "+n+"; found "+g.rules.size(), + n, g.rules.size()); + + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java b/tool/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java new file mode 100644 index 000000000..7a80ce573 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java @@ -0,0 +1,656 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.tool.ErrorType; +import org.junit.Test; + +public class TestToolSyntaxErrors extends BaseTest { + static String[] A = { + // INPUT + "grammar A;\n" + + "", + // YIELDS + "error(" + ErrorType.NO_RULES.code + "): A.g4::: grammar A has no rules\n", + + "A;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:1:0: syntax error: 'A' came as a complete surprise to me\n", + + "grammar ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:1:8: syntax error: ';' came as a complete surprise to me while looking for an identifier\n", + + "grammar A\n" + + "a : ID ;\n", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:0: syntax error: missing SEMI at 'a'\n", + + "grammar A;\n" + + "a : ID ;;\n"+ + "b : B ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:8: syntax error: ';' came as a complete surprise to me\n", + + "grammar A;;\n" + + "a : ID ;\n", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A;.g4:1:10: syntax error: ';' came as a complete surprise to me\n", + + "grammar A;\n" + + "a @init : ID ;\n", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:8: syntax error: mismatched input ':' expecting ACTION while matching rule preamble\n", + + "grammar A;\n" + + "a ( A | B ) D ;\n" + + "b : B ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:3: syntax error: '(' came as a complete surprise to me while matching rule preamble\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:11: syntax error: mismatched input ')' expecting SEMI while matching a rule\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:15: syntax error: mismatched input ';' expecting COLON while matching a lexer rule\n", + }; + + @Test public void testA() { super.testErrors(A, true); } + + @Test public void testExtraColon() { + String[] pair = new String[] { + "grammar A;\n" + + "a : : A ;\n" + + "b : B ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: ':' came as a complete surprise to me while matching alternative\n", + }; + super.testErrors(pair, true); + } + + @Test public void testMissingRuleSemi() { + String[] pair = new String[] { + "grammar A;\n" + + "a : A \n" + + "b : B ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:0: syntax error: 
unterminated rule (missing ';') detected at 'b :' while looking for rule element\n", + }; + super.testErrors(pair, true); + } + + @Test public void testMissingRuleSemi2() { + String[] pair = new String[] { + "lexer grammar A;\n" + + "A : 'a' \n" + + "B : 'b' ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:0: syntax error: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n", + }; + super.testErrors(pair, true); + } + + @Test public void testMissingRuleSemi3() { + String[] pair = new String[] { + "grammar A;\n" + + "a : A \n" + + "b[int i] returns [int y] : B ;", + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:9: syntax error: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n" + }; + super.testErrors(pair, true); + } + + @Test public void testMissingRuleSemi4() { + String[] pair = new String[] { + "grammar A;\n" + + "a : b \n" + + " catch [Exception e] {...}\n" + + "b : B ;\n", + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n" + }; + super.testErrors(pair, true); + } + + @Test public void testMissingRuleSemi5() { + String[] pair = new String[] { + "grammar A;\n" + + "a : A \n" + + " catch [Exception e] {...}\n", + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n" + }; + super.testErrors(pair, true); + } + + @Test public void testBadRulePrequelStart() { + String[] pair = new String[] { + "grammar A;\n" + + "a @ options {k=1;} : A ;\n" + + "b : B ;", + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: 'options {' came as a complete surprise to me while looking for an identifier\n" + }; + super.testErrors(pair, true); + } + + @Test public void testBadRulePrequelStart2() { + String[] pair = new String[] { + "grammar A;\n" + + "a } : A ;\n" + + "b : B ;", + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:2: syntax error: '}' came as a complete surprise to me while matching rule preamble\n" + }; + super.testErrors(pair, true); + } + + @Test public void testModeInParser() { + String[] pair = new String[] { + "grammar A;\n" + + "a : A ;\n" + + "mode foo;\n" + + "b : B ;", + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:4:0: syntax error: 'b' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:4:6: syntax error: mismatched input ';' expecting COLON while matching a lexer rule\n" + }; + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#243 + * "Generate a good message for unterminated strings" + * https://github.com/antlr/antlr4/issues/243 + */ + @Test public void testUnterminatedStringLiteral() { + String[] pair = new String[] { + "grammar A;\n" + + "a : 'x\n" + + " ;\n", + + "error(" + ErrorType.UNTERMINATED_STRING_LITERAL.code + "): A.g4:2:4: unterminated string literal\n" + }; + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#262 + * "Parser Rule Name Starting With an Underscore" + * https://github.com/antlr/antlr4/issues/262 + */ + @Test public void testParserRuleNameStartingWithUnderscore() { + String[] pair = new String[] { + "grammar A;\n" + + "_a : 'x' ;\n", + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:0: syntax error: '_' came as a complete surprise to me\n" + }; + super.testErrors(pair, true); + } + + /** + * This is a 
regression test for antlr/antlr4#194 + * "NullPointerException on 'options{}' in grammar file" + * https://github.com/antlr/antlr4/issues/194 + */ + @Test public void testEmptyGrammarOptions() { + String[] pair = new String[] { + "grammar A;\n" + + "options {}\n" + + "a : 'x' ;\n", + + "" + }; + super.testErrors(pair, true); + } + + /** + * This is a "related" regression test for antlr/antlr4#194 + * "NullPointerException on 'options{}' in grammar file" + * https://github.com/antlr/antlr4/issues/194 + */ + @Test public void testEmptyRuleOptions() { + String[] pair = new String[] { + "grammar A;\n" + + "a options{} : 'x' ;\n", + + "" + }; + super.testErrors(pair, true); + } + + /** + * This is a "related" regression test for antlr/antlr4#194 + * "NullPointerException on 'options{}' in grammar file" + * https://github.com/antlr/antlr4/issues/194 + */ + @Test public void testEmptyBlockOptions() { + String[] pair = new String[] { + "grammar A;\n" + + "a : (options{} : 'x') ;\n", + + "" + }; + super.testErrors(pair, true); + } + + @Test public void testEmptyTokensBlock() { + String[] pair = new String[] { + "grammar A;\n" + + "tokens {}\n" + + "a : 'x' ;\n", + + "" + }; + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#190 + * "NullPointerException building lexer grammar using bogus 'token' action" + * https://github.com/antlr/antlr4/issues/190 + */ + @Test public void testInvalidLexerCommand() { + String[] pair = new String[] { + "grammar A;\n" + + "tokens{Foo}\n" + + "b : Foo ;\n" + + "X : 'foo' -> popmode;\n" + // "meant" to use -> popMode + "Y : 'foo' -> token(Foo);", // "meant" to use -> type(Foo) + + "error(" + ErrorType.INVALID_LEXER_COMMAND.code + "): A.g4:4:13: lexer command popmode does not exist or is not supported by the current target\n" + + "error(" + ErrorType.INVALID_LEXER_COMMAND.code + "): A.g4:5:13: lexer command token does not exist or is not supported by the current target\n" + }; + super.testErrors(pair, true); + } + + @Test public void testLexerCommandArgumentValidation() { + String[] pair = new String[] { + "grammar A;\n" + + "tokens{Foo}\n" + + "b : Foo ;\n" + + "X : 'foo' -> popMode(Foo);\n" + // "meant" to use -> popMode + "Y : 'foo' -> type;", // "meant" to use -> type(Foo) + + "error(" + ErrorType.UNWANTED_LEXER_COMMAND_ARGUMENT.code + "): A.g4:4:13: lexer command popMode does not take any arguments\n" + + "error(" + ErrorType.MISSING_LEXER_COMMAND_ARGUMENT.code + "): A.g4:5:13: missing argument for lexer command type\n" + }; + super.testErrors(pair, true); + } + + @Test public void testRuleRedefinition() { + String[] pair = new String[] { + "grammar Oops;\n" + + "\n" + + "ret_ty : A ;\n" + + "ret_ty : B ;\n" + + "\n" + + "A : 'a' ;\n" + + "B : 'b' ;\n", + + "error(" + ErrorType.RULE_REDEFINITION.code + "): Oops.g4:4:0: rule ret_ty redefinition; previous at line 3\n" + }; + super.testErrors(pair, true); + } + + @Test public void testEpsilonClosureAnalysis() { + String grammar = + "grammar A;\n" + + "x : ;\n" + + "y1 : x+;\n" + + "y2 : x*;\n" + + "z1 : ('foo' | 'bar'? 'bar2'?)*;\n" + + "z2 : ('foo' | 'bar' 'bar2'? 
| 'bar2')*;\n"; + String expected = + "error(" + ErrorType.EPSILON_CLOSURE.code + "): A.g4:3:0: rule y1 contains a closure with at least one alternative that can match an empty string\n" + + "error(" + ErrorType.EPSILON_CLOSURE.code + "): A.g4:4:0: rule y2 contains a closure with at least one alternative that can match an empty string\n" + + "error(" + ErrorType.EPSILON_CLOSURE.code + "): A.g4:5:0: rule z1 contains a closure with at least one alternative that can match an empty string\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + @Test public void testEpsilonOptionalAnalysis() { + String grammar = + "grammar A;\n" + + "x : ;\n" + + "y : x?;\n" + + "z1 : ('foo' | 'bar'? 'bar2'?)?;\n" + + "z2 : ('foo' | 'bar' 'bar2'? | 'bar2')?;\n"; + String expected = + "warning(" + ErrorType.EPSILON_OPTIONAL.code + "): A.g4:3:0: rule y contains an optional block with at least one alternative that can match an empty string\n" + + "warning(" + ErrorType.EPSILON_OPTIONAL.code + "): A.g4:4:0: rule z1 contains an optional block with at least one alternative that can match an empty string\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#315 + * "Inconsistent lexer error msg for actions" + * https://github.com/antlr/antlr4/issues/315 + */ + @Test public void testActionAtEndOfOneLexerAlternative() { + String grammar = + "grammar A;\n" + + "stat : 'start' CharacterLiteral 'end' EOF;\n" + + "\n" + + "// Lexer\n" + + "\n" + + "CharacterLiteral\n" + + " : '\\'' SingleCharacter '\\''\n" + + " | '\\'' ~[\\r\\n] {notifyErrorListeners(\"unclosed character literal\");}\n" + + " ;\n" + + "\n" + + "fragment\n" + + "SingleCharacter\n" + + " : ~['\\\\\\r\\n]\n" + + " ;\n" + + "\n" + + "WS : [ \\r\\t\\n]+ -> skip ;\n"; + String expected = + ""; + + String[] pair = new String[] { grammar, expected }; + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#308 "NullPointer exception" + * https://github.com/antlr/antlr4/issues/308 + */ + @Test public void testDoubleQuotedStringLiteral() { + String grammar = + "lexer grammar A;\n" + + "WHITESPACE : (\" \" | \"\\t\" | \"\\n\" | \"\\r\" | \"\\f\");\n"; + String expected = + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:14: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:16: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:20: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:21: syntax error: '\\' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:23: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:27: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:28: syntax error: '\\' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:30: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:34: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:35: syntax error: '\\' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:37: syntax error: 
'\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:41: syntax error: '\"' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:42: syntax error: '\\' came as a complete surprise to me\n" + + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:44: syntax error: '\"' came as a complete surprise to me\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This test ensures that the {@link ErrorType#INVALID_ESCAPE_SEQUENCE} + * error is not reported for escape sequences that are known to be valid. + */ + @Test public void testValidEscapeSequences() { + String grammar = + "lexer grammar A;\n" + + "NORMAL_ESCAPE : '\\b \\t \\n \\f \\r \\\" \\' \\\\';\n" + + "UNICODE_ESCAPE : '\\u0001 \\u00A1 \\u00a1 \\uaaaa \\uAAAA';\n"; + String expected = + ""; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#507 "NullPointerException When + * Generating Code from Grammar". + * https://github.com/antlr/antlr4/issues/507 + */ + @Test public void testInvalidEscapeSequences() { + String grammar = + "lexer grammar A;\n" + + "RULE : 'Foo \\uAABG \\x \\u';\n"; + String expected = + "error(" + ErrorType.INVALID_ESCAPE_SEQUENCE.code + "): A.g4:2:12: invalid escape sequence\n" + + "error(" + ErrorType.INVALID_ESCAPE_SEQUENCE.code + "): A.g4:2:19: invalid escape sequence\n" + + "error(" + ErrorType.INVALID_ESCAPE_SEQUENCE.code + "): A.g4:2:22: invalid escape sequence\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This test ensures the {@link ErrorType#UNRECOGNIZED_ASSOC_OPTION} warning + * is produced as described in the documentation. + */ + @Test public void testUnrecognizedAssocOption() { + String grammar = + "grammar A;\n" + + "x : 'x'\n" + + " | x '+' x // warning 157\n" + + " | x '*' x // ok\n" + + " ;\n"; + String expected = + "warning(" + ErrorType.UNRECOGNIZED_ASSOC_OPTION.code + "): A.g4:3:10: rule x contains an assoc terminal option in an unrecognized location\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This test ensures the {@link ErrorType#FRAGMENT_ACTION_IGNORED} warning + * is produced as described in the documentation. + */ + @Test public void testFragmentActionIgnored() { + String grammar = + "lexer grammar A;\n" + + "X1 : 'x' -> more // ok\n" + + " ;\n" + + "Y1 : 'x' {more();} // ok\n" + + " ;\n" + + "fragment\n" + + "X2 : 'x' -> more // warning 158\n" + + " ;\n" + + "fragment\n" + + "Y2 : 'x' {more();} // warning 158\n" + + " ;\n"; + String expected = + "warning(" + ErrorType.FRAGMENT_ACTION_IGNORED.code + "): A.g4:7:12: fragment rule X2 contains an action or command which can never be executed\n" + + "warning(" + ErrorType.FRAGMENT_ACTION_IGNORED.code + "): A.g4:10:9: fragment rule Y2 contains an action or command which can never be executed\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#500 "Array Index Out Of + * Bounds". 
+ * https://github.com/antlr/antlr4/issues/500 + */ + @Test public void testTokenNamedEOF() { + String grammar = + "lexer grammar A;\n" + + "WS : ' ';\n" + + " EOF : 'a';\n"; + String expected = + "error(" + ErrorType.RESERVED_RULE_NAME.code + "): A.g4:3:1: cannot declare a rule with reserved name EOF\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#649 "unknown target causes + * null ptr exception.". + * https://github.com/antlr/antlr4/issues/649 + * Stops before processing the lexer + */ + @Test public void testInvalidLanguageInGrammarWithLexerCommand() throws Exception { + String grammar = + "grammar T;\n" + + "options { language=Foo; }\n" + + "start : 'T' EOF;\n" + + "Something : 'something' -> channel(CUSTOM);"; + String expected = + "error(" + ErrorType.CANNOT_CREATE_TARGET_GENERATOR.code + "): ANTLR cannot generate Foo code as of version " + Tool.VERSION + "\n"; + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#649 "unknown target causes + * null ptr exception.". + * https://github.com/antlr/antlr4/issues/649 + */ + @Test public void testInvalidLanguageInGrammar() throws Exception { + String grammar = + "grammar T;\n" + + "options { language=Foo; }\n" + + "start : 'T' EOF;\n"; + String expected = + "error(" + ErrorType.CANNOT_CREATE_TARGET_GENERATOR.code + "): ANTLR cannot generate Foo code as of version " + Tool.VERSION + "\n"; + + String[] pair = new String[] { + grammar, + expected + }; + + super.testErrors(pair, true); + } + + @Test public void testChannelDefinitionInLexer() throws Exception { + String grammar = + "lexer grammar T;\n" + + "\n" + + "channels {\n" + + " WHITESPACE_CHANNEL,\n" + + " COMMENT_CHANNEL\n" + + "}\n" + + "\n" + + "COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" + + "WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n"; + + String expected = ""; + + String[] pair = { grammar, expected }; + super.testErrors(pair, true); + } + + @Test public void testChannelDefinitionInParser() throws Exception { + String grammar = + "parser grammar T;\n" + + "\n" + + "channels {\n" + + " WHITESPACE_CHANNEL,\n" + + " COMMENT_CHANNEL\n" + + "}\n" + + "\n" + + "start : EOF;\n"; + + String expected = + "error(" + ErrorType.CHANNELS_BLOCK_IN_PARSER_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in parser grammars\n"; + + String[] pair = { grammar, expected }; + super.testErrors(pair, true); + } + + @Test public void testChannelDefinitionInCombined() throws Exception { + String grammar = + "grammar T;\n" + + "\n" + + "channels {\n" + + " WHITESPACE_CHANNEL,\n" + + " COMMENT_CHANNEL\n" + + "}\n" + + "\n" + + "start : EOF;\n" + + "\n" + + "COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" + + "WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n"; + + String expected = + "warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:10:35: rule COMMENT contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n" + + "warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:11:35: rule WHITESPACE contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n" + + "error(" + ErrorType.CHANNELS_BLOCK_IN_COMBINED_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in combined grammars\n"; + + String[] pair = { grammar, expected }; + 
super.testErrors(pair, true); + } + + /** + * This is a regression test for antlr/antlr4#497 now that antlr/antlr4#309 + * is resolved. + * https://github.com/antlr/antlr4/issues/497 + * https://github.com/antlr/antlr4/issues/309 + */ + @Test public void testChannelDefinitions() throws Exception { + String grammar = + "lexer grammar T;\n" + + "\n" + + "channels {\n" + + " WHITESPACE_CHANNEL,\n" + + " COMMENT_CHANNEL\n" + + "}\n" + + "\n" + + "COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" + + "WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n" + + "NEWLINE: '\\r'? '\\n' -> channel(NEWLINE_CHANNEL);"; + + // WHITESPACE_CHANNEL and COMMENT_CHANNEL are defined, but NEWLINE_CHANNEL is not + String expected = + "warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:10:34: rule NEWLINE contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n"; + + String[] pair = { grammar, expected }; + super.testErrors(pair, true); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestTopologicalSort.java b/tool/test/org/antlr/v4/test/tool/TestTopologicalSort.java new file mode 100644 index 000000000..96b4d1977 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestTopologicalSort.java @@ -0,0 +1,117 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.misc.Graph; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.*; + +/** Test topo sort in GraphNode. 
*/ +public class TestTopologicalSort extends BaseTest { + @Test + public void testFairlyLargeGraph() throws Exception { + Graph g = new Graph(); + g.addEdge("C", "F"); + g.addEdge("C", "G"); + g.addEdge("C", "A"); + g.addEdge("C", "B"); + g.addEdge("A", "D"); + g.addEdge("A", "E"); + g.addEdge("B", "E"); + g.addEdge("D", "E"); + g.addEdge("D", "F"); + g.addEdge("F", "H"); + g.addEdge("E", "F"); + + String expecting = "[H, F, G, E, D, A, B, C]"; + List nodes = g.sort(); + String result = nodes.toString(); + assertEquals(expecting, result); + } + + @Test + public void testCyclicGraph() throws Exception { + Graph g = new Graph(); + g.addEdge("A", "B"); + g.addEdge("B", "C"); + g.addEdge("C", "A"); + g.addEdge("C", "D"); + + String expecting = "[D, C, B, A]"; + List nodes = g.sort(); + String result = nodes.toString(); + assertEquals(expecting, result); + } + + @Test + public void testRepeatedEdges() throws Exception { + Graph g = new Graph(); + g.addEdge("A", "B"); + g.addEdge("B", "C"); + g.addEdge("A", "B"); // dup + g.addEdge("C", "D"); + + String expecting = "[D, C, B, A]"; + List nodes = g.sort(); + String result = nodes.toString(); + assertEquals(expecting, result); + } + + @Test + public void testSimpleTokenDependence() throws Exception { + Graph g = new Graph(); + g.addEdge("Java.g4", "MyJava.tokens"); // Java feeds off manual token file + g.addEdge("Java.tokens", "Java.g4"); + g.addEdge("Def.g4", "Java.tokens"); // walkers feed off generated tokens + g.addEdge("Ref.g4", "Java.tokens"); + + String expecting = "[MyJava.tokens, Java.g4, Java.tokens, Def.g4, Ref.g4]"; + List nodes = g.sort(); + String result = nodes.toString(); + assertEquals(expecting, result); + } + + @Test + public void testParserLexerCombo() throws Exception { + Graph g = new Graph(); + g.addEdge("JavaLexer.tokens", "JavaLexer.g4"); + g.addEdge("JavaParser.g4", "JavaLexer.tokens"); + g.addEdge("Def.g4", "JavaLexer.tokens"); + g.addEdge("Ref.g4", "JavaLexer.tokens"); + + String expecting = "[JavaLexer.g4, JavaLexer.tokens, JavaParser.g4, Def.g4, Ref.g4]"; + List nodes = g.sort(); + String result = nodes.toString(); + assertEquals(expecting, result); + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java b/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java new file mode 100644 index 000000000..f23ab3316 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java @@ -0,0 +1,367 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonTokenFactory; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.IntStream; +import org.antlr.v4.runtime.LexerInterpreter; +import org.antlr.v4.runtime.UnbufferedCharStream; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import java.io.Reader; +import java.io.StringReader; + +import static org.junit.Assert.assertEquals; + +public class TestUnbufferedCharStream extends BaseTest { + @Test public void testNoChar() throws Exception { + CharStream input = createStream(""); + assertEquals(IntStream.EOF, input.LA(1)); + assertEquals(IntStream.EOF, input.LA(2)); + } + + /** + * The {@link IntStream} interface does not specify the behavior when the + * EOF symbol is consumed, but {@link UnbufferedCharStream} handles this + * particular case by throwing an {@link IllegalStateException}. + */ + @Test(expected = IllegalStateException.class) + public void testConsumeEOF() throws Exception { + CharStream input = createStream(""); + assertEquals(IntStream.EOF, input.LA(1)); + input.consume(); + input.consume(); + } + + @Test(expected = IllegalArgumentException.class) + public void testNegativeSeek() { + CharStream input = createStream(""); + input.seek(-1); + } + + @Test + public void testSeekPastEOF() { + CharStream input = createStream(""); + assertEquals(0, input.index()); + input.seek(1); + assertEquals(0, input.index()); + } + + /** + * The {@link IntStream} interface does not specify the behavior when marks + * are not released in the reversed order they were created, but + * {@link UnbufferedCharStream} handles this case by throwing an + * {@link IllegalStateException}. + */ + @Test(expected = IllegalStateException.class) + public void testMarkReleaseOutOfOrder() { + CharStream input = createStream(""); + int m1 = input.mark(); + int m2 = input.mark(); + input.release(m1); + } + + /** + * The {@link IntStream} interface does not specify the behavior when a mark + * is released twice, but {@link UnbufferedCharStream} handles this case by + * throwing an {@link IllegalStateException}. + */ + @Test(expected = IllegalStateException.class) + public void testMarkReleasedTwice() { + CharStream input = createStream(""); + int m1 = input.mark(); + input.release(m1); + input.release(m1); + } + + /** + * The {@link IntStream} interface does not specify the behavior when a mark + * is released twice, but {@link UnbufferedCharStream} handles this case by + * throwing an {@link IllegalStateException}. 
+ */ + @Test(expected = IllegalStateException.class) + public void testNestedMarkReleasedTwice() { + CharStream input = createStream(""); + int m1 = input.mark(); + int m2 = input.mark(); + input.release(m2); + input.release(m2); + } + + /** + * It is not valid to pass a mark to {@link IntStream#seek}, but + * {@link UnbufferedCharStream} creates marks in such a way that this + * invalid usage results in an {@link IllegalArgumentException}. + */ + @Test(expected = IllegalArgumentException.class) + public void testMarkPassedToSeek() { + CharStream input = createStream(""); + int m1 = input.mark(); + input.seek(m1); + } + + @Test(expected = IllegalArgumentException.class) + public void testSeekBeforeBufferStart() { + CharStream input = createStream("xyz"); + input.consume(); + int m1 = input.mark(); + assertEquals(1, input.index()); + input.consume(); + input.seek(0); + } + + @Test(expected = UnsupportedOperationException.class) + public void testGetTextBeforeBufferStart() { + CharStream input = createStream("xyz"); + input.consume(); + int m1 = input.mark(); + assertEquals(1, input.index()); + input.getText(new Interval(0, 1)); + } + + @Test + public void testGetTextInMarkedRange() { + CharStream input = createStream("xyz"); + input.consume(); + int m1 = input.mark(); + assertEquals(1, input.index()); + input.consume(); + input.consume(); + assertEquals("yz", input.getText(new Interval(1, 2))); + } + + @Test + public void testLastChar() { + CharStream input = createStream("abcdef"); + + input.consume(); + assertEquals('a', input.LA(-1)); + + int m1 = input.mark(); + input.consume(); + input.consume(); + input.consume(); + assertEquals('d', input.LA(-1)); + + input.seek(2); + assertEquals('b', input.LA(-1)); + + input.release(m1); + input.seek(3); + assertEquals('c', input.LA(-1)); + // this special case is not required by the IntStream interface, but + // UnbufferedCharStream allows it so we have to make sure the resulting + // state is consistent + input.seek(2); + assertEquals('b', input.LA(-1)); + } + + @Test public void test1Char() throws Exception { + TestingUnbufferedCharStream input = createStream("x"); + assertEquals('x', input.LA(1)); + input.consume(); + assertEquals(IntStream.EOF, input.LA(1)); + String r = input.getRemainingBuffer(); + assertEquals("\uFFFF", r); // shouldn't include x + assertEquals("\uFFFF", input.getBuffer()); // whole buffer + } + + @Test public void test2Char() throws Exception { + TestingUnbufferedCharStream input = createStream("xy"); + assertEquals('x', input.LA(1)); + input.consume(); + assertEquals('y', input.LA(1)); + assertEquals("y", input.getRemainingBuffer()); // shouldn't include x + assertEquals("y", input.getBuffer()); + input.consume(); + assertEquals(IntStream.EOF, input.LA(1)); + assertEquals("\uFFFF", input.getBuffer()); + } + + @Test public void test2CharAhead() throws Exception { + CharStream input = createStream("xy"); + assertEquals('x', input.LA(1)); + assertEquals('y', input.LA(2)); + assertEquals(IntStream.EOF, input.LA(3)); + } + + @Test public void testBufferExpand() throws Exception { + TestingUnbufferedCharStream input = createStream("01234", 2); + assertEquals('0', input.LA(1)); + assertEquals('1', input.LA(2)); + assertEquals('2', input.LA(3)); + assertEquals('3', input.LA(4)); + assertEquals('4', input.LA(5)); + assertEquals("01234", input.getBuffer()); + assertEquals(IntStream.EOF, input.LA(6)); + } + + @Test public void testBufferWrapSize1() throws Exception { + CharStream input = createStream("01234", 1); + assertEquals('0', 
input.LA(1)); + input.consume(); + assertEquals('1', input.LA(1)); + input.consume(); + assertEquals('2', input.LA(1)); + input.consume(); + assertEquals('3', input.LA(1)); + input.consume(); + assertEquals('4', input.LA(1)); + input.consume(); + assertEquals(IntStream.EOF, input.LA(1)); + } + + @Test public void testBufferWrapSize2() throws Exception { + CharStream input = createStream("01234", 2); + assertEquals('0', input.LA(1)); + input.consume(); + assertEquals('1', input.LA(1)); + input.consume(); + assertEquals('2', input.LA(1)); + input.consume(); + assertEquals('3', input.LA(1)); + input.consume(); + assertEquals('4', input.LA(1)); + input.consume(); + assertEquals(IntStream.EOF, input.LA(1)); + } + + @Test public void test1Mark() throws Exception { + TestingUnbufferedCharStream input = createStream("xyz"); + int m = input.mark(); + assertEquals('x', input.LA(1)); + assertEquals('y', input.LA(2)); + assertEquals('z', input.LA(3)); + input.release(m); + assertEquals(IntStream.EOF, input.LA(4)); + assertEquals("xyz\uFFFF", input.getBuffer()); + } + + @Test public void test1MarkWithConsumesInSequence() throws Exception { + TestingUnbufferedCharStream input = createStream("xyz"); + int m = input.mark(); + input.consume(); // x, moves to y + input.consume(); // y + input.consume(); // z, moves to EOF + assertEquals(IntStream.EOF, input.LA(1)); + assertEquals("xyz\uFFFF", input.getBuffer()); + input.release(m); // wipes buffer + assertEquals("\uFFFF", input.getBuffer()); + } + + @Test public void test2Mark() throws Exception { + TestingUnbufferedCharStream input = createStream("xyz", 100); + assertEquals('x', input.LA(1)); + input.consume(); // reset buffer index (p) to 0 + int m1 = input.mark(); + assertEquals('y', input.LA(1)); + input.consume(); + int m2 = input.mark(); + assertEquals("yz", input.getBuffer()); + input.release(m2); // drop to 1 marker + input.consume(); + input.release(m1); // shifts remaining char to beginning + assertEquals(IntStream.EOF, input.LA(1)); + assertEquals("\uFFFF", input.getBuffer()); + } + + @Test public void testAFewTokens() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 3 * 0 + 2 * 0; + TestingUnbufferedCharStream input = createStream("x = 302 * 91 + 20234234 * 0;"); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + // copy text into tokens from char stream + lexEngine.setTokenFactory(new CommonTokenFactory(true)); + CommonTokenStream tokens = new CommonTokenStream(lexEngine); + String result = tokens.LT(1).getText(); + String expecting = "x"; + assertEquals(expecting, result); + tokens.fill(); + expecting = + "[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1], [@2,2:2='=',<4>,1:2]," + + " [@3,3:3=' ',<7>,1:3], [@4,4:6='302',<2>,1:4], [@5,7:7=' ',<7>,1:7]," + + " [@6,8:8='*',<6>,1:8], [@7,9:9=' ',<7>,1:9], [@8,10:11='91',<2>,1:10]," + + " [@9,12:12=' ',<7>,1:12], [@10,13:13='+',<5>,1:13], [@11,14:14=' ',<7>,1:14]," + + " [@12,15:22='20234234',<2>,1:15], [@13,23:23=' ',<7>,1:23]," + + " [@14,24:24='*',<6>,1:24], [@15,25:25=' ',<7>,1:25], [@16,26:26='0',<2>,1:26]," + + " [@17,27:27=';',<3>,1:27], [@18,28:27='',<-1>,1:28]]"; + assertEquals(expecting, tokens.getTokens().toString()); + } + + protected static TestingUnbufferedCharStream createStream(String text) { + return new TestingUnbufferedCharStream(new 
StringReader(text)); + } + + protected static TestingUnbufferedCharStream createStream(String text, int bufferSize) { + return new TestingUnbufferedCharStream(new StringReader(text), bufferSize); + } + + protected static class TestingUnbufferedCharStream extends UnbufferedCharStream { + + public TestingUnbufferedCharStream(Reader input) { + super(input); + } + + public TestingUnbufferedCharStream(Reader input, int bufferSize) { + super(input, bufferSize); + } + + /** For testing. What's in moving window into data stream from + * current index, LA(1) or data[p], to end of buffer? + */ + public String getRemainingBuffer() { + if ( n==0 ) return ""; + return new String(data,p,n-p); + } + + /** For testing. What's in moving window buffer into data stream. + * From 0..p-1 have been consume. + */ + public String getBuffer() { + if ( n==0 ) return ""; + return new String(data,0,n); + } + + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java b/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java new file mode 100644 index 000000000..7fb3e773d --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java @@ -0,0 +1,223 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.LexerInterpreter; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.UnbufferedTokenStream; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.Test; + +import java.io.StringReader; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class TestUnbufferedTokenStream extends BaseTest { + @Test public void testLookahead() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 302; + CharStream input = new ANTLRInputStream( + new StringReader("x = 302;") + ); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TokenStream tokens = new UnbufferedTokenStream(lexEngine); + + assertEquals("x", tokens.LT(1).getText()); + assertEquals(" ", tokens.LT(2).getText()); + assertEquals("=", tokens.LT(3).getText()); + assertEquals(" ", tokens.LT(4).getText()); + assertEquals("302", tokens.LT(5).getText()); + assertEquals(";", tokens.LT(6).getText()); + } + + @Test public void testNoBuffering() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 302; + CharStream input = new ANTLRInputStream( + new StringReader("x = 302;") + ); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TestingUnbufferedTokenStream tokens = new TestingUnbufferedTokenStream(lexEngine); + + assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString()); + assertEquals("x", tokens.LT(1).getText()); + tokens.consume(); // move to WS + assertEquals(" ", tokens.LT(1).getText()); + assertEquals("[[@1,1:1=' ',<7>,1:1]]", tokens.getRemainingBuffer().toString()); + tokens.consume(); + assertEquals("=", tokens.LT(1).getText()); + assertEquals("[[@2,2:2='=',<4>,1:2]]", tokens.getRemainingBuffer().toString()); + tokens.consume(); + assertEquals(" ", tokens.LT(1).getText()); + assertEquals("[[@3,3:3=' ',<7>,1:3]]", tokens.getRemainingBuffer().toString()); + tokens.consume(); + assertEquals("302", tokens.LT(1).getText()); + assertEquals("[[@4,4:6='302',<2>,1:4]]", tokens.getRemainingBuffer().toString()); + tokens.consume(); + assertEquals(";", tokens.LT(1).getText()); + assertEquals("[[@5,7:7=';',<3>,1:7]]", tokens.getRemainingBuffer().toString()); + } + + @Test public void testMarkStart() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 302; + CharStream input = new ANTLRInputStream( + new StringReader("x = 302;") + ); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TestingUnbufferedTokenStream tokens = new TestingUnbufferedTokenStream(lexEngine); + + int m = tokens.mark(); + assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString()); + assertEquals("x", 
tokens.LT(1).getText()); + tokens.consume(); // consume x + assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString()); + tokens.consume(); // ' ' + tokens.consume(); // = + tokens.consume(); // ' ' + tokens.consume(); // 302 + tokens.consume(); // ; + assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]," + + " [@2,2:2='=',<4>,1:2], [@3,3:3=' ',<7>,1:3]," + + " [@4,4:6='302',<2>,1:4], [@5,7:7=';',<3>,1:7]," + + " [@6,8:7='',<-1>,1:8]]", + tokens.getBuffer().toString()); + } + + @Test public void testMarkThenRelease() throws Exception { + LexerGrammar g = new LexerGrammar( + "lexer grammar t;\n"+ + "ID : 'a'..'z'+;\n" + + "INT : '0'..'9'+;\n" + + "SEMI : ';';\n" + + "ASSIGN : '=';\n" + + "PLUS : '+';\n" + + "MULT : '*';\n" + + "WS : ' '+;\n"); + // Tokens: 012345678901234567 + // Input: x = 302; + CharStream input = new ANTLRInputStream( + new StringReader("x = 302 + 1;") + ); + LexerInterpreter lexEngine = g.createLexerInterpreter(input); + TestingUnbufferedTokenStream tokens = new TestingUnbufferedTokenStream(lexEngine); + + int m = tokens.mark(); + assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString()); + assertEquals("x", tokens.LT(1).getText()); + tokens.consume(); // consume x + assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString()); + tokens.consume(); // ' ' + tokens.consume(); // = + tokens.consume(); // ' ' + assertEquals("302", tokens.LT(1).getText()); + tokens.release(m); // "x = 302" is in buffer. will kill buffer + tokens.consume(); // 302 + tokens.consume(); // ' ' + m = tokens.mark(); // mark at the + + assertEquals("+", tokens.LT(1).getText()); + tokens.consume(); // '+' + tokens.consume(); // ' ' + tokens.consume(); // 1 + tokens.consume(); // ; + assertEquals("", tokens.LT(1).getText()); + // we marked at the +, so that should be the start of the buffer + assertEquals("[[@6,8:8='+',<5>,1:8], [@7,9:9=' ',<7>,1:9]," + + " [@8,10:10='1',<2>,1:10], [@9,11:11=';',<3>,1:11]," + + " [@10,12:11='',<-1>,1:12]]", + tokens.getBuffer().toString()); + tokens.release(m); + } + + protected static class TestingUnbufferedTokenStream extends UnbufferedTokenStream { + + public TestingUnbufferedTokenStream(TokenSource tokenSource) { + super(tokenSource); + } + + /** For testing. What's in moving window into token stream from + * current index, LT(1) or tokens[p], to end of buffer? + */ + protected List getRemainingBuffer() { + if ( n==0 ) { + return Collections.emptyList(); + } + + return Arrays.asList(tokens).subList(p, n); + } + + /** For testing. What's in moving window buffer into data stream. + * From 0..p-1 have been consume. + */ + protected List getBuffer() { + if ( n==0 ) { + return Collections.emptyList(); + } + + return Arrays.asList(tokens).subList(0, n); + } + + } +} diff --git a/tool/test/org/antlr/v4/test/tool/TestVocabulary.java b/tool/test/org/antlr/v4/test/tool/TestVocabulary.java new file mode 100644 index 000000000..3d173baf8 --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestVocabulary.java @@ -0,0 +1,79 @@ +/* + * [The "BSD license"] + * Copyright (c) 2014 Terence Parr + * Copyright (c) 2014 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.junit.Assert; +import org.junit.Test; + +/** + * + * @author Sam Harwell + */ +public class TestVocabulary extends BaseTest { + + @Test + public void testEmptyVocabulary() { + Assert.assertNotNull(VocabularyImpl.EMPTY_VOCABULARY); + Assert.assertEquals("EOF", VocabularyImpl.EMPTY_VOCABULARY.getSymbolicName(Token.EOF)); + Assert.assertEquals("0", VocabularyImpl.EMPTY_VOCABULARY.getDisplayName(Token.INVALID_TYPE)); + } + + @Test + public void testVocabularyFromTokenNames() { + String[] tokenNames = { + "", + "TOKEN_REF", "RULE_REF", "'//'", "'/'", "'*'", "'!'", "ID", "STRING" + }; + + Vocabulary vocabulary = VocabularyImpl.fromTokenNames(tokenNames); + Assert.assertNotNull(vocabulary); + Assert.assertEquals("EOF", vocabulary.getSymbolicName(Token.EOF)); + for (int i = 0; i < tokenNames.length; i++) { + Assert.assertEquals(tokenNames[i], vocabulary.getDisplayName(i)); + + if (tokenNames[i].startsWith("'")) { + Assert.assertEquals(tokenNames[i], vocabulary.getLiteralName(i)); + Assert.assertNull(vocabulary.getSymbolicName(i)); + } + else if (Character.isUpperCase(tokenNames[i].charAt(0))) { + Assert.assertNull(vocabulary.getLiteralName(i)); + Assert.assertEquals(tokenNames[i], vocabulary.getSymbolicName(i)); + } + else { + Assert.assertNull(vocabulary.getLiteralName(i)); + Assert.assertNull(vocabulary.getSymbolicName(i)); + } + } + } + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestXPath.java b/tool/test/org/antlr/v4/test/tool/TestXPath.java new file mode 100644 index 000000000..89725bafb --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestXPath.java @@ -0,0 +1,228 @@ +package org.antlr.v4.test.tool; + +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.antlr.v4.runtime.tree.xpath.XPath; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class TestXPath extends BaseTest { + public static final String grammar = + "grammar Expr;\n" + + "prog: func+ 
;\n" + + "func: 'def' ID '(' arg (',' arg)* ')' body ;\n" + + "body: '{' stat+ '}' ;\n" + + "arg : ID ;\n" + + "stat: expr ';' # printExpr\n" + + " | ID '=' expr ';' # assign\n" + + " | 'return' expr ';' # ret\n" + + " | ';' # blank\n" + + " ;\n" + + "expr: expr ('*'|'/') expr # MulDiv\n" + + " | expr ('+'|'-') expr # AddSub\n" + + " | primary # prim\n" + + " ;\n" + + "primary" + + " : INT # int\n" + + " | ID # id\n" + + " | '(' expr ')' # parens\n" + + " ;" + + "\n" + + "MUL : '*' ; // assigns token name to '*' used above in grammar\n" + + "DIV : '/' ;\n" + + "ADD : '+' ;\n" + + "SUB : '-' ;\n" + + "RETURN : 'return' ;\n" + + "ID : [a-zA-Z]+ ; // match identifiers\n" + + "INT : [0-9]+ ; // match integers\n" + + "NEWLINE:'\\r'? '\\n' -> skip; // return newlines to parser (is end-statement signal)\n" + + "WS : [ \\t]+ -> skip ; // toss out whitespace\n"; + public static final String SAMPLE_PROGRAM = + "def f(x,y) { x = 3+4; y; ; }\n" + + "def g(x) { return 1+2*x; }\n"; + + @Test public void testValidPaths() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String xpath[] = { + "/prog/func", // all funcs under prog at root + "/prog/*", // all children of prog at root + "/*/func", // all func kids of any root node + "prog", // prog must be root node + "/prog", // prog must be root node + "/*", // any root + "*", // any root + "//ID", // any ID in tree + "//expr/primary/ID",// any ID child of a primary under any expr + "//body//ID", // any ID under a body + "//'return'", // any 'return' literal in tree, matched by literal name + "//RETURN", // any 'return' literal in tree, matched by symbolic name + "//primary/*", // all kids of any primary + "//func/*/stat", // all stat nodes grandkids of any func node + "/prog/func/'def'", // all def literal kids of func kid of prog + "//stat/';'", // all ';' under any stat node + "//expr/primary/!ID", // anything but ID under primary under any expr node + "//expr/!primary", // anything but primary under any expr node + "//!*", // nothing anywhere + "/!*", // nothing at root + "//expr//ID", // any ID under any expression (tests antlr/antlr4#370) + }; + String expected[] = { + "[func, func]", + "[func, func]", + "[func, func]", + "[prog]", + "[prog]", + "[prog]", + "[prog]", + "[f, x, y, x, y, g, x, x]", + "[y, x]", + "[x, y, x]", + "[return]", + "[return]", + "[3, 4, y, 1, 2, x]", + "[stat, stat, stat, stat]", + "[def, def]", + "[;, ;, ;, ;]", + "[3, 4, 1, 2]", + "[expr, expr, expr, expr, expr, expr]", + "[]", + "[]", + "[y, x]", + }; + + for (int i=0; i nodes = getNodeStrings(SAMPLE_PROGRAM, xpath[i], "prog", "ExprParser", "ExprLexer"); + String result = nodes.toString(); + assertEquals("path "+xpath[i]+" failed", expected[i], result); + } + } + + @Test public void testWeirdChar() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String path = "&"; + String expected = "Invalid tokens or characters at index 0 in path '&'"; + + testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + } + + @Test public void testWeirdChar2() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String path = "//w&e/"; + String expected = "Invalid tokens or characters at index 3 in path '//w&e/'"; + + testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + 
} + + @Test public void testBadSyntax() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String path = "///"; + String expected = "/ at index 2 isn't a valid rule name"; + + testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + } + + @Test public void testMissingWordAtEnd() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String path = "//"; + String expected = "Missing path element at end of path"; + + testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + } + + @Test public void testBadTokenName() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String path = "//Ick"; + String expected = "Ick at index 2 isn't a valid token name"; + + testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + } + + @Test public void testBadRuleName() throws Exception { + boolean ok = + rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", + "ExprLexer", false); + assertTrue(ok); + + String path = "/prog/ick"; + String expected = "ick at index 6 isn't a valid rule name"; + + testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + } + + protected void testError(String input, String path, String expected, + String startRuleName, + String parserName, String lexerName) + throws Exception + { + Pair<Parser, Lexer> pl = getParserAndLexer(input, parserName, lexerName); + Parser parser = pl.a; + ParseTree tree = execStartRule(startRuleName, parser); + + IllegalArgumentException e = null; + try { + XPath.findAll(tree, path, parser); + } + catch (IllegalArgumentException iae) { + e = iae; + } + assertNotNull(e); + assertEquals(expected, e.getMessage()); + } + + public List<String> getNodeStrings(String input, String xpath, + String startRuleName, + String parserName, String lexerName) + throws Exception + { + Pair<Parser, Lexer> pl = getParserAndLexer(input, parserName, lexerName); + Parser parser = pl.a; + ParseTree tree = execStartRule(startRuleName, parser); + + List<String> nodes = new ArrayList<String>(); + for (ParseTree t : XPath.findAll(tree, xpath, parser) ) { + if ( t instanceof RuleContext) { + RuleContext r = (RuleContext)t; + nodes.add(parser.getRuleNames()[r.getRuleIndex()]); + } + else { + TerminalNode token = (TerminalNode)t; + nodes.add(token.getText()); + } + } + return nodes; + } +} diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st deleted file mode 100644 index ad130a0fa..000000000 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/ActionPlacement.st +++ /dev/null @@ -1,8 +0,0 @@ -lexer grammar ; -I : ({} 'a' -| {} - 'a' {} - 'b' {}) - {} ; -WS : (' '|'\n') -> skip ; -J : .; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st deleted file mode 100644 index 5a040ea9f..000000000 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/LargeLexer.st +++ /dev/null @@ -1,4002 +0,0 @@ -lexer grammar ; -WS : [ \t\r\n]+ -> skip; -KW0 : 'KW0'; -KW1 : 'KW1'; -KW2 : 'KW2'; -KW3 : 'KW3'; -KW4 : 'KW4'; -KW5 : 'KW5'; -KW6 : 'KW6'; -KW7 : 'KW7'; -KW8 : 'KW8'; -KW9 : 'KW9'; -KW10 : 'KW10'; -KW11 : 'KW11'; -KW12 : 'KW12'; -KW13 : 'KW13'; -KW14 : 'KW14'; -KW15 : 'KW15'; -KW16 : 'KW16'; 
-KW17 : 'KW17'; -KW18 : 'KW18'; -KW19 : 'KW19'; -KW20 : 'KW20'; -KW21 : 'KW21'; -KW22 : 'KW22'; -KW23 : 'KW23'; -KW24 : 'KW24'; -KW25 : 'KW25'; -KW26 : 'KW26'; -KW27 : 'KW27'; -KW28 : 'KW28'; -KW29 : 'KW29'; -KW30 : 'KW30'; -KW31 : 'KW31'; -KW32 : 'KW32'; -KW33 : 'KW33'; -KW34 : 'KW34'; -KW35 : 'KW35'; -KW36 : 'KW36'; -KW37 : 'KW37'; -KW38 : 'KW38'; -KW39 : 'KW39'; -KW40 : 'KW40'; -KW41 : 'KW41'; -KW42 : 'KW42'; -KW43 : 'KW43'; -KW44 : 'KW44'; -KW45 : 'KW45'; -KW46 : 'KW46'; -KW47 : 'KW47'; -KW48 : 'KW48'; -KW49 : 'KW49'; -KW50 : 'KW50'; -KW51 : 'KW51'; -KW52 : 'KW52'; -KW53 : 'KW53'; -KW54 : 'KW54'; -KW55 : 'KW55'; -KW56 : 'KW56'; -KW57 : 'KW57'; -KW58 : 'KW58'; -KW59 : 'KW59'; -KW60 : 'KW60'; -KW61 : 'KW61'; -KW62 : 'KW62'; -KW63 : 'KW63'; -KW64 : 'KW64'; -KW65 : 'KW65'; -KW66 : 'KW66'; -KW67 : 'KW67'; -KW68 : 'KW68'; -KW69 : 'KW69'; -KW70 : 'KW70'; -KW71 : 'KW71'; -KW72 : 'KW72'; -KW73 : 'KW73'; -KW74 : 'KW74'; -KW75 : 'KW75'; -KW76 : 'KW76'; -KW77 : 'KW77'; -KW78 : 'KW78'; -KW79 : 'KW79'; -KW80 : 'KW80'; -KW81 : 'KW81'; -KW82 : 'KW82'; -KW83 : 'KW83'; -KW84 : 'KW84'; -KW85 : 'KW85'; -KW86 : 'KW86'; -KW87 : 'KW87'; -KW88 : 'KW88'; -KW89 : 'KW89'; -KW90 : 'KW90'; -KW91 : 'KW91'; -KW92 : 'KW92'; -KW93 : 'KW93'; -KW94 : 'KW94'; -KW95 : 'KW95'; -KW96 : 'KW96'; -KW97 : 'KW97'; -KW98 : 'KW98'; -KW99 : 'KW99'; -KW100 : 'KW100'; -KW101 : 'KW101'; -KW102 : 'KW102'; -KW103 : 'KW103'; -KW104 : 'KW104'; -KW105 : 'KW105'; -KW106 : 'KW106'; -KW107 : 'KW107'; -KW108 : 'KW108'; -KW109 : 'KW109'; -KW110 : 'KW110'; -KW111 : 'KW111'; -KW112 : 'KW112'; -KW113 : 'KW113'; -KW114 : 'KW114'; -KW115 : 'KW115'; -KW116 : 'KW116'; -KW117 : 'KW117'; -KW118 : 'KW118'; -KW119 : 'KW119'; -KW120 : 'KW120'; -KW121 : 'KW121'; -KW122 : 'KW122'; -KW123 : 'KW123'; -KW124 : 'KW124'; -KW125 : 'KW125'; -KW126 : 'KW126'; -KW127 : 'KW127'; -KW128 : 'KW128'; -KW129 : 'KW129'; -KW130 : 'KW130'; -KW131 : 'KW131'; -KW132 : 'KW132'; -KW133 : 'KW133'; -KW134 : 'KW134'; -KW135 : 'KW135'; -KW136 : 'KW136'; -KW137 : 'KW137'; -KW138 : 'KW138'; -KW139 : 'KW139'; -KW140 : 'KW140'; -KW141 : 'KW141'; -KW142 : 'KW142'; -KW143 : 'KW143'; -KW144 : 'KW144'; -KW145 : 'KW145'; -KW146 : 'KW146'; -KW147 : 'KW147'; -KW148 : 'KW148'; -KW149 : 'KW149'; -KW150 : 'KW150'; -KW151 : 'KW151'; -KW152 : 'KW152'; -KW153 : 'KW153'; -KW154 : 'KW154'; -KW155 : 'KW155'; -KW156 : 'KW156'; -KW157 : 'KW157'; -KW158 : 'KW158'; -KW159 : 'KW159'; -KW160 : 'KW160'; -KW161 : 'KW161'; -KW162 : 'KW162'; -KW163 : 'KW163'; -KW164 : 'KW164'; -KW165 : 'KW165'; -KW166 : 'KW166'; -KW167 : 'KW167'; -KW168 : 'KW168'; -KW169 : 'KW169'; -KW170 : 'KW170'; -KW171 : 'KW171'; -KW172 : 'KW172'; -KW173 : 'KW173'; -KW174 : 'KW174'; -KW175 : 'KW175'; -KW176 : 'KW176'; -KW177 : 'KW177'; -KW178 : 'KW178'; -KW179 : 'KW179'; -KW180 : 'KW180'; -KW181 : 'KW181'; -KW182 : 'KW182'; -KW183 : 'KW183'; -KW184 : 'KW184'; -KW185 : 'KW185'; -KW186 : 'KW186'; -KW187 : 'KW187'; -KW188 : 'KW188'; -KW189 : 'KW189'; -KW190 : 'KW190'; -KW191 : 'KW191'; -KW192 : 'KW192'; -KW193 : 'KW193'; -KW194 : 'KW194'; -KW195 : 'KW195'; -KW196 : 'KW196'; -KW197 : 'KW197'; -KW198 : 'KW198'; -KW199 : 'KW199'; -KW200 : 'KW200'; -KW201 : 'KW201'; -KW202 : 'KW202'; -KW203 : 'KW203'; -KW204 : 'KW204'; -KW205 : 'KW205'; -KW206 : 'KW206'; -KW207 : 'KW207'; -KW208 : 'KW208'; -KW209 : 'KW209'; -KW210 : 'KW210'; -KW211 : 'KW211'; -KW212 : 'KW212'; -KW213 : 'KW213'; -KW214 : 'KW214'; -KW215 : 'KW215'; -KW216 : 'KW216'; -KW217 : 'KW217'; -KW218 : 'KW218'; -KW219 : 'KW219'; -KW220 : 'KW220'; -KW221 : 'KW221'; -KW222 : 'KW222'; -KW223 : 
'KW223'; -KW224 : 'KW224'; -KW225 : 'KW225'; -KW226 : 'KW226'; -KW227 : 'KW227'; -KW228 : 'KW228'; -KW229 : 'KW229'; -KW230 : 'KW230'; -KW231 : 'KW231'; -KW232 : 'KW232'; -KW233 : 'KW233'; -KW234 : 'KW234'; -KW235 : 'KW235'; -KW236 : 'KW236'; -KW237 : 'KW237'; -KW238 : 'KW238'; -KW239 : 'KW239'; -KW240 : 'KW240'; -KW241 : 'KW241'; -KW242 : 'KW242'; -KW243 : 'KW243'; -KW244 : 'KW244'; -KW245 : 'KW245'; -KW246 : 'KW246'; -KW247 : 'KW247'; -KW248 : 'KW248'; -KW249 : 'KW249'; -KW250 : 'KW250'; -KW251 : 'KW251'; -KW252 : 'KW252'; -KW253 : 'KW253'; -KW254 : 'KW254'; -KW255 : 'KW255'; -KW256 : 'KW256'; -KW257 : 'KW257'; -KW258 : 'KW258'; -KW259 : 'KW259'; -KW260 : 'KW260'; -KW261 : 'KW261'; -KW262 : 'KW262'; -KW263 : 'KW263'; -KW264 : 'KW264'; -KW265 : 'KW265'; -KW266 : 'KW266'; -KW267 : 'KW267'; -KW268 : 'KW268'; -KW269 : 'KW269'; -KW270 : 'KW270'; -KW271 : 'KW271'; -KW272 : 'KW272'; -KW273 : 'KW273'; -KW274 : 'KW274'; -KW275 : 'KW275'; -KW276 : 'KW276'; -KW277 : 'KW277'; -KW278 : 'KW278'; -KW279 : 'KW279'; -KW280 : 'KW280'; -KW281 : 'KW281'; -KW282 : 'KW282'; -KW283 : 'KW283'; -KW284 : 'KW284'; -KW285 : 'KW285'; -KW286 : 'KW286'; -KW287 : 'KW287'; -KW288 : 'KW288'; -KW289 : 'KW289'; -KW290 : 'KW290'; -KW291 : 'KW291'; -KW292 : 'KW292'; -KW293 : 'KW293'; -KW294 : 'KW294'; -KW295 : 'KW295'; -KW296 : 'KW296'; -KW297 : 'KW297'; -KW298 : 'KW298'; -KW299 : 'KW299'; -KW300 : 'KW300'; -KW301 : 'KW301'; -KW302 : 'KW302'; -KW303 : 'KW303'; -KW304 : 'KW304'; -KW305 : 'KW305'; -KW306 : 'KW306'; -KW307 : 'KW307'; -KW308 : 'KW308'; -KW309 : 'KW309'; -KW310 : 'KW310'; -KW311 : 'KW311'; -KW312 : 'KW312'; -KW313 : 'KW313'; -KW314 : 'KW314'; -KW315 : 'KW315'; -KW316 : 'KW316'; -KW317 : 'KW317'; -KW318 : 'KW318'; -KW319 : 'KW319'; -KW320 : 'KW320'; -KW321 : 'KW321'; -KW322 : 'KW322'; -KW323 : 'KW323'; -KW324 : 'KW324'; -KW325 : 'KW325'; -KW326 : 'KW326'; -KW327 : 'KW327'; -KW328 : 'KW328'; -KW329 : 'KW329'; -KW330 : 'KW330'; -KW331 : 'KW331'; -KW332 : 'KW332'; -KW333 : 'KW333'; -KW334 : 'KW334'; -KW335 : 'KW335'; -KW336 : 'KW336'; -KW337 : 'KW337'; -KW338 : 'KW338'; -KW339 : 'KW339'; -KW340 : 'KW340'; -KW341 : 'KW341'; -KW342 : 'KW342'; -KW343 : 'KW343'; -KW344 : 'KW344'; -KW345 : 'KW345'; -KW346 : 'KW346'; -KW347 : 'KW347'; -KW348 : 'KW348'; -KW349 : 'KW349'; -KW350 : 'KW350'; -KW351 : 'KW351'; -KW352 : 'KW352'; -KW353 : 'KW353'; -KW354 : 'KW354'; -KW355 : 'KW355'; -KW356 : 'KW356'; -KW357 : 'KW357'; -KW358 : 'KW358'; -KW359 : 'KW359'; -KW360 : 'KW360'; -KW361 : 'KW361'; -KW362 : 'KW362'; -KW363 : 'KW363'; -KW364 : 'KW364'; -KW365 : 'KW365'; -KW366 : 'KW366'; -KW367 : 'KW367'; -KW368 : 'KW368'; -KW369 : 'KW369'; -KW370 : 'KW370'; -KW371 : 'KW371'; -KW372 : 'KW372'; -KW373 : 'KW373'; -KW374 : 'KW374'; -KW375 : 'KW375'; -KW376 : 'KW376'; -KW377 : 'KW377'; -KW378 : 'KW378'; -KW379 : 'KW379'; -KW380 : 'KW380'; -KW381 : 'KW381'; -KW382 : 'KW382'; -KW383 : 'KW383'; -KW384 : 'KW384'; -KW385 : 'KW385'; -KW386 : 'KW386'; -KW387 : 'KW387'; -KW388 : 'KW388'; -KW389 : 'KW389'; -KW390 : 'KW390'; -KW391 : 'KW391'; -KW392 : 'KW392'; -KW393 : 'KW393'; -KW394 : 'KW394'; -KW395 : 'KW395'; -KW396 : 'KW396'; -KW397 : 'KW397'; -KW398 : 'KW398'; -KW399 : 'KW399'; -KW400 : 'KW400'; -KW401 : 'KW401'; -KW402 : 'KW402'; -KW403 : 'KW403'; -KW404 : 'KW404'; -KW405 : 'KW405'; -KW406 : 'KW406'; -KW407 : 'KW407'; -KW408 : 'KW408'; -KW409 : 'KW409'; -KW410 : 'KW410'; -KW411 : 'KW411'; -KW412 : 'KW412'; -KW413 : 'KW413'; -KW414 : 'KW414'; -KW415 : 'KW415'; -KW416 : 'KW416'; -KW417 : 'KW417'; -KW418 : 'KW418'; -KW419 : 'KW419'; -KW420 : 'KW420'; 
-KW421 : 'KW421'; -KW422 : 'KW422'; -KW423 : 'KW423'; -KW424 : 'KW424'; -KW425 : 'KW425'; -KW426 : 'KW426'; -KW427 : 'KW427'; -KW428 : 'KW428'; -KW429 : 'KW429'; -KW430 : 'KW430'; -KW431 : 'KW431'; -KW432 : 'KW432'; -KW433 : 'KW433'; -KW434 : 'KW434'; -KW435 : 'KW435'; -KW436 : 'KW436'; -KW437 : 'KW437'; -KW438 : 'KW438'; -KW439 : 'KW439'; -KW440 : 'KW440'; -KW441 : 'KW441'; -KW442 : 'KW442'; -KW443 : 'KW443'; -KW444 : 'KW444'; -KW445 : 'KW445'; -KW446 : 'KW446'; -KW447 : 'KW447'; -KW448 : 'KW448'; -KW449 : 'KW449'; -KW450 : 'KW450'; -KW451 : 'KW451'; -KW452 : 'KW452'; -KW453 : 'KW453'; -KW454 : 'KW454'; -KW455 : 'KW455'; -KW456 : 'KW456'; -KW457 : 'KW457'; -KW458 : 'KW458'; -KW459 : 'KW459'; -KW460 : 'KW460'; -KW461 : 'KW461'; -KW462 : 'KW462'; -KW463 : 'KW463'; -KW464 : 'KW464'; -KW465 : 'KW465'; -KW466 : 'KW466'; -KW467 : 'KW467'; -KW468 : 'KW468'; -KW469 : 'KW469'; -KW470 : 'KW470'; -KW471 : 'KW471'; -KW472 : 'KW472'; -KW473 : 'KW473'; -KW474 : 'KW474'; -KW475 : 'KW475'; -KW476 : 'KW476'; -KW477 : 'KW477'; -KW478 : 'KW478'; -KW479 : 'KW479'; -KW480 : 'KW480'; -KW481 : 'KW481'; -KW482 : 'KW482'; -KW483 : 'KW483'; -KW484 : 'KW484'; -KW485 : 'KW485'; -KW486 : 'KW486'; -KW487 : 'KW487'; -KW488 : 'KW488'; -KW489 : 'KW489'; -KW490 : 'KW490'; -KW491 : 'KW491'; -KW492 : 'KW492'; -KW493 : 'KW493'; -KW494 : 'KW494'; -KW495 : 'KW495'; -KW496 : 'KW496'; -KW497 : 'KW497'; -KW498 : 'KW498'; -KW499 : 'KW499'; -KW500 : 'KW500'; -KW501 : 'KW501'; -KW502 : 'KW502'; -KW503 : 'KW503'; -KW504 : 'KW504'; -KW505 : 'KW505'; -KW506 : 'KW506'; -KW507 : 'KW507'; -KW508 : 'KW508'; -KW509 : 'KW509'; -KW510 : 'KW510'; -KW511 : 'KW511'; -KW512 : 'KW512'; -KW513 : 'KW513'; -KW514 : 'KW514'; -KW515 : 'KW515'; -KW516 : 'KW516'; -KW517 : 'KW517'; -KW518 : 'KW518'; -KW519 : 'KW519'; -KW520 : 'KW520'; -KW521 : 'KW521'; -KW522 : 'KW522'; -KW523 : 'KW523'; -KW524 : 'KW524'; -KW525 : 'KW525'; -KW526 : 'KW526'; -KW527 : 'KW527'; -KW528 : 'KW528'; -KW529 : 'KW529'; -KW530 : 'KW530'; -KW531 : 'KW531'; -KW532 : 'KW532'; -KW533 : 'KW533'; -KW534 : 'KW534'; -KW535 : 'KW535'; -KW536 : 'KW536'; -KW537 : 'KW537'; -KW538 : 'KW538'; -KW539 : 'KW539'; -KW540 : 'KW540'; -KW541 : 'KW541'; -KW542 : 'KW542'; -KW543 : 'KW543'; -KW544 : 'KW544'; -KW545 : 'KW545'; -KW546 : 'KW546'; -KW547 : 'KW547'; -KW548 : 'KW548'; -KW549 : 'KW549'; -KW550 : 'KW550'; -KW551 : 'KW551'; -KW552 : 'KW552'; -KW553 : 'KW553'; -KW554 : 'KW554'; -KW555 : 'KW555'; -KW556 : 'KW556'; -KW557 : 'KW557'; -KW558 : 'KW558'; -KW559 : 'KW559'; -KW560 : 'KW560'; -KW561 : 'KW561'; -KW562 : 'KW562'; -KW563 : 'KW563'; -KW564 : 'KW564'; -KW565 : 'KW565'; -KW566 : 'KW566'; -KW567 : 'KW567'; -KW568 : 'KW568'; -KW569 : 'KW569'; -KW570 : 'KW570'; -KW571 : 'KW571'; -KW572 : 'KW572'; -KW573 : 'KW573'; -KW574 : 'KW574'; -KW575 : 'KW575'; -KW576 : 'KW576'; -KW577 : 'KW577'; -KW578 : 'KW578'; -KW579 : 'KW579'; -KW580 : 'KW580'; -KW581 : 'KW581'; -KW582 : 'KW582'; -KW583 : 'KW583'; -KW584 : 'KW584'; -KW585 : 'KW585'; -KW586 : 'KW586'; -KW587 : 'KW587'; -KW588 : 'KW588'; -KW589 : 'KW589'; -KW590 : 'KW590'; -KW591 : 'KW591'; -KW592 : 'KW592'; -KW593 : 'KW593'; -KW594 : 'KW594'; -KW595 : 'KW595'; -KW596 : 'KW596'; -KW597 : 'KW597'; -KW598 : 'KW598'; -KW599 : 'KW599'; -KW600 : 'KW600'; -KW601 : 'KW601'; -KW602 : 'KW602'; -KW603 : 'KW603'; -KW604 : 'KW604'; -KW605 : 'KW605'; -KW606 : 'KW606'; -KW607 : 'KW607'; -KW608 : 'KW608'; -KW609 : 'KW609'; -KW610 : 'KW610'; -KW611 : 'KW611'; -KW612 : 'KW612'; -KW613 : 'KW613'; -KW614 : 'KW614'; -KW615 : 'KW615'; -KW616 : 'KW616'; -KW617 : 'KW617'; -KW618 : 
'KW618'; -KW619 : 'KW619'; -KW620 : 'KW620'; -KW621 : 'KW621'; -KW622 : 'KW622'; -KW623 : 'KW623'; -KW624 : 'KW624'; -KW625 : 'KW625'; -KW626 : 'KW626'; -KW627 : 'KW627'; -KW628 : 'KW628'; -KW629 : 'KW629'; -KW630 : 'KW630'; -KW631 : 'KW631'; -KW632 : 'KW632'; -KW633 : 'KW633'; -KW634 : 'KW634'; -KW635 : 'KW635'; -KW636 : 'KW636'; -KW637 : 'KW637'; -KW638 : 'KW638'; -KW639 : 'KW639'; -KW640 : 'KW640'; -KW641 : 'KW641'; -KW642 : 'KW642'; -KW643 : 'KW643'; -KW644 : 'KW644'; -KW645 : 'KW645'; -KW646 : 'KW646'; -KW647 : 'KW647'; -KW648 : 'KW648'; -KW649 : 'KW649'; -KW650 : 'KW650'; -KW651 : 'KW651'; -KW652 : 'KW652'; -KW653 : 'KW653'; -KW654 : 'KW654'; -KW655 : 'KW655'; -KW656 : 'KW656'; -KW657 : 'KW657'; -KW658 : 'KW658'; -KW659 : 'KW659'; -KW660 : 'KW660'; -KW661 : 'KW661'; -KW662 : 'KW662'; -KW663 : 'KW663'; -KW664 : 'KW664'; -KW665 : 'KW665'; -KW666 : 'KW666'; -KW667 : 'KW667'; -KW668 : 'KW668'; -KW669 : 'KW669'; -KW670 : 'KW670'; -KW671 : 'KW671'; -KW672 : 'KW672'; -KW673 : 'KW673'; -KW674 : 'KW674'; -KW675 : 'KW675'; -KW676 : 'KW676'; -KW677 : 'KW677'; -KW678 : 'KW678'; -KW679 : 'KW679'; -KW680 : 'KW680'; -KW681 : 'KW681'; -KW682 : 'KW682'; -KW683 : 'KW683'; -KW684 : 'KW684'; -KW685 : 'KW685'; -KW686 : 'KW686'; -KW687 : 'KW687'; -KW688 : 'KW688'; -KW689 : 'KW689'; -KW690 : 'KW690'; -KW691 : 'KW691'; -KW692 : 'KW692'; -KW693 : 'KW693'; -KW694 : 'KW694'; -KW695 : 'KW695'; -KW696 : 'KW696'; -KW697 : 'KW697'; -KW698 : 'KW698'; -KW699 : 'KW699'; -KW700 : 'KW700'; -KW701 : 'KW701'; -KW702 : 'KW702'; -KW703 : 'KW703'; -KW704 : 'KW704'; -KW705 : 'KW705'; -KW706 : 'KW706'; -KW707 : 'KW707'; -KW708 : 'KW708'; -KW709 : 'KW709'; -KW710 : 'KW710'; -KW711 : 'KW711'; -KW712 : 'KW712'; -KW713 : 'KW713'; -KW714 : 'KW714'; -KW715 : 'KW715'; -KW716 : 'KW716'; -KW717 : 'KW717'; -KW718 : 'KW718'; -KW719 : 'KW719'; -KW720 : 'KW720'; -KW721 : 'KW721'; -KW722 : 'KW722'; -KW723 : 'KW723'; -KW724 : 'KW724'; -KW725 : 'KW725'; -KW726 : 'KW726'; -KW727 : 'KW727'; -KW728 : 'KW728'; -KW729 : 'KW729'; -KW730 : 'KW730'; -KW731 : 'KW731'; -KW732 : 'KW732'; -KW733 : 'KW733'; -KW734 : 'KW734'; -KW735 : 'KW735'; -KW736 : 'KW736'; -KW737 : 'KW737'; -KW738 : 'KW738'; -KW739 : 'KW739'; -KW740 : 'KW740'; -KW741 : 'KW741'; -KW742 : 'KW742'; -KW743 : 'KW743'; -KW744 : 'KW744'; -KW745 : 'KW745'; -KW746 : 'KW746'; -KW747 : 'KW747'; -KW748 : 'KW748'; -KW749 : 'KW749'; -KW750 : 'KW750'; -KW751 : 'KW751'; -KW752 : 'KW752'; -KW753 : 'KW753'; -KW754 : 'KW754'; -KW755 : 'KW755'; -KW756 : 'KW756'; -KW757 : 'KW757'; -KW758 : 'KW758'; -KW759 : 'KW759'; -KW760 : 'KW760'; -KW761 : 'KW761'; -KW762 : 'KW762'; -KW763 : 'KW763'; -KW764 : 'KW764'; -KW765 : 'KW765'; -KW766 : 'KW766'; -KW767 : 'KW767'; -KW768 : 'KW768'; -KW769 : 'KW769'; -KW770 : 'KW770'; -KW771 : 'KW771'; -KW772 : 'KW772'; -KW773 : 'KW773'; -KW774 : 'KW774'; -KW775 : 'KW775'; -KW776 : 'KW776'; -KW777 : 'KW777'; -KW778 : 'KW778'; -KW779 : 'KW779'; -KW780 : 'KW780'; -KW781 : 'KW781'; -KW782 : 'KW782'; -KW783 : 'KW783'; -KW784 : 'KW784'; -KW785 : 'KW785'; -KW786 : 'KW786'; -KW787 : 'KW787'; -KW788 : 'KW788'; -KW789 : 'KW789'; -KW790 : 'KW790'; -KW791 : 'KW791'; -KW792 : 'KW792'; -KW793 : 'KW793'; -KW794 : 'KW794'; -KW795 : 'KW795'; -KW796 : 'KW796'; -KW797 : 'KW797'; -KW798 : 'KW798'; -KW799 : 'KW799'; -KW800 : 'KW800'; -KW801 : 'KW801'; -KW802 : 'KW802'; -KW803 : 'KW803'; -KW804 : 'KW804'; -KW805 : 'KW805'; -KW806 : 'KW806'; -KW807 : 'KW807'; -KW808 : 'KW808'; -KW809 : 'KW809'; -KW810 : 'KW810'; -KW811 : 'KW811'; -KW812 : 'KW812'; -KW813 : 'KW813'; -KW814 : 'KW814'; -KW815 : 'KW815'; 
-KW816 : 'KW816'; -KW817 : 'KW817'; -KW818 : 'KW818'; -KW819 : 'KW819'; -KW820 : 'KW820'; -KW821 : 'KW821'; -KW822 : 'KW822'; -KW823 : 'KW823'; -KW824 : 'KW824'; -KW825 : 'KW825'; -KW826 : 'KW826'; -KW827 : 'KW827'; -KW828 : 'KW828'; -KW829 : 'KW829'; -KW830 : 'KW830'; -KW831 : 'KW831'; -KW832 : 'KW832'; -KW833 : 'KW833'; -KW834 : 'KW834'; -KW835 : 'KW835'; -KW836 : 'KW836'; -KW837 : 'KW837'; -KW838 : 'KW838'; -KW839 : 'KW839'; -KW840 : 'KW840'; -KW841 : 'KW841'; -KW842 : 'KW842'; -KW843 : 'KW843'; -KW844 : 'KW844'; -KW845 : 'KW845'; -KW846 : 'KW846'; -KW847 : 'KW847'; -KW848 : 'KW848'; -KW849 : 'KW849'; -KW850 : 'KW850'; -KW851 : 'KW851'; -KW852 : 'KW852'; -KW853 : 'KW853'; -KW854 : 'KW854'; -KW855 : 'KW855'; -KW856 : 'KW856'; -KW857 : 'KW857'; -KW858 : 'KW858'; -KW859 : 'KW859'; -KW860 : 'KW860'; -KW861 : 'KW861'; -KW862 : 'KW862'; -KW863 : 'KW863'; -KW864 : 'KW864'; -KW865 : 'KW865'; -KW866 : 'KW866'; -KW867 : 'KW867'; -KW868 : 'KW868'; -KW869 : 'KW869'; -KW870 : 'KW870'; -KW871 : 'KW871'; -KW872 : 'KW872'; -KW873 : 'KW873'; -KW874 : 'KW874'; -KW875 : 'KW875'; -KW876 : 'KW876'; -KW877 : 'KW877'; -KW878 : 'KW878'; -KW879 : 'KW879'; -KW880 : 'KW880'; -KW881 : 'KW881'; -KW882 : 'KW882'; -KW883 : 'KW883'; -KW884 : 'KW884'; -KW885 : 'KW885'; -KW886 : 'KW886'; -KW887 : 'KW887'; -KW888 : 'KW888'; -KW889 : 'KW889'; -KW890 : 'KW890'; -KW891 : 'KW891'; -KW892 : 'KW892'; -KW893 : 'KW893'; -KW894 : 'KW894'; -KW895 : 'KW895'; -KW896 : 'KW896'; -KW897 : 'KW897'; -KW898 : 'KW898'; -KW899 : 'KW899'; -KW900 : 'KW900'; -KW901 : 'KW901'; -KW902 : 'KW902'; -KW903 : 'KW903'; -KW904 : 'KW904'; -KW905 : 'KW905'; -KW906 : 'KW906'; -KW907 : 'KW907'; -KW908 : 'KW908'; -KW909 : 'KW909'; -KW910 : 'KW910'; -KW911 : 'KW911'; -KW912 : 'KW912'; -KW913 : 'KW913'; -KW914 : 'KW914'; -KW915 : 'KW915'; -KW916 : 'KW916'; -KW917 : 'KW917'; -KW918 : 'KW918'; -KW919 : 'KW919'; -KW920 : 'KW920'; -KW921 : 'KW921'; -KW922 : 'KW922'; -KW923 : 'KW923'; -KW924 : 'KW924'; -KW925 : 'KW925'; -KW926 : 'KW926'; -KW927 : 'KW927'; -KW928 : 'KW928'; -KW929 : 'KW929'; -KW930 : 'KW930'; -KW931 : 'KW931'; -KW932 : 'KW932'; -KW933 : 'KW933'; -KW934 : 'KW934'; -KW935 : 'KW935'; -KW936 : 'KW936'; -KW937 : 'KW937'; -KW938 : 'KW938'; -KW939 : 'KW939'; -KW940 : 'KW940'; -KW941 : 'KW941'; -KW942 : 'KW942'; -KW943 : 'KW943'; -KW944 : 'KW944'; -KW945 : 'KW945'; -KW946 : 'KW946'; -KW947 : 'KW947'; -KW948 : 'KW948'; -KW949 : 'KW949'; -KW950 : 'KW950'; -KW951 : 'KW951'; -KW952 : 'KW952'; -KW953 : 'KW953'; -KW954 : 'KW954'; -KW955 : 'KW955'; -KW956 : 'KW956'; -KW957 : 'KW957'; -KW958 : 'KW958'; -KW959 : 'KW959'; -KW960 : 'KW960'; -KW961 : 'KW961'; -KW962 : 'KW962'; -KW963 : 'KW963'; -KW964 : 'KW964'; -KW965 : 'KW965'; -KW966 : 'KW966'; -KW967 : 'KW967'; -KW968 : 'KW968'; -KW969 : 'KW969'; -KW970 : 'KW970'; -KW971 : 'KW971'; -KW972 : 'KW972'; -KW973 : 'KW973'; -KW974 : 'KW974'; -KW975 : 'KW975'; -KW976 : 'KW976'; -KW977 : 'KW977'; -KW978 : 'KW978'; -KW979 : 'KW979'; -KW980 : 'KW980'; -KW981 : 'KW981'; -KW982 : 'KW982'; -KW983 : 'KW983'; -KW984 : 'KW984'; -KW985 : 'KW985'; -KW986 : 'KW986'; -KW987 : 'KW987'; -KW988 : 'KW988'; -KW989 : 'KW989'; -KW990 : 'KW990'; -KW991 : 'KW991'; -KW992 : 'KW992'; -KW993 : 'KW993'; -KW994 : 'KW994'; -KW995 : 'KW995'; -KW996 : 'KW996'; -KW997 : 'KW997'; -KW998 : 'KW998'; -KW999 : 'KW999'; -KW1000 : 'KW1000'; -KW1001 : 'KW1001'; -KW1002 : 'KW1002'; -KW1003 : 'KW1003'; -KW1004 : 'KW1004'; -KW1005 : 'KW1005'; -KW1006 : 'KW1006'; -KW1007 : 'KW1007'; -KW1008 : 'KW1008'; -KW1009 : 'KW1009'; -KW1010 : 'KW1010'; -KW1011 : 'KW1011'; 
-KW1012 : 'KW1012'; -KW1013 : 'KW1013'; -KW1014 : 'KW1014'; -KW1015 : 'KW1015'; -KW1016 : 'KW1016'; -KW1017 : 'KW1017'; -KW1018 : 'KW1018'; -KW1019 : 'KW1019'; -KW1020 : 'KW1020'; -KW1021 : 'KW1021'; -KW1022 : 'KW1022'; -KW1023 : 'KW1023'; -KW1024 : 'KW1024'; -KW1025 : 'KW1025'; -KW1026 : 'KW1026'; -KW1027 : 'KW1027'; -KW1028 : 'KW1028'; -KW1029 : 'KW1029'; -KW1030 : 'KW1030'; -KW1031 : 'KW1031'; -KW1032 : 'KW1032'; -KW1033 : 'KW1033'; -KW1034 : 'KW1034'; -KW1035 : 'KW1035'; -KW1036 : 'KW1036'; -KW1037 : 'KW1037'; -KW1038 : 'KW1038'; -KW1039 : 'KW1039'; -KW1040 : 'KW1040'; -KW1041 : 'KW1041'; -KW1042 : 'KW1042'; -KW1043 : 'KW1043'; -KW1044 : 'KW1044'; -KW1045 : 'KW1045'; -KW1046 : 'KW1046'; -KW1047 : 'KW1047'; -KW1048 : 'KW1048'; -KW1049 : 'KW1049'; -KW1050 : 'KW1050'; -KW1051 : 'KW1051'; -KW1052 : 'KW1052'; -KW1053 : 'KW1053'; -KW1054 : 'KW1054'; -KW1055 : 'KW1055'; -KW1056 : 'KW1056'; -KW1057 : 'KW1057'; -KW1058 : 'KW1058'; -KW1059 : 'KW1059'; -KW1060 : 'KW1060'; -KW1061 : 'KW1061'; -KW1062 : 'KW1062'; -KW1063 : 'KW1063'; -KW1064 : 'KW1064'; -KW1065 : 'KW1065'; -KW1066 : 'KW1066'; -KW1067 : 'KW1067'; -KW1068 : 'KW1068'; -KW1069 : 'KW1069'; -KW1070 : 'KW1070'; -KW1071 : 'KW1071'; -KW1072 : 'KW1072'; -KW1073 : 'KW1073'; -KW1074 : 'KW1074'; -KW1075 : 'KW1075'; -KW1076 : 'KW1076'; -KW1077 : 'KW1077'; -KW1078 : 'KW1078'; -KW1079 : 'KW1079'; -KW1080 : 'KW1080'; -KW1081 : 'KW1081'; -KW1082 : 'KW1082'; -KW1083 : 'KW1083'; -KW1084 : 'KW1084'; -KW1085 : 'KW1085'; -KW1086 : 'KW1086'; -KW1087 : 'KW1087'; -KW1088 : 'KW1088'; -KW1089 : 'KW1089'; -KW1090 : 'KW1090'; -KW1091 : 'KW1091'; -KW1092 : 'KW1092'; -KW1093 : 'KW1093'; -KW1094 : 'KW1094'; -KW1095 : 'KW1095'; -KW1096 : 'KW1096'; -KW1097 : 'KW1097'; -KW1098 : 'KW1098'; -KW1099 : 'KW1099'; -KW1100 : 'KW1100'; -KW1101 : 'KW1101'; -KW1102 : 'KW1102'; -KW1103 : 'KW1103'; -KW1104 : 'KW1104'; -KW1105 : 'KW1105'; -KW1106 : 'KW1106'; -KW1107 : 'KW1107'; -KW1108 : 'KW1108'; -KW1109 : 'KW1109'; -KW1110 : 'KW1110'; -KW1111 : 'KW1111'; -KW1112 : 'KW1112'; -KW1113 : 'KW1113'; -KW1114 : 'KW1114'; -KW1115 : 'KW1115'; -KW1116 : 'KW1116'; -KW1117 : 'KW1117'; -KW1118 : 'KW1118'; -KW1119 : 'KW1119'; -KW1120 : 'KW1120'; -KW1121 : 'KW1121'; -KW1122 : 'KW1122'; -KW1123 : 'KW1123'; -KW1124 : 'KW1124'; -KW1125 : 'KW1125'; -KW1126 : 'KW1126'; -KW1127 : 'KW1127'; -KW1128 : 'KW1128'; -KW1129 : 'KW1129'; -KW1130 : 'KW1130'; -KW1131 : 'KW1131'; -KW1132 : 'KW1132'; -KW1133 : 'KW1133'; -KW1134 : 'KW1134'; -KW1135 : 'KW1135'; -KW1136 : 'KW1136'; -KW1137 : 'KW1137'; -KW1138 : 'KW1138'; -KW1139 : 'KW1139'; -KW1140 : 'KW1140'; -KW1141 : 'KW1141'; -KW1142 : 'KW1142'; -KW1143 : 'KW1143'; -KW1144 : 'KW1144'; -KW1145 : 'KW1145'; -KW1146 : 'KW1146'; -KW1147 : 'KW1147'; -KW1148 : 'KW1148'; -KW1149 : 'KW1149'; -KW1150 : 'KW1150'; -KW1151 : 'KW1151'; -KW1152 : 'KW1152'; -KW1153 : 'KW1153'; -KW1154 : 'KW1154'; -KW1155 : 'KW1155'; -KW1156 : 'KW1156'; -KW1157 : 'KW1157'; -KW1158 : 'KW1158'; -KW1159 : 'KW1159'; -KW1160 : 'KW1160'; -KW1161 : 'KW1161'; -KW1162 : 'KW1162'; -KW1163 : 'KW1163'; -KW1164 : 'KW1164'; -KW1165 : 'KW1165'; -KW1166 : 'KW1166'; -KW1167 : 'KW1167'; -KW1168 : 'KW1168'; -KW1169 : 'KW1169'; -KW1170 : 'KW1170'; -KW1171 : 'KW1171'; -KW1172 : 'KW1172'; -KW1173 : 'KW1173'; -KW1174 : 'KW1174'; -KW1175 : 'KW1175'; -KW1176 : 'KW1176'; -KW1177 : 'KW1177'; -KW1178 : 'KW1178'; -KW1179 : 'KW1179'; -KW1180 : 'KW1180'; -KW1181 : 'KW1181'; -KW1182 : 'KW1182'; -KW1183 : 'KW1183'; -KW1184 : 'KW1184'; -KW1185 : 'KW1185'; -KW1186 : 'KW1186'; -KW1187 : 'KW1187'; -KW1188 : 'KW1188'; -KW1189 : 
'KW1189'; -KW1190 : 'KW1190'; -KW1191 : 'KW1191'; -KW1192 : 'KW1192'; -KW1193 : 'KW1193'; -KW1194 : 'KW1194'; -KW1195 : 'KW1195'; -KW1196 : 'KW1196'; -KW1197 : 'KW1197'; -KW1198 : 'KW1198'; -KW1199 : 'KW1199'; -KW1200 : 'KW1200'; -KW1201 : 'KW1201'; -KW1202 : 'KW1202'; -KW1203 : 'KW1203'; -KW1204 : 'KW1204'; -KW1205 : 'KW1205'; -KW1206 : 'KW1206'; -KW1207 : 'KW1207'; -KW1208 : 'KW1208'; -KW1209 : 'KW1209'; -KW1210 : 'KW1210'; -KW1211 : 'KW1211'; -KW1212 : 'KW1212'; -KW1213 : 'KW1213'; -KW1214 : 'KW1214'; -KW1215 : 'KW1215'; -KW1216 : 'KW1216'; -KW1217 : 'KW1217'; -KW1218 : 'KW1218'; -KW1219 : 'KW1219'; -KW1220 : 'KW1220'; -KW1221 : 'KW1221'; -KW1222 : 'KW1222'; -KW1223 : 'KW1223'; -KW1224 : 'KW1224'; -KW1225 : 'KW1225'; -KW1226 : 'KW1226'; -KW1227 : 'KW1227'; -KW1228 : 'KW1228'; -KW1229 : 'KW1229'; -KW1230 : 'KW1230'; -KW1231 : 'KW1231'; -KW1232 : 'KW1232'; -KW1233 : 'KW1233'; -KW1234 : 'KW1234'; -KW1235 : 'KW1235'; -KW1236 : 'KW1236'; -KW1237 : 'KW1237'; -KW1238 : 'KW1238'; -KW1239 : 'KW1239'; -KW1240 : 'KW1240'; -KW1241 : 'KW1241'; -KW1242 : 'KW1242'; -KW1243 : 'KW1243'; -KW1244 : 'KW1244'; -KW1245 : 'KW1245'; -KW1246 : 'KW1246'; -KW1247 : 'KW1247'; -KW1248 : 'KW1248'; -KW1249 : 'KW1249'; -KW1250 : 'KW1250'; -KW1251 : 'KW1251'; -KW1252 : 'KW1252'; -KW1253 : 'KW1253'; -KW1254 : 'KW1254'; -KW1255 : 'KW1255'; -KW1256 : 'KW1256'; -KW1257 : 'KW1257'; -KW1258 : 'KW1258'; -KW1259 : 'KW1259'; -KW1260 : 'KW1260'; -KW1261 : 'KW1261'; -KW1262 : 'KW1262'; -KW1263 : 'KW1263'; -KW1264 : 'KW1264'; -KW1265 : 'KW1265'; -KW1266 : 'KW1266'; -KW1267 : 'KW1267'; -KW1268 : 'KW1268'; -KW1269 : 'KW1269'; -KW1270 : 'KW1270'; -KW1271 : 'KW1271'; -KW1272 : 'KW1272'; -KW1273 : 'KW1273'; -KW1274 : 'KW1274'; -KW1275 : 'KW1275'; -KW1276 : 'KW1276'; -KW1277 : 'KW1277'; -KW1278 : 'KW1278'; -KW1279 : 'KW1279'; -KW1280 : 'KW1280'; -KW1281 : 'KW1281'; -KW1282 : 'KW1282'; -KW1283 : 'KW1283'; -KW1284 : 'KW1284'; -KW1285 : 'KW1285'; -KW1286 : 'KW1286'; -KW1287 : 'KW1287'; -KW1288 : 'KW1288'; -KW1289 : 'KW1289'; -KW1290 : 'KW1290'; -KW1291 : 'KW1291'; -KW1292 : 'KW1292'; -KW1293 : 'KW1293'; -KW1294 : 'KW1294'; -KW1295 : 'KW1295'; -KW1296 : 'KW1296'; -KW1297 : 'KW1297'; -KW1298 : 'KW1298'; -KW1299 : 'KW1299'; -KW1300 : 'KW1300'; -KW1301 : 'KW1301'; -KW1302 : 'KW1302'; -KW1303 : 'KW1303'; -KW1304 : 'KW1304'; -KW1305 : 'KW1305'; -KW1306 : 'KW1306'; -KW1307 : 'KW1307'; -KW1308 : 'KW1308'; -KW1309 : 'KW1309'; -KW1310 : 'KW1310'; -KW1311 : 'KW1311'; -KW1312 : 'KW1312'; -KW1313 : 'KW1313'; -KW1314 : 'KW1314'; -KW1315 : 'KW1315'; -KW1316 : 'KW1316'; -KW1317 : 'KW1317'; -KW1318 : 'KW1318'; -KW1319 : 'KW1319'; -KW1320 : 'KW1320'; -KW1321 : 'KW1321'; -KW1322 : 'KW1322'; -KW1323 : 'KW1323'; -KW1324 : 'KW1324'; -KW1325 : 'KW1325'; -KW1326 : 'KW1326'; -KW1327 : 'KW1327'; -KW1328 : 'KW1328'; -KW1329 : 'KW1329'; -KW1330 : 'KW1330'; -KW1331 : 'KW1331'; -KW1332 : 'KW1332'; -KW1333 : 'KW1333'; -KW1334 : 'KW1334'; -KW1335 : 'KW1335'; -KW1336 : 'KW1336'; -KW1337 : 'KW1337'; -KW1338 : 'KW1338'; -KW1339 : 'KW1339'; -KW1340 : 'KW1340'; -KW1341 : 'KW1341'; -KW1342 : 'KW1342'; -KW1343 : 'KW1343'; -KW1344 : 'KW1344'; -KW1345 : 'KW1345'; -KW1346 : 'KW1346'; -KW1347 : 'KW1347'; -KW1348 : 'KW1348'; -KW1349 : 'KW1349'; -KW1350 : 'KW1350'; -KW1351 : 'KW1351'; -KW1352 : 'KW1352'; -KW1353 : 'KW1353'; -KW1354 : 'KW1354'; -KW1355 : 'KW1355'; -KW1356 : 'KW1356'; -KW1357 : 'KW1357'; -KW1358 : 'KW1358'; -KW1359 : 'KW1359'; -KW1360 : 'KW1360'; -KW1361 : 'KW1361'; -KW1362 : 'KW1362'; -KW1363 : 'KW1363'; -KW1364 : 'KW1364'; -KW1365 : 'KW1365'; -KW1366 : 'KW1366'; 
-KW1367 : 'KW1367'; -KW1368 : 'KW1368'; -KW1369 : 'KW1369'; -KW1370 : 'KW1370'; -KW1371 : 'KW1371'; -KW1372 : 'KW1372'; -KW1373 : 'KW1373'; -KW1374 : 'KW1374'; -KW1375 : 'KW1375'; -KW1376 : 'KW1376'; -KW1377 : 'KW1377'; -KW1378 : 'KW1378'; -KW1379 : 'KW1379'; -KW1380 : 'KW1380'; -KW1381 : 'KW1381'; -KW1382 : 'KW1382'; -KW1383 : 'KW1383'; -KW1384 : 'KW1384'; -KW1385 : 'KW1385'; -KW1386 : 'KW1386'; -KW1387 : 'KW1387'; -KW1388 : 'KW1388'; -KW1389 : 'KW1389'; -KW1390 : 'KW1390'; -KW1391 : 'KW1391'; -KW1392 : 'KW1392'; -KW1393 : 'KW1393'; -KW1394 : 'KW1394'; -KW1395 : 'KW1395'; -KW1396 : 'KW1396'; -KW1397 : 'KW1397'; -KW1398 : 'KW1398'; -KW1399 : 'KW1399'; -KW1400 : 'KW1400'; -KW1401 : 'KW1401'; -KW1402 : 'KW1402'; -KW1403 : 'KW1403'; -KW1404 : 'KW1404'; -KW1405 : 'KW1405'; -KW1406 : 'KW1406'; -KW1407 : 'KW1407'; -KW1408 : 'KW1408'; -KW1409 : 'KW1409'; -KW1410 : 'KW1410'; -KW1411 : 'KW1411'; -KW1412 : 'KW1412'; -KW1413 : 'KW1413'; -KW1414 : 'KW1414'; -KW1415 : 'KW1415'; -KW1416 : 'KW1416'; -KW1417 : 'KW1417'; -KW1418 : 'KW1418'; -KW1419 : 'KW1419'; -KW1420 : 'KW1420'; -KW1421 : 'KW1421'; -KW1422 : 'KW1422'; -KW1423 : 'KW1423'; -KW1424 : 'KW1424'; -KW1425 : 'KW1425'; -KW1426 : 'KW1426'; -KW1427 : 'KW1427'; -KW1428 : 'KW1428'; -KW1429 : 'KW1429'; -KW1430 : 'KW1430'; -KW1431 : 'KW1431'; -KW1432 : 'KW1432'; -KW1433 : 'KW1433'; -KW1434 : 'KW1434'; -KW1435 : 'KW1435'; -KW1436 : 'KW1436'; -KW1437 : 'KW1437'; -KW1438 : 'KW1438'; -KW1439 : 'KW1439'; -KW1440 : 'KW1440'; -KW1441 : 'KW1441'; -KW1442 : 'KW1442'; -KW1443 : 'KW1443'; -KW1444 : 'KW1444'; -KW1445 : 'KW1445'; -KW1446 : 'KW1446'; -KW1447 : 'KW1447'; -KW1448 : 'KW1448'; -KW1449 : 'KW1449'; -KW1450 : 'KW1450'; -KW1451 : 'KW1451'; -KW1452 : 'KW1452'; -KW1453 : 'KW1453'; -KW1454 : 'KW1454'; -KW1455 : 'KW1455'; -KW1456 : 'KW1456'; -KW1457 : 'KW1457'; -KW1458 : 'KW1458'; -KW1459 : 'KW1459'; -KW1460 : 'KW1460'; -KW1461 : 'KW1461'; -KW1462 : 'KW1462'; -KW1463 : 'KW1463'; -KW1464 : 'KW1464'; -KW1465 : 'KW1465'; -KW1466 : 'KW1466'; -KW1467 : 'KW1467'; -KW1468 : 'KW1468'; -KW1469 : 'KW1469'; -KW1470 : 'KW1470'; -KW1471 : 'KW1471'; -KW1472 : 'KW1472'; -KW1473 : 'KW1473'; -KW1474 : 'KW1474'; -KW1475 : 'KW1475'; -KW1476 : 'KW1476'; -KW1477 : 'KW1477'; -KW1478 : 'KW1478'; -KW1479 : 'KW1479'; -KW1480 : 'KW1480'; -KW1481 : 'KW1481'; -KW1482 : 'KW1482'; -KW1483 : 'KW1483'; -KW1484 : 'KW1484'; -KW1485 : 'KW1485'; -KW1486 : 'KW1486'; -KW1487 : 'KW1487'; -KW1488 : 'KW1488'; -KW1489 : 'KW1489'; -KW1490 : 'KW1490'; -KW1491 : 'KW1491'; -KW1492 : 'KW1492'; -KW1493 : 'KW1493'; -KW1494 : 'KW1494'; -KW1495 : 'KW1495'; -KW1496 : 'KW1496'; -KW1497 : 'KW1497'; -KW1498 : 'KW1498'; -KW1499 : 'KW1499'; -KW1500 : 'KW1500'; -KW1501 : 'KW1501'; -KW1502 : 'KW1502'; -KW1503 : 'KW1503'; -KW1504 : 'KW1504'; -KW1505 : 'KW1505'; -KW1506 : 'KW1506'; -KW1507 : 'KW1507'; -KW1508 : 'KW1508'; -KW1509 : 'KW1509'; -KW1510 : 'KW1510'; -KW1511 : 'KW1511'; -KW1512 : 'KW1512'; -KW1513 : 'KW1513'; -KW1514 : 'KW1514'; -KW1515 : 'KW1515'; -KW1516 : 'KW1516'; -KW1517 : 'KW1517'; -KW1518 : 'KW1518'; -KW1519 : 'KW1519'; -KW1520 : 'KW1520'; -KW1521 : 'KW1521'; -KW1522 : 'KW1522'; -KW1523 : 'KW1523'; -KW1524 : 'KW1524'; -KW1525 : 'KW1525'; -KW1526 : 'KW1526'; -KW1527 : 'KW1527'; -KW1528 : 'KW1528'; -KW1529 : 'KW1529'; -KW1530 : 'KW1530'; -KW1531 : 'KW1531'; -KW1532 : 'KW1532'; -KW1533 : 'KW1533'; -KW1534 : 'KW1534'; -KW1535 : 'KW1535'; -KW1536 : 'KW1536'; -KW1537 : 'KW1537'; -KW1538 : 'KW1538'; -KW1539 : 'KW1539'; -KW1540 : 'KW1540'; -KW1541 : 'KW1541'; -KW1542 : 'KW1542'; -KW1543 : 'KW1543'; -KW1544 : 
'KW1544'; -KW1545 : 'KW1545'; -KW1546 : 'KW1546'; -KW1547 : 'KW1547'; -KW1548 : 'KW1548'; -KW1549 : 'KW1549'; -KW1550 : 'KW1550'; -KW1551 : 'KW1551'; -KW1552 : 'KW1552'; -KW1553 : 'KW1553'; -KW1554 : 'KW1554'; -KW1555 : 'KW1555'; -KW1556 : 'KW1556'; -KW1557 : 'KW1557'; -KW1558 : 'KW1558'; -KW1559 : 'KW1559'; -KW1560 : 'KW1560'; -KW1561 : 'KW1561'; -KW1562 : 'KW1562'; -KW1563 : 'KW1563'; -KW1564 : 'KW1564'; -KW1565 : 'KW1565'; -KW1566 : 'KW1566'; -KW1567 : 'KW1567'; -KW1568 : 'KW1568'; -KW1569 : 'KW1569'; -KW1570 : 'KW1570'; -KW1571 : 'KW1571'; -KW1572 : 'KW1572'; -KW1573 : 'KW1573'; -KW1574 : 'KW1574'; -KW1575 : 'KW1575'; -KW1576 : 'KW1576'; -KW1577 : 'KW1577'; -KW1578 : 'KW1578'; -KW1579 : 'KW1579'; -KW1580 : 'KW1580'; -KW1581 : 'KW1581'; -KW1582 : 'KW1582'; -KW1583 : 'KW1583'; -KW1584 : 'KW1584'; -KW1585 : 'KW1585'; -KW1586 : 'KW1586'; -KW1587 : 'KW1587'; -KW1588 : 'KW1588'; -KW1589 : 'KW1589'; -KW1590 : 'KW1590'; -KW1591 : 'KW1591'; -KW1592 : 'KW1592'; -KW1593 : 'KW1593'; -KW1594 : 'KW1594'; -KW1595 : 'KW1595'; -KW1596 : 'KW1596'; -KW1597 : 'KW1597'; -KW1598 : 'KW1598'; -KW1599 : 'KW1599'; -KW1600 : 'KW1600'; -KW1601 : 'KW1601'; -KW1602 : 'KW1602'; -KW1603 : 'KW1603'; -KW1604 : 'KW1604'; -KW1605 : 'KW1605'; -KW1606 : 'KW1606'; -KW1607 : 'KW1607'; -KW1608 : 'KW1608'; -KW1609 : 'KW1609'; -KW1610 : 'KW1610'; -KW1611 : 'KW1611'; -KW1612 : 'KW1612'; -KW1613 : 'KW1613'; -KW1614 : 'KW1614'; -KW1615 : 'KW1615'; -KW1616 : 'KW1616'; -KW1617 : 'KW1617'; -KW1618 : 'KW1618'; -KW1619 : 'KW1619'; -KW1620 : 'KW1620'; -KW1621 : 'KW1621'; -KW1622 : 'KW1622'; -KW1623 : 'KW1623'; -KW1624 : 'KW1624'; -KW1625 : 'KW1625'; -KW1626 : 'KW1626'; -KW1627 : 'KW1627'; -KW1628 : 'KW1628'; -KW1629 : 'KW1629'; -KW1630 : 'KW1630'; -KW1631 : 'KW1631'; -KW1632 : 'KW1632'; -KW1633 : 'KW1633'; -KW1634 : 'KW1634'; -KW1635 : 'KW1635'; -KW1636 : 'KW1636'; -KW1637 : 'KW1637'; -KW1638 : 'KW1638'; -KW1639 : 'KW1639'; -KW1640 : 'KW1640'; -KW1641 : 'KW1641'; -KW1642 : 'KW1642'; -KW1643 : 'KW1643'; -KW1644 : 'KW1644'; -KW1645 : 'KW1645'; -KW1646 : 'KW1646'; -KW1647 : 'KW1647'; -KW1648 : 'KW1648'; -KW1649 : 'KW1649'; -KW1650 : 'KW1650'; -KW1651 : 'KW1651'; -KW1652 : 'KW1652'; -KW1653 : 'KW1653'; -KW1654 : 'KW1654'; -KW1655 : 'KW1655'; -KW1656 : 'KW1656'; -KW1657 : 'KW1657'; -KW1658 : 'KW1658'; -KW1659 : 'KW1659'; -KW1660 : 'KW1660'; -KW1661 : 'KW1661'; -KW1662 : 'KW1662'; -KW1663 : 'KW1663'; -KW1664 : 'KW1664'; -KW1665 : 'KW1665'; -KW1666 : 'KW1666'; -KW1667 : 'KW1667'; -KW1668 : 'KW1668'; -KW1669 : 'KW1669'; -KW1670 : 'KW1670'; -KW1671 : 'KW1671'; -KW1672 : 'KW1672'; -KW1673 : 'KW1673'; -KW1674 : 'KW1674'; -KW1675 : 'KW1675'; -KW1676 : 'KW1676'; -KW1677 : 'KW1677'; -KW1678 : 'KW1678'; -KW1679 : 'KW1679'; -KW1680 : 'KW1680'; -KW1681 : 'KW1681'; -KW1682 : 'KW1682'; -KW1683 : 'KW1683'; -KW1684 : 'KW1684'; -KW1685 : 'KW1685'; -KW1686 : 'KW1686'; -KW1687 : 'KW1687'; -KW1688 : 'KW1688'; -KW1689 : 'KW1689'; -KW1690 : 'KW1690'; -KW1691 : 'KW1691'; -KW1692 : 'KW1692'; -KW1693 : 'KW1693'; -KW1694 : 'KW1694'; -KW1695 : 'KW1695'; -KW1696 : 'KW1696'; -KW1697 : 'KW1697'; -KW1698 : 'KW1698'; -KW1699 : 'KW1699'; -KW1700 : 'KW1700'; -KW1701 : 'KW1701'; -KW1702 : 'KW1702'; -KW1703 : 'KW1703'; -KW1704 : 'KW1704'; -KW1705 : 'KW1705'; -KW1706 : 'KW1706'; -KW1707 : 'KW1707'; -KW1708 : 'KW1708'; -KW1709 : 'KW1709'; -KW1710 : 'KW1710'; -KW1711 : 'KW1711'; -KW1712 : 'KW1712'; -KW1713 : 'KW1713'; -KW1714 : 'KW1714'; -KW1715 : 'KW1715'; -KW1716 : 'KW1716'; -KW1717 : 'KW1717'; -KW1718 : 'KW1718'; -KW1719 : 'KW1719'; -KW1720 : 'KW1720'; -KW1721 : 'KW1721'; 
-KW1722 : 'KW1722'; -KW1723 : 'KW1723'; -KW1724 : 'KW1724'; -KW1725 : 'KW1725'; -KW1726 : 'KW1726'; -KW1727 : 'KW1727'; -KW1728 : 'KW1728'; -KW1729 : 'KW1729'; -KW1730 : 'KW1730'; -KW1731 : 'KW1731'; -KW1732 : 'KW1732'; -KW1733 : 'KW1733'; -KW1734 : 'KW1734'; -KW1735 : 'KW1735'; -KW1736 : 'KW1736'; -KW1737 : 'KW1737'; -KW1738 : 'KW1738'; -KW1739 : 'KW1739'; -KW1740 : 'KW1740'; -KW1741 : 'KW1741'; -KW1742 : 'KW1742'; -KW1743 : 'KW1743'; -KW1744 : 'KW1744'; -KW1745 : 'KW1745'; -KW1746 : 'KW1746'; -KW1747 : 'KW1747'; -KW1748 : 'KW1748'; -KW1749 : 'KW1749'; -KW1750 : 'KW1750'; -KW1751 : 'KW1751'; -KW1752 : 'KW1752'; -KW1753 : 'KW1753'; -KW1754 : 'KW1754'; -KW1755 : 'KW1755'; -KW1756 : 'KW1756'; -KW1757 : 'KW1757'; -KW1758 : 'KW1758'; -KW1759 : 'KW1759'; -KW1760 : 'KW1760'; -KW1761 : 'KW1761'; -KW1762 : 'KW1762'; -KW1763 : 'KW1763'; -KW1764 : 'KW1764'; -KW1765 : 'KW1765'; -KW1766 : 'KW1766'; -KW1767 : 'KW1767'; -KW1768 : 'KW1768'; -KW1769 : 'KW1769'; -KW1770 : 'KW1770'; -KW1771 : 'KW1771'; -KW1772 : 'KW1772'; -KW1773 : 'KW1773'; -KW1774 : 'KW1774'; -KW1775 : 'KW1775'; -KW1776 : 'KW1776'; -KW1777 : 'KW1777'; -KW1778 : 'KW1778'; -KW1779 : 'KW1779'; -KW1780 : 'KW1780'; -KW1781 : 'KW1781'; -KW1782 : 'KW1782'; -KW1783 : 'KW1783'; -KW1784 : 'KW1784'; -KW1785 : 'KW1785'; -KW1786 : 'KW1786'; -KW1787 : 'KW1787'; -KW1788 : 'KW1788'; -KW1789 : 'KW1789'; -KW1790 : 'KW1790'; -KW1791 : 'KW1791'; -KW1792 : 'KW1792'; -KW1793 : 'KW1793'; -KW1794 : 'KW1794'; -KW1795 : 'KW1795'; -KW1796 : 'KW1796'; -KW1797 : 'KW1797'; -KW1798 : 'KW1798'; -KW1799 : 'KW1799'; -KW1800 : 'KW1800'; -KW1801 : 'KW1801'; -KW1802 : 'KW1802'; -KW1803 : 'KW1803'; -KW1804 : 'KW1804'; -KW1805 : 'KW1805'; -KW1806 : 'KW1806'; -KW1807 : 'KW1807'; -KW1808 : 'KW1808'; -KW1809 : 'KW1809'; -KW1810 : 'KW1810'; -KW1811 : 'KW1811'; -KW1812 : 'KW1812'; -KW1813 : 'KW1813'; -KW1814 : 'KW1814'; -KW1815 : 'KW1815'; -KW1816 : 'KW1816'; -KW1817 : 'KW1817'; -KW1818 : 'KW1818'; -KW1819 : 'KW1819'; -KW1820 : 'KW1820'; -KW1821 : 'KW1821'; -KW1822 : 'KW1822'; -KW1823 : 'KW1823'; -KW1824 : 'KW1824'; -KW1825 : 'KW1825'; -KW1826 : 'KW1826'; -KW1827 : 'KW1827'; -KW1828 : 'KW1828'; -KW1829 : 'KW1829'; -KW1830 : 'KW1830'; -KW1831 : 'KW1831'; -KW1832 : 'KW1832'; -KW1833 : 'KW1833'; -KW1834 : 'KW1834'; -KW1835 : 'KW1835'; -KW1836 : 'KW1836'; -KW1837 : 'KW1837'; -KW1838 : 'KW1838'; -KW1839 : 'KW1839'; -KW1840 : 'KW1840'; -KW1841 : 'KW1841'; -KW1842 : 'KW1842'; -KW1843 : 'KW1843'; -KW1844 : 'KW1844'; -KW1845 : 'KW1845'; -KW1846 : 'KW1846'; -KW1847 : 'KW1847'; -KW1848 : 'KW1848'; -KW1849 : 'KW1849'; -KW1850 : 'KW1850'; -KW1851 : 'KW1851'; -KW1852 : 'KW1852'; -KW1853 : 'KW1853'; -KW1854 : 'KW1854'; -KW1855 : 'KW1855'; -KW1856 : 'KW1856'; -KW1857 : 'KW1857'; -KW1858 : 'KW1858'; -KW1859 : 'KW1859'; -KW1860 : 'KW1860'; -KW1861 : 'KW1861'; -KW1862 : 'KW1862'; -KW1863 : 'KW1863'; -KW1864 : 'KW1864'; -KW1865 : 'KW1865'; -KW1866 : 'KW1866'; -KW1867 : 'KW1867'; -KW1868 : 'KW1868'; -KW1869 : 'KW1869'; -KW1870 : 'KW1870'; -KW1871 : 'KW1871'; -KW1872 : 'KW1872'; -KW1873 : 'KW1873'; -KW1874 : 'KW1874'; -KW1875 : 'KW1875'; -KW1876 : 'KW1876'; -KW1877 : 'KW1877'; -KW1878 : 'KW1878'; -KW1879 : 'KW1879'; -KW1880 : 'KW1880'; -KW1881 : 'KW1881'; -KW1882 : 'KW1882'; -KW1883 : 'KW1883'; -KW1884 : 'KW1884'; -KW1885 : 'KW1885'; -KW1886 : 'KW1886'; -KW1887 : 'KW1887'; -KW1888 : 'KW1888'; -KW1889 : 'KW1889'; -KW1890 : 'KW1890'; -KW1891 : 'KW1891'; -KW1892 : 'KW1892'; -KW1893 : 'KW1893'; -KW1894 : 'KW1894'; -KW1895 : 'KW1895'; -KW1896 : 'KW1896'; -KW1897 : 'KW1897'; -KW1898 : 'KW1898'; -KW1899 : 
'KW1899'; -KW1900 : 'KW1900'; -KW1901 : 'KW1901'; -KW1902 : 'KW1902'; -KW1903 : 'KW1903'; -KW1904 : 'KW1904'; -KW1905 : 'KW1905'; -KW1906 : 'KW1906'; -KW1907 : 'KW1907'; -KW1908 : 'KW1908'; -KW1909 : 'KW1909'; -KW1910 : 'KW1910'; -KW1911 : 'KW1911'; -KW1912 : 'KW1912'; -KW1913 : 'KW1913'; -KW1914 : 'KW1914'; -KW1915 : 'KW1915'; -KW1916 : 'KW1916'; -KW1917 : 'KW1917'; -KW1918 : 'KW1918'; -KW1919 : 'KW1919'; -KW1920 : 'KW1920'; -KW1921 : 'KW1921'; -KW1922 : 'KW1922'; -KW1923 : 'KW1923'; -KW1924 : 'KW1924'; -KW1925 : 'KW1925'; -KW1926 : 'KW1926'; -KW1927 : 'KW1927'; -KW1928 : 'KW1928'; -KW1929 : 'KW1929'; -KW1930 : 'KW1930'; -KW1931 : 'KW1931'; -KW1932 : 'KW1932'; -KW1933 : 'KW1933'; -KW1934 : 'KW1934'; -KW1935 : 'KW1935'; -KW1936 : 'KW1936'; -KW1937 : 'KW1937'; -KW1938 : 'KW1938'; -KW1939 : 'KW1939'; -KW1940 : 'KW1940'; -KW1941 : 'KW1941'; -KW1942 : 'KW1942'; -KW1943 : 'KW1943'; -KW1944 : 'KW1944'; -KW1945 : 'KW1945'; -KW1946 : 'KW1946'; -KW1947 : 'KW1947'; -KW1948 : 'KW1948'; -KW1949 : 'KW1949'; -KW1950 : 'KW1950'; -KW1951 : 'KW1951'; -KW1952 : 'KW1952'; -KW1953 : 'KW1953'; -KW1954 : 'KW1954'; -KW1955 : 'KW1955'; -KW1956 : 'KW1956'; -KW1957 : 'KW1957'; -KW1958 : 'KW1958'; -KW1959 : 'KW1959'; -KW1960 : 'KW1960'; -KW1961 : 'KW1961'; -KW1962 : 'KW1962'; -KW1963 : 'KW1963'; -KW1964 : 'KW1964'; -KW1965 : 'KW1965'; -KW1966 : 'KW1966'; -KW1967 : 'KW1967'; -KW1968 : 'KW1968'; -KW1969 : 'KW1969'; -KW1970 : 'KW1970'; -KW1971 : 'KW1971'; -KW1972 : 'KW1972'; -KW1973 : 'KW1973'; -KW1974 : 'KW1974'; -KW1975 : 'KW1975'; -KW1976 : 'KW1976'; -KW1977 : 'KW1977'; -KW1978 : 'KW1978'; -KW1979 : 'KW1979'; -KW1980 : 'KW1980'; -KW1981 : 'KW1981'; -KW1982 : 'KW1982'; -KW1983 : 'KW1983'; -KW1984 : 'KW1984'; -KW1985 : 'KW1985'; -KW1986 : 'KW1986'; -KW1987 : 'KW1987'; -KW1988 : 'KW1988'; -KW1989 : 'KW1989'; -KW1990 : 'KW1990'; -KW1991 : 'KW1991'; -KW1992 : 'KW1992'; -KW1993 : 'KW1993'; -KW1994 : 'KW1994'; -KW1995 : 'KW1995'; -KW1996 : 'KW1996'; -KW1997 : 'KW1997'; -KW1998 : 'KW1998'; -KW1999 : 'KW1999'; -KW2000 : 'KW2000'; -KW2001 : 'KW2001'; -KW2002 : 'KW2002'; -KW2003 : 'KW2003'; -KW2004 : 'KW2004'; -KW2005 : 'KW2005'; -KW2006 : 'KW2006'; -KW2007 : 'KW2007'; -KW2008 : 'KW2008'; -KW2009 : 'KW2009'; -KW2010 : 'KW2010'; -KW2011 : 'KW2011'; -KW2012 : 'KW2012'; -KW2013 : 'KW2013'; -KW2014 : 'KW2014'; -KW2015 : 'KW2015'; -KW2016 : 'KW2016'; -KW2017 : 'KW2017'; -KW2018 : 'KW2018'; -KW2019 : 'KW2019'; -KW2020 : 'KW2020'; -KW2021 : 'KW2021'; -KW2022 : 'KW2022'; -KW2023 : 'KW2023'; -KW2024 : 'KW2024'; -KW2025 : 'KW2025'; -KW2026 : 'KW2026'; -KW2027 : 'KW2027'; -KW2028 : 'KW2028'; -KW2029 : 'KW2029'; -KW2030 : 'KW2030'; -KW2031 : 'KW2031'; -KW2032 : 'KW2032'; -KW2033 : 'KW2033'; -KW2034 : 'KW2034'; -KW2035 : 'KW2035'; -KW2036 : 'KW2036'; -KW2037 : 'KW2037'; -KW2038 : 'KW2038'; -KW2039 : 'KW2039'; -KW2040 : 'KW2040'; -KW2041 : 'KW2041'; -KW2042 : 'KW2042'; -KW2043 : 'KW2043'; -KW2044 : 'KW2044'; -KW2045 : 'KW2045'; -KW2046 : 'KW2046'; -KW2047 : 'KW2047'; -KW2048 : 'KW2048'; -KW2049 : 'KW2049'; -KW2050 : 'KW2050'; -KW2051 : 'KW2051'; -KW2052 : 'KW2052'; -KW2053 : 'KW2053'; -KW2054 : 'KW2054'; -KW2055 : 'KW2055'; -KW2056 : 'KW2056'; -KW2057 : 'KW2057'; -KW2058 : 'KW2058'; -KW2059 : 'KW2059'; -KW2060 : 'KW2060'; -KW2061 : 'KW2061'; -KW2062 : 'KW2062'; -KW2063 : 'KW2063'; -KW2064 : 'KW2064'; -KW2065 : 'KW2065'; -KW2066 : 'KW2066'; -KW2067 : 'KW2067'; -KW2068 : 'KW2068'; -KW2069 : 'KW2069'; -KW2070 : 'KW2070'; -KW2071 : 'KW2071'; -KW2072 : 'KW2072'; -KW2073 : 'KW2073'; -KW2074 : 'KW2074'; -KW2075 : 'KW2075'; -KW2076 : 'KW2076'; 
-KW2077 : 'KW2077'; -KW2078 : 'KW2078'; -KW2079 : 'KW2079'; -KW2080 : 'KW2080'; -KW2081 : 'KW2081'; -KW2082 : 'KW2082'; -KW2083 : 'KW2083'; -KW2084 : 'KW2084'; -KW2085 : 'KW2085'; -KW2086 : 'KW2086'; -KW2087 : 'KW2087'; -KW2088 : 'KW2088'; -KW2089 : 'KW2089'; -KW2090 : 'KW2090'; -KW2091 : 'KW2091'; -KW2092 : 'KW2092'; -KW2093 : 'KW2093'; -KW2094 : 'KW2094'; -KW2095 : 'KW2095'; -KW2096 : 'KW2096'; -KW2097 : 'KW2097'; -KW2098 : 'KW2098'; -KW2099 : 'KW2099'; -KW2100 : 'KW2100'; -KW2101 : 'KW2101'; -KW2102 : 'KW2102'; -KW2103 : 'KW2103'; -KW2104 : 'KW2104'; -KW2105 : 'KW2105'; -KW2106 : 'KW2106'; -KW2107 : 'KW2107'; -KW2108 : 'KW2108'; -KW2109 : 'KW2109'; -KW2110 : 'KW2110'; -KW2111 : 'KW2111'; -KW2112 : 'KW2112'; -KW2113 : 'KW2113'; -KW2114 : 'KW2114'; -KW2115 : 'KW2115'; -KW2116 : 'KW2116'; -KW2117 : 'KW2117'; -KW2118 : 'KW2118'; -KW2119 : 'KW2119'; -KW2120 : 'KW2120'; -KW2121 : 'KW2121'; -KW2122 : 'KW2122'; -KW2123 : 'KW2123'; -KW2124 : 'KW2124'; -KW2125 : 'KW2125'; -KW2126 : 'KW2126'; -KW2127 : 'KW2127'; -KW2128 : 'KW2128'; -KW2129 : 'KW2129'; -KW2130 : 'KW2130'; -KW2131 : 'KW2131'; -KW2132 : 'KW2132'; -KW2133 : 'KW2133'; -KW2134 : 'KW2134'; -KW2135 : 'KW2135'; -KW2136 : 'KW2136'; -KW2137 : 'KW2137'; -KW2138 : 'KW2138'; -KW2139 : 'KW2139'; -KW2140 : 'KW2140'; -KW2141 : 'KW2141'; -KW2142 : 'KW2142'; -KW2143 : 'KW2143'; -KW2144 : 'KW2144'; -KW2145 : 'KW2145'; -KW2146 : 'KW2146'; -KW2147 : 'KW2147'; -KW2148 : 'KW2148'; -KW2149 : 'KW2149'; -KW2150 : 'KW2150'; -KW2151 : 'KW2151'; -KW2152 : 'KW2152'; -KW2153 : 'KW2153'; -KW2154 : 'KW2154'; -KW2155 : 'KW2155'; -KW2156 : 'KW2156'; -KW2157 : 'KW2157'; -KW2158 : 'KW2158'; -KW2159 : 'KW2159'; -KW2160 : 'KW2160'; -KW2161 : 'KW2161'; -KW2162 : 'KW2162'; -KW2163 : 'KW2163'; -KW2164 : 'KW2164'; -KW2165 : 'KW2165'; -KW2166 : 'KW2166'; -KW2167 : 'KW2167'; -KW2168 : 'KW2168'; -KW2169 : 'KW2169'; -KW2170 : 'KW2170'; -KW2171 : 'KW2171'; -KW2172 : 'KW2172'; -KW2173 : 'KW2173'; -KW2174 : 'KW2174'; -KW2175 : 'KW2175'; -KW2176 : 'KW2176'; -KW2177 : 'KW2177'; -KW2178 : 'KW2178'; -KW2179 : 'KW2179'; -KW2180 : 'KW2180'; -KW2181 : 'KW2181'; -KW2182 : 'KW2182'; -KW2183 : 'KW2183'; -KW2184 : 'KW2184'; -KW2185 : 'KW2185'; -KW2186 : 'KW2186'; -KW2187 : 'KW2187'; -KW2188 : 'KW2188'; -KW2189 : 'KW2189'; -KW2190 : 'KW2190'; -KW2191 : 'KW2191'; -KW2192 : 'KW2192'; -KW2193 : 'KW2193'; -KW2194 : 'KW2194'; -KW2195 : 'KW2195'; -KW2196 : 'KW2196'; -KW2197 : 'KW2197'; -KW2198 : 'KW2198'; -KW2199 : 'KW2199'; -KW2200 : 'KW2200'; -KW2201 : 'KW2201'; -KW2202 : 'KW2202'; -KW2203 : 'KW2203'; -KW2204 : 'KW2204'; -KW2205 : 'KW2205'; -KW2206 : 'KW2206'; -KW2207 : 'KW2207'; -KW2208 : 'KW2208'; -KW2209 : 'KW2209'; -KW2210 : 'KW2210'; -KW2211 : 'KW2211'; -KW2212 : 'KW2212'; -KW2213 : 'KW2213'; -KW2214 : 'KW2214'; -KW2215 : 'KW2215'; -KW2216 : 'KW2216'; -KW2217 : 'KW2217'; -KW2218 : 'KW2218'; -KW2219 : 'KW2219'; -KW2220 : 'KW2220'; -KW2221 : 'KW2221'; -KW2222 : 'KW2222'; -KW2223 : 'KW2223'; -KW2224 : 'KW2224'; -KW2225 : 'KW2225'; -KW2226 : 'KW2226'; -KW2227 : 'KW2227'; -KW2228 : 'KW2228'; -KW2229 : 'KW2229'; -KW2230 : 'KW2230'; -KW2231 : 'KW2231'; -KW2232 : 'KW2232'; -KW2233 : 'KW2233'; -KW2234 : 'KW2234'; -KW2235 : 'KW2235'; -KW2236 : 'KW2236'; -KW2237 : 'KW2237'; -KW2238 : 'KW2238'; -KW2239 : 'KW2239'; -KW2240 : 'KW2240'; -KW2241 : 'KW2241'; -KW2242 : 'KW2242'; -KW2243 : 'KW2243'; -KW2244 : 'KW2244'; -KW2245 : 'KW2245'; -KW2246 : 'KW2246'; -KW2247 : 'KW2247'; -KW2248 : 'KW2248'; -KW2249 : 'KW2249'; -KW2250 : 'KW2250'; -KW2251 : 'KW2251'; -KW2252 : 'KW2252'; -KW2253 : 'KW2253'; -KW2254 : 
'KW2254'; -KW2255 : 'KW2255'; -KW2256 : 'KW2256'; -KW2257 : 'KW2257'; -KW2258 : 'KW2258'; -KW2259 : 'KW2259'; -KW2260 : 'KW2260'; -KW2261 : 'KW2261'; -KW2262 : 'KW2262'; -KW2263 : 'KW2263'; -KW2264 : 'KW2264'; -KW2265 : 'KW2265'; -KW2266 : 'KW2266'; -KW2267 : 'KW2267'; -KW2268 : 'KW2268'; -KW2269 : 'KW2269'; -KW2270 : 'KW2270'; -KW2271 : 'KW2271'; -KW2272 : 'KW2272'; -KW2273 : 'KW2273'; -KW2274 : 'KW2274'; -KW2275 : 'KW2275'; -KW2276 : 'KW2276'; -KW2277 : 'KW2277'; -KW2278 : 'KW2278'; -KW2279 : 'KW2279'; -KW2280 : 'KW2280'; -KW2281 : 'KW2281'; -KW2282 : 'KW2282'; -KW2283 : 'KW2283'; -KW2284 : 'KW2284'; -KW2285 : 'KW2285'; -KW2286 : 'KW2286'; -KW2287 : 'KW2287'; -KW2288 : 'KW2288'; -KW2289 : 'KW2289'; -KW2290 : 'KW2290'; -KW2291 : 'KW2291'; -KW2292 : 'KW2292'; -KW2293 : 'KW2293'; -KW2294 : 'KW2294'; -KW2295 : 'KW2295'; -KW2296 : 'KW2296'; -KW2297 : 'KW2297'; -KW2298 : 'KW2298'; -KW2299 : 'KW2299'; -KW2300 : 'KW2300'; -KW2301 : 'KW2301'; -KW2302 : 'KW2302'; -KW2303 : 'KW2303'; -KW2304 : 'KW2304'; -KW2305 : 'KW2305'; -KW2306 : 'KW2306'; -KW2307 : 'KW2307'; -KW2308 : 'KW2308'; -KW2309 : 'KW2309'; -KW2310 : 'KW2310'; -KW2311 : 'KW2311'; -KW2312 : 'KW2312'; -KW2313 : 'KW2313'; -KW2314 : 'KW2314'; -KW2315 : 'KW2315'; -KW2316 : 'KW2316'; -KW2317 : 'KW2317'; -KW2318 : 'KW2318'; -KW2319 : 'KW2319'; -KW2320 : 'KW2320'; -KW2321 : 'KW2321'; -KW2322 : 'KW2322'; -KW2323 : 'KW2323'; -KW2324 : 'KW2324'; -KW2325 : 'KW2325'; -KW2326 : 'KW2326'; -KW2327 : 'KW2327'; -KW2328 : 'KW2328'; -KW2329 : 'KW2329'; -KW2330 : 'KW2330'; -KW2331 : 'KW2331'; -KW2332 : 'KW2332'; -KW2333 : 'KW2333'; -KW2334 : 'KW2334'; -KW2335 : 'KW2335'; -KW2336 : 'KW2336'; -KW2337 : 'KW2337'; -KW2338 : 'KW2338'; -KW2339 : 'KW2339'; -KW2340 : 'KW2340'; -KW2341 : 'KW2341'; -KW2342 : 'KW2342'; -KW2343 : 'KW2343'; -KW2344 : 'KW2344'; -KW2345 : 'KW2345'; -KW2346 : 'KW2346'; -KW2347 : 'KW2347'; -KW2348 : 'KW2348'; -KW2349 : 'KW2349'; -KW2350 : 'KW2350'; -KW2351 : 'KW2351'; -KW2352 : 'KW2352'; -KW2353 : 'KW2353'; -KW2354 : 'KW2354'; -KW2355 : 'KW2355'; -KW2356 : 'KW2356'; -KW2357 : 'KW2357'; -KW2358 : 'KW2358'; -KW2359 : 'KW2359'; -KW2360 : 'KW2360'; -KW2361 : 'KW2361'; -KW2362 : 'KW2362'; -KW2363 : 'KW2363'; -KW2364 : 'KW2364'; -KW2365 : 'KW2365'; -KW2366 : 'KW2366'; -KW2367 : 'KW2367'; -KW2368 : 'KW2368'; -KW2369 : 'KW2369'; -KW2370 : 'KW2370'; -KW2371 : 'KW2371'; -KW2372 : 'KW2372'; -KW2373 : 'KW2373'; -KW2374 : 'KW2374'; -KW2375 : 'KW2375'; -KW2376 : 'KW2376'; -KW2377 : 'KW2377'; -KW2378 : 'KW2378'; -KW2379 : 'KW2379'; -KW2380 : 'KW2380'; -KW2381 : 'KW2381'; -KW2382 : 'KW2382'; -KW2383 : 'KW2383'; -KW2384 : 'KW2384'; -KW2385 : 'KW2385'; -KW2386 : 'KW2386'; -KW2387 : 'KW2387'; -KW2388 : 'KW2388'; -KW2389 : 'KW2389'; -KW2390 : 'KW2390'; -KW2391 : 'KW2391'; -KW2392 : 'KW2392'; -KW2393 : 'KW2393'; -KW2394 : 'KW2394'; -KW2395 : 'KW2395'; -KW2396 : 'KW2396'; -KW2397 : 'KW2397'; -KW2398 : 'KW2398'; -KW2399 : 'KW2399'; -KW2400 : 'KW2400'; -KW2401 : 'KW2401'; -KW2402 : 'KW2402'; -KW2403 : 'KW2403'; -KW2404 : 'KW2404'; -KW2405 : 'KW2405'; -KW2406 : 'KW2406'; -KW2407 : 'KW2407'; -KW2408 : 'KW2408'; -KW2409 : 'KW2409'; -KW2410 : 'KW2410'; -KW2411 : 'KW2411'; -KW2412 : 'KW2412'; -KW2413 : 'KW2413'; -KW2414 : 'KW2414'; -KW2415 : 'KW2415'; -KW2416 : 'KW2416'; -KW2417 : 'KW2417'; -KW2418 : 'KW2418'; -KW2419 : 'KW2419'; -KW2420 : 'KW2420'; -KW2421 : 'KW2421'; -KW2422 : 'KW2422'; -KW2423 : 'KW2423'; -KW2424 : 'KW2424'; -KW2425 : 'KW2425'; -KW2426 : 'KW2426'; -KW2427 : 'KW2427'; -KW2428 : 'KW2428'; -KW2429 : 'KW2429'; -KW2430 : 'KW2430'; -KW2431 : 'KW2431'; 
-KW2432 : 'KW2432'; -KW2433 : 'KW2433'; -KW2434 : 'KW2434'; -KW2435 : 'KW2435'; -KW2436 : 'KW2436'; -KW2437 : 'KW2437'; -KW2438 : 'KW2438'; -KW2439 : 'KW2439'; -KW2440 : 'KW2440'; -KW2441 : 'KW2441'; -KW2442 : 'KW2442'; -KW2443 : 'KW2443'; -KW2444 : 'KW2444'; -KW2445 : 'KW2445'; -KW2446 : 'KW2446'; -KW2447 : 'KW2447'; -KW2448 : 'KW2448'; -KW2449 : 'KW2449'; -KW2450 : 'KW2450'; -KW2451 : 'KW2451'; -KW2452 : 'KW2452'; -KW2453 : 'KW2453'; -KW2454 : 'KW2454'; -KW2455 : 'KW2455'; -KW2456 : 'KW2456'; -KW2457 : 'KW2457'; -KW2458 : 'KW2458'; -KW2459 : 'KW2459'; -KW2460 : 'KW2460'; -KW2461 : 'KW2461'; -KW2462 : 'KW2462'; -KW2463 : 'KW2463'; -KW2464 : 'KW2464'; -KW2465 : 'KW2465'; -KW2466 : 'KW2466'; -KW2467 : 'KW2467'; -KW2468 : 'KW2468'; -KW2469 : 'KW2469'; -KW2470 : 'KW2470'; -KW2471 : 'KW2471'; -KW2472 : 'KW2472'; -KW2473 : 'KW2473'; -KW2474 : 'KW2474'; -KW2475 : 'KW2475'; -KW2476 : 'KW2476'; -KW2477 : 'KW2477'; -KW2478 : 'KW2478'; -KW2479 : 'KW2479'; -KW2480 : 'KW2480'; -KW2481 : 'KW2481'; -KW2482 : 'KW2482'; -KW2483 : 'KW2483'; -KW2484 : 'KW2484'; -KW2485 : 'KW2485'; -KW2486 : 'KW2486'; -KW2487 : 'KW2487'; -KW2488 : 'KW2488'; -KW2489 : 'KW2489'; -KW2490 : 'KW2490'; -KW2491 : 'KW2491'; -KW2492 : 'KW2492'; -KW2493 : 'KW2493'; -KW2494 : 'KW2494'; -KW2495 : 'KW2495'; -KW2496 : 'KW2496'; -KW2497 : 'KW2497'; -KW2498 : 'KW2498'; -KW2499 : 'KW2499'; -KW2500 : 'KW2500'; -KW2501 : 'KW2501'; -KW2502 : 'KW2502'; -KW2503 : 'KW2503'; -KW2504 : 'KW2504'; -KW2505 : 'KW2505'; -KW2506 : 'KW2506'; -KW2507 : 'KW2507'; -KW2508 : 'KW2508'; -KW2509 : 'KW2509'; -KW2510 : 'KW2510'; -KW2511 : 'KW2511'; -KW2512 : 'KW2512'; -KW2513 : 'KW2513'; -KW2514 : 'KW2514'; -KW2515 : 'KW2515'; -KW2516 : 'KW2516'; -KW2517 : 'KW2517'; -KW2518 : 'KW2518'; -KW2519 : 'KW2519'; -KW2520 : 'KW2520'; -KW2521 : 'KW2521'; -KW2522 : 'KW2522'; -KW2523 : 'KW2523'; -KW2524 : 'KW2524'; -KW2525 : 'KW2525'; -KW2526 : 'KW2526'; -KW2527 : 'KW2527'; -KW2528 : 'KW2528'; -KW2529 : 'KW2529'; -KW2530 : 'KW2530'; -KW2531 : 'KW2531'; -KW2532 : 'KW2532'; -KW2533 : 'KW2533'; -KW2534 : 'KW2534'; -KW2535 : 'KW2535'; -KW2536 : 'KW2536'; -KW2537 : 'KW2537'; -KW2538 : 'KW2538'; -KW2539 : 'KW2539'; -KW2540 : 'KW2540'; -KW2541 : 'KW2541'; -KW2542 : 'KW2542'; -KW2543 : 'KW2543'; -KW2544 : 'KW2544'; -KW2545 : 'KW2545'; -KW2546 : 'KW2546'; -KW2547 : 'KW2547'; -KW2548 : 'KW2548'; -KW2549 : 'KW2549'; -KW2550 : 'KW2550'; -KW2551 : 'KW2551'; -KW2552 : 'KW2552'; -KW2553 : 'KW2553'; -KW2554 : 'KW2554'; -KW2555 : 'KW2555'; -KW2556 : 'KW2556'; -KW2557 : 'KW2557'; -KW2558 : 'KW2558'; -KW2559 : 'KW2559'; -KW2560 : 'KW2560'; -KW2561 : 'KW2561'; -KW2562 : 'KW2562'; -KW2563 : 'KW2563'; -KW2564 : 'KW2564'; -KW2565 : 'KW2565'; -KW2566 : 'KW2566'; -KW2567 : 'KW2567'; -KW2568 : 'KW2568'; -KW2569 : 'KW2569'; -KW2570 : 'KW2570'; -KW2571 : 'KW2571'; -KW2572 : 'KW2572'; -KW2573 : 'KW2573'; -KW2574 : 'KW2574'; -KW2575 : 'KW2575'; -KW2576 : 'KW2576'; -KW2577 : 'KW2577'; -KW2578 : 'KW2578'; -KW2579 : 'KW2579'; -KW2580 : 'KW2580'; -KW2581 : 'KW2581'; -KW2582 : 'KW2582'; -KW2583 : 'KW2583'; -KW2584 : 'KW2584'; -KW2585 : 'KW2585'; -KW2586 : 'KW2586'; -KW2587 : 'KW2587'; -KW2588 : 'KW2588'; -KW2589 : 'KW2589'; -KW2590 : 'KW2590'; -KW2591 : 'KW2591'; -KW2592 : 'KW2592'; -KW2593 : 'KW2593'; -KW2594 : 'KW2594'; -KW2595 : 'KW2595'; -KW2596 : 'KW2596'; -KW2597 : 'KW2597'; -KW2598 : 'KW2598'; -KW2599 : 'KW2599'; -KW2600 : 'KW2600'; -KW2601 : 'KW2601'; -KW2602 : 'KW2602'; -KW2603 : 'KW2603'; -KW2604 : 'KW2604'; -KW2605 : 'KW2605'; -KW2606 : 'KW2606'; -KW2607 : 'KW2607'; -KW2608 : 'KW2608'; -KW2609 : 
'KW2609'; -KW2610 : 'KW2610'; -KW2611 : 'KW2611'; -KW2612 : 'KW2612'; -KW2613 : 'KW2613'; -KW2614 : 'KW2614'; -KW2615 : 'KW2615'; -KW2616 : 'KW2616'; -KW2617 : 'KW2617'; -KW2618 : 'KW2618'; -KW2619 : 'KW2619'; -KW2620 : 'KW2620'; -KW2621 : 'KW2621'; -KW2622 : 'KW2622'; -KW2623 : 'KW2623'; -KW2624 : 'KW2624'; -KW2625 : 'KW2625'; -KW2626 : 'KW2626'; -KW2627 : 'KW2627'; -KW2628 : 'KW2628'; -KW2629 : 'KW2629'; -KW2630 : 'KW2630'; -KW2631 : 'KW2631'; -KW2632 : 'KW2632'; -KW2633 : 'KW2633'; -KW2634 : 'KW2634'; -KW2635 : 'KW2635'; -KW2636 : 'KW2636'; -KW2637 : 'KW2637'; -KW2638 : 'KW2638'; -KW2639 : 'KW2639'; -KW2640 : 'KW2640'; -KW2641 : 'KW2641'; -KW2642 : 'KW2642'; -KW2643 : 'KW2643'; -KW2644 : 'KW2644'; -KW2645 : 'KW2645'; -KW2646 : 'KW2646'; -KW2647 : 'KW2647'; -KW2648 : 'KW2648'; -KW2649 : 'KW2649'; -KW2650 : 'KW2650'; -KW2651 : 'KW2651'; -KW2652 : 'KW2652'; -KW2653 : 'KW2653'; -KW2654 : 'KW2654'; -KW2655 : 'KW2655'; -KW2656 : 'KW2656'; -KW2657 : 'KW2657'; -KW2658 : 'KW2658'; -KW2659 : 'KW2659'; -KW2660 : 'KW2660'; -KW2661 : 'KW2661'; -KW2662 : 'KW2662'; -KW2663 : 'KW2663'; -KW2664 : 'KW2664'; -KW2665 : 'KW2665'; -KW2666 : 'KW2666'; -KW2667 : 'KW2667'; -KW2668 : 'KW2668'; -KW2669 : 'KW2669'; -KW2670 : 'KW2670'; -KW2671 : 'KW2671'; -KW2672 : 'KW2672'; -KW2673 : 'KW2673'; -KW2674 : 'KW2674'; -KW2675 : 'KW2675'; -KW2676 : 'KW2676'; -KW2677 : 'KW2677'; -KW2678 : 'KW2678'; -KW2679 : 'KW2679'; -KW2680 : 'KW2680'; -KW2681 : 'KW2681'; -KW2682 : 'KW2682'; -KW2683 : 'KW2683'; -KW2684 : 'KW2684'; -KW2685 : 'KW2685'; -KW2686 : 'KW2686'; -KW2687 : 'KW2687'; -KW2688 : 'KW2688'; -KW2689 : 'KW2689'; -KW2690 : 'KW2690'; -KW2691 : 'KW2691'; -KW2692 : 'KW2692'; -KW2693 : 'KW2693'; -KW2694 : 'KW2694'; -KW2695 : 'KW2695'; -KW2696 : 'KW2696'; -KW2697 : 'KW2697'; -KW2698 : 'KW2698'; -KW2699 : 'KW2699'; -KW2700 : 'KW2700'; -KW2701 : 'KW2701'; -KW2702 : 'KW2702'; -KW2703 : 'KW2703'; -KW2704 : 'KW2704'; -KW2705 : 'KW2705'; -KW2706 : 'KW2706'; -KW2707 : 'KW2707'; -KW2708 : 'KW2708'; -KW2709 : 'KW2709'; -KW2710 : 'KW2710'; -KW2711 : 'KW2711'; -KW2712 : 'KW2712'; -KW2713 : 'KW2713'; -KW2714 : 'KW2714'; -KW2715 : 'KW2715'; -KW2716 : 'KW2716'; -KW2717 : 'KW2717'; -KW2718 : 'KW2718'; -KW2719 : 'KW2719'; -KW2720 : 'KW2720'; -KW2721 : 'KW2721'; -KW2722 : 'KW2722'; -KW2723 : 'KW2723'; -KW2724 : 'KW2724'; -KW2725 : 'KW2725'; -KW2726 : 'KW2726'; -KW2727 : 'KW2727'; -KW2728 : 'KW2728'; -KW2729 : 'KW2729'; -KW2730 : 'KW2730'; -KW2731 : 'KW2731'; -KW2732 : 'KW2732'; -KW2733 : 'KW2733'; -KW2734 : 'KW2734'; -KW2735 : 'KW2735'; -KW2736 : 'KW2736'; -KW2737 : 'KW2737'; -KW2738 : 'KW2738'; -KW2739 : 'KW2739'; -KW2740 : 'KW2740'; -KW2741 : 'KW2741'; -KW2742 : 'KW2742'; -KW2743 : 'KW2743'; -KW2744 : 'KW2744'; -KW2745 : 'KW2745'; -KW2746 : 'KW2746'; -KW2747 : 'KW2747'; -KW2748 : 'KW2748'; -KW2749 : 'KW2749'; -KW2750 : 'KW2750'; -KW2751 : 'KW2751'; -KW2752 : 'KW2752'; -KW2753 : 'KW2753'; -KW2754 : 'KW2754'; -KW2755 : 'KW2755'; -KW2756 : 'KW2756'; -KW2757 : 'KW2757'; -KW2758 : 'KW2758'; -KW2759 : 'KW2759'; -KW2760 : 'KW2760'; -KW2761 : 'KW2761'; -KW2762 : 'KW2762'; -KW2763 : 'KW2763'; -KW2764 : 'KW2764'; -KW2765 : 'KW2765'; -KW2766 : 'KW2766'; -KW2767 : 'KW2767'; -KW2768 : 'KW2768'; -KW2769 : 'KW2769'; -KW2770 : 'KW2770'; -KW2771 : 'KW2771'; -KW2772 : 'KW2772'; -KW2773 : 'KW2773'; -KW2774 : 'KW2774'; -KW2775 : 'KW2775'; -KW2776 : 'KW2776'; -KW2777 : 'KW2777'; -KW2778 : 'KW2778'; -KW2779 : 'KW2779'; -KW2780 : 'KW2780'; -KW2781 : 'KW2781'; -KW2782 : 'KW2782'; -KW2783 : 'KW2783'; -KW2784 : 'KW2784'; -KW2785 : 'KW2785'; -KW2786 : 'KW2786'; 
-KW2787 : 'KW2787'; -KW2788 : 'KW2788'; -KW2789 : 'KW2789'; -KW2790 : 'KW2790'; -KW2791 : 'KW2791'; -KW2792 : 'KW2792'; -KW2793 : 'KW2793'; -KW2794 : 'KW2794'; -KW2795 : 'KW2795'; -KW2796 : 'KW2796'; -KW2797 : 'KW2797'; -KW2798 : 'KW2798'; -KW2799 : 'KW2799'; -KW2800 : 'KW2800'; -KW2801 : 'KW2801'; -KW2802 : 'KW2802'; -KW2803 : 'KW2803'; -KW2804 : 'KW2804'; -KW2805 : 'KW2805'; -KW2806 : 'KW2806'; -KW2807 : 'KW2807'; -KW2808 : 'KW2808'; -KW2809 : 'KW2809'; -KW2810 : 'KW2810'; -KW2811 : 'KW2811'; -KW2812 : 'KW2812'; -KW2813 : 'KW2813'; -KW2814 : 'KW2814'; -KW2815 : 'KW2815'; -KW2816 : 'KW2816'; -KW2817 : 'KW2817'; -KW2818 : 'KW2818'; -KW2819 : 'KW2819'; -KW2820 : 'KW2820'; -KW2821 : 'KW2821'; -KW2822 : 'KW2822'; -KW2823 : 'KW2823'; -KW2824 : 'KW2824'; -KW2825 : 'KW2825'; -KW2826 : 'KW2826'; -KW2827 : 'KW2827'; -KW2828 : 'KW2828'; -KW2829 : 'KW2829'; -KW2830 : 'KW2830'; -KW2831 : 'KW2831'; -KW2832 : 'KW2832'; -KW2833 : 'KW2833'; -KW2834 : 'KW2834'; -KW2835 : 'KW2835'; -KW2836 : 'KW2836'; -KW2837 : 'KW2837'; -KW2838 : 'KW2838'; -KW2839 : 'KW2839'; -KW2840 : 'KW2840'; -KW2841 : 'KW2841'; -KW2842 : 'KW2842'; -KW2843 : 'KW2843'; -KW2844 : 'KW2844'; -KW2845 : 'KW2845'; -KW2846 : 'KW2846'; -KW2847 : 'KW2847'; -KW2848 : 'KW2848'; -KW2849 : 'KW2849'; -KW2850 : 'KW2850'; -KW2851 : 'KW2851'; -KW2852 : 'KW2852'; -KW2853 : 'KW2853'; -KW2854 : 'KW2854'; -KW2855 : 'KW2855'; -KW2856 : 'KW2856'; -KW2857 : 'KW2857'; -KW2858 : 'KW2858'; -KW2859 : 'KW2859'; -KW2860 : 'KW2860'; -KW2861 : 'KW2861'; -KW2862 : 'KW2862'; -KW2863 : 'KW2863'; -KW2864 : 'KW2864'; -KW2865 : 'KW2865'; -KW2866 : 'KW2866'; -KW2867 : 'KW2867'; -KW2868 : 'KW2868'; -KW2869 : 'KW2869'; -KW2870 : 'KW2870'; -KW2871 : 'KW2871'; -KW2872 : 'KW2872'; -KW2873 : 'KW2873'; -KW2874 : 'KW2874'; -KW2875 : 'KW2875'; -KW2876 : 'KW2876'; -KW2877 : 'KW2877'; -KW2878 : 'KW2878'; -KW2879 : 'KW2879'; -KW2880 : 'KW2880'; -KW2881 : 'KW2881'; -KW2882 : 'KW2882'; -KW2883 : 'KW2883'; -KW2884 : 'KW2884'; -KW2885 : 'KW2885'; -KW2886 : 'KW2886'; -KW2887 : 'KW2887'; -KW2888 : 'KW2888'; -KW2889 : 'KW2889'; -KW2890 : 'KW2890'; -KW2891 : 'KW2891'; -KW2892 : 'KW2892'; -KW2893 : 'KW2893'; -KW2894 : 'KW2894'; -KW2895 : 'KW2895'; -KW2896 : 'KW2896'; -KW2897 : 'KW2897'; -KW2898 : 'KW2898'; -KW2899 : 'KW2899'; -KW2900 : 'KW2900'; -KW2901 : 'KW2901'; -KW2902 : 'KW2902'; -KW2903 : 'KW2903'; -KW2904 : 'KW2904'; -KW2905 : 'KW2905'; -KW2906 : 'KW2906'; -KW2907 : 'KW2907'; -KW2908 : 'KW2908'; -KW2909 : 'KW2909'; -KW2910 : 'KW2910'; -KW2911 : 'KW2911'; -KW2912 : 'KW2912'; -KW2913 : 'KW2913'; -KW2914 : 'KW2914'; -KW2915 : 'KW2915'; -KW2916 : 'KW2916'; -KW2917 : 'KW2917'; -KW2918 : 'KW2918'; -KW2919 : 'KW2919'; -KW2920 : 'KW2920'; -KW2921 : 'KW2921'; -KW2922 : 'KW2922'; -KW2923 : 'KW2923'; -KW2924 : 'KW2924'; -KW2925 : 'KW2925'; -KW2926 : 'KW2926'; -KW2927 : 'KW2927'; -KW2928 : 'KW2928'; -KW2929 : 'KW2929'; -KW2930 : 'KW2930'; -KW2931 : 'KW2931'; -KW2932 : 'KW2932'; -KW2933 : 'KW2933'; -KW2934 : 'KW2934'; -KW2935 : 'KW2935'; -KW2936 : 'KW2936'; -KW2937 : 'KW2937'; -KW2938 : 'KW2938'; -KW2939 : 'KW2939'; -KW2940 : 'KW2940'; -KW2941 : 'KW2941'; -KW2942 : 'KW2942'; -KW2943 : 'KW2943'; -KW2944 : 'KW2944'; -KW2945 : 'KW2945'; -KW2946 : 'KW2946'; -KW2947 : 'KW2947'; -KW2948 : 'KW2948'; -KW2949 : 'KW2949'; -KW2950 : 'KW2950'; -KW2951 : 'KW2951'; -KW2952 : 'KW2952'; -KW2953 : 'KW2953'; -KW2954 : 'KW2954'; -KW2955 : 'KW2955'; -KW2956 : 'KW2956'; -KW2957 : 'KW2957'; -KW2958 : 'KW2958'; -KW2959 : 'KW2959'; -KW2960 : 'KW2960'; -KW2961 : 'KW2961'; -KW2962 : 'KW2962'; -KW2963 : 'KW2963'; -KW2964 : 
'KW2964'; -KW2965 : 'KW2965'; -KW2966 : 'KW2966'; -KW2967 : 'KW2967'; -KW2968 : 'KW2968'; -KW2969 : 'KW2969'; -KW2970 : 'KW2970'; -KW2971 : 'KW2971'; -KW2972 : 'KW2972'; -KW2973 : 'KW2973'; -KW2974 : 'KW2974'; -KW2975 : 'KW2975'; -KW2976 : 'KW2976'; -KW2977 : 'KW2977'; -KW2978 : 'KW2978'; -KW2979 : 'KW2979'; -KW2980 : 'KW2980'; -KW2981 : 'KW2981'; -KW2982 : 'KW2982'; -KW2983 : 'KW2983'; -KW2984 : 'KW2984'; -KW2985 : 'KW2985'; -KW2986 : 'KW2986'; -KW2987 : 'KW2987'; -KW2988 : 'KW2988'; -KW2989 : 'KW2989'; -KW2990 : 'KW2990'; -KW2991 : 'KW2991'; -KW2992 : 'KW2992'; -KW2993 : 'KW2993'; -KW2994 : 'KW2994'; -KW2995 : 'KW2995'; -KW2996 : 'KW2996'; -KW2997 : 'KW2997'; -KW2998 : 'KW2998'; -KW2999 : 'KW2999'; -KW3000 : 'KW3000'; -KW3001 : 'KW3001'; -KW3002 : 'KW3002'; -KW3003 : 'KW3003'; -KW3004 : 'KW3004'; -KW3005 : 'KW3005'; -KW3006 : 'KW3006'; -KW3007 : 'KW3007'; -KW3008 : 'KW3008'; -KW3009 : 'KW3009'; -KW3010 : 'KW3010'; -KW3011 : 'KW3011'; -KW3012 : 'KW3012'; -KW3013 : 'KW3013'; -KW3014 : 'KW3014'; -KW3015 : 'KW3015'; -KW3016 : 'KW3016'; -KW3017 : 'KW3017'; -KW3018 : 'KW3018'; -KW3019 : 'KW3019'; -KW3020 : 'KW3020'; -KW3021 : 'KW3021'; -KW3022 : 'KW3022'; -KW3023 : 'KW3023'; -KW3024 : 'KW3024'; -KW3025 : 'KW3025'; -KW3026 : 'KW3026'; -KW3027 : 'KW3027'; -KW3028 : 'KW3028'; -KW3029 : 'KW3029'; -KW3030 : 'KW3030'; -KW3031 : 'KW3031'; -KW3032 : 'KW3032'; -KW3033 : 'KW3033'; -KW3034 : 'KW3034'; -KW3035 : 'KW3035'; -KW3036 : 'KW3036'; -KW3037 : 'KW3037'; -KW3038 : 'KW3038'; -KW3039 : 'KW3039'; -KW3040 : 'KW3040'; -KW3041 : 'KW3041'; -KW3042 : 'KW3042'; -KW3043 : 'KW3043'; -KW3044 : 'KW3044'; -KW3045 : 'KW3045'; -KW3046 : 'KW3046'; -KW3047 : 'KW3047'; -KW3048 : 'KW3048'; -KW3049 : 'KW3049'; -KW3050 : 'KW3050'; -KW3051 : 'KW3051'; -KW3052 : 'KW3052'; -KW3053 : 'KW3053'; -KW3054 : 'KW3054'; -KW3055 : 'KW3055'; -KW3056 : 'KW3056'; -KW3057 : 'KW3057'; -KW3058 : 'KW3058'; -KW3059 : 'KW3059'; -KW3060 : 'KW3060'; -KW3061 : 'KW3061'; -KW3062 : 'KW3062'; -KW3063 : 'KW3063'; -KW3064 : 'KW3064'; -KW3065 : 'KW3065'; -KW3066 : 'KW3066'; -KW3067 : 'KW3067'; -KW3068 : 'KW3068'; -KW3069 : 'KW3069'; -KW3070 : 'KW3070'; -KW3071 : 'KW3071'; -KW3072 : 'KW3072'; -KW3073 : 'KW3073'; -KW3074 : 'KW3074'; -KW3075 : 'KW3075'; -KW3076 : 'KW3076'; -KW3077 : 'KW3077'; -KW3078 : 'KW3078'; -KW3079 : 'KW3079'; -KW3080 : 'KW3080'; -KW3081 : 'KW3081'; -KW3082 : 'KW3082'; -KW3083 : 'KW3083'; -KW3084 : 'KW3084'; -KW3085 : 'KW3085'; -KW3086 : 'KW3086'; -KW3087 : 'KW3087'; -KW3088 : 'KW3088'; -KW3089 : 'KW3089'; -KW3090 : 'KW3090'; -KW3091 : 'KW3091'; -KW3092 : 'KW3092'; -KW3093 : 'KW3093'; -KW3094 : 'KW3094'; -KW3095 : 'KW3095'; -KW3096 : 'KW3096'; -KW3097 : 'KW3097'; -KW3098 : 'KW3098'; -KW3099 : 'KW3099'; -KW3100 : 'KW3100'; -KW3101 : 'KW3101'; -KW3102 : 'KW3102'; -KW3103 : 'KW3103'; -KW3104 : 'KW3104'; -KW3105 : 'KW3105'; -KW3106 : 'KW3106'; -KW3107 : 'KW3107'; -KW3108 : 'KW3108'; -KW3109 : 'KW3109'; -KW3110 : 'KW3110'; -KW3111 : 'KW3111'; -KW3112 : 'KW3112'; -KW3113 : 'KW3113'; -KW3114 : 'KW3114'; -KW3115 : 'KW3115'; -KW3116 : 'KW3116'; -KW3117 : 'KW3117'; -KW3118 : 'KW3118'; -KW3119 : 'KW3119'; -KW3120 : 'KW3120'; -KW3121 : 'KW3121'; -KW3122 : 'KW3122'; -KW3123 : 'KW3123'; -KW3124 : 'KW3124'; -KW3125 : 'KW3125'; -KW3126 : 'KW3126'; -KW3127 : 'KW3127'; -KW3128 : 'KW3128'; -KW3129 : 'KW3129'; -KW3130 : 'KW3130'; -KW3131 : 'KW3131'; -KW3132 : 'KW3132'; -KW3133 : 'KW3133'; -KW3134 : 'KW3134'; -KW3135 : 'KW3135'; -KW3136 : 'KW3136'; -KW3137 : 'KW3137'; -KW3138 : 'KW3138'; -KW3139 : 'KW3139'; -KW3140 : 'KW3140'; -KW3141 : 'KW3141'; 
-KW3142 : 'KW3142'; -KW3143 : 'KW3143'; -KW3144 : 'KW3144'; -KW3145 : 'KW3145'; -KW3146 : 'KW3146'; -KW3147 : 'KW3147'; -KW3148 : 'KW3148'; -KW3149 : 'KW3149'; -KW3150 : 'KW3150'; -KW3151 : 'KW3151'; -KW3152 : 'KW3152'; -KW3153 : 'KW3153'; -KW3154 : 'KW3154'; -KW3155 : 'KW3155'; -KW3156 : 'KW3156'; -KW3157 : 'KW3157'; -KW3158 : 'KW3158'; -KW3159 : 'KW3159'; -KW3160 : 'KW3160'; -KW3161 : 'KW3161'; -KW3162 : 'KW3162'; -KW3163 : 'KW3163'; -KW3164 : 'KW3164'; -KW3165 : 'KW3165'; -KW3166 : 'KW3166'; -KW3167 : 'KW3167'; -KW3168 : 'KW3168'; -KW3169 : 'KW3169'; -KW3170 : 'KW3170'; -KW3171 : 'KW3171'; -KW3172 : 'KW3172'; -KW3173 : 'KW3173'; -KW3174 : 'KW3174'; -KW3175 : 'KW3175'; -KW3176 : 'KW3176'; -KW3177 : 'KW3177'; -KW3178 : 'KW3178'; -KW3179 : 'KW3179'; -KW3180 : 'KW3180'; -KW3181 : 'KW3181'; -KW3182 : 'KW3182'; -KW3183 : 'KW3183'; -KW3184 : 'KW3184'; -KW3185 : 'KW3185'; -KW3186 : 'KW3186'; -KW3187 : 'KW3187'; -KW3188 : 'KW3188'; -KW3189 : 'KW3189'; -KW3190 : 'KW3190'; -KW3191 : 'KW3191'; -KW3192 : 'KW3192'; -KW3193 : 'KW3193'; -KW3194 : 'KW3194'; -KW3195 : 'KW3195'; -KW3196 : 'KW3196'; -KW3197 : 'KW3197'; -KW3198 : 'KW3198'; -KW3199 : 'KW3199'; -KW3200 : 'KW3200'; -KW3201 : 'KW3201'; -KW3202 : 'KW3202'; -KW3203 : 'KW3203'; -KW3204 : 'KW3204'; -KW3205 : 'KW3205'; -KW3206 : 'KW3206'; -KW3207 : 'KW3207'; -KW3208 : 'KW3208'; -KW3209 : 'KW3209'; -KW3210 : 'KW3210'; -KW3211 : 'KW3211'; -KW3212 : 'KW3212'; -KW3213 : 'KW3213'; -KW3214 : 'KW3214'; -KW3215 : 'KW3215'; -KW3216 : 'KW3216'; -KW3217 : 'KW3217'; -KW3218 : 'KW3218'; -KW3219 : 'KW3219'; -KW3220 : 'KW3220'; -KW3221 : 'KW3221'; -KW3222 : 'KW3222'; -KW3223 : 'KW3223'; -KW3224 : 'KW3224'; -KW3225 : 'KW3225'; -KW3226 : 'KW3226'; -KW3227 : 'KW3227'; -KW3228 : 'KW3228'; -KW3229 : 'KW3229'; -KW3230 : 'KW3230'; -KW3231 : 'KW3231'; -KW3232 : 'KW3232'; -KW3233 : 'KW3233'; -KW3234 : 'KW3234'; -KW3235 : 'KW3235'; -KW3236 : 'KW3236'; -KW3237 : 'KW3237'; -KW3238 : 'KW3238'; -KW3239 : 'KW3239'; -KW3240 : 'KW3240'; -KW3241 : 'KW3241'; -KW3242 : 'KW3242'; -KW3243 : 'KW3243'; -KW3244 : 'KW3244'; -KW3245 : 'KW3245'; -KW3246 : 'KW3246'; -KW3247 : 'KW3247'; -KW3248 : 'KW3248'; -KW3249 : 'KW3249'; -KW3250 : 'KW3250'; -KW3251 : 'KW3251'; -KW3252 : 'KW3252'; -KW3253 : 'KW3253'; -KW3254 : 'KW3254'; -KW3255 : 'KW3255'; -KW3256 : 'KW3256'; -KW3257 : 'KW3257'; -KW3258 : 'KW3258'; -KW3259 : 'KW3259'; -KW3260 : 'KW3260'; -KW3261 : 'KW3261'; -KW3262 : 'KW3262'; -KW3263 : 'KW3263'; -KW3264 : 'KW3264'; -KW3265 : 'KW3265'; -KW3266 : 'KW3266'; -KW3267 : 'KW3267'; -KW3268 : 'KW3268'; -KW3269 : 'KW3269'; -KW3270 : 'KW3270'; -KW3271 : 'KW3271'; -KW3272 : 'KW3272'; -KW3273 : 'KW3273'; -KW3274 : 'KW3274'; -KW3275 : 'KW3275'; -KW3276 : 'KW3276'; -KW3277 : 'KW3277'; -KW3278 : 'KW3278'; -KW3279 : 'KW3279'; -KW3280 : 'KW3280'; -KW3281 : 'KW3281'; -KW3282 : 'KW3282'; -KW3283 : 'KW3283'; -KW3284 : 'KW3284'; -KW3285 : 'KW3285'; -KW3286 : 'KW3286'; -KW3287 : 'KW3287'; -KW3288 : 'KW3288'; -KW3289 : 'KW3289'; -KW3290 : 'KW3290'; -KW3291 : 'KW3291'; -KW3292 : 'KW3292'; -KW3293 : 'KW3293'; -KW3294 : 'KW3294'; -KW3295 : 'KW3295'; -KW3296 : 'KW3296'; -KW3297 : 'KW3297'; -KW3298 : 'KW3298'; -KW3299 : 'KW3299'; -KW3300 : 'KW3300'; -KW3301 : 'KW3301'; -KW3302 : 'KW3302'; -KW3303 : 'KW3303'; -KW3304 : 'KW3304'; -KW3305 : 'KW3305'; -KW3306 : 'KW3306'; -KW3307 : 'KW3307'; -KW3308 : 'KW3308'; -KW3309 : 'KW3309'; -KW3310 : 'KW3310'; -KW3311 : 'KW3311'; -KW3312 : 'KW3312'; -KW3313 : 'KW3313'; -KW3314 : 'KW3314'; -KW3315 : 'KW3315'; -KW3316 : 'KW3316'; -KW3317 : 'KW3317'; -KW3318 : 'KW3318'; -KW3319 : 
'KW3319'; -KW3320 : 'KW3320'; -KW3321 : 'KW3321'; -KW3322 : 'KW3322'; -KW3323 : 'KW3323'; -KW3324 : 'KW3324'; -KW3325 : 'KW3325'; -KW3326 : 'KW3326'; -KW3327 : 'KW3327'; -KW3328 : 'KW3328'; -KW3329 : 'KW3329'; -KW3330 : 'KW3330'; -KW3331 : 'KW3331'; -KW3332 : 'KW3332'; -KW3333 : 'KW3333'; -KW3334 : 'KW3334'; -KW3335 : 'KW3335'; -KW3336 : 'KW3336'; -KW3337 : 'KW3337'; -KW3338 : 'KW3338'; -KW3339 : 'KW3339'; -KW3340 : 'KW3340'; -KW3341 : 'KW3341'; -KW3342 : 'KW3342'; -KW3343 : 'KW3343'; -KW3344 : 'KW3344'; -KW3345 : 'KW3345'; -KW3346 : 'KW3346'; -KW3347 : 'KW3347'; -KW3348 : 'KW3348'; -KW3349 : 'KW3349'; -KW3350 : 'KW3350'; -KW3351 : 'KW3351'; -KW3352 : 'KW3352'; -KW3353 : 'KW3353'; -KW3354 : 'KW3354'; -KW3355 : 'KW3355'; -KW3356 : 'KW3356'; -KW3357 : 'KW3357'; -KW3358 : 'KW3358'; -KW3359 : 'KW3359'; -KW3360 : 'KW3360'; -KW3361 : 'KW3361'; -KW3362 : 'KW3362'; -KW3363 : 'KW3363'; -KW3364 : 'KW3364'; -KW3365 : 'KW3365'; -KW3366 : 'KW3366'; -KW3367 : 'KW3367'; -KW3368 : 'KW3368'; -KW3369 : 'KW3369'; -KW3370 : 'KW3370'; -KW3371 : 'KW3371'; -KW3372 : 'KW3372'; -KW3373 : 'KW3373'; -KW3374 : 'KW3374'; -KW3375 : 'KW3375'; -KW3376 : 'KW3376'; -KW3377 : 'KW3377'; -KW3378 : 'KW3378'; -KW3379 : 'KW3379'; -KW3380 : 'KW3380'; -KW3381 : 'KW3381'; -KW3382 : 'KW3382'; -KW3383 : 'KW3383'; -KW3384 : 'KW3384'; -KW3385 : 'KW3385'; -KW3386 : 'KW3386'; -KW3387 : 'KW3387'; -KW3388 : 'KW3388'; -KW3389 : 'KW3389'; -KW3390 : 'KW3390'; -KW3391 : 'KW3391'; -KW3392 : 'KW3392'; -KW3393 : 'KW3393'; -KW3394 : 'KW3394'; -KW3395 : 'KW3395'; -KW3396 : 'KW3396'; -KW3397 : 'KW3397'; -KW3398 : 'KW3398'; -KW3399 : 'KW3399'; -KW3400 : 'KW3400'; -KW3401 : 'KW3401'; -KW3402 : 'KW3402'; -KW3403 : 'KW3403'; -KW3404 : 'KW3404'; -KW3405 : 'KW3405'; -KW3406 : 'KW3406'; -KW3407 : 'KW3407'; -KW3408 : 'KW3408'; -KW3409 : 'KW3409'; -KW3410 : 'KW3410'; -KW3411 : 'KW3411'; -KW3412 : 'KW3412'; -KW3413 : 'KW3413'; -KW3414 : 'KW3414'; -KW3415 : 'KW3415'; -KW3416 : 'KW3416'; -KW3417 : 'KW3417'; -KW3418 : 'KW3418'; -KW3419 : 'KW3419'; -KW3420 : 'KW3420'; -KW3421 : 'KW3421'; -KW3422 : 'KW3422'; -KW3423 : 'KW3423'; -KW3424 : 'KW3424'; -KW3425 : 'KW3425'; -KW3426 : 'KW3426'; -KW3427 : 'KW3427'; -KW3428 : 'KW3428'; -KW3429 : 'KW3429'; -KW3430 : 'KW3430'; -KW3431 : 'KW3431'; -KW3432 : 'KW3432'; -KW3433 : 'KW3433'; -KW3434 : 'KW3434'; -KW3435 : 'KW3435'; -KW3436 : 'KW3436'; -KW3437 : 'KW3437'; -KW3438 : 'KW3438'; -KW3439 : 'KW3439'; -KW3440 : 'KW3440'; -KW3441 : 'KW3441'; -KW3442 : 'KW3442'; -KW3443 : 'KW3443'; -KW3444 : 'KW3444'; -KW3445 : 'KW3445'; -KW3446 : 'KW3446'; -KW3447 : 'KW3447'; -KW3448 : 'KW3448'; -KW3449 : 'KW3449'; -KW3450 : 'KW3450'; -KW3451 : 'KW3451'; -KW3452 : 'KW3452'; -KW3453 : 'KW3453'; -KW3454 : 'KW3454'; -KW3455 : 'KW3455'; -KW3456 : 'KW3456'; -KW3457 : 'KW3457'; -KW3458 : 'KW3458'; -KW3459 : 'KW3459'; -KW3460 : 'KW3460'; -KW3461 : 'KW3461'; -KW3462 : 'KW3462'; -KW3463 : 'KW3463'; -KW3464 : 'KW3464'; -KW3465 : 'KW3465'; -KW3466 : 'KW3466'; -KW3467 : 'KW3467'; -KW3468 : 'KW3468'; -KW3469 : 'KW3469'; -KW3470 : 'KW3470'; -KW3471 : 'KW3471'; -KW3472 : 'KW3472'; -KW3473 : 'KW3473'; -KW3474 : 'KW3474'; -KW3475 : 'KW3475'; -KW3476 : 'KW3476'; -KW3477 : 'KW3477'; -KW3478 : 'KW3478'; -KW3479 : 'KW3479'; -KW3480 : 'KW3480'; -KW3481 : 'KW3481'; -KW3482 : 'KW3482'; -KW3483 : 'KW3483'; -KW3484 : 'KW3484'; -KW3485 : 'KW3485'; -KW3486 : 'KW3486'; -KW3487 : 'KW3487'; -KW3488 : 'KW3488'; -KW3489 : 'KW3489'; -KW3490 : 'KW3490'; -KW3491 : 'KW3491'; -KW3492 : 'KW3492'; -KW3493 : 'KW3493'; -KW3494 : 'KW3494'; -KW3495 : 'KW3495'; -KW3496 : 'KW3496'; 
-KW3497 : 'KW3497'; -KW3498 : 'KW3498'; -KW3499 : 'KW3499'; -KW3500 : 'KW3500'; -KW3501 : 'KW3501'; -KW3502 : 'KW3502'; -KW3503 : 'KW3503'; -KW3504 : 'KW3504'; -KW3505 : 'KW3505'; -KW3506 : 'KW3506'; -KW3507 : 'KW3507'; -KW3508 : 'KW3508'; -KW3509 : 'KW3509'; -KW3510 : 'KW3510'; -KW3511 : 'KW3511'; -KW3512 : 'KW3512'; -KW3513 : 'KW3513'; -KW3514 : 'KW3514'; -KW3515 : 'KW3515'; -KW3516 : 'KW3516'; -KW3517 : 'KW3517'; -KW3518 : 'KW3518'; -KW3519 : 'KW3519'; -KW3520 : 'KW3520'; -KW3521 : 'KW3521'; -KW3522 : 'KW3522'; -KW3523 : 'KW3523'; -KW3524 : 'KW3524'; -KW3525 : 'KW3525'; -KW3526 : 'KW3526'; -KW3527 : 'KW3527'; -KW3528 : 'KW3528'; -KW3529 : 'KW3529'; -KW3530 : 'KW3530'; -KW3531 : 'KW3531'; -KW3532 : 'KW3532'; -KW3533 : 'KW3533'; -KW3534 : 'KW3534'; -KW3535 : 'KW3535'; -KW3536 : 'KW3536'; -KW3537 : 'KW3537'; -KW3538 : 'KW3538'; -KW3539 : 'KW3539'; -KW3540 : 'KW3540'; -KW3541 : 'KW3541'; -KW3542 : 'KW3542'; -KW3543 : 'KW3543'; -KW3544 : 'KW3544'; -KW3545 : 'KW3545'; -KW3546 : 'KW3546'; -KW3547 : 'KW3547'; -KW3548 : 'KW3548'; -KW3549 : 'KW3549'; -KW3550 : 'KW3550'; -KW3551 : 'KW3551'; -KW3552 : 'KW3552'; -KW3553 : 'KW3553'; -KW3554 : 'KW3554'; -KW3555 : 'KW3555'; -KW3556 : 'KW3556'; -KW3557 : 'KW3557'; -KW3558 : 'KW3558'; -KW3559 : 'KW3559'; -KW3560 : 'KW3560'; -KW3561 : 'KW3561'; -KW3562 : 'KW3562'; -KW3563 : 'KW3563'; -KW3564 : 'KW3564'; -KW3565 : 'KW3565'; -KW3566 : 'KW3566'; -KW3567 : 'KW3567'; -KW3568 : 'KW3568'; -KW3569 : 'KW3569'; -KW3570 : 'KW3570'; -KW3571 : 'KW3571'; -KW3572 : 'KW3572'; -KW3573 : 'KW3573'; -KW3574 : 'KW3574'; -KW3575 : 'KW3575'; -KW3576 : 'KW3576'; -KW3577 : 'KW3577'; -KW3578 : 'KW3578'; -KW3579 : 'KW3579'; -KW3580 : 'KW3580'; -KW3581 : 'KW3581'; -KW3582 : 'KW3582'; -KW3583 : 'KW3583'; -KW3584 : 'KW3584'; -KW3585 : 'KW3585'; -KW3586 : 'KW3586'; -KW3587 : 'KW3587'; -KW3588 : 'KW3588'; -KW3589 : 'KW3589'; -KW3590 : 'KW3590'; -KW3591 : 'KW3591'; -KW3592 : 'KW3592'; -KW3593 : 'KW3593'; -KW3594 : 'KW3594'; -KW3595 : 'KW3595'; -KW3596 : 'KW3596'; -KW3597 : 'KW3597'; -KW3598 : 'KW3598'; -KW3599 : 'KW3599'; -KW3600 : 'KW3600'; -KW3601 : 'KW3601'; -KW3602 : 'KW3602'; -KW3603 : 'KW3603'; -KW3604 : 'KW3604'; -KW3605 : 'KW3605'; -KW3606 : 'KW3606'; -KW3607 : 'KW3607'; -KW3608 : 'KW3608'; -KW3609 : 'KW3609'; -KW3610 : 'KW3610'; -KW3611 : 'KW3611'; -KW3612 : 'KW3612'; -KW3613 : 'KW3613'; -KW3614 : 'KW3614'; -KW3615 : 'KW3615'; -KW3616 : 'KW3616'; -KW3617 : 'KW3617'; -KW3618 : 'KW3618'; -KW3619 : 'KW3619'; -KW3620 : 'KW3620'; -KW3621 : 'KW3621'; -KW3622 : 'KW3622'; -KW3623 : 'KW3623'; -KW3624 : 'KW3624'; -KW3625 : 'KW3625'; -KW3626 : 'KW3626'; -KW3627 : 'KW3627'; -KW3628 : 'KW3628'; -KW3629 : 'KW3629'; -KW3630 : 'KW3630'; -KW3631 : 'KW3631'; -KW3632 : 'KW3632'; -KW3633 : 'KW3633'; -KW3634 : 'KW3634'; -KW3635 : 'KW3635'; -KW3636 : 'KW3636'; -KW3637 : 'KW3637'; -KW3638 : 'KW3638'; -KW3639 : 'KW3639'; -KW3640 : 'KW3640'; -KW3641 : 'KW3641'; -KW3642 : 'KW3642'; -KW3643 : 'KW3643'; -KW3644 : 'KW3644'; -KW3645 : 'KW3645'; -KW3646 : 'KW3646'; -KW3647 : 'KW3647'; -KW3648 : 'KW3648'; -KW3649 : 'KW3649'; -KW3650 : 'KW3650'; -KW3651 : 'KW3651'; -KW3652 : 'KW3652'; -KW3653 : 'KW3653'; -KW3654 : 'KW3654'; -KW3655 : 'KW3655'; -KW3656 : 'KW3656'; -KW3657 : 'KW3657'; -KW3658 : 'KW3658'; -KW3659 : 'KW3659'; -KW3660 : 'KW3660'; -KW3661 : 'KW3661'; -KW3662 : 'KW3662'; -KW3663 : 'KW3663'; -KW3664 : 'KW3664'; -KW3665 : 'KW3665'; -KW3666 : 'KW3666'; -KW3667 : 'KW3667'; -KW3668 : 'KW3668'; -KW3669 : 'KW3669'; -KW3670 : 'KW3670'; -KW3671 : 'KW3671'; -KW3672 : 'KW3672'; -KW3673 : 'KW3673'; -KW3674 : 
'KW3674'; -KW3675 : 'KW3675'; -KW3676 : 'KW3676'; -KW3677 : 'KW3677'; -KW3678 : 'KW3678'; -KW3679 : 'KW3679'; -KW3680 : 'KW3680'; -KW3681 : 'KW3681'; -KW3682 : 'KW3682'; -KW3683 : 'KW3683'; -KW3684 : 'KW3684'; -KW3685 : 'KW3685'; -KW3686 : 'KW3686'; -KW3687 : 'KW3687'; -KW3688 : 'KW3688'; -KW3689 : 'KW3689'; -KW3690 : 'KW3690'; -KW3691 : 'KW3691'; -KW3692 : 'KW3692'; -KW3693 : 'KW3693'; -KW3694 : 'KW3694'; -KW3695 : 'KW3695'; -KW3696 : 'KW3696'; -KW3697 : 'KW3697'; -KW3698 : 'KW3698'; -KW3699 : 'KW3699'; -KW3700 : 'KW3700'; -KW3701 : 'KW3701'; -KW3702 : 'KW3702'; -KW3703 : 'KW3703'; -KW3704 : 'KW3704'; -KW3705 : 'KW3705'; -KW3706 : 'KW3706'; -KW3707 : 'KW3707'; -KW3708 : 'KW3708'; -KW3709 : 'KW3709'; -KW3710 : 'KW3710'; -KW3711 : 'KW3711'; -KW3712 : 'KW3712'; -KW3713 : 'KW3713'; -KW3714 : 'KW3714'; -KW3715 : 'KW3715'; -KW3716 : 'KW3716'; -KW3717 : 'KW3717'; -KW3718 : 'KW3718'; -KW3719 : 'KW3719'; -KW3720 : 'KW3720'; -KW3721 : 'KW3721'; -KW3722 : 'KW3722'; -KW3723 : 'KW3723'; -KW3724 : 'KW3724'; -KW3725 : 'KW3725'; -KW3726 : 'KW3726'; -KW3727 : 'KW3727'; -KW3728 : 'KW3728'; -KW3729 : 'KW3729'; -KW3730 : 'KW3730'; -KW3731 : 'KW3731'; -KW3732 : 'KW3732'; -KW3733 : 'KW3733'; -KW3734 : 'KW3734'; -KW3735 : 'KW3735'; -KW3736 : 'KW3736'; -KW3737 : 'KW3737'; -KW3738 : 'KW3738'; -KW3739 : 'KW3739'; -KW3740 : 'KW3740'; -KW3741 : 'KW3741'; -KW3742 : 'KW3742'; -KW3743 : 'KW3743'; -KW3744 : 'KW3744'; -KW3745 : 'KW3745'; -KW3746 : 'KW3746'; -KW3747 : 'KW3747'; -KW3748 : 'KW3748'; -KW3749 : 'KW3749'; -KW3750 : 'KW3750'; -KW3751 : 'KW3751'; -KW3752 : 'KW3752'; -KW3753 : 'KW3753'; -KW3754 : 'KW3754'; -KW3755 : 'KW3755'; -KW3756 : 'KW3756'; -KW3757 : 'KW3757'; -KW3758 : 'KW3758'; -KW3759 : 'KW3759'; -KW3760 : 'KW3760'; -KW3761 : 'KW3761'; -KW3762 : 'KW3762'; -KW3763 : 'KW3763'; -KW3764 : 'KW3764'; -KW3765 : 'KW3765'; -KW3766 : 'KW3766'; -KW3767 : 'KW3767'; -KW3768 : 'KW3768'; -KW3769 : 'KW3769'; -KW3770 : 'KW3770'; -KW3771 : 'KW3771'; -KW3772 : 'KW3772'; -KW3773 : 'KW3773'; -KW3774 : 'KW3774'; -KW3775 : 'KW3775'; -KW3776 : 'KW3776'; -KW3777 : 'KW3777'; -KW3778 : 'KW3778'; -KW3779 : 'KW3779'; -KW3780 : 'KW3780'; -KW3781 : 'KW3781'; -KW3782 : 'KW3782'; -KW3783 : 'KW3783'; -KW3784 : 'KW3784'; -KW3785 : 'KW3785'; -KW3786 : 'KW3786'; -KW3787 : 'KW3787'; -KW3788 : 'KW3788'; -KW3789 : 'KW3789'; -KW3790 : 'KW3790'; -KW3791 : 'KW3791'; -KW3792 : 'KW3792'; -KW3793 : 'KW3793'; -KW3794 : 'KW3794'; -KW3795 : 'KW3795'; -KW3796 : 'KW3796'; -KW3797 : 'KW3797'; -KW3798 : 'KW3798'; -KW3799 : 'KW3799'; -KW3800 : 'KW3800'; -KW3801 : 'KW3801'; -KW3802 : 'KW3802'; -KW3803 : 'KW3803'; -KW3804 : 'KW3804'; -KW3805 : 'KW3805'; -KW3806 : 'KW3806'; -KW3807 : 'KW3807'; -KW3808 : 'KW3808'; -KW3809 : 'KW3809'; -KW3810 : 'KW3810'; -KW3811 : 'KW3811'; -KW3812 : 'KW3812'; -KW3813 : 'KW3813'; -KW3814 : 'KW3814'; -KW3815 : 'KW3815'; -KW3816 : 'KW3816'; -KW3817 : 'KW3817'; -KW3818 : 'KW3818'; -KW3819 : 'KW3819'; -KW3820 : 'KW3820'; -KW3821 : 'KW3821'; -KW3822 : 'KW3822'; -KW3823 : 'KW3823'; -KW3824 : 'KW3824'; -KW3825 : 'KW3825'; -KW3826 : 'KW3826'; -KW3827 : 'KW3827'; -KW3828 : 'KW3828'; -KW3829 : 'KW3829'; -KW3830 : 'KW3830'; -KW3831 : 'KW3831'; -KW3832 : 'KW3832'; -KW3833 : 'KW3833'; -KW3834 : 'KW3834'; -KW3835 : 'KW3835'; -KW3836 : 'KW3836'; -KW3837 : 'KW3837'; -KW3838 : 'KW3838'; -KW3839 : 'KW3839'; -KW3840 : 'KW3840'; -KW3841 : 'KW3841'; -KW3842 : 'KW3842'; -KW3843 : 'KW3843'; -KW3844 : 'KW3844'; -KW3845 : 'KW3845'; -KW3846 : 'KW3846'; -KW3847 : 'KW3847'; -KW3848 : 'KW3848'; -KW3849 : 'KW3849'; -KW3850 : 'KW3850'; -KW3851 : 'KW3851'; 
-KW3852 : 'KW3852'; -KW3853 : 'KW3853'; -KW3854 : 'KW3854'; -KW3855 : 'KW3855'; -KW3856 : 'KW3856'; -KW3857 : 'KW3857'; -KW3858 : 'KW3858'; -KW3859 : 'KW3859'; -KW3860 : 'KW3860'; -KW3861 : 'KW3861'; -KW3862 : 'KW3862'; -KW3863 : 'KW3863'; -KW3864 : 'KW3864'; -KW3865 : 'KW3865'; -KW3866 : 'KW3866'; -KW3867 : 'KW3867'; -KW3868 : 'KW3868'; -KW3869 : 'KW3869'; -KW3870 : 'KW3870'; -KW3871 : 'KW3871'; -KW3872 : 'KW3872'; -KW3873 : 'KW3873'; -KW3874 : 'KW3874'; -KW3875 : 'KW3875'; -KW3876 : 'KW3876'; -KW3877 : 'KW3877'; -KW3878 : 'KW3878'; -KW3879 : 'KW3879'; -KW3880 : 'KW3880'; -KW3881 : 'KW3881'; -KW3882 : 'KW3882'; -KW3883 : 'KW3883'; -KW3884 : 'KW3884'; -KW3885 : 'KW3885'; -KW3886 : 'KW3886'; -KW3887 : 'KW3887'; -KW3888 : 'KW3888'; -KW3889 : 'KW3889'; -KW3890 : 'KW3890'; -KW3891 : 'KW3891'; -KW3892 : 'KW3892'; -KW3893 : 'KW3893'; -KW3894 : 'KW3894'; -KW3895 : 'KW3895'; -KW3896 : 'KW3896'; -KW3897 : 'KW3897'; -KW3898 : 'KW3898'; -KW3899 : 'KW3899'; -KW3900 : 'KW3900'; -KW3901 : 'KW3901'; -KW3902 : 'KW3902'; -KW3903 : 'KW3903'; -KW3904 : 'KW3904'; -KW3905 : 'KW3905'; -KW3906 : 'KW3906'; -KW3907 : 'KW3907'; -KW3908 : 'KW3908'; -KW3909 : 'KW3909'; -KW3910 : 'KW3910'; -KW3911 : 'KW3911'; -KW3912 : 'KW3912'; -KW3913 : 'KW3913'; -KW3914 : 'KW3914'; -KW3915 : 'KW3915'; -KW3916 : 'KW3916'; -KW3917 : 'KW3917'; -KW3918 : 'KW3918'; -KW3919 : 'KW3919'; -KW3920 : 'KW3920'; -KW3921 : 'KW3921'; -KW3922 : 'KW3922'; -KW3923 : 'KW3923'; -KW3924 : 'KW3924'; -KW3925 : 'KW3925'; -KW3926 : 'KW3926'; -KW3927 : 'KW3927'; -KW3928 : 'KW3928'; -KW3929 : 'KW3929'; -KW3930 : 'KW3930'; -KW3931 : 'KW3931'; -KW3932 : 'KW3932'; -KW3933 : 'KW3933'; -KW3934 : 'KW3934'; -KW3935 : 'KW3935'; -KW3936 : 'KW3936'; -KW3937 : 'KW3937'; -KW3938 : 'KW3938'; -KW3939 : 'KW3939'; -KW3940 : 'KW3940'; -KW3941 : 'KW3941'; -KW3942 : 'KW3942'; -KW3943 : 'KW3943'; -KW3944 : 'KW3944'; -KW3945 : 'KW3945'; -KW3946 : 'KW3946'; -KW3947 : 'KW3947'; -KW3948 : 'KW3948'; -KW3949 : 'KW3949'; -KW3950 : 'KW3950'; -KW3951 : 'KW3951'; -KW3952 : 'KW3952'; -KW3953 : 'KW3953'; -KW3954 : 'KW3954'; -KW3955 : 'KW3955'; -KW3956 : 'KW3956'; -KW3957 : 'KW3957'; -KW3958 : 'KW3958'; -KW3959 : 'KW3959'; -KW3960 : 'KW3960'; -KW3961 : 'KW3961'; -KW3962 : 'KW3962'; -KW3963 : 'KW3963'; -KW3964 : 'KW3964'; -KW3965 : 'KW3965'; -KW3966 : 'KW3966'; -KW3967 : 'KW3967'; -KW3968 : 'KW3968'; -KW3969 : 'KW3969'; -KW3970 : 'KW3970'; -KW3971 : 'KW3971'; -KW3972 : 'KW3972'; -KW3973 : 'KW3973'; -KW3974 : 'KW3974'; -KW3975 : 'KW3975'; -KW3976 : 'KW3976'; -KW3977 : 'KW3977'; -KW3978 : 'KW3978'; -KW3979 : 'KW3979'; -KW3980 : 'KW3980'; -KW3981 : 'KW3981'; -KW3982 : 'KW3982'; -KW3983 : 'KW3983'; -KW3984 : 'KW3984'; -KW3985 : 'KW3985'; -KW3986 : 'KW3986'; -KW3987 : 'KW3987'; -KW3988 : 'KW3988'; -KW3989 : 'KW3989'; -KW3990 : 'KW3990'; -KW3991 : 'KW3991'; -KW3992 : 'KW3992'; -KW3993 : 'KW3993'; -KW3994 : 'KW3994'; -KW3995 : 'KW3995'; -KW3996 : 'KW3996'; -KW3997 : 'KW3997'; -KW3998 : 'KW3998'; -KW3999 : 'KW3999'; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st deleted file mode 100644 index 2bf9aa18e..000000000 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyConfigs.st +++ /dev/null @@ -1,4 +0,0 @@ -lexer grammar ; -I : .*? ('a' | 'ab') {} ; -WS : (' '|'\n') -> skip ; -J : . 
{}; diff --git a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st b/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st deleted file mode 100644 index 616d0d4d1..000000000 --- a/tool/test/org/antlr/v4/testgen/grammars/LexerExec/NonGreedyTermination.st +++ /dev/null @@ -1,2 +0,0 @@ -lexer grammar ; -STRING : '\"' ('\"\"' | .)*? '\"'; diff --git a/tool/test/org/antlr/v4/test/BaseTest.java b/tool/test/org/antlr/v4/xtest/BaseTest.java similarity index 99% rename from tool/test/org/antlr/v4/test/BaseTest.java rename to tool/test/org/antlr/v4/xtest/BaseTest.java index 0fafb0c72..6db87bea7 100644 --- a/tool/test/org/antlr/v4/test/BaseTest.java +++ b/tool/test/org/antlr/v4/xtest/BaseTest.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.Tool; import org.antlr.v4.automata.ATNFactory; diff --git a/tool/test/org/antlr/v4/test/ErrorQueue.java b/tool/test/org/antlr/v4/xtest/ErrorQueue.java similarity index 99% rename from tool/test/org/antlr/v4/test/ErrorQueue.java rename to tool/test/org/antlr/v4/xtest/ErrorQueue.java index 91295bace..8cc5aba04 100644 --- a/tool/test/org/antlr/v4/test/ErrorQueue.java +++ b/tool/test/org/antlr/v4/xtest/ErrorQueue.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.Tool; import org.antlr.v4.runtime.misc.Utils; diff --git a/tool/test/org/antlr/v4/xtest/Java-LR.g4 b/tool/test/org/antlr/v4/xtest/Java-LR.g4 new file mode 100644 index 000000000..9d38d029b --- /dev/null +++ b/tool/test/org/antlr/v4/xtest/Java-LR.g4 @@ -0,0 +1,1248 @@ +/* + [The "BSD licence"] + Copyright (c) 2007-2008 Terence Parr + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +/** A Java 1.5 grammar for ANTLR v3 derived from the spec + * + * This is a very close representation of the spec; the changes + * are comestic (remove left recursion) and also fixes (the spec + * isn't exactly perfect). I have run this on the 1.4.2 source + * and some nasty looking enums from 1.5, but have not really + * tested for 1.5 compatibility. + * + * I built this with: java -Xmx100M org.antlr.Tool java.g + * and got two errors that are ok (for now): + * java.g:691:9: Decision can match input such as + * "'0'..'9'{'E', 'e'}{'+', '-'}'0'..'9'{'D', 'F', 'd', 'f'}" + * using multiple alternatives: 3, 4 + * As a result, alternative(s) 4 were disabled for that input + * java.g:734:35: Decision can match input such as "{'$', 'A'..'Z', + * '_', 'a'..'z', '\u00C0'..'\u00D6', '\u00D8'..'\u00F6', + * '\u00F8'..'\u1FFF', '\u3040'..'\u318F', '\u3300'..'\u337F', + * '\u3400'..'\u3D2D', '\u4E00'..'\u9FFF', '\uF900'..'\uFAFF'}" + * using multiple alternatives: 1, 2 + * As a result, alternative(s) 2 were disabled for that input + * + * You can turn enum on/off as a keyword :) + * + * Version 1.0 -- initial release July 5, 2006 (requires 3.0b2 or higher) + * + * Primary author: Terence Parr, July 2006 + * + * Version 1.0.1 -- corrections by Koen Vanderkimpen & Marko van Dooren, + * October 25, 2006; + * fixed normalInterfaceDeclaration: now uses typeParameters instead + * of typeParameter (according to JLS, 3rd edition) + * fixed castExpression: no longer allows expression next to type + * (according to semantics in JLS, in contrast with syntax in JLS) + * + * Version 1.0.2 -- Terence Parr, Nov 27, 2006 + * java spec I built this from had some bizarre for-loop control. + * Looked weird and so I looked elsewhere...Yep, it's messed up. + * simplified. + * + * Version 1.0.3 -- Chris Hogue, Feb 26, 2007 + * Factored out an annotationName rule and used it in the annotation rule. + * Not sure why, but typeName wasn't recognizing references to inner + * annotations (e.g. @InterfaceName.InnerAnnotation()) + * Factored out the elementValue section of an annotation reference. Created + * elementValuePair and elementValuePairs rules, then used them in the + * annotation rule. Allows it to recognize annotation references with + * multiple, comma separated attributes. + * Updated elementValueArrayInitializer so that it allows multiple elements. + * (It was only allowing 0 or 1 element). + * Updated localVariableDeclaration to allow annotations. Interestingly the JLS + * doesn't appear to indicate this is legal, but it does work as of at least + * JDK 1.5.0_06. + * Moved the Identifier portion of annotationTypeElementRest to annotationMethodRest. + * Because annotationConstantRest already references variableDeclarator which + * has the Identifier portion in it, the parser would fail on constants in + * annotation definitions because it expected two identifiers. + * Added optional trailing ';' to the alternatives in annotationTypeElementRest. + * Wouldn't handle an inner interface that has a trailing ';'. + * Swapped the expression and type rule reference order in castExpression to + * make it check for genericized casts first. It was failing to recognize a + * statement like "Class TYPE = (Class)...;" because it was seeing + * 'Class'. + * Changed createdName to use typeArguments instead of nonWildcardTypeArguments. + * Changed the 'this' alternative in primary to allow 'identifierSuffix' rather than + * just 'arguments'. 
The case it couldn't handle was a call to an explicit + * generic method invocation (e.g. this.doSomething()). Using identifierSuffix + * may be overly aggressive--perhaps should create a more constrained thisSuffix rule? + * + * Version 1.0.4 -- Hiroaki Nakamura, May 3, 2007 + * + * Fixed formalParameterDecls, localVariableDeclaration, forInit, + * and forVarControl to use variableModifier* not 'final'? (annotation)? + * + * Version 1.0.5 -- Terence, June 21, 2007 + * --a[i].foo didn't work. Fixed unaryExpression + * + * Version 1.0.6 -- John Ridgway, March 17, 2008 + * Made "assert" a switchable keyword like "enum". + * Fixed compilationUnit to disallow "annotation importDeclaration ...". + * Changed "Identifier ('.' Identifier)*" to "qualifiedName" in more + * places. + * Changed modifier* and/or variableModifier* to classOrInterfaceModifiers, + * modifiers or variableModifiers, as appropriate. + * Renamed "bound" to "typeBound" to better match language in the JLS. + * Added "memberDeclaration" which rewrites to methodDeclaration or + * fieldDeclaration and pulled type into memberDeclaration. So we parse + * type and then move on to decide whether we're dealing with a field + * or a method. + * Modified "constructorDeclaration" to use "constructorBody" instead of + * "methodBody". constructorBody starts with explicitConstructorInvocation, + * then goes on to blockStatement*. Pulling explicitConstructorInvocation + * out of expressions allowed me to simplify "primary". + * Changed variableDeclarator to simplify it. + * Changed type to use classOrInterfaceType, thus simplifying it; of course + * I then had to add classOrInterfaceType, but it is used in several + * places. + * Fixed annotations, old version allowed "@X(y,z)", which is illegal. + * Added optional comma to end of "elementValueArrayInitializer"; as per JLS. + * Changed annotationTypeElementRest to use normalClassDeclaration and + * normalInterfaceDeclaration rather than classDeclaration and + * interfaceDeclaration, thus getting rid of a couple of grammar ambiguities. + * Split localVariableDeclaration into localVariableDeclarationStatement + * (includes the terminating semi-colon) and localVariableDeclaration. + * This allowed me to use localVariableDeclaration in "forInit" clauses, + * simplifying them. + * Changed switchBlockStatementGroup to use multiple labels. This adds an + * ambiguity, but if one uses appropriately greedy parsing it yields the + * parse that is closest to the meaning of the switch statement. + * Renamed "forVarControl" to "enhancedForControl" -- JLS language. + * Added semantic predicates to test for shift operations rather than other + * things. Thus, for instance, the string "< <" will never be treated + * as a left-shift operator. + * In "creator" we rule out "nonWildcardTypeArguments" on arrayCreation, + * which are illegal. + * Moved "nonWildcardTypeArguments into innerCreator. + * Removed 'super' superSuffix from explicitGenericInvocation, since that + * is only used in explicitConstructorInvocation at the beginning of a + * constructorBody. (This is part of the simplification of expressions + * mentioned earlier.) + * Simplified primary (got rid of those things that are only used in + * explicitConstructorInvocation). + * Lexer -- removed "Exponent?" from FloatingPointLiteral choice 4, since it + * led to an ambiguity. 
+ * + * This grammar successfully parses every .java file in the JDK 1.5 source + * tree (excluding those whose file names include '-', which are not + * valid Java compilation units). + * + * June 26, 2008 + * + * conditionalExpression had wrong precedence x?y:z. + * + * February 26, 2011 + * added left-recursive expression rule + * + * Known remaining problems: + * "Letter" and "JavaIDDigit" are wrong. The actual specification of + * "Letter" should be "a character for which the method + * Character.isJavaIdentifierStart(int) returns true." A "Java + * letter-or-digit is a character for which the method + * Character.isJavaIdentifierPart(int) returns true." + */ +grammar Java; + +// starting point for parsing a java file +/* The annotations are separated out to make parsing faster, but must be associated with + a packageDeclaration or a typeDeclaration (and not an empty one). */ +compilationUnit + : annotations + ( packageDeclaration importDeclaration* typeDeclaration* + | classOrInterfaceDeclaration typeDeclaration* + ) + EOF + | packageDeclaration? importDeclaration* typeDeclaration* + EOF + ; + +packageDeclaration + : 'package' qualifiedName ';' + ; + +importDeclaration + : 'import' 'static'? qualifiedName ('.' '*')? ';' + ; + +typeDeclaration + : classOrInterfaceDeclaration + | ';' + ; + +classOrInterfaceDeclaration + : classOrInterfaceModifiers (classDeclaration | interfaceDeclaration) + ; + +classOrInterfaceModifiers + : classOrInterfaceModifier* + ; + +classOrInterfaceModifier + : annotation // class or interface + | ( 'public' // class or interface + | 'protected' // class or interface + | 'private' // class or interface + | 'abstract' // class or interface + | 'static' // class or interface + | 'final' // class only -- does not apply to interfaces + | 'strictfp' // class or interface + ) + ; + +modifiers + : modifier* + ; + +classDeclaration + : normalClassDeclaration + | enumDeclaration + ; + +normalClassDeclaration + : 'class' Identifier typeParameters? + ('extends' type)? + ('implements' typeList)? + classBody + ; + +typeParameters + : '<' typeParameter (',' typeParameter)* '>' + ; + +typeParameter + : Identifier ('extends' typeBound)? + ; + +typeBound + : type ('&' type)* + ; + +enumDeclaration + : ENUM Identifier ('implements' typeList)? enumBody + ; + +enumBody + : '{' enumConstants? ','? enumBodyDeclarations? '}' + ; + +enumConstants + : enumConstant (',' enumConstant)* + ; + +enumConstant + : annotations? Identifier arguments? classBody? + ; + +enumBodyDeclarations + : ';' (classBodyDeclaration)* + ; + +interfaceDeclaration + : normalInterfaceDeclaration + | annotationTypeDeclaration + ; + +normalInterfaceDeclaration + : 'interface' Identifier typeParameters? ('extends' typeList)? interfaceBody + ; + +typeList + : type (',' type)* + ; + +classBody + : '{' classBodyDeclaration* '}' + ; + +interfaceBody + : '{' interfaceBodyDeclaration* '}' + ; + +classBodyDeclaration + : ';' + | 'static'? 
block + | modifiers memberDecl + ; + +memberDecl + : genericMethodOrConstructorDecl + | memberDeclaration + | 'void' Identifier voidMethodDeclaratorRest + | Identifier constructorDeclaratorRest + | interfaceDeclaration + | classDeclaration + ; + +memberDeclaration + : type (methodDeclaration | fieldDeclaration) + ; + +genericMethodOrConstructorDecl + : typeParameters genericMethodOrConstructorRest + ; + +genericMethodOrConstructorRest + : (type | 'void') Identifier methodDeclaratorRest + | Identifier constructorDeclaratorRest + ; + +methodDeclaration + : Identifier methodDeclaratorRest + ; + +fieldDeclaration + : variableDeclarators ';' + ; + +interfaceBodyDeclaration + : modifiers interfaceMemberDecl + | ';' + ; + +interfaceMemberDecl + : interfaceMethodOrFieldDecl + | interfaceGenericMethodDecl + | 'void' Identifier voidInterfaceMethodDeclaratorRest + | interfaceDeclaration + | classDeclaration + ; + +interfaceMethodOrFieldDecl + : type Identifier interfaceMethodOrFieldRest + ; + +interfaceMethodOrFieldRest + : constantDeclaratorsRest ';' + | interfaceMethodDeclaratorRest + ; + +methodDeclaratorRest + : formalParameters ('[' ']')* + ('throws' qualifiedNameList)? + ( methodBody + | ';' + ) + ; + +voidMethodDeclaratorRest + : formalParameters ('throws' qualifiedNameList)? + ( methodBody + | ';' + ) + ; + +interfaceMethodDeclaratorRest + : formalParameters ('[' ']')* ('throws' qualifiedNameList)? ';' + ; + +interfaceGenericMethodDecl + : typeParameters (type | 'void') Identifier + interfaceMethodDeclaratorRest + ; + +voidInterfaceMethodDeclaratorRest + : formalParameters ('throws' qualifiedNameList)? ';' + ; + +constructorDeclaratorRest + : formalParameters ('throws' qualifiedNameList)? constructorBody + ; + +constantDeclarator + : Identifier constantDeclaratorRest + ; + +variableDeclarators + : variableDeclarator (',' variableDeclarator)* + ; + +variableDeclarator + : variableDeclaratorId ('=' variableInitializer)? + ; + +constantDeclaratorsRest + : constantDeclaratorRest (',' constantDeclarator)* + ; + +constantDeclaratorRest + : ('[' ']')* '=' variableInitializer + ; + +variableDeclaratorId + : Identifier ('[' ']')* + ; + +variableInitializer + : arrayInitializer + | expression + ; + +arrayInitializer + : '{' (variableInitializer (',' variableInitializer)* (',')? )? '}' + ; + +modifier + : annotation + | ( 'public' + | 'protected' + | 'private' + | 'static' + | 'abstract' + | 'final' + | 'native' + | 'synchronized' + | 'transient' + | 'volatile' + | 'strictfp' + ) + ; + +packageOrTypeName + : qualifiedName + ; + +enumConstantName + : Identifier + ; + +typeName + : qualifiedName + ; + +type + : classOrInterfaceType ('[' ']')* + | primitiveType ('[' ']')* + ; + +classOrInterfaceType + : Identifier typeArguments? ('.' Identifier typeArguments? )* + ; + +primitiveType + : 'boolean' + | 'char' + | 'byte' + | 'short' + | 'int' + | 'long' + | 'float' + | 'double' + ; + +variableModifier + : 'final' + | annotation + ; + +typeArguments + : '<' typeArgument (',' typeArgument)* '>' + ; + +typeArgument + : type + | '?' (('extends' | 'super') type)? + ; + +qualifiedNameList + : qualifiedName (',' qualifiedName)* + ; + +formalParameters + : '(' formalParameterDecls? ')' + ; + +formalParameterDecls + : variableModifiers type formalParameterDeclsRest + ; + +formalParameterDeclsRest + : variableDeclaratorId (',' formalParameterDecls)? + | '...' variableDeclaratorId + ; + +methodBody + : block + ; + +constructorBody + : block + ; + +qualifiedName + : Identifier ('.' 
Identifier)* + ; + +literal + : IntegerLiteral + | FloatingPointLiteral + | CharacterLiteral + | StringLiteral + | BooleanLiteral + | 'null' + ; + +// ANNOTATIONS + +annotations + : annotation+ + ; + +annotation + : '@' annotationName ( '(' ( elementValuePairs | elementValue )? ')' )? + ; + +annotationName + : Identifier ('.' Identifier)* + ; + +elementValuePairs + : elementValuePair (',' elementValuePair)* + ; + +elementValuePair + : Identifier '=' elementValue + ; + +elementValue + : expression + | annotation + | elementValueArrayInitializer + ; + +elementValueArrayInitializer + : '{' (elementValue (',' elementValue)*)? (',')? '}' + ; + +annotationTypeDeclaration + : '@' 'interface' Identifier annotationTypeBody + ; + +annotationTypeBody + : '{' (annotationTypeElementDeclaration)* '}' + ; + +annotationTypeElementDeclaration + : modifiers annotationTypeElementRest + | ';' // this is not allowed by the grammar, but apparently allowed by the actual compiler + ; + +annotationTypeElementRest + : type annotationMethodOrConstantRest ';' + | normalClassDeclaration ';'? + | normalInterfaceDeclaration ';'? + | enumDeclaration ';'? + | annotationTypeDeclaration ';'? + ; + +annotationMethodOrConstantRest + : annotationMethodRest + | annotationConstantRest + ; + +annotationMethodRest + : Identifier '(' ')' defaultValue? + ; + +annotationConstantRest + : variableDeclarators + ; + +defaultValue + : 'default' elementValue + ; + +// STATEMENTS / BLOCKS + +block + : '{' blockStatement* '}' + ; + +blockStatement + : localVariableDeclarationStatement + | classOrInterfaceDeclaration + | statement + ; + +localVariableDeclarationStatement + : localVariableDeclaration ';' + ; + +localVariableDeclaration + : variableModifiers type variableDeclarators + ; + +variableModifiers + : variableModifier* + ; + +statement + : block + | ASSERT expression (':' expression)? ';' + | 'if' parExpression statement ('else' statement)? + | 'for' '(' forControl ')' statement + | 'while' parExpression statement + | 'do' statement 'while' parExpression ';' + | 'try' block (catches finallyBlock? | finallyBlock) + | 'try' resourceSpecification block catches? finallyBlock? + | 'switch' parExpression '{' switchBlockStatementGroups '}' + | 'synchronized' parExpression block + | 'return' expression? ';' + | 'throw' expression ';' + | 'break' Identifier? ';' + | 'continue' Identifier? ';' + | ';' + | statementExpression ';' + | Identifier ':' statement + ; + +catches + : catchClause+ + ; + +catchClause + : 'catch' '(' variableModifiers catchType Identifier ')' block + ; + +catchType + : qualifiedName ('|' qualifiedName)* + ; + +finallyBlock + : 'finally' block + ; + +resourceSpecification + : '(' resources ';'? ')' + ; + +resources + : resource (';' resource)* + ; + +resource + : variableModifiers classOrInterfaceType variableDeclaratorId '=' expression + ; + +formalParameter + : variableModifiers type variableDeclaratorId + ; + +switchBlockStatementGroups + : (switchBlockStatementGroup)* + ; + +/* The change here (switchLabel -> switchLabel+) technically makes this grammar + ambiguous; but with appropriately greedy parsing it yields the most + appropriate AST, one in which each group, except possibly the last one, has + labels and statements. */ +switchBlockStatementGroup + : switchLabel+ blockStatement* + ; + +switchLabel + : 'case' constantExpression ':' + | 'case' enumConstantName ':' + | 'default' ':' + ; + +forControl + : enhancedForControl + | forInit? ';' expression? ';' forUpdate? 
+ ; + +forInit + : localVariableDeclaration + | expressionList + ; + +enhancedForControl + : variableModifiers type Identifier ':' expression + ; + +forUpdate + : expressionList + ; + +// EXPRESSIONS + +parExpression + : '(' expression ')' + ; + +expressionList + : expression (',' expression)* + ; + +statementExpression + : expression + ; + +constantExpression + : expression + ; + +expression + : primary + | expression '.' Identifier + | expression '.' 'this' + | expression '.' 'new' nonWildcardTypeArguments? innerCreator + | expression '.' 'super' superSuffix + | expression '.' explicitGenericInvocation + | 'new' creator + | expression '[' expression ']' + | '(' type ')' expression + | expression ('++' | '--') + | expression '(' expressionList? ')' + | ('+'|'-'|'++'|'--') expression + | ('~'|'!') expression + | expression ('*'|'/'|'%') expression + | expression ('+'|'-') expression + | expression ('<' '<' | '>' '>' '>' | '>' '>') expression + | expression ('<=' | '>=' | '>' | '<') expression + | expression 'instanceof' type + | expression ('==' | '!=') expression + | expression '&' expression + | expression '^' expression + | expression '|' expression + | expression '&&' expression + | expression '||' expression + | expression '?' expression ':' expression + | expression + ( '=' + | '+=' + | '-=' + | '*=' + | '/=' + | '&=' + | '|=' + | '^=' + | '>>=' + | '>>>=' + | '<<=' + | '%=' + ) + expression + ; + +primary + : '(' expression ')' + | 'this' + | 'super' + | literal + | Identifier + | type '.' 'class' + | 'void' '.' 'class' + | nonWildcardTypeArguments (explicitGenericInvocationSuffix | 'this' arguments) + ; + +creator + : nonWildcardTypeArguments createdName classCreatorRest + | createdName (arrayCreatorRest | classCreatorRest) + ; + +createdName + : Identifier typeArgumentsOrDiamond? ('.' Identifier typeArgumentsOrDiamond?)* + | primitiveType + ; + +innerCreator + : Identifier nonWildcardTypeArgumentsOrDiamond? classCreatorRest + ; + +arrayCreatorRest + : '[' + ( ']' ('[' ']')* arrayInitializer + | expression ']' ('[' expression ']')* ('[' ']')* + ) + ; + +classCreatorRest + : arguments classBody? + ; + +explicitGenericInvocation + : nonWildcardTypeArguments explicitGenericInvocationSuffix + ; + +nonWildcardTypeArguments + : '<' typeList '>' + ; + +typeArgumentsOrDiamond + : '<' '>' + | typeArguments + ; + +nonWildcardTypeArgumentsOrDiamond + : '<' '>' + | nonWildcardTypeArguments + ; + +superSuffix + : arguments + | '.' Identifier arguments? + ; + +explicitGenericInvocationSuffix + : 'super' superSuffix + | Identifier arguments + ; + +arguments + : '(' expressionList? 
')' + ; + +// LEXER + +// §3.9 Keywords + +ABSTRACT : 'abstract'; +ASSERT : 'assert'; +BOOLEAN : 'boolean'; +BREAK : 'break'; +BYTE : 'byte'; +CASE : 'case'; +CATCH : 'catch'; +CHAR : 'char'; +CLASS : 'class'; +CONST : 'const'; +CONTINUE : 'continue'; +DEFAULT : 'default'; +DO : 'do'; +DOUBLE : 'double'; +ELSE : 'else'; +ENUM : 'enum'; +EXTENDS : 'extends'; +FINAL : 'final'; +FINALLY : 'finally'; +FLOAT : 'float'; +FOR : 'for'; +IF : 'if'; +GOTO : 'goto'; +IMPLEMENTS : 'implements'; +IMPORT : 'import'; +INSTANCEOF : 'instanceof'; +INT : 'int'; +INTERFACE : 'interface'; +LONG : 'long'; +NATIVE : 'native'; +NEW : 'new'; +PACKAGE : 'package'; +PRIVATE : 'private'; +PROTECTED : 'protected'; +PUBLIC : 'public'; +RETURN : 'return'; +SHORT : 'short'; +STATIC : 'static'; +STRICTFP : 'strictfp'; +SUPER : 'super'; +SWITCH : 'switch'; +SYNCHRONIZED : 'synchronized'; +THIS : 'this'; +THROW : 'throw'; +THROWS : 'throws'; +TRANSIENT : 'transient'; +TRY : 'try'; +VOID : 'void'; +VOLATILE : 'volatile'; +WHILE : 'while'; + +// §3.10.1 Integer Literals + +IntegerLiteral + : DecimalIntegerLiteral + | HexIntegerLiteral + | OctalIntegerLiteral + | BinaryIntegerLiteral + ; + +fragment +DecimalIntegerLiteral + : DecimalNumeral IntegerTypeSuffix? + ; + +fragment +HexIntegerLiteral + : HexNumeral IntegerTypeSuffix? + ; + +fragment +OctalIntegerLiteral + : OctalNumeral IntegerTypeSuffix? + ; + +fragment +BinaryIntegerLiteral + : BinaryNumeral IntegerTypeSuffix? + ; + +fragment +IntegerTypeSuffix + : [lL] + ; + +fragment +DecimalNumeral + : '0' + | NonZeroDigit (Digits? | Underscores Digits) + ; + +fragment +Digits + : Digit (DigitsAndUnderscores? Digit)? + ; + +fragment +Digit + : '0' + | NonZeroDigit + ; + +fragment +NonZeroDigit + : [1-9] + ; + +fragment +DigitsAndUnderscores + : DigitOrUnderscore+ + ; + +fragment +DigitOrUnderscore + : Digit + | '_' + ; + +fragment +Underscores + : '_'+ + ; + +fragment +HexNumeral + : '0' [xX] HexDigits + ; + +fragment +HexDigits + : HexDigit (HexDigitsAndUnderscores? HexDigit)? + ; + +fragment +HexDigit + : [0-9a-fA-F] + ; + +fragment +HexDigitsAndUnderscores + : HexDigitOrUnderscore+ + ; + +fragment +HexDigitOrUnderscore + : HexDigit + | '_' + ; + +fragment +OctalNumeral + : '0' Underscores? OctalDigits + ; + +fragment +OctalDigits + : OctalDigit (OctalDigitsAndUnderscores? OctalDigit)? + ; + +fragment +OctalDigit + : [0-7] + ; + +fragment +OctalDigitsAndUnderscores + : OctalDigitOrUnderscore+ + ; + +fragment +OctalDigitOrUnderscore + : OctalDigit + | '_' + ; + +fragment +BinaryNumeral + : '0' [bB] BinaryDigits + ; + +fragment +BinaryDigits + : BinaryDigit (BinaryDigitsAndUnderscores? BinaryDigit)? + ; + +fragment +BinaryDigit + : [01] + ; + +fragment +BinaryDigitsAndUnderscores + : BinaryDigitOrUnderscore+ + ; + +fragment +BinaryDigitOrUnderscore + : BinaryDigit + | '_' + ; + +// §3.10.2 Floating-Point Literals + +FloatingPointLiteral + : DecimalFloatingPointLiteral + | HexadecimalFloatingPointLiteral + ; + +fragment +DecimalFloatingPointLiteral + : Digits '.' Digits? ExponentPart? FloatTypeSuffix? + | '.' Digits ExponentPart? FloatTypeSuffix? + | Digits ExponentPart FloatTypeSuffix? + | Digits FloatTypeSuffix + ; + +fragment +ExponentPart + : ExponentIndicator SignedInteger + ; + +fragment +ExponentIndicator + : [eE] + ; + +fragment +SignedInteger + : Sign? Digits + ; + +fragment +Sign + : [+-] + ; + +fragment +FloatTypeSuffix + : [fFdD] + ; + +fragment +HexadecimalFloatingPointLiteral + : HexSignificand BinaryExponent FloatTypeSuffix? 
+ ; + +fragment +HexSignificand + : HexNumeral '.'? + | '0' [xX] HexDigits? '.' HexDigits + ; + +fragment +BinaryExponent + : BinaryExponentIndicator SignedInteger + ; + +fragment +BinaryExponentIndicator + : [pP] + ; + +// §3.10.3 Boolean Literals + +BooleanLiteral + : 'true' + | 'false' + ; + +// §3.10.4 Character Literals + +CharacterLiteral + : '\'' SingleCharacter '\'' + | '\'' EscapeSequence '\'' + ; + +fragment +SingleCharacter + : ~['\\] + ; + +// §3.10.5 String Literals + +StringLiteral + : '"' StringCharacters? '"' + ; + +fragment +StringCharacters + : StringCharacter+ + ; + +fragment +StringCharacter + : ~["\\] + | EscapeSequence + ; + +// §3.10.6 Escape Sequences for Character and String Literals + +fragment +EscapeSequence + : '\\' [btnfr"'\\] + | OctalEscape + ; + +fragment +OctalEscape + : '\\' OctalDigit + | '\\' OctalDigit OctalDigit + | '\\' ZeroToThree OctalDigit OctalDigit + ; + +fragment +ZeroToThree + : [0-3] + ; + +// §3.10.7 The Null Literal + +NullLiteral + : 'null' + ; + +// §3.11 Separators + +LPAREN : '('; +RPAREN : ')'; +LBRACE : '{'; +RBRACE : '}'; +LBRACK : '['; +RBRACK : ']'; +SEMI : ';'; +COMMA : ','; +DOT : '.'; + +// §3.12 Operators + +ASSIGN : '='; +GT : '>'; +LT : '<'; +BANG : '!'; +TILDE : '~'; +QUESTION : '?'; +COLON : ':'; +EQUAL : '=='; +LE : '<='; +GE : '>='; +NOTEQUAL : '!='; +AND : '&&'; +OR : '||'; +INC : '++'; +DEC : '--'; +ADD : '+'; +SUB : '-'; +MUL : '*'; +DIV : '/'; +BITAND : '&'; +BITOR : '|'; +CARET : '^'; +MOD : '%'; + +ADD_ASSIGN : '+='; +SUB_ASSIGN : '-='; +MUL_ASSIGN : '*='; +DIV_ASSIGN : '/='; +AND_ASSIGN : '&='; +OR_ASSIGN : '|='; +XOR_ASSIGN : '^='; +MOD_ASSIGN : '%='; +LSHIFT_ASSIGN : '<<='; +RSHIFT_ASSIGN : '>>='; +URSHIFT_ASSIGN : '>>>='; + +// §3.8 Identifiers (must appear after all keywords in the grammar) + +Identifier + : JavaLetter JavaLetterOrDigit* + ; + +fragment +JavaLetter + : [a-zA-Z$_] // these are the "java letters" below 0xFF + | // covers all characters above 0xFF which are not a surrogate + ~[\u0000-\u00FF\uD800-\uDBFF] + {Character.isJavaIdentifierStart(_input.LA(-1))}? + | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF + [\uD800-\uDBFF] [\uDC00-\uDFFF] + {Character.isJavaIdentifierStart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? + ; + +fragment +JavaLetterOrDigit + : [a-zA-Z0-9$_] // these are the "java letters or digits" below 0xFF + | // covers all characters above 0xFF which are not a surrogate + ~[\u0000-\u00FF\uD800-\uDBFF] + {Character.isJavaIdentifierPart(_input.LA(-1))}? + | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF + [\uD800-\uDBFF] [\uDC00-\uDFFF] + {Character.isJavaIdentifierPart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? + ; + +// +// Additional symbols not defined in the lexical specification +// + +AT : '@'; +ELLIPSIS : '...'; + +// +// Whitespace and comments +// + +WS : [ \t\r\n\u000C]+ -> skip + ; + +COMMENT + : '/*' .*? '*/' -> skip + ; + +LINE_COMMENT + : '//' ~[\r\n]* -> skip + ; diff --git a/tool/test/org/antlr/v4/xtest/Java.g4 b/tool/test/org/antlr/v4/xtest/Java.g4 new file mode 100644 index 000000000..e3e39f679 --- /dev/null +++ b/tool/test/org/antlr/v4/xtest/Java.g4 @@ -0,0 +1,1332 @@ +/* + [The "BSD licence"] + Copyright (c) 2007-2008 Terence Parr + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +/** A Java 1.5 grammar for ANTLR v3 derived from the spec + * + * This is a very close representation of the spec; the changes + * are comestic (remove left recursion) and also fixes (the spec + * isn't exactly perfect). I have run this on the 1.4.2 source + * and some nasty looking enums from 1.5, but have not really + * tested for 1.5 compatibility. + * + * I built this with: java -Xmx100M org.antlr.Tool java.g + * and got two errors that are ok (for now): + * java.g:691:9: Decision can match input such as + * "'0'..'9'{'E', 'e'}{'+', '-'}'0'..'9'{'D', 'F', 'd', 'f'}" + * using multiple alternatives: 3, 4 + * As a result, alternative(s) 4 were disabled for that input + * java.g:734:35: Decision can match input such as "{'$', 'A'..'Z', + * '_', 'a'..'z', '\u00C0'..'\u00D6', '\u00D8'..'\u00F6', + * '\u00F8'..'\u1FFF', '\u3040'..'\u318F', '\u3300'..'\u337F', + * '\u3400'..'\u3D2D', '\u4E00'..'\u9FFF', '\uF900'..'\uFAFF'}" + * using multiple alternatives: 1, 2 + * As a result, alternative(s) 2 were disabled for that input + * + * You can turn enum on/off as a keyword :) + * + * Version 1.0 -- initial release July 5, 2006 (requires 3.0b2 or higher) + * + * Primary author: Terence Parr, July 2006 + * + * Version 1.0.1 -- corrections by Koen Vanderkimpen & Marko van Dooren, + * October 25, 2006; + * fixed normalInterfaceDeclaration: now uses typeParameters instead + * of typeParameter (according to JLS, 3rd edition) + * fixed castExpression: no longer allows expression next to type + * (according to semantics in JLS, in contrast with syntax in JLS) + * + * Version 1.0.2 -- Terence Parr, Nov 27, 2006 + * java spec I built this from had some bizarre for-loop control. + * Looked weird and so I looked elsewhere...Yep, it's messed up. + * simplified. + * + * Version 1.0.3 -- Chris Hogue, Feb 26, 2007 + * Factored out an annotationName rule and used it in the annotation rule. + * Not sure why, but typeName wasn't recognizing references to inner + * annotations (e.g. @InterfaceName.InnerAnnotation()) + * Factored out the elementValue section of an annotation reference. Created + * elementValuePair and elementValuePairs rules, then used them in the + * annotation rule. Allows it to recognize annotation references with + * multiple, comma separated attributes. 
+ * Updated elementValueArrayInitializer so that it allows multiple elements. + * (It was only allowing 0 or 1 element). + * Updated localVariableDeclaration to allow annotations. Interestingly the JLS + * doesn't appear to indicate this is legal, but it does work as of at least + * JDK 1.5.0_06. + * Moved the Identifier portion of annotationTypeElementRest to annotationMethodRest. + * Because annotationConstantRest already references variableDeclarator which + * has the Identifier portion in it, the parser would fail on constants in + * annotation definitions because it expected two identifiers. + * Added optional trailing ';' to the alternatives in annotationTypeElementRest. + * Wouldn't handle an inner interface that has a trailing ';'. + * Swapped the expression and type rule reference order in castExpression to + * make it check for genericized casts first. It was failing to recognize a + * statement like "Class TYPE = (Class)...;" because it was seeing + * 'Class'. + * Changed createdName to use typeArguments instead of nonWildcardTypeArguments. + * Changed the 'this' alternative in primary to allow 'identifierSuffix' rather than + * just 'arguments'. The case it couldn't handle was a call to an explicit + * generic method invocation (e.g. this.doSomething()). Using identifierSuffix + * may be overly aggressive--perhaps should create a more constrained thisSuffix rule? + * + * Version 1.0.4 -- Hiroaki Nakamura, May 3, 2007 + * + * Fixed formalParameterDecls, localVariableDeclaration, forInit, + * and forVarControl to use variableModifier* not 'final'? (annotation)? + * + * Version 1.0.5 -- Terence, June 21, 2007 + * --a[i].foo didn't work. Fixed unaryExpression + * + * Version 1.0.6 -- John Ridgway, March 17, 2008 + * Made "assert" a switchable keyword like "enum". + * Fixed compilationUnit to disallow "annotation importDeclaration ...". + * Changed "Identifier ('.' Identifier)*" to "qualifiedName" in more + * places. + * Changed modifier* and/or variableModifier* to classOrInterfaceModifiers, + * modifiers or variableModifiers, as appropriate. + * Renamed "bound" to "typeBound" to better match language in the JLS. + * Added "memberDeclaration" which rewrites to methodDeclaration or + * fieldDeclaration and pulled type into memberDeclaration. So we parse + * type and then move on to decide whether we're dealing with a field + * or a method. + * Modified "constructorDeclaration" to use "constructorBody" instead of + * "methodBody". constructorBody starts with explicitConstructorInvocation, + * then goes on to blockStatement*. Pulling explicitConstructorInvocation + * out of expressions allowed me to simplify "primary". + * Changed variableDeclarator to simplify it. + * Changed type to use classOrInterfaceType, thus simplifying it; of course + * I then had to add classOrInterfaceType, but it is used in several + * places. + * Fixed annotations, old version allowed "@X(y,z)", which is illegal. + * Added optional comma to end of "elementValueArrayInitializer"; as per JLS. + * Changed annotationTypeElementRest to use normalClassDeclaration and + * normalInterfaceDeclaration rather than classDeclaration and + * interfaceDeclaration, thus getting rid of a couple of grammar ambiguities. + * Split localVariableDeclaration into localVariableDeclarationStatement + * (includes the terminating semi-colon) and localVariableDeclaration. + * This allowed me to use localVariableDeclaration in "forInit" clauses, + * simplifying them. + * Changed switchBlockStatementGroup to use multiple labels. 
This adds an + * ambiguity, but if one uses appropriately greedy parsing it yields the + * parse that is closest to the meaning of the switch statement. + * Renamed "forVarControl" to "enhancedForControl" -- JLS language. + * Added semantic predicates to test for shift operations rather than other + * things. Thus, for instance, the string "< <" will never be treated + * as a left-shift operator. + * In "creator" we rule out "nonWildcardTypeArguments" on arrayCreation, + * which are illegal. + * Moved "nonWildcardTypeArguments into innerCreator. + * Removed 'super' superSuffix from explicitGenericInvocation, since that + * is only used in explicitConstructorInvocation at the beginning of a + * constructorBody. (This is part of the simplification of expressions + * mentioned earlier.) + * Simplified primary (got rid of those things that are only used in + * explicitConstructorInvocation). + * Lexer -- removed "Exponent?" from FloatingPointLiteral choice 4, since it + * led to an ambiguity. + * + * This grammar successfully parses every .java file in the JDK 1.5 source + * tree (excluding those whose file names include '-', which are not + * valid Java compilation units). + * + * June 26, 2008 + * + * conditionalExpression had wrong precedence x?y:z. + * + * Known remaining problems: + * "Letter" and "JavaIDDigit" are wrong. The actual specification of + * "Letter" should be "a character for which the method + * Character.isJavaIdentifierStart(int) returns true." A "Java + * letter-or-digit is a character for which the method + * Character.isJavaIdentifierPart(int) returns true." + */ +grammar Java; + +// starting point for parsing a java file +/* The annotations are separated out to make parsing faster, but must be associated with + a packageDeclaration or a typeDeclaration (and not an empty one). */ +compilationUnit + : annotations + ( packageDeclaration importDeclaration* typeDeclaration* + | classOrInterfaceDeclaration typeDeclaration* + ) + EOF + | packageDeclaration? importDeclaration* typeDeclaration* + EOF + ; + +packageDeclaration + : 'package' qualifiedName ';' + ; + +importDeclaration + : 'import' 'static'? qualifiedName ('.' '*')? ';' + ; + +typeDeclaration + : classOrInterfaceDeclaration + | ';' + ; + +classOrInterfaceDeclaration + : classOrInterfaceModifiers (classDeclaration | interfaceDeclaration) + ; + +classOrInterfaceModifiers + : classOrInterfaceModifier* + ; + +classOrInterfaceModifier + : annotation // class or interface + | ( 'public' // class or interface + | 'protected' // class or interface + | 'private' // class or interface + | 'abstract' // class or interface + | 'static' // class or interface + | 'final' // class only -- does not apply to interfaces + | 'strictfp' // class or interface + ) + ; + +modifiers + : modifier* + ; + +classDeclaration + : normalClassDeclaration + | enumDeclaration + ; + +normalClassDeclaration + : 'class' Identifier typeParameters? + ('extends' type)? + ('implements' typeList)? + classBody + ; + +typeParameters + : '<' typeParameter (',' typeParameter)* '>' + ; + +typeParameter + : Identifier ('extends' typeBound)? + ; + +typeBound + : type ('&' type)* + ; + +enumDeclaration + : ENUM Identifier ('implements' typeList)? enumBody + ; + +enumBody + : '{' enumConstants? ','? enumBodyDeclarations? '}' + ; + +enumConstants + : enumConstant (',' enumConstant)* + ; + +enumConstant + : annotations? Identifier arguments? classBody? 
+ ; + +enumBodyDeclarations + : ';' (classBodyDeclaration)* + ; + +interfaceDeclaration + : normalInterfaceDeclaration + | annotationTypeDeclaration + ; + +normalInterfaceDeclaration + : 'interface' Identifier typeParameters? ('extends' typeList)? interfaceBody + ; + +typeList + : type (',' type)* + ; + +classBody + : '{' classBodyDeclaration* '}' + ; + +interfaceBody + : '{' interfaceBodyDeclaration* '}' + ; + +classBodyDeclaration + : ';' + | 'static'? block + | modifiers memberDecl + ; + +memberDecl + : genericMethodOrConstructorDecl + | memberDeclaration + | 'void' Identifier voidMethodDeclaratorRest + | Identifier constructorDeclaratorRest + | interfaceDeclaration + | classDeclaration + ; + +memberDeclaration + : type (methodDeclaration | fieldDeclaration) + ; + +genericMethodOrConstructorDecl + : typeParameters genericMethodOrConstructorRest + ; + +genericMethodOrConstructorRest + : (type | 'void') Identifier methodDeclaratorRest + | Identifier constructorDeclaratorRest + ; + +methodDeclaration + : Identifier methodDeclaratorRest + ; + +fieldDeclaration + : variableDeclarators ';' + ; + +interfaceBodyDeclaration + : modifiers interfaceMemberDecl + | ';' + ; + +interfaceMemberDecl + : interfaceMethodOrFieldDecl + | interfaceGenericMethodDecl + | 'void' Identifier voidInterfaceMethodDeclaratorRest + | interfaceDeclaration + | classDeclaration + ; + +interfaceMethodOrFieldDecl + : type Identifier interfaceMethodOrFieldRest + ; + +interfaceMethodOrFieldRest + : constantDeclaratorsRest ';' + | interfaceMethodDeclaratorRest + ; + +methodDeclaratorRest + : formalParameters ('[' ']')* + ('throws' qualifiedNameList)? + ( methodBody + | ';' + ) + ; + +voidMethodDeclaratorRest + : formalParameters ('throws' qualifiedNameList)? + ( methodBody + | ';' + ) + ; + +interfaceMethodDeclaratorRest + : formalParameters ('[' ']')* ('throws' qualifiedNameList)? ';' + ; + +interfaceGenericMethodDecl + : typeParameters (type | 'void') Identifier + interfaceMethodDeclaratorRest + ; + +voidInterfaceMethodDeclaratorRest + : formalParameters ('throws' qualifiedNameList)? ';' + ; + +constructorDeclaratorRest + : formalParameters ('throws' qualifiedNameList)? constructorBody + ; + +constantDeclarator + : Identifier constantDeclaratorRest + ; + +variableDeclarators + : variableDeclarator (',' variableDeclarator)* + ; + +variableDeclarator + : variableDeclaratorId ('=' variableInitializer)? + ; + +constantDeclaratorsRest + : constantDeclaratorRest (',' constantDeclarator)* + ; + +constantDeclaratorRest + : ('[' ']')* '=' variableInitializer + ; + +variableDeclaratorId + : Identifier ('[' ']')* + ; + +variableInitializer + : arrayInitializer + | expression + ; + +arrayInitializer + : '{' (variableInitializer (',' variableInitializer)* (',')? )? '}' + ; + +modifier + : annotation + | ( 'public' + | 'protected' + | 'private' + | 'static' + | 'abstract' + | 'final' + | 'native' + | 'synchronized' + | 'transient' + | 'volatile' + | 'strictfp' + ) + ; + +packageOrTypeName + : qualifiedName + ; + +enumConstantName + : Identifier + ; + +typeName + : qualifiedName + ; + +type + : classOrInterfaceType ('[' ']')* + | primitiveType ('[' ']')* + ; + +classOrInterfaceType + : Identifier typeArguments? ('.' Identifier typeArguments? )* + ; + +primitiveType + : 'boolean' + | 'char' + | 'byte' + | 'short' + | 'int' + | 'long' + | 'float' + | 'double' + ; + +variableModifier + : 'final' + | annotation + ; + +typeArguments + : '<' typeArgument (',' typeArgument)* '>' + ; + +typeArgument + : type + | '?' (('extends' | 'super') type)? 
+ ; + +qualifiedNameList + : qualifiedName (',' qualifiedName)* + ; + +formalParameters + : '(' formalParameterDecls? ')' + ; + +formalParameterDecls + : variableModifiers type formalParameterDeclsRest + ; + +formalParameterDeclsRest + : variableDeclaratorId (',' formalParameterDecls)? + | '...' variableDeclaratorId + ; + +methodBody + : block + ; + +constructorBody + : block + ; + +qualifiedName + : Identifier ('.' Identifier)* + ; + +literal + : IntegerLiteral + | FloatingPointLiteral + | CharacterLiteral + | StringLiteral + | BooleanLiteral + | 'null' + ; + +// ANNOTATIONS + +annotations + : annotation+ + ; + +annotation + : '@' annotationName ( '(' ( elementValuePairs | elementValue )? ')' )? + ; + +annotationName + : Identifier ('.' Identifier)* + ; + +elementValuePairs + : elementValuePair (',' elementValuePair)* + ; + +elementValuePair + : Identifier '=' elementValue + ; + +elementValue + : conditionalExpression + | annotation + | elementValueArrayInitializer + ; + +elementValueArrayInitializer + : '{' (elementValue (',' elementValue)*)? (',')? '}' + ; + +annotationTypeDeclaration + : '@' 'interface' Identifier annotationTypeBody + ; + +annotationTypeBody + : '{' (annotationTypeElementDeclaration)* '}' + ; + +annotationTypeElementDeclaration + : modifiers annotationTypeElementRest + | ';' // this is not allowed by the grammar, but apparently allowed by the actual compiler + ; + +annotationTypeElementRest + : type annotationMethodOrConstantRest ';' + | normalClassDeclaration ';'? + | normalInterfaceDeclaration ';'? + | enumDeclaration ';'? + | annotationTypeDeclaration ';'? + ; + +annotationMethodOrConstantRest + : annotationMethodRest + | annotationConstantRest + ; + +annotationMethodRest + : Identifier '(' ')' defaultValue? + ; + +annotationConstantRest + : variableDeclarators + ; + +defaultValue + : 'default' elementValue + ; + +// STATEMENTS / BLOCKS + +block + : '{' blockStatement* '}' + ; + +blockStatement + : localVariableDeclarationStatement + | classOrInterfaceDeclaration + | statement + ; + +localVariableDeclarationStatement + : localVariableDeclaration ';' + ; + +localVariableDeclaration + : variableModifiers type variableDeclarators + ; + +variableModifiers + : variableModifier* + ; + +statement + : block + | ASSERT expression (':' expression)? ';' + | 'if' parExpression statement ('else' statement)? + | 'for' '(' forControl ')' statement + | 'while' parExpression statement + | 'do' statement 'while' parExpression ';' + | 'try' block (catches finallyBlock? | finallyBlock) + | 'try' resourceSpecification block catches? finallyBlock? + | 'switch' parExpression '{' switchBlockStatementGroups '}' + | 'synchronized' parExpression block + | 'return' expression? ';' + | 'throw' expression ';' + | 'break' Identifier? ';' + | 'continue' Identifier? ';' + | ';' + | statementExpression ';' + | Identifier ':' statement + ; + +catches + : catchClause+ + ; + +catchClause + : 'catch' '(' variableModifiers catchType Identifier ')' block + ; + +catchType + : qualifiedName ('|' qualifiedName)* + ; + +finallyBlock + : 'finally' block + ; + +resourceSpecification + : '(' resources ';'? 
')' + ; + +resources + : resource (';' resource)* + ; + +resource + : variableModifiers classOrInterfaceType variableDeclaratorId '=' expression + ; + +formalParameter + : variableModifiers type variableDeclaratorId + ; + +switchBlockStatementGroups + : (switchBlockStatementGroup)* + ; + +/* The change here (switchLabel -> switchLabel+) technically makes this grammar + ambiguous; but with appropriately greedy parsing it yields the most + appropriate AST, one in which each group, except possibly the last one, has + labels and statements. */ +switchBlockStatementGroup + : switchLabel+ blockStatement* + ; + +switchLabel + : 'case' constantExpression ':' + | 'case' enumConstantName ':' + | 'default' ':' + ; + +forControl + : enhancedForControl + | forInit? ';' expression? ';' forUpdate? + ; + +forInit + : localVariableDeclaration + | expressionList + ; + +enhancedForControl + : variableModifiers type Identifier ':' expression + ; + +forUpdate + : expressionList + ; + +// EXPRESSIONS + +parExpression + : '(' expression ')' + ; + +expressionList + : expression (',' expression)* + ; + +statementExpression + : expression + ; + +constantExpression + : expression + ; + +expression + : conditionalExpression (assignmentOperator expression)? + ; + +assignmentOperator + : '=' + | '+=' + | '-=' + | '*=' + | '/=' + | '&=' + | '|=' + | '^=' + | '%=' + | '<<=' + | '>>=' + | '>>>=' + ; + +conditionalExpression + : conditionalOrExpression ( '?' expression ':' conditionalExpression )? + ; + +conditionalOrExpression + : conditionalAndExpression ( '||' conditionalAndExpression )* + ; + +conditionalAndExpression + : inclusiveOrExpression ( '&&' inclusiveOrExpression )* + ; + +inclusiveOrExpression + : exclusiveOrExpression ( '|' exclusiveOrExpression )* + ; + +exclusiveOrExpression + : andExpression ( '^' andExpression )* + ; + +andExpression + : equalityExpression ( '&' equalityExpression )* + ; + +equalityExpression + : instanceOfExpression ( ('==' | '!=') instanceOfExpression )* + ; + +instanceOfExpression + : relationalExpression ('instanceof' type)? + ; + +relationalExpression + : shiftExpression ( relationalOp shiftExpression )* + ; + +relationalOp + : '<=' + | '>=' + | '<' + | '>' + ; + +shiftExpression + : additiveExpression ( shiftOp additiveExpression )* + ; + +shiftOp + : t1='<' t2='<' +// { $t1.getLine() == $t2.getLine() && +// $t1.getCharPositionInLine() + 1 == $t2.getCharPositionInLine() }? + | t1='>' t2='>' t3='>' +// { $t1.getLine() == $t2.getLine() && +// $t1.getCharPositionInLine() + 1 == $t2.getCharPositionInLine() && +// $t2.getLine() == $t3.getLine() && +// $t2.getCharPositionInLine() + 1 == $t3.getCharPositionInLine() }? + | t1='>' t2='>' +// { $t1.getLine() == $t2.getLine() && +// $t1.getCharPositionInLine() + 1 == $t2.getCharPositionInLine() }? + ; + + +additiveExpression + : multiplicativeExpression ( ('+' | '-') multiplicativeExpression )* + ; + +multiplicativeExpression + : unaryExpression ( ( '*' | '/' | '%' ) unaryExpression )* + ; + +unaryExpression + : '+' unaryExpression + | '-' unaryExpression + | '++' unaryExpression + | '--' unaryExpression + | unaryExpressionNotPlusMinus + ; + +unaryExpressionNotPlusMinus + : '~' unaryExpression + | '!' unaryExpression + | castExpression + | primary selector* ('++'|'--')? + ; + +castExpression + : '(' primitiveType ')' unaryExpression + | '(' (type | expression) ')' unaryExpressionNotPlusMinus + ; + +primary + : parExpression + | 'this' arguments? 
+ | 'super' superSuffix + | literal + | 'new' creator + | nonWildcardTypeArguments (explicitGenericInvocationSuffix | 'this' arguments) + | Identifier ('.' Identifier)* identifierSuffix? + | primitiveType ('[' ']')* '.' 'class' + | 'void' '.' 'class' + ; + +identifierSuffix + : ('[' ']')+ '.' 'class' + | '[' expression ']' + | arguments + | '.' 'class' + | '.' explicitGenericInvocation + | '.' 'this' + | '.' 'super' arguments + | '.' 'new' nonWildcardTypeArguments? innerCreator + ; + +creator + : nonWildcardTypeArguments createdName classCreatorRest + | createdName (arrayCreatorRest | classCreatorRest) + ; + +createdName + : Identifier typeArgumentsOrDiamond? ('.' Identifier typeArgumentsOrDiamond?)* + | primitiveType + ; + +innerCreator + : Identifier nonWildcardTypeArgumentsOrDiamond? classCreatorRest + ; + +arrayCreatorRest + : '[' + ( ']' ('[' ']')* arrayInitializer + | expression ']' ('[' expression ']')* ('[' ']')* + ) + ; + +classCreatorRest + : arguments classBody? + ; + +explicitGenericInvocation + : nonWildcardTypeArguments explicitGenericInvocationSuffix + ; + +nonWildcardTypeArguments + : '<' typeList '>' + ; + +typeArgumentsOrDiamond + : '<' '>' + | typeArguments + ; + +nonWildcardTypeArgumentsOrDiamond + : '<' '>' + | nonWildcardTypeArguments + ; + +selector + : '.' Identifier arguments? + | '.' explicitGenericInvocation + | '.' 'this' + | '.' 'super' superSuffix + | '.' 'new' nonWildcardTypeArguments? innerCreator + | '[' expression ']' + ; + +superSuffix + : arguments + | '.' Identifier arguments? + ; + +explicitGenericInvocationSuffix + : 'super' superSuffix + | Identifier arguments + ; + +arguments + : '(' expressionList? ')' + ; + +// LEXER + +// §3.9 Keywords + +ABSTRACT : 'abstract'; +ASSERT : 'assert'; +BOOLEAN : 'boolean'; +BREAK : 'break'; +BYTE : 'byte'; +CASE : 'case'; +CATCH : 'catch'; +CHAR : 'char'; +CLASS : 'class'; +CONST : 'const'; +CONTINUE : 'continue'; +DEFAULT : 'default'; +DO : 'do'; +DOUBLE : 'double'; +ELSE : 'else'; +ENUM : 'enum'; +EXTENDS : 'extends'; +FINAL : 'final'; +FINALLY : 'finally'; +FLOAT : 'float'; +FOR : 'for'; +IF : 'if'; +GOTO : 'goto'; +IMPLEMENTS : 'implements'; +IMPORT : 'import'; +INSTANCEOF : 'instanceof'; +INT : 'int'; +INTERFACE : 'interface'; +LONG : 'long'; +NATIVE : 'native'; +NEW : 'new'; +PACKAGE : 'package'; +PRIVATE : 'private'; +PROTECTED : 'protected'; +PUBLIC : 'public'; +RETURN : 'return'; +SHORT : 'short'; +STATIC : 'static'; +STRICTFP : 'strictfp'; +SUPER : 'super'; +SWITCH : 'switch'; +SYNCHRONIZED : 'synchronized'; +THIS : 'this'; +THROW : 'throw'; +THROWS : 'throws'; +TRANSIENT : 'transient'; +TRY : 'try'; +VOID : 'void'; +VOLATILE : 'volatile'; +WHILE : 'while'; + +// §3.10.1 Integer Literals + +IntegerLiteral + : DecimalIntegerLiteral + | HexIntegerLiteral + | OctalIntegerLiteral + | BinaryIntegerLiteral + ; + +fragment +DecimalIntegerLiteral + : DecimalNumeral IntegerTypeSuffix? + ; + +fragment +HexIntegerLiteral + : HexNumeral IntegerTypeSuffix? + ; + +fragment +OctalIntegerLiteral + : OctalNumeral IntegerTypeSuffix? + ; + +fragment +BinaryIntegerLiteral + : BinaryNumeral IntegerTypeSuffix? + ; + +fragment +IntegerTypeSuffix + : [lL] + ; + +fragment +DecimalNumeral + : '0' + | NonZeroDigit (Digits? | Underscores Digits) + ; + +fragment +Digits + : Digit (DigitsAndUnderscores? Digit)? 
+ ; + +fragment +Digit + : '0' + | NonZeroDigit + ; + +fragment +NonZeroDigit + : [1-9] + ; + +fragment +DigitsAndUnderscores + : DigitOrUnderscore+ + ; + +fragment +DigitOrUnderscore + : Digit + | '_' + ; + +fragment +Underscores + : '_'+ + ; + +fragment +HexNumeral + : '0' [xX] HexDigits + ; + +fragment +HexDigits + : HexDigit (HexDigitsAndUnderscores? HexDigit)? + ; + +fragment +HexDigit + : [0-9a-fA-F] + ; + +fragment +HexDigitsAndUnderscores + : HexDigitOrUnderscore+ + ; + +fragment +HexDigitOrUnderscore + : HexDigit + | '_' + ; + +fragment +OctalNumeral + : '0' Underscores? OctalDigits + ; + +fragment +OctalDigits + : OctalDigit (OctalDigitsAndUnderscores? OctalDigit)? + ; + +fragment +OctalDigit + : [0-7] + ; + +fragment +OctalDigitsAndUnderscores + : OctalDigitOrUnderscore+ + ; + +fragment +OctalDigitOrUnderscore + : OctalDigit + | '_' + ; + +fragment +BinaryNumeral + : '0' [bB] BinaryDigits + ; + +fragment +BinaryDigits + : BinaryDigit (BinaryDigitsAndUnderscores? BinaryDigit)? + ; + +fragment +BinaryDigit + : [01] + ; + +fragment +BinaryDigitsAndUnderscores + : BinaryDigitOrUnderscore+ + ; + +fragment +BinaryDigitOrUnderscore + : BinaryDigit + | '_' + ; + +// §3.10.2 Floating-Point Literals + +FloatingPointLiteral + : DecimalFloatingPointLiteral + | HexadecimalFloatingPointLiteral + ; + +fragment +DecimalFloatingPointLiteral + : Digits '.' Digits? ExponentPart? FloatTypeSuffix? + | '.' Digits ExponentPart? FloatTypeSuffix? + | Digits ExponentPart FloatTypeSuffix? + | Digits FloatTypeSuffix + ; + +fragment +ExponentPart + : ExponentIndicator SignedInteger + ; + +fragment +ExponentIndicator + : [eE] + ; + +fragment +SignedInteger + : Sign? Digits + ; + +fragment +Sign + : [+-] + ; + +fragment +FloatTypeSuffix + : [fFdD] + ; + +fragment +HexadecimalFloatingPointLiteral + : HexSignificand BinaryExponent FloatTypeSuffix? + ; + +fragment +HexSignificand + : HexNumeral '.'? + | '0' [xX] HexDigits? '.' HexDigits + ; + +fragment +BinaryExponent + : BinaryExponentIndicator SignedInteger + ; + +fragment +BinaryExponentIndicator + : [pP] + ; + +// §3.10.3 Boolean Literals + +BooleanLiteral + : 'true' + | 'false' + ; + +// §3.10.4 Character Literals + +CharacterLiteral + : '\'' SingleCharacter '\'' + | '\'' EscapeSequence '\'' + ; + +fragment +SingleCharacter + : ~['\\] + ; + +// §3.10.5 String Literals + +StringLiteral + : '"' StringCharacters? 
'"' + ; + +fragment +StringCharacters + : StringCharacter+ + ; + +fragment +StringCharacter + : ~["\\] + | EscapeSequence + ; + +// §3.10.6 Escape Sequences for Character and String Literals + +fragment +EscapeSequence + : '\\' [btnfr"'\\] + | OctalEscape + ; + +fragment +OctalEscape + : '\\' OctalDigit + | '\\' OctalDigit OctalDigit + | '\\' ZeroToThree OctalDigit OctalDigit + ; + +fragment +ZeroToThree + : [0-3] + ; + +// §3.10.7 The Null Literal + +NullLiteral + : 'null' + ; + +// §3.11 Separators + +LPAREN : '('; +RPAREN : ')'; +LBRACE : '{'; +RBRACE : '}'; +LBRACK : '['; +RBRACK : ']'; +SEMI : ';'; +COMMA : ','; +DOT : '.'; + +// §3.12 Operators + +ASSIGN : '='; +GT : '>'; +LT : '<'; +BANG : '!'; +TILDE : '~'; +QUESTION : '?'; +COLON : ':'; +EQUAL : '=='; +LE : '<='; +GE : '>='; +NOTEQUAL : '!='; +AND : '&&'; +OR : '||'; +INC : '++'; +DEC : '--'; +ADD : '+'; +SUB : '-'; +MUL : '*'; +DIV : '/'; +BITAND : '&'; +BITOR : '|'; +CARET : '^'; +MOD : '%'; + +ADD_ASSIGN : '+='; +SUB_ASSIGN : '-='; +MUL_ASSIGN : '*='; +DIV_ASSIGN : '/='; +AND_ASSIGN : '&='; +OR_ASSIGN : '|='; +XOR_ASSIGN : '^='; +MOD_ASSIGN : '%='; +LSHIFT_ASSIGN : '<<='; +RSHIFT_ASSIGN : '>>='; +URSHIFT_ASSIGN : '>>>='; + +// §3.8 Identifiers (must appear after all keywords in the grammar) + +Identifier + : JavaLetter JavaLetterOrDigit* + ; + +fragment +JavaLetter + : [a-zA-Z$_] // these are the "java letters" below 0xFF + | // covers all characters above 0xFF which are not a surrogate + ~[\u0000-\u00FF\uD800-\uDBFF] + {Character.isJavaIdentifierStart(_input.LA(-1))}? + | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF + [\uD800-\uDBFF] [\uDC00-\uDFFF] + {Character.isJavaIdentifierStart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? + ; + +fragment +JavaLetterOrDigit + : [a-zA-Z0-9$_] // these are the "java letters or digits" below 0xFF + | // covers all characters above 0xFF which are not a surrogate + ~[\u0000-\u00FF\uD800-\uDBFF] + {Character.isJavaIdentifierPart(_input.LA(-1))}? + | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF + [\uD800-\uDBFF] [\uDC00-\uDFFF] + {Character.isJavaIdentifierPart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? + ; + +// +// Additional symbols not defined in the lexical specification +// + +AT : '@'; +ELLIPSIS : '...'; + +// +// Whitespace and comments +// + +WS : [ \t\r\n\u000C]+ -> skip + ; + +COMMENT + : '/*' .*? '*/' -> skip + ; + +LINE_COMMENT + : '//' ~[\r\n]* -> skip + ; diff --git a/tool/test/org/antlr/v4/test/JavaUnicodeInputStream.java b/tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java similarity index 99% rename from tool/test/org/antlr/v4/test/JavaUnicodeInputStream.java rename to tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java index d9c22dde3..d28e615d6 100644 --- a/tool/test/org/antlr/v4/test/JavaUnicodeInputStream.java +++ b/tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.misc.IntegerList; diff --git a/tool/test/org/antlr/v4/test/ParserInterpreterForTesting.java b/tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java similarity index 99% rename from tool/test/org/antlr/v4/test/ParserInterpreterForTesting.java rename to tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java index c51dc4ba8..fcf0d2e20 100644 --- a/tool/test/org/antlr/v4/test/ParserInterpreterForTesting.java +++ b/tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.Tool; import org.antlr.v4.runtime.Parser; diff --git a/tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 b/tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 new file mode 100644 index 000000000..4d52dfc29 --- /dev/null +++ b/tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 @@ -0,0 +1,141 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +lexer grammar PositionAdjustingLexer; + +@members { + @Override + public Token nextToken() { + if (!(_interp instanceof PositionAdjustingLexerATNSimulator)) { + _interp = new PositionAdjustingLexerATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache); + } + + return super.nextToken(); + } + + @Override + public Token emit() { + switch (_type) { + case TOKENS: + handleAcceptPositionForKeyword("tokens"); + break; + + case LABEL: + handleAcceptPositionForIdentifier(); + break; + + default: + break; + } + + return super.emit(); + } + + private boolean handleAcceptPositionForIdentifier() { + String tokenText = getText(); + int identifierLength = 0; + while (identifierLength < tokenText.length() && isIdentifierChar(tokenText.charAt(identifierLength))) { + identifierLength++; + } + + if (getInputStream().index() > _tokenStartCharIndex + identifierLength) { + int offset = identifierLength - 1; + getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset); + return true; + } + + return false; + } + + private boolean handleAcceptPositionForKeyword(String keyword) { + if (getInputStream().index() > _tokenStartCharIndex + keyword.length()) { + int offset = keyword.length() - 1; + getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset); + return true; + } + + return false; + } + + @Override + public PositionAdjustingLexerATNSimulator getInterpreter() { + return (PositionAdjustingLexerATNSimulator)super.getInterpreter(); + } + + private static boolean isIdentifierChar(char c) { + return Character.isLetterOrDigit(c) || c == '_'; + } + + protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimulator { + + public PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn, + DFA[] decisionToDFA, + PredictionContextCache sharedContextCache) + { + super(recog, atn, decisionToDFA, sharedContextCache); + } + + protected void resetAcceptPosition(CharStream input, int index, int line, int charPositionInLine) { + input.seek(index); + this.line = line; + this.charPositionInLine = charPositionInLine; + consume(input); + } + + } +} + +ASSIGN : '=' ; +PLUS_ASSIGN : '+=' ; +LCURLY: '{'; + +// 'tokens' followed by '{' +TOKENS : 'tokens' IGNORED '{'; + +// IDENTIFIER followed by '+=' or '=' +LABEL + : IDENTIFIER IGNORED '+'? '=' + ; + +IDENTIFIER + : [a-zA-Z_] [a-zA-Z0-9_]* + ; + +fragment +IGNORED + : [ \t\r\n]* + ; + +NEWLINE + : [\r\n]+ -> skip + ; + +WS + : [ \t]+ -> skip + ; diff --git a/tool/test/org/antlr/v4/xtest/Psl.g4 b/tool/test/org/antlr/v4/xtest/Psl.g4 new file mode 100644 index 000000000..acc64488d --- /dev/null +++ b/tool/test/org/antlr/v4/xtest/Psl.g4 @@ -0,0 +1,348 @@ +grammar Psl; + +@parser::members +{ + public void printPosition(String name, Token tok) + { + System.out.printf("%s: pos %d, len %d%n", + name, tok.getCharPositionInLine(), tok.getText().length()); + } + + + /** + * Checks whether a set of digit groups and commas construct + * a valid command-number. + * + * @param digits + * The groups of digits, each group in a separate item. + * @param commas + * The commas found separating the digit groups. + * + * There should be one more digit group than commas. + * There should be no internal white space. 
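+ *
+ * For example (illustrative inputs only): "12,345" satisfies the
+ * check below, because each comma sits in the column immediately
+ * after the preceding digit group and the next group starts in the
+ * column immediately after the comma; "12, 345" fails it, because
+ * the space breaks that column adjacency.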
+ *
+ * @returns true (valid), false (invalid)
+ */
+
+ public boolean isValidCommaNumber(List<Token> digits, List<Token> commas)
+ {
+ Token[] aDigits = new Token[0];
+ Token[] aCommas = new Token[0];
+ int j;
+
+ aDigits = digits.toArray(aDigits);
+ aCommas = commas.toArray(aCommas);
+ if (aDigits.length != aCommas.length + 1)
+ {
+ return false;
+ }
+ for (j = 0; j < aCommas.length; ++j)
+ {
+ int p1, p2, p3;
+ p1 = aDigits[j].getCharPositionInLine() +
+ aDigits[j].getText().length();
+ p2 = aCommas[j].getCharPositionInLine();
+ p3 = aDigits[j + 1].getCharPositionInLine();
+ if (p1 != p2 || (p2 + 1) != p3)
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+
+ /**
+ * Checks whether the pieces of a floating-point number
+ * construct a valid number.
+ *
+ * @param whole
+ * The whole part of the number. Can be null.
+ * @param period
+ * The decimal point.
+ * @param fraction
+ * The fraction part of the number. Can be null.
+ *
+ * At least one of the whole or fraction must be present.
+ * The decimal point is required.
+ *
+ * @returns true (valid), false (invalid)
+ */
+
+ public boolean isValidFloatingConstant(
+ Token whole,
+ Token period,
+ Token fraction
+ )
+ {
+ boolean foundDigits = false;
+ int column;
+
+ if (whole != null)
+ {
+ foundDigits = true;
+ column = whole.getCharPositionInLine() +
+ whole.getText().length();
+ if (column != period.getCharPositionInLine())
+ {
+ return false;
+ }
+ }
+ if (fraction != null)
+ {
+ foundDigits = true;
+ column = period.getCharPositionInLine() + 1;
+ if (column != fraction.getCharPositionInLine())
+ {
+ return false;
+ }
+ }
+ return foundDigits;
+ }
+}
+
+translation_unit
+ : numeric_range
+ EOF
+ ;
+
+pattern
+ : numeric_range
+ ;
+
+numeric_range
+ : EURO_NUMBER
+ PAREN_LEFT
+ numeric_endpoint
+ TILDE
+ numeric_endpoint
+ PAREN_RIGHT
+ | NUMBER
+ PAREN_LEFT
+ numeric_endpoint
+ TILDE
+ numeric_endpoint
+ PAREN_RIGHT
+ ;
+
+numeric_endpoint
+ : ( PLUS | MINUS )? integer_constant
+ | ( PLUS | MINUS )? floating_constant
+ | ( PLUS | MINUS )? comma_number
+ ;
+
+ /* Floating-point numbers and comma numbers are valid only
+ * as numeric endpoints in number() or euro_number(). Otherwise,
+ * the pieces should be parsed as separate lexical tokens, such as
+ *
+ * integer_constant '.' integer_constant
+ *
+ * Because of parser lookahead and the subtle interactions between
+ * the parser and the lexer, changing lexical modes from the parser
+ * is not safe. The code below checks the constraints for floating
+ * numbers, forbidding internal white space.
+ */
+
+floating_constant
+ : comma_number PERIOD fraction=DIGIT_SEQUENCE?
+ {
+ isValidFloatingConstant($comma_number.stop, $PERIOD, $fraction)
+ }?
+
+ /*| whole=DIGIT_SEQUENCE PERIOD fraction=DIGIT_SEQUENCE?
+ {
+ isValidFloatingConstant($whole, $PERIOD, $fraction)
+ }?/* */
+
+ | PERIOD fraction=DIGIT_SEQUENCE
+ {
+ isValidFloatingConstant(null, $PERIOD, $fraction)
+ }?
+ ;
+
+comma_number
+ : digits+=DIGIT_SEQUENCE ( commas+=COMMA digits+=DIGIT_SEQUENCE )+
+ {
+ isValidCommaNumber($digits, $commas)
+ }?
+ ;
+
+term_expression
+ : term
+ | RETURN
+ (
+ PAREN_LEFT
+ ( integer_constant | ALL )
+ PAREN_RIGHT
+ )?
+ term
+ ;
+
+term
+ : pattern
+ | PAREN_LEFT term_expression PAREN_RIGHT
+ ;
+
+integer_constant
+ : DIGIT_SEQUENCE
+ | INTEGER_CONSTANT
+ | BINARY_CONSTANT
+ | DECIMAL_CONSTANT
+ | HEXADECIMAL_CONSTANT
+ | OCTAL_CONSTANT
+ ;
+
+// LEXER
+
+/* Letter fragments
+ */
+
+fragment A: [Aa] ;
+fragment B: [Bb] ;
+fragment C: [Cc] ;
+fragment D: [Dd] ;
+fragment E: [Ee] ;
+fragment F: [Ff] ;
+fragment G: [Gg] ;
+fragment H: [Hh] ;
+fragment I: [Ii] ;
+fragment J: [Jj] ;
+fragment K: [Kk] ;
+fragment L: [Ll] ;
+fragment M: [Mm] ;
+fragment N: [Nn] ;
+fragment O: [Oo] ;
+fragment P: [Pp] ;
+fragment Q: [Qq] ;
+fragment R: [Rr] ;
+fragment S: [Ss] ;
+fragment T: [Tt] ;
+fragment U: [Uu] ;
+fragment V: [Vv] ;
+fragment W: [Ww] ;
+fragment X: [Xx] ;
+fragment Y: [Yy] ;
+fragment Z: [Zz] ;
+
+
+WHITESPACE_IN_LINE
+ : [ \t]+
+ -> skip
+ ;
+
+NEWLINE
+ : '\r'? '\n'
+ -> skip
+ ;
+
+WHITESPACE_ALL
+ : [ \n\r\t]+
+ -> skip
+ ;
+
+
+ /* A sequence of decimal digits is useful on its own,
+ * to avoid the base-prefixes (0b, 0x, ...) that an
+ * INTEGER_CONSTANT would allow.
+ * Need to define before INTEGER_CONSTANT to make sure
+ * DIGIT_SEQUENCE is recognized before INTEGER_CONSTANT.
+ */
+
+DIGIT_SEQUENCE
+ : [0-9]+
+ ;
+
+INTEGER_CONSTANT
+ : BINARY_CONSTANT
+ | DECIMAL_CONSTANT
+ | HEXADECIMAL_CONSTANT
+ | OCTAL_CONSTANT
+ ;
+
+BINARY_CONSTANT
+ : '0' [Bb] [0-1]+
+ ;
+
+DECIMAL_CONSTANT
+ : ( '0' [Dd] )? [0-9]+
+ ;
+
+HEXADECIMAL_CONSTANT
+ : '0' [HhXx] [0-9a-fA-F]+
+ ;
+
+OCTAL_CONSTANT
+ : '0' [Oo] [0-7]+
+ ;
+
+/* keywords
+ */
+
+ALL
+ : A L L
+ ;
+
+EURO_NUMBER
+ : E U R O '_' N U M B E R
+ ;
+
+
+NUMBER
+ : N U M B E R
+ ;
+
+RETURN
+ : R E T U R N
+ ;
+
+IDENTIFIER
+ : [A-Za-z][A-Za-z0-9_]*
+ ;
+
+
+/* The single-character tokens.
+ */
+
+COMMA
+ : ','
+ ;
+
+MINUS
+ : '-'
+ ;
+
+PAREN_LEFT
+ : '('
+ ;
+
+PAREN_RIGHT
+ : ')'
+ ;
+
+PERIOD
+ : '.'
+ ;
+
+PLUS
+ : '+'
+ ;
+
+TILDE
+ : '~'
+ ;
+
+ /* This rule must be last (or nearly last) to avoid
+ * matching individual characters for other rules.
+ */
+
+ANY_CHAR_BUT_NEWLINE
+ : ~[\n\r]
+ ;
diff --git a/tool/test/org/antlr/v4/xtest/TestASTStructure.gunit b/tool/test/org/antlr/v4/xtest/TestASTStructure.gunit
new file mode 100644
index 000000000..9641b13d7
--- /dev/null
+++ b/tool/test/org/antlr/v4/xtest/TestASTStructure.gunit
@@ -0,0 +1,155 @@
+/** Test ANTLRParser's AST construction. Translate to junit tests with:
+ *
+ * $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit
+
+ NO LONGER using gunit!!!
+ + */ +gunit TestASTStructure; + +@header {package org.antlr.v4.test;} +options { + adaptor = org.antlr.v4.parse.GrammarASTAdaptor; + parser = org.antlr.v4.parse.ANTLRParser; + lexer = org.antlr.v4.parse.ANTLRLexer; +} + +grammarSpec: + "parser grammar P; a : A;" + -> (PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A))))) + + << + parser grammar P; + tokens { A; B='33'; } + @header {foo} + a : A; + >> + -> + (PARSER_GRAMMAR P + (tokens { A (= B '33')) + (@ header {foo}) + (RULES (RULE a (BLOCK (ALT A))))) + + << + parser grammar P; + @header {foo} + tokens { A; B='33'; } + a : A; + >> + -> + (PARSER_GRAMMAR P + (@ header {foo}) + (tokens { A (= B '33')) + (RULES (RULE a (BLOCK (ALT A))))) + + << + parser grammar P; + import A=B, C; + a : A; + >> + -> + (PARSER_GRAMMAR P + (import (= A B) C) + (RULES (RULE a (BLOCK (ALT A))))) + +delegateGrammars: + "import A;" -> (import A) + +rule: + "a : A;" -> + (RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c)))))) + "A : B+;" -> (RULE A (BLOCK (ALT (+ (BLOCK (ALT B)))))) + + << + public a[int i] returns [int y] + options {backtrack=true;} + @init {blort} + : ID ; + >> + -> + (RULE a + (RULEMODIFIERS public) + int i + (returns int y) + (OPTIONS (= backtrack true)) + (@ init {blort}) + (BLOCK (ALT ID))) + + << + a[int i] returns [int y] + @init {blort} + options {backtrack=true;} + : ID; + >> + -> + (RULE a int i + (returns int y) + (@ init {blort}) + (OPTIONS (= backtrack true)) + (BLOCK (ALT ID))) + + << + a : ID ; + catch[A b] {foo} + finally {bar} + >> + -> + (RULE a (BLOCK (ALT ID)) + (catch A b {foo}) (finally {bar})) + + << + a : ID ; + catch[A a] {foo} + catch[B b] {fu} + finally {bar} + >> + -> + (RULE a (BLOCK (ALT ID)) + (catch A a {foo}) (catch B b {fu}) (finally {bar})) + + << + a[int i] + locals [int a, float b] + : A + ; + >> + -> (RULE a int i (locals int a, float b) (BLOCK (ALT A))) + + << + a[int i] throws a.b.c + : A + ; + >> + -> (RULE a int i (throws a.b.c) (BLOCK (ALT A))) + +ebnf: + "(A|B)" -> (BLOCK (ALT A) (ALT B)) + "(A|B)?" -> (? (BLOCK (ALT A) (ALT B))) + "(A|B)*" -> (* (BLOCK (ALT A) (ALT B))) + "(A|B)+" -> (+ (BLOCK (ALT A) (ALT B))) + +element: + "~A" -> (~ (SET A)) + "b+" -> (+ (BLOCK (ALT b))) + "(b)+" -> (+ (BLOCK (ALT b))) + "b?" -> (? (BLOCK (ALT b))) + "(b)?" -> (? (BLOCK (ALT b))) + "(b)*" -> (* (BLOCK (ALT b))) + "b*" -> (* (BLOCK (ALT b))) + "'while'*" -> (* (BLOCK (ALT 'while'))) + "'a'+" -> (+ (BLOCK (ALT 'a'))) + "a[3]" -> (a 3) + "'a'..'z'+" -> (+ (BLOCK (ALT (.. 'a' 'z')))) + "x=ID" -> (= x ID) + "x=ID?" -> (? (BLOCK (ALT (= x ID)))) + "x=ID*" -> (* (BLOCK (ALT (= x ID)))) + "x=b" -> (= x b) + "x=(A|B)" -> (= x (BLOCK (ALT A) (ALT B))) + "x=~(A|B)" -> (= x (~ (SET A B))) + "x+=~(A|B)" -> (+= x (~ (SET A B))) + "x+=~(A|B)+"-> (+ (BLOCK (ALT (+= x (~ (SET A B)))))) + "x=b+" -> (+ (BLOCK (ALT (= x b)))) + "x+=ID*" -> (* (BLOCK (ALT (+= x ID)))) + "x+='int'*" -> (* (BLOCK (ALT (+= x 'int')))) + "x+=b+" -> (+ (BLOCK (ALT (+= x b)))) + "({blort} 'x')*" -> (* (BLOCK (ALT {blort} 'x'))) diff --git a/tool/test/org/antlr/v4/test/TestASTStructure.java b/tool/test/org/antlr/v4/xtest/TestASTStructure.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestASTStructure.java rename to tool/test/org/antlr/v4/xtest/TestASTStructure.java index 90662460d..6f0c15f62 100644 --- a/tool/test/org/antlr/v4/test/TestASTStructure.java +++ b/tool/test/org/antlr/v4/xtest/TestASTStructure.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.runtime.ANTLRStringStream; import org.antlr.runtime.CharStream; diff --git a/tool/test/org/antlr/v4/test/TestATNConstruction.java b/tool/test/org/antlr/v4/xtest/TestATNConstruction.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestATNConstruction.java rename to tool/test/org/antlr/v4/xtest/TestATNConstruction.java index b99901405..68a8b98e2 100644 --- a/tool/test/org/antlr/v4/test/TestATNConstruction.java +++ b/tool/test/org/antlr/v4/xtest/TestATNConstruction.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.Tool; import org.antlr.v4.automata.ATNPrinter; diff --git a/tool/test/org/antlr/v4/test/TestATNDeserialization.java b/tool/test/org/antlr/v4/xtest/TestATNDeserialization.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestATNDeserialization.java rename to tool/test/org/antlr/v4/xtest/TestATNDeserialization.java index bf08ab592..ab252a36b 100644 --- a/tool/test/org/antlr/v4/test/TestATNDeserialization.java +++ b/tool/test/org/antlr/v4/xtest/TestATNDeserialization.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNDeserializer; diff --git a/tool/test/org/antlr/v4/test/TestATNInterpreter.java b/tool/test/org/antlr/v4/xtest/TestATNInterpreter.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestATNInterpreter.java rename to tool/test/org/antlr/v4/xtest/TestATNInterpreter.java index 289890f37..60b708986 100644 --- a/tool/test/org/antlr/v4/test/TestATNInterpreter.java +++ b/tool/test/org/antlr/v4/xtest/TestATNInterpreter.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.automata.ParserATNFactory; import org.antlr.v4.runtime.Lexer; diff --git a/tool/test/org/antlr/v4/test/TestATNLexerInterpreter.java b/tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestATNLexerInterpreter.java rename to tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java index a88449c99..6f174f1f1 100644 --- a/tool/test/org/antlr/v4/test/TestATNLexerInterpreter.java +++ b/tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.CharStream; diff --git a/tool/test/org/antlr/v4/test/TestATNParserPrediction.java b/tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestATNParserPrediction.java rename to tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java index 98fe64510..9ecc513e2 100644 --- a/tool/test/org/antlr/v4/test/TestATNParserPrediction.java +++ b/tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.Tool; import org.antlr.v4.automata.ParserATNFactory; diff --git a/tool/test/org/antlr/v4/test/TestATNSerialization.java b/tool/test/org/antlr/v4/xtest/TestATNSerialization.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestATNSerialization.java rename to tool/test/org/antlr/v4/xtest/TestATNSerialization.java index e707a2bea..fb091a315 100644 --- a/tool/test/org/antlr/v4/test/TestATNSerialization.java +++ b/tool/test/org/antlr/v4/xtest/TestATNSerialization.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNSerializer; diff --git a/tool/test/org/antlr/v4/test/TestActionSplitter.java b/tool/test/org/antlr/v4/xtest/TestActionSplitter.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestActionSplitter.java rename to tool/test/org/antlr/v4/xtest/TestActionSplitter.java index af8c11f87..7112d2ae6 100644 --- a/tool/test/org/antlr/v4/test/TestActionSplitter.java +++ b/tool/test/org/antlr/v4/xtest/TestActionSplitter.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.runtime.ANTLRStringStream; import org.antlr.runtime.Token; diff --git a/tool/test/org/antlr/v4/test/TestActionTranslation.java b/tool/test/org/antlr/v4/xtest/TestActionTranslation.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestActionTranslation.java rename to tool/test/org/antlr/v4/xtest/TestActionTranslation.java index 142d3a5fb..ade9e70db 100644 --- a/tool/test/org/antlr/v4/test/TestActionTranslation.java +++ b/tool/test/org/antlr/v4/xtest/TestActionTranslation.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestAttributeChecks.java b/tool/test/org/antlr/v4/xtest/TestAttributeChecks.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestAttributeChecks.java rename to tool/test/org/antlr/v4/xtest/TestAttributeChecks.java index 7cb8fed61..457eb9be7 100644 --- a/tool/test/org/antlr/v4/test/TestAttributeChecks.java +++ b/tool/test/org/antlr/v4/xtest/TestAttributeChecks.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.runtime.RecognitionException; import org.antlr.v4.tool.ErrorType; diff --git a/tool/test/org/antlr/v4/test/TestBasicSemanticErrors.java b/tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestBasicSemanticErrors.java rename to tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java index 1b376f6a0..79f953624 100644 --- a/tool/test/org/antlr/v4/test/TestBasicSemanticErrors.java +++ b/tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.tool.ErrorType; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestBufferedTokenStream.java b/tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestBufferedTokenStream.java rename to tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java index 5a635867a..940d85e6b 100644 --- a/tool/test/org/antlr/v4/test/TestBufferedTokenStream.java +++ b/tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.BufferedTokenStream; diff --git a/tool/test/org/antlr/v4/test/TestCodeGeneration.java b/tool/test/org/antlr/v4/xtest/TestCodeGeneration.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestCodeGeneration.java rename to tool/test/org/antlr/v4/xtest/TestCodeGeneration.java index 93e234126..ba9b7e541 100644 --- a/tool/test/org/antlr/v4/test/TestCodeGeneration.java +++ b/tool/test/org/antlr/v4/xtest/TestCodeGeneration.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.runtime.RecognitionException; import org.antlr.v4.automata.ATNFactory; diff --git a/tool/test/org/antlr/v4/test/TestCommonTokenStream.java b/tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestCommonTokenStream.java rename to tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java index 2578d093b..68c7df1ca 100644 --- a/tool/test/org/antlr/v4/test/TestCommonTokenStream.java +++ b/tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CommonToken; diff --git a/tool/test/org/antlr/v4/test/TestCompositeGrammars.java b/tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestCompositeGrammars.java rename to tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java index a6eebbc4b..82ba4c9f4 100644 --- a/tool/test/org/antlr/v4/test/TestCompositeGrammars.java +++ b/tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.tool.ErrorType; import org.antlr.v4.tool.Grammar; diff --git a/tool/test/org/antlr/v4/test/TestFastQueue.java b/tool/test/org/antlr/v4/xtest/TestFastQueue.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestFastQueue.java rename to tool/test/org/antlr/v4/xtest/TestFastQueue.java index 2a90ef0dd..390d60428 100644 --- a/tool/test/org/antlr/v4/test/TestFastQueue.java +++ b/tool/test/org/antlr/v4/xtest/TestFastQueue.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.runtime.misc.FastQueue; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestFullContextParsing.java b/tool/test/org/antlr/v4/xtest/TestFullContextParsing.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestFullContextParsing.java rename to tool/test/org/antlr/v4/xtest/TestFullContextParsing.java index 576f32da6..c8a2b0e78 100644 --- a/tool/test/org/antlr/v4/test/TestFullContextParsing.java +++ b/tool/test/org/antlr/v4/xtest/TestFullContextParsing.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestGraphNodes.java b/tool/test/org/antlr/v4/xtest/TestGraphNodes.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestGraphNodes.java rename to tool/test/org/antlr/v4/xtest/TestGraphNodes.java index 0f64a9724..7f7f8f3ba 100644 --- a/tool/test/org/antlr/v4/test/TestGraphNodes.java +++ b/tool/test/org/antlr/v4/xtest/TestGraphNodes.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.atn.ArrayPredictionContext; import org.antlr.v4.runtime.atn.PredictionContext; diff --git a/tool/test/org/antlr/v4/test/TestIntervalSet.java b/tool/test/org/antlr/v4/xtest/TestIntervalSet.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestIntervalSet.java rename to tool/test/org/antlr/v4/xtest/TestIntervalSet.java index 256d5633c..d22803770 100644 --- a/tool/test/org/antlr/v4/test/TestIntervalSet.java +++ b/tool/test/org/antlr/v4/xtest/TestIntervalSet.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.Token; diff --git a/tool/test/org/antlr/v4/test/TestLeftRecursion.java b/tool/test/org/antlr/v4/xtest/TestLeftRecursion.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestLeftRecursion.java rename to tool/test/org/antlr/v4/xtest/TestLeftRecursion.java index b5640da4b..67e3ccd27 100644 --- a/tool/test/org/antlr/v4/test/TestLeftRecursion.java +++ b/tool/test/org/antlr/v4/xtest/TestLeftRecursion.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.tool.ErrorType; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestLexerActions.java b/tool/test/org/antlr/v4/xtest/TestLexerActions.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestLexerActions.java rename to tool/test/org/antlr/v4/xtest/TestLexerActions.java index 68f739b19..0059f404f 100644 --- a/tool/test/org/antlr/v4/test/TestLexerActions.java +++ b/tool/test/org/antlr/v4/xtest/TestLexerActions.java @@ -1,4 +1,4 @@ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestLexerErrors.java b/tool/test/org/antlr/v4/xtest/TestLexerErrors.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestLexerErrors.java rename to tool/test/org/antlr/v4/xtest/TestLexerErrors.java index 3aa08d08b..1ec038518 100644 --- a/tool/test/org/antlr/v4/test/TestLexerErrors.java +++ b/tool/test/org/antlr/v4/xtest/TestLexerErrors.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestLexerExec.java b/tool/test/org/antlr/v4/xtest/TestLexerExec.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestLexerExec.java rename to tool/test/org/antlr/v4/xtest/TestLexerExec.java index 88a060734..b0841d11a 100644 --- a/tool/test/org/antlr/v4/test/TestLexerExec.java +++ b/tool/test/org/antlr/v4/xtest/TestLexerExec.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.misc.Nullable; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestListeners.java b/tool/test/org/antlr/v4/xtest/TestListeners.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestListeners.java rename to tool/test/org/antlr/v4/xtest/TestListeners.java index 887e3ca66..32f272ab5 100644 --- a/tool/test/org/antlr/v4/test/TestListeners.java +++ b/tool/test/org/antlr/v4/xtest/TestListeners.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestParseErrors.java b/tool/test/org/antlr/v4/xtest/TestParseErrors.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestParseErrors.java rename to tool/test/org/antlr/v4/xtest/TestParseErrors.java index fd9f1c8b2..1af881bfc 100644 --- a/tool/test/org/antlr/v4/test/TestParseErrors.java +++ b/tool/test/org/antlr/v4/xtest/TestParseErrors.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.atn.ATNSerializer; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestParseTreeMatcher.java b/tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestParseTreeMatcher.java rename to tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java index 8109191a1..c5bc753cf 100644 --- a/tool/test/org/antlr/v4/test/TestParseTreeMatcher.java +++ b/tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java @@ -1,4 +1,4 @@ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CommonTokenStream; diff --git a/tool/test/org/antlr/v4/test/TestParseTrees.java b/tool/test/org/antlr/v4/xtest/TestParseTrees.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestParseTrees.java rename to tool/test/org/antlr/v4/xtest/TestParseTrees.java index 0fa017d14..e1ba23671 100644 --- a/tool/test/org/antlr/v4/test/TestParseTrees.java +++ b/tool/test/org/antlr/v4/xtest/TestParseTrees.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestParserExec.java b/tool/test/org/antlr/v4/xtest/TestParserExec.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestParserExec.java rename to tool/test/org/antlr/v4/xtest/TestParserExec.java index 2102fd9bc..34b4d4815 100644 --- a/tool/test/org/antlr/v4/test/TestParserExec.java +++ b/tool/test/org/antlr/v4/xtest/TestParserExec.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Ignore; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestParserInterpreter.java b/tool/test/org/antlr/v4/xtest/TestParserInterpreter.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestParserInterpreter.java rename to tool/test/org/antlr/v4/xtest/TestParserInterpreter.java index 0542b5378..c58619f92 100644 --- a/tool/test/org/antlr/v4/test/TestParserInterpreter.java +++ b/tool/test/org/antlr/v4/xtest/TestParserInterpreter.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.CommonTokenStream; diff --git a/tool/test/org/antlr/v4/test/TestParserProfiler.java b/tool/test/org/antlr/v4/xtest/TestParserProfiler.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestParserProfiler.java rename to tool/test/org/antlr/v4/xtest/TestParserProfiler.java index 4c9fb9141..13cfd3043 100644 --- a/tool/test/org/antlr/v4/test/TestParserProfiler.java +++ b/tool/test/org/antlr/v4/xtest/TestParserProfiler.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.CommonTokenStream; diff --git a/tool/test/org/antlr/v4/test/TestPerformance.java b/tool/test/org/antlr/v4/xtest/TestPerformance.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestPerformance.java rename to tool/test/org/antlr/v4/xtest/TestPerformance.java index 80f829ea0..b06f925e5 100644 --- a/tool/test/org/antlr/v4/test/TestPerformance.java +++ b/tool/test/org/antlr/v4/xtest/TestPerformance.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRFileStream; import org.antlr.v4.runtime.ANTLRInputStream; diff --git a/tool/test/org/antlr/v4/test/TestScopeParsing.java b/tool/test/org/antlr/v4/xtest/TestScopeParsing.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestScopeParsing.java rename to tool/test/org/antlr/v4/xtest/TestScopeParsing.java index 851bd4817..26d80fc0c 100644 --- a/tool/test/org/antlr/v4/test/TestScopeParsing.java +++ b/tool/test/org/antlr/v4/xtest/TestScopeParsing.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.parse.ScopeParser; import org.antlr.v4.tool.ErrorManager; diff --git a/tool/test/org/antlr/v4/test/TestSemPredEvalLexer.java b/tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestSemPredEvalLexer.java rename to tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java index 95934d397..fafc1aa68 100644 --- a/tool/test/org/antlr/v4/test/TestSemPredEvalLexer.java +++ b/tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestSemPredEvalParser.java rename to tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java index ed347a8ab..444c5c52d 100644 --- a/tool/test/org/antlr/v4/test/TestSemPredEvalParser.java +++ b/tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestSets.java b/tool/test/org/antlr/v4/xtest/TestSets.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestSets.java rename to tool/test/org/antlr/v4/xtest/TestSets.java index 12f5a00d8..c55847637 100644 --- a/tool/test/org/antlr/v4/test/TestSets.java +++ b/tool/test/org/antlr/v4/xtest/TestSets.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.tool.ErrorType; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestSymbolIssues.java b/tool/test/org/antlr/v4/xtest/TestSymbolIssues.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestSymbolIssues.java rename to tool/test/org/antlr/v4/xtest/TestSymbolIssues.java index 7d6d8f92c..bb6d050f5 100644 --- a/tool/test/org/antlr/v4/test/TestSymbolIssues.java +++ b/tool/test/org/antlr/v4/xtest/TestSymbolIssues.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.tool.ErrorType; import org.antlr.v4.tool.LexerGrammar; diff --git a/tool/test/org/antlr/v4/test/TestTokenPositionOptions.java b/tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestTokenPositionOptions.java rename to tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java index 3aa97d5d4..ecedabdb3 100644 --- a/tool/test/org/antlr/v4/test/TestTokenPositionOptions.java +++ b/tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.runtime.Token; import org.antlr.v4.misc.Utils; diff --git a/tool/test/org/antlr/v4/test/TestTokenStreamRewriter.java b/tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestTokenStreamRewriter.java rename to tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java index acac405ca..71bff0fee 100644 --- a/tool/test/org/antlr/v4/test/TestTokenStreamRewriter.java +++ b/tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.CommonTokenStream; diff --git a/tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java b/tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java rename to tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java index a399a1c75..503b9cc8e 100644 --- a/tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java +++ b/tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.Token; import org.antlr.v4.tool.Grammar; diff --git a/tool/test/org/antlr/v4/test/TestToolSyntaxErrors.java b/tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestToolSyntaxErrors.java rename to tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java index e5a5de142..231f3961e 100644 --- a/tool/test/org/antlr/v4/test/TestToolSyntaxErrors.java +++ b/tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.Tool; import org.antlr.v4.tool.ErrorType; diff --git a/tool/test/org/antlr/v4/test/TestTopologicalSort.java b/tool/test/org/antlr/v4/xtest/TestTopologicalSort.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestTopologicalSort.java rename to tool/test/org/antlr/v4/xtest/TestTopologicalSort.java index 7b75520c5..65df21760 100644 --- a/tool/test/org/antlr/v4/test/TestTopologicalSort.java +++ b/tool/test/org/antlr/v4/xtest/TestTopologicalSort.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.misc.Graph; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java b/tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java rename to tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java index 0e6b02e89..a38ca66be 100644 --- a/tool/test/org/antlr/v4/test/TestUnbufferedCharStream.java +++ b/tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CommonTokenFactory; diff --git a/tool/test/org/antlr/v4/test/TestUnbufferedTokenStream.java b/tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestUnbufferedTokenStream.java rename to tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java index 79accab1c..fda765562 100644 --- a/tool/test/org/antlr/v4/test/TestUnbufferedTokenStream.java +++ b/tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java @@ -28,7 +28,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.CharStream; diff --git a/tool/test/org/antlr/v4/test/TestVocabulary.java b/tool/test/org/antlr/v4/xtest/TestVocabulary.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestVocabulary.java rename to tool/test/org/antlr/v4/xtest/TestVocabulary.java index c39c62cff..664871339 100644 --- a/tool/test/org/antlr/v4/test/TestVocabulary.java +++ b/tool/test/org/antlr/v4/xtest/TestVocabulary.java @@ -27,7 +27,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.Vocabulary; diff --git a/tool/test/org/antlr/v4/test/TestXPath.java b/tool/test/org/antlr/v4/xtest/TestXPath.java similarity index 99% rename from tool/test/org/antlr/v4/test/TestXPath.java rename to tool/test/org/antlr/v4/xtest/TestXPath.java index f3cc5b5a3..886a04536 100644 --- a/tool/test/org/antlr/v4/test/TestXPath.java +++ b/tool/test/org/antlr/v4/xtest/TestXPath.java @@ -1,4 +1,4 @@ -package org.antlr.v4.test; +package org.antlr.v4.xtest; import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.Parser; From a683f05f6a932c3c6e13c13941b97b2c35212db6 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Fri, 24 Oct 2014 08:17:28 +0800 Subject: [PATCH 10/26] add missing test in LexerExec, fix LexerErrors --- .../org/antlr/v4/test/rt/gen/Generator.java | 12 +- .../antlr/v4/test/rt/gen/LexerTestMethod.java | 3 +- .../gen/grammars/LexerErrors/LexerExecDFA.st | 3 +- .../gen/grammars/LexerExec/ZeroLengthToken.st | 9 + .../org/antlr/v4/test/rt/java/Java.test.stg | 2 +- .../v4/test/rt/java/TestLexerErrors.java | 9 +- .../antlr/v4/test/rt/java/TestLexerExec.java | 17 + .../antlr/v4/test/tool/TestLexerErrors.java | 213 ------ .../org/antlr/v4/test/tool/TestLexerExec.java | 690 ------------------ 9 files changed, 44 insertions(+), 914 deletions(-) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ZeroLengthToken.st delete mode 100644 tool/test/org/antlr/v4/test/tool/TestLexerErrors.java delete mode 100644 tool/test/org/antlr/v4/test/tool/TestLexerExec.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index 72daec410..bb52d49f1 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -716,14 +716,15 @@ public class Generator { "abx", "[@0,3:2='',<-1>,1:3]\n", "line 1:0 token recognition error at: 'abx'\n"); - file.addLexerTest(input, "LexerExecDFA", "L", + LexerTestMethod tm = file.addLexerTest(input, "LexerExecDFA", "L", "x : x", "[@0,0:0='x',<3>,1:0]\n" + - "[@1,2:2=':',<2>,1:2]\n" + + "[@1,2:2=':',<1>,1:2]\n" + "[@2,4:4='x',<3>,1:4]\n" + "[@3,5:4='',<-1>,1:5]\n", "line 1:1 token recognition error at: ' '\n" + "line 1:3 token recognition error at: ' '\n"); + tm.lexerOnly = false; return file; } @@ -1173,6 +1174,13 @@ public class Generator { file.addLexerTest(input, "LargeLexer", "L", "KW400", "[@0,0:4='KW400',<402>,1:0]\n" + "[@1,5:4='',<-1>,1:5]\n", null); + /** + * This is a regression test for antlr/antlr4#687 "Empty zero-length tokens + * cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match + * zero-length tokens" */ + file.addLexerTest(input, "ZeroLengthToken", "L", "'xxx'", + "[@0,0:4=''xxx'',<1>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n", null); return file; } diff --git a/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java index f4b027ca7..8d378891d 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java +++ b/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java @@ -3,7 +3,8 @@ package org.antlr.v4.test.rt.gen; public class LexerTestMethod extends TestMethod { public String[] outputLines; - + public boolean lexerOnly = true; + public LexerTestMethod(String name, String grammarName, String input, String expectedOutput, String expectedErrors, Integer index) { super(name, grammarName, input, expectedOutput, expectedErrors, 
index); diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/LexerExecDFA.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/LexerExecDFA.st index eeb152fc4..983c36539 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/LexerExecDFA.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerErrors/LexerExecDFA.st @@ -1,6 +1,5 @@ -lexer grammar ; +grammar ; start : ID ':' expr; expr : primary expr? {} | expr '->' ID; primary : ID; ID : [a-z]+; -; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ZeroLengthToken.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ZeroLengthToken.st new file mode 100644 index 000000000..60b0f086a --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LexerExec/ZeroLengthToken.st @@ -0,0 +1,9 @@ +lexer grammar ; +BeginString + : '\'' -> more, pushMode(StringMode) + ; +mode StringMode; + StringMode_X : 'x' -> more; + StringMode_Done : -> more, mode(EndStringMode); +mode EndStringMode; + EndString : '\'' -> popMode; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index 4bea570e7..bb3ed5111 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -28,7 +28,7 @@ public void test() throws Exception { };separator="\n", wrap, anchor> String grammar = };separator="\\n\" +\n", wrap, anchor>"; - String found = execLexer(".g4", grammar, "", ""); + String found = execLexer(".g4", grammar, "Lexer", ""); assertEquals(\\n"};separator=" + \n", wrap, anchor>, found); assertEquals("", this.stderrDuringParse); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java index b42587de7..112ce021b 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java @@ -123,15 +123,14 @@ public class TestLexerErrors extends BaseTest { @Test public void testLexerExecDFA() throws Exception { - String grammar = "lexer grammar L;\n" + + String grammar = "grammar L;\n" + "start : ID ':' expr;\n" + "expr : primary expr? 
{} | expr '->' ID;\n" + "primary : ID;\n" + - "ID : [a-z]+;\n" + - ";"; - String found = execLexer("L.g4", grammar, "L", "x : x"); + "ID : [a-z]+;"; + String found = execLexer("L.g4", grammar, "LLexer", "x : x"); assertEquals("[@0,0:0='x',<3>,1:0]\n" + - "[@1,2:2=':',<2>,1:2]\n" + + "[@1,2:2=':',<1>,1:2]\n" + "[@2,4:4='x',<3>,1:4]\n" + "[@3,5:4='',<-1>,1:5]\n", found); assertEquals("line 1:1 token recognition error at: ' '\nline 1:3 token recognition error at: ' '\n", this.stderrDuringParse); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java index de92cced3..9e33810c5 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java @@ -4623,5 +4623,22 @@ public class TestLexerExec extends BaseTest { assertNull(this.stderrDuringParse); } + @Test + public void testZeroLengthToken() throws Exception { + String grammar = "lexer grammar L;\n" + + "BeginString\n" + + " : '\\'' -> more, pushMode(StringMode)\n" + + " ;\n" + + "mode StringMode;\n" + + " StringMode_X : 'x' -> more;\n" + + " StringMode_Done : -> more, mode(EndStringMode);\n" + + "mode EndStringMode; \n" + + " EndString : '\\'' -> popMode;"; + String found = execLexer("L.g4", grammar, "L", "'xxx'"); + assertEquals("[@0,0:4=''xxx'',<1>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n", found); + assertNull(this.stderrDuringParse); + } + } \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/tool/TestLexerErrors.java b/tool/test/org/antlr/v4/test/tool/TestLexerErrors.java deleted file mode 100644 index c288cf5e2..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestLexerErrors.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.test.tool; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestLexerErrors extends BaseTest { - // TEST DETECTION - @Test public void testInvalidCharAtStart() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "x"); - String expectingTokens = - "[@0,1:0='',<-1>,1:1]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:0 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test - public void testStringsEmbeddedInActions() { - String grammar = - "lexer grammar Actions;\n" - + "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" - + "STRING : '\"' ('\\\"' | .)*? '\"';\n" - + "WS : [ \\t\\r\\n]+ -> skip;\n"; - String tokens = execLexer("Actions.g4", grammar, "Actions", "[\"foo\"]"); - String expectingTokens = - "[@0,0:6='[\"foo\"]',<1>,1:0]\n" + - "[@1,7:6='',<-1>,1:7]\n"; - assertEquals(expectingTokens, tokens); - assertNull(stderrDuringParse); - - tokens = execLexer("Actions.g4", grammar, "Actions", "[\"foo]"); - expectingTokens = - "[@0,6:5='',<-1>,1:6]\n"; - assertEquals(expectingTokens, tokens); - assertEquals("line 1:0 token recognition error at: '[\"foo]'\n", stderrDuringParse); - } - - @Test public void testEnforcedGreedyNestedBrances() { - String grammar = - "lexer grammar R;\n" - + "ACTION : '{' (ACTION | ~[{}])* '}';\n" - + "WS : [ \\r\\n\\t]+ -> skip;\n"; - String tokens = execLexer("R.g4", grammar, "R", "{ { } }"); - String expectingTokens = - "[@0,0:6='{ { } }',<1>,1:0]\n" + - "[@1,7:6='',<-1>,1:7]\n"; - assertEquals(expectingTokens, tokens); - assertEquals(null, stderrDuringParse); - - tokens = execLexer("R.g4", grammar, "R", "{ { }"); - expectingTokens = - "[@0,5:4='',<-1>,1:5]\n"; - assertEquals(expectingTokens, tokens); - assertEquals("line 1:0 token recognition error at: '{ { }'\n", stderrDuringParse); - } - - @Test public void testInvalidCharAtStartAfterDFACache() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "abx"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:2 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testInvalidCharInToken() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "ax"); - String expectingTokens = - "[@0,2:1='',<-1>,1:2]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:0 token recognition error at: 'ax'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testInvalidCharInTokenAfterDFACache() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "abax"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,4:3='',<-1>,1:4]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:2 token recognition error at: 'ax'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testDFAToATNThatFailsBackToDFA() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'ab' ;\n"+ - "B : 'abc' ;\n"; - // The first ab caches 
the DFA then abx goes through the DFA but - // into the ATN for the x, which fails. Must go back into DFA - // and return to previous dfa accept state - String tokens = execLexer("L.g4", grammar, "L", "ababx"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:3='ab',<1>,1:2]\n" + - "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:4 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testDFAToATNThatMatchesThenFailsInATN() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'ab' ;\n"+ - "B : 'abc' ;\n"+ - "C : 'abcd' ;\n"; - // The first ab caches the DFA then abx goes through the DFA but - // into the ATN for the c. It marks that hasn't except state - // and then keeps going in the ATN. It fails on the x, but - // uses the previous accepted in the ATN not DFA - String tokens = execLexer("L.g4", grammar, "L", "ababcx"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:4='abc',<2>,1:2]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:5 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testErrorInMiddle() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'abc' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "abx"); - String expectingTokens = - "[@0,3:2='',<-1>,1:3]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:0 token recognition error at: 'abx'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - // TEST RECOVERY - - /** - * This is a regression test for #45 "NullPointerException in LexerATNSimulator.execDFA". - * https://github.com/antlr/antlr4/issues/46 - */ - @Test - public void testLexerExecDFA() throws Exception { - String grammar = - "grammar T;\n" + - "start : ID ':' expr;\n" + - "expr : primary expr? {} | expr '->' ID;\n" + - "primary : ID;\n" + - "ID : [a-z]+;\n" + - "\n"; - String result = execLexer("T.g4", grammar, "TLexer", "x : x", false); - String expecting = - "[@0,0:0='x',<3>,1:0]\n" + - "[@1,2:2=':',<1>,1:2]\n" + - "[@2,4:4='x',<3>,1:4]\n" + - "[@3,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, result); - assertEquals("line 1:1 token recognition error at: ' '\n" + - "line 1:3 token recognition error at: ' '\n", - this.stderrDuringParse); - } - -} diff --git a/tool/test/org/antlr/v4/test/tool/TestLexerExec.java b/tool/test/org/antlr/v4/test/tool/TestLexerExec.java deleted file mode 100644 index 6cbc0209a..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestLexerExec.java +++ /dev/null @@ -1,690 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.test.tool; - -import org.antlr.v4.runtime.misc.Nullable; -import org.junit.Test; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class TestLexerExec extends BaseTest { - @Test public void testQuoteTranslation() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "QUOTE : '\"' ;\n"; // make sure this compiles - String found = execLexer("L.g4", grammar, "L", "\""); - String expecting = - "[@0,0:0='\"',<1>,1:0]\n" + - "[@1,1:0='',<-1>,1:1]\n"; - assertEquals(expecting, found); - } - - @Test public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : '-' I ;\n" + - "I : '0'..'9'+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 -21 3"); - String expecting = - "[@0,0:1='34',<2>,1:0]\n" + - "[@1,3:5='-21',<1>,1:3]\n" + - "[@2,7:7='3',<2>,1:7]\n" + - "[@3,8:7='',<-1>,1:8]\n"; // EOF has no length so range is 8:7 not 8:8 - assertEquals(expecting, found); - } - - @Test public void testSlashes() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "Backslash : '\\\\';\n" + - "Slash : '/';\n" + - "Vee : '\\\\/';\n" + - "Wedge : '/\\\\';\n"+ - "WS : [ \\t] -> skip;"; - String found = execLexer("L.g4", grammar, "L", "\\ / \\/ /\\"); - String expecting = - "[@0,0:0='\\',<1>,1:0]\n" + - "[@1,2:2='/',<2>,1:2]\n" + - "[@2,4:5='\\/',<3>,1:4]\n" + - "[@3,7:8='/\\',<4>,1:7]\n" + - "[@4,9:8='',<-1>,1:9]\n"; - assertEquals(expecting, found); - } - - /** - * This is a regression test for antlr/antlr4#224: "Parentheses without - * quantifier in lexer rules have unclear effect". - * https://github.com/antlr/antlr4/issues/224 - */ - @Test public void testParentheses() { - String grammar = - "lexer grammar Demo;\n" + - "\n" + - "START_BLOCK: '-.-.-';\n" + - "\n" + - "ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;\n" + - "fragment LETTER: L_A|L_K;\n" + - "fragment L_A: '.-';\n" + - "fragment L_K: '-.-';\n" + - "\n" + - "SEPARATOR: '!';\n"; - String found = execLexer("Demo.g4", grammar, "Demo", "-.-.-!"); - String expecting = - "[@0,0:4='-.-.-',<1>,1:0]\n" + - "[@1,5:5='!',<3>,1:5]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); - } - - @Test - public void testNonGreedyTermination() throws Exception { - String grammar = - "lexer grammar L;\n" - + "STRING : '\"' ('\"\"' | .)*? 
'\"';"; - - String found = execLexer("L.g4", grammar, "L", "\"hi\"\"mom\""); - assertEquals( - "[@0,0:3='\"hi\"',<1>,1:0]\n" + - "[@1,4:8='\"mom\"',<1>,1:4]\n" + - "[@2,9:8='',<-1>,1:9]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyTermination2() throws Exception { - String grammar = - "lexer grammar L;\n" - + "STRING : '\"' ('\"\"' | .)+? '\"';"; - - String found = execLexer("L.g4", grammar, "L", "\"\"\"mom\""); - assertEquals( - "[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" + - "[@1,7:6='',<-1>,1:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testGreedyOptional() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT?;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + - "[@1,14:13='',<-1>,3:14]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyOptional() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT??;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:6='//blah\\n',<1>,1:0]\n" + - "[@1,7:13='//blah\\n',<1>,2:0]\n" + - "[@2,14:13='',<-1>,3:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testGreedyClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT*;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + - "[@1,14:13='',<-1>,3:14]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT*?;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:6='//blah\\n',<1>,1:0]\n" + - "[@1,7:13='//blah\\n',<1>,2:0]\n" + - "[@2,14:13='',<-1>,3:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testGreedyPositiveClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : ('//' .*? '\\n')+;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + - "[@1,14:13='',<-1>,3:14]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyPositiveClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : ('//' .*? '\\n')+?;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:6='//blah\\n',<1>,1:0]\n" + - "[@1,7:13='//blah\\n',<1>,2:0]\n" + - "[@2,14:13='',<-1>,3:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardStar1() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)*? 
'*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,9:9='\\n',<2>,1:9]\n" + - "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,35:35='\\n',<2>,3:16]\n" + - "[@4,36:35='',<-1>,4:17]\n"; - - // stuff on end of comment matches another rule - String found = execLexer("L.g4", grammar, "L", - "/* ick */\n" + - "/* /* */\n" + - "/* /*nested*/ */\n"); - assertEquals(expecting, found); - assertNull(stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardStar2() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)*? '*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - // stuff on end of comment doesn't match another rule - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,10:10='\\n',<2>,1:10]\n" + - "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,38:38='\\n',<2>,3:17]\n" + - "[@4,39:38='',<-1>,4:18]\n"; - String found = execLexer("L.g4", grammar, "L", - "/* ick */x\n" + - "/* /* */x\n" + - "/* /*nested*/ */x\n"); - assertEquals(expecting, found); - assertEquals( - "line 1:9 token recognition error at: 'x'\n" + - "line 3:16 token recognition error at: 'x'\n", stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardPlus1() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)+? '*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,9:9='\\n',<2>,1:9]\n" + - "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,35:35='\\n',<2>,3:16]\n" + - "[@4,36:35='',<-1>,4:17]\n"; - - // stuff on end of comment matches another rule - String found = execLexer("L.g4", grammar, "L", - "/* ick */\n" + - "/* /* */\n" + - "/* /*nested*/ */\n"); - assertEquals(expecting, found); - assertNull(stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardPlus2() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)+? 
'*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - // stuff on end of comment doesn't match another rule - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,10:10='\\n',<2>,1:10]\n" + - "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,38:38='\\n',<2>,3:17]\n" + - "[@4,39:38='',<-1>,4:18]\n"; - String found = execLexer("L.g4", grammar, "L", - "/* ick */x\n" + - "/* /* */x\n" + - "/* /*nested*/ */x\n"); - assertEquals(expecting, found); - assertEquals( - "line 1:9 token recognition error at: 'x'\n" + - "line 3:16 token recognition error at: 'x'\n", stderrDuringParse); - } - - @Test public void testActionPlacement() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ({System.out.println(\"stuff fail: \" + getText());} 'a' | {System.out.println(\"stuff0: \" + getText());} 'a' {System.out.println(\"stuff1: \" + getText());} 'b' {System.out.println(\"stuff2: \" + getText());}) {System.out.println(getText());} ;\n"+ - "WS : (' '|'\\n') -> skip ;\n" + - "J : .;\n"; - String found = execLexer("L.g4", grammar, "L", "ab"); - String expecting = - "stuff0: \n" + - "stuff1: a\n" + - "stuff2: ab\n" + - "ab\n" + - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:1='',<-1>,1:2]\n"; - assertEquals(expecting, found); - } - - @Test public void testGreedyConfigs() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ('a' | 'ab') {System.out.println(getText());} ;\n"+ - "WS : (' '|'\\n') -> skip ;\n" + - "J : .;\n"; - String found = execLexer("L.g4", grammar, "L", "ab"); - String expecting = - "ab\n" + - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:1='',<-1>,1:2]\n"; - assertEquals(expecting, found); - } - - @Test public void testNonGreedyConfigs() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : .*? ('a' | 'ab') {System.out.println(getText());} ;\n"+ - "WS : (' '|'\\n') -> skip ;\n" + - "J : . {System.out.println(getText());};\n"; - String found = execLexer("L.g4", grammar, "L", "ab"); - String expecting = - "a\n" + - "b\n" + - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,1:1='b',<3>,1:1]\n" + - "[@2,2:1='',<-1>,1:2]\n"; - assertEquals(expecting, found); - } - - @Test public void testKeywordID() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "KEND : 'end' ;\n" + // has priority - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n')+ ;"; - String found = execLexer("L.g4", grammar, "L", "end eend ending a"); - String expecting = - "[@0,0:2='end',<1>,1:0]\n" + - "[@1,3:3=' ',<3>,1:3]\n" + - "[@2,4:7='eend',<2>,1:4]\n" + - "[@3,8:8=' ',<3>,1:8]\n" + - "[@4,9:14='ending',<2>,1:9]\n" + - "[@5,15:15=' ',<3>,1:15]\n" + - "[@6,16:16='a',<2>,1:16]\n" + - "[@7,17:16='',<-1>,1:17]\n"; - assertEquals(expecting, found); - } - - @Test public void testHexVsID() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "HexLiteral : '0' ('x'|'X') HexDigit+ ;\n"+ - "DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;\n" + - "FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;\n" + - "DOT : '.' 
;\n" + - "ID : 'a'..'z'+ ;\n" + - "fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;\n" + - "WS : (' '|'\\n')+ ;"; - String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l"); - String expecting = - "[@0,0:0='x',<5>,1:0]\n" + - "[@1,1:1=' ',<6>,1:1]\n" + - "[@2,2:2='0',<2>,1:2]\n" + - "[@3,3:3=' ',<6>,1:3]\n" + - "[@4,4:4='1',<2>,1:4]\n" + - "[@5,5:5=' ',<6>,1:5]\n" + - "[@6,6:6='a',<5>,1:6]\n" + - "[@7,7:7='.',<4>,1:7]\n" + - "[@8,8:8='b',<5>,1:8]\n" + - "[@9,9:9=' ',<6>,1:9]\n" + - "[@10,10:10='a',<5>,1:10]\n" + - "[@11,11:11='.',<4>,1:11]\n" + - "[@12,12:12='l',<5>,1:12]\n" + - "[@13,13:12='',<-1>,1:13]\n"; - assertEquals(expecting, found); - } - - // must get DONE EOF - @Test public void testEOFByItself() throws Exception { - String grammar = - "lexer grammar L;\n" + - "DONE : EOF ;\n" + - "A : 'a';\n"; - String found = execLexer("L.g4", grammar, "L", ""); - String expecting = - "[@0,0:-1='',<1>,1:0]\n" + - "[@1,0:-1='',<-1>,1:0]\n"; - assertEquals(expecting, found); - } - - @Test public void testEOFSuffixInFirstRule() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : 'a' EOF ;\n"+ - "B : 'a';\n"+ - "C : 'c';\n"; - String found = execLexer("L.g4", grammar, "L", ""); - String expecting = - "[@0,0:-1='',<-1>,1:0]\n"; - assertEquals(expecting, found); - - found = execLexer("L.g4", grammar, "L", "a"); - expecting = - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,1:0='',<-1>,1:1]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSet() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D] -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetPlus() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetNot() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ~[ab \\n] ~[ \\ncd]* {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "xaf"); - String expecting = - "I\n" + - "[@0,0:2='xaf',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetInSet() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : (~[ab \\n]|'a') {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "a x"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,2:2='x',<1>,1:2]\n" + - "[@2,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetRange() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-9]+ {System.out.println(\"I\");} ;\n"+ - "ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n"+ - "WS : [ \\n\\u0009\\r]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n "); - String expecting = - "I\n" + - "I\n" + - "ID\n" + - "ID\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,4:5='34',<1>,1:4]\n" + - 
"[@2,7:8='a2',<2>,1:7]\n" + - "[@3,10:12='abc',<2>,1:10]\n" + - "[@4,18:17='',<-1>,2:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithMissingEndRange() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-]+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "00\r\n"); - String expecting = - "I\n" + - "[@0,0:1='00',<1>,1:0]\n" + - "[@1,4:3='',<-1>,2:0]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithMissingEscapeChar() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-9]+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 "); - String expecting = - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithEscapedChar() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "DASHBRACK : [\\-\\]]+ {System.out.println(\"DASHBRACK\");} ;\n"+ - "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "- ] "); - String expecting = - "DASHBRACK\n" + - "DASHBRACK\n" + - "[@0,0:0='-',<1>,1:0]\n" + - "[@1,2:2=']',<1>,1:2]\n" + - "[@2,4:3='',<-1>,1:4]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithReversedRange() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : [z-a9]+ {System.out.println(\"A\");} ;\n"+ - "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "9"); - String expecting = - "A\n" + - "[@0,0:0='9',<1>,1:0]\n" + - "[@1,1:0='',<-1>,1:1]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithQuote() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : [\"a-z]+ {System.out.println(\"A\");} ;\n"+ - "WS : [ \\n\\t]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "b\"a"); - String expecting = - "A\n" + - "[@0,0:2='b\"a',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithQuote2() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : [\"\\\\ab]+ {System.out.println(\"A\");} ;\n"+ - "WS : [ \\n\\t]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "b\"\\a"); - String expecting = - "A\n" + - "[@0,0:3='b\"\\a',<1>,1:0]\n" + - "[@1,4:3='',<-1>,1:4]\n"; - assertEquals(expecting, found); - } - - @Test - public void testPositionAdjustingLexer() throws Exception { - String grammar = load("PositionAdjustingLexer.g4", null); - String input = - "tokens\n" + - "tokens {\n" + - "notLabel\n" + - "label1 =\n" + - "label2 +=\n" + - "notLabel\n"; - String found = execLexer("PositionAdjustingLexer.g4", grammar, "PositionAdjustingLexer", input); - - final int TOKENS = 4; - final int LABEL = 5; - final int IDENTIFIER = 6; - String expecting = - "[@0,0:5='tokens',<" + IDENTIFIER + ">,1:0]\n" + - "[@1,7:12='tokens',<" + TOKENS + ">,2:0]\n" + - "[@2,14:14='{',<3>,2:7]\n" + - "[@3,16:23='notLabel',<" + IDENTIFIER + ">,3:0]\n" + - "[@4,25:30='label1',<" + LABEL + ">,4:0]\n" + - "[@5,32:32='=',<1>,4:7]\n" + - "[@6,34:39='label2',<" + LABEL + ">,5:0]\n" + - "[@7,41:42='+=',<2>,5:7]\n" + - "[@8,44:51='notLabel',<" + IDENTIFIER + ">,6:0]\n" + - "[@9,53:52='',<-1>,7:0]\n"; - - assertEquals(expecting, found); - } - - /** - * This is a regression test for antlr/antlr4#76 "Serialized ATN strings - * should be split when longer than 2^16 bytes (class file limitation)" - * 
https://github.com/antlr/antlr4/issues/76 - */ - @Test - public void testLargeLexer() throws Exception { - StringBuilder grammar = new StringBuilder(); - grammar.append("lexer grammar L;\n"); - grammar.append("WS : [ \\t\\r\\n]+ -> skip;\n"); - for (int i = 0; i < 4000; i++) { - grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n"); - } - - String input = "KW400"; - String found = execLexer("L.g4", grammar.toString(), "L", input); - String expecting = - "[@0,0:4='KW400',<402>,1:0]\n" + - "[@1,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - /** - * This is a regression test for antlr/antlr4#687 "Empty zero-length tokens - * cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match - * zero-length tokens" - * https://github.com/antlr/antlr4/issues/687 - * https://github.com/antlr/antlr4/issues/688 - */ - @Test public void testZeroLengthToken() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "\n" + - "BeginString\n" + - " : '\\'' -> more, pushMode(StringMode)\n" + - " ;\n" + - "\n" + - "mode StringMode;\n" + - "\n" + - " StringMode_X : 'x' -> more;\n" + - " StringMode_Done : -> more, mode(EndStringMode);\n" + - "\n" + - "mode EndStringMode; \n" + - "\n" + - " EndString : '\\'' -> popMode;\n"; - String found = execLexer("L.g4", grammar, "L", "'xxx'"); - String expecting = - "[@0,0:4=''xxx'',<1>,1:0]\n" + - "[@1,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } -} From e97083fd81d53611caa7d8a72a59e53f26a3b559 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Fri, 24 Oct 2014 22:15:05 +0800 Subject: [PATCH 11/26] validated SemPredEvalLexer --- .../org/antlr/v4/test/rt/gen/Generator.java | 18 +- .../antlr/v4/test/rt/gen/LexerTestMethod.java | 1 + .../gen/grammars/SemPredEvalLexer/Indent.st | 4 +- .../SemPredEvalLexer/PredicatedKeywords.st | 2 +- .../org/antlr/v4/test/rt/java/Java.test.stg | 8 +- .../v4/test/rt/java/TestCompositeLexers.java | 4 +- .../v4/test/rt/java/TestLexerErrors.java | 24 +-- .../antlr/v4/test/rt/java/TestLexerExec.java | 76 ++++---- .../v4/test/rt/java/TestSemPredEvalLexer.java | 28 +-- .../v4/test/tool/TestSemPredEvalLexer.java | 183 ------------------ 10 files changed, 86 insertions(+), 262 deletions(-) delete mode 100644 tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index bb52d49f1..cb3f4c15f 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -382,7 +382,7 @@ public class Generator { private TestFile buildSemPredEvalLexer() throws Exception { TestFile file = new TestFile("SemPredEvalLexer"); - file.addLexerTest(input, "DisableRule", "L", + LexerTestMethod tm = file.addLexerTest(input, "DisableRule", "L", "enum abc", "[@0,0:3='enum',<2>,1:0]\n" + "[@1,5:7='abc',<3>,1:5]\n" + @@ -395,7 +395,8 @@ public class Generator { ":s6=>3-'b'->:s6=>3\n" + ":s6=>3-'c'->:s6=>3\n", null); - file.addLexerTest(input, "IDvsEnum", "L", + tm.showDFA = true; + tm = file.addLexerTest(input, "IDvsEnum", "L", "enum abc enum", "[@0,0:3='enum',<2>,1:0]\n" + "[@1,5:7='abc',<2>,1:5]\n" + @@ -409,7 +410,8 @@ public class Generator { ":s4=>2-'b'->:s4=>2\n" + ":s4=>2-'c'->:s4=>2\n", // no 'm'-> transition...conflicts with pred null); - file.addLexerTest(input, "IDnotEnum", "L", + tm.showDFA = true; + tm = file.addLexerTest(input, "IDnotEnum", "L", "enum abc enum", "[@0,0:3='enum',<2>,1:0]\n" + "[@1,5:7='abc',<2>,1:5]\n" + @@ -417,7 
+419,8 @@ public class Generator { "[@3,13:12='',<-1>,1:13]\n" + "s0-' '->:s2=>3\n", // no edges in DFA for enum/id. all paths lead to pred. null); - file.addLexerTest(input, "EnumNotID", "L", + tm.showDFA = true; + tm = file.addLexerTest(input, "EnumNotID", "L", "enum abc enum", "[@0,0:3='enum',<1>,1:0]\n" + "[@1,5:7='abc',<2>,1:5]\n" + @@ -425,7 +428,8 @@ public class Generator { "[@3,13:12='',<-1>,1:13]\n" + "s0-' '->:s3=>3\n", // no edges in DFA for enum/id. all paths lead to pred. null); - file.addLexerTest(input, "Indent", "L", + tm.showDFA = true; + tm = file.addLexerTest(input, "Indent", "L", "abc\n def \n", "INDENT\n" + // action output "[@0,0:2='abc',<1>,1:0]\n" + // ID @@ -444,7 +448,8 @@ public class Generator { ":s1=>1-'e'->:s1=>1\n" + ":s1=>1-'f'->:s1=>1\n", null); - file.addLexerTest(input, "LexerInputPositionSensitivePredicates", "L", + tm.showDFA = true; + tm = file.addLexerTest(input, "LexerInputPositionSensitivePredicates", "L", "a cde\nabcde\n", "a\n" + "cde\n" + @@ -456,6 +461,7 @@ public class Generator { "[@3,8:10='cde',<2>,2:2]\n" + "[@4,12:11='',<-1>,3:0]\n", null); + tm.showDFA = true; file.addLexerTest(input, "PredicatedKeywords", "L", "enum enu a", "enum!\n" + diff --git a/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java index 8d378891d..4de133d58 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java +++ b/tool/test/org/antlr/v4/test/rt/gen/LexerTestMethod.java @@ -4,6 +4,7 @@ public class LexerTestMethod extends TestMethod { public String[] outputLines; public boolean lexerOnly = true; + public boolean showDFA = false; public LexerTestMethod(String name, String grammarName, String input, String expectedOutput, String expectedErrors, Integer index) { diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/Indent.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/Indent.st index 36c452401..9aa936553 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/Indent.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/Indent.st @@ -1,6 +1,6 @@ lexer grammar ; ID : [a-z]+ ; -INDENT : [ \t]+ { }? \n" + - { } ;"+ +INDENT : [ \t]+ { }? + { } ; NL : '\n'; WS : [ \t]+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/PredicatedKeywords.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/PredicatedKeywords.st index 529601cb6..2f3127cd5 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/PredicatedKeywords.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/PredicatedKeywords.st @@ -1,4 +1,4 @@ lexer grammar ; ENUM : [a-z]+ { }? 
{ } ; -ID : [a-z]+ { } ; +ID : [a-z]+ { } ; WS : [ \n] -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index bb3ed5111..ce1a3f775 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -28,7 +28,7 @@ public void test() throws Exception { };separator="\n", wrap, anchor> String grammar = };separator="\\n\" +\n", wrap, anchor>"; - String found = execLexer(".g4", grammar, "Lexer", ""); + String found = execLexer(".g4", grammar, "Lexer", "", ); assertEquals(\\n"};separator=" + \n", wrap, anchor>, found); assertEquals("", this.stderrDuringParse); @@ -125,13 +125,13 @@ BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy( ToStringTree(s) ::= <%.toStringTree(null, this);%> -Column() ::= "this.column" +Column() ::= "this.getCharPositionInLine()" Text() ::= "this.getText()" ValEquals(a,b) ::= <%===%> -TextEquals(a) ::= <%this.text===""%> +TextEquals(a) ::= <%this.getText().equals("")%> PlusText(a) ::= <%"" + this.getText()%> @@ -141,7 +141,7 @@ LTEquals(i, v) ::= <%this._input.LT().text===%> LANotEquals(i, v) ::= <%this._input.LA()!=%> -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> +TokenStartColumnEquals(i) ::= <%this._tokenStartCharPositionInLine==%> ImportListener(X) ::= <Listener = require('./Listener').Listener;>> diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java index bc0343ea0..375f38aad 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java @@ -17,7 +17,7 @@ public class TestCompositeLexers extends BaseTest { "import S;\n" + "B : 'b';\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("M.g4", grammar, "M", "abc"); + String found = execLexer("M.g4", grammar, "M", "abc", false); assertEquals("S.A\n" + "[@0,0:0='a',<3>,1:0]\n" + "[@1,1:1='b',<1>,1:1]\n" + @@ -38,7 +38,7 @@ public class TestCompositeLexers extends BaseTest { "import S;\n" + "A : 'a' B {document.getElementById('output').value += \"M.A\";};\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("M.g4", grammar, "M", "ab"); + String found = execLexer("M.g4", grammar, "M", "ab", false); assertEquals("M.A\n" + "[@0,0:1='ab',<1>,1:0]\n" + "[@1,2:1='',<-1>,1:2]\n", found); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java index 112ce021b..890719e13 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerErrors.java @@ -9,7 +9,7 @@ public class TestLexerErrors extends BaseTest { public void testInvalidCharAtStart() throws Exception { String grammar = "lexer grammar L;\n" + "A : 'a' 'b' ;"; - String found = execLexer("L.g4", grammar, "L", "x"); + String found = execLexer("L.g4", grammar, "L", "x", false); assertEquals("[@0,1:0='',<-1>,1:1]\n", found); assertEquals("line 1:0 token recognition error at: 'x'\n", this.stderrDuringParse); } @@ -20,7 +20,7 @@ public class TestLexerErrors extends BaseTest { "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" + "STRING : '\"' ('\\\"' | .)*? 
'\"';\n" + "WS : [ \\t\\r\\n]+ -> skip;"; - String found = execLexer("L.g4", grammar, "L", "[\"foo\"]"); + String found = execLexer("L.g4", grammar, "L", "[\"foo\"]", false); assertEquals("[@0,0:6='[\"foo\"]',<1>,1:0]\n" + "[@1,7:6='',<-1>,1:7]\n", found); assertNull(this.stderrDuringParse); @@ -32,7 +32,7 @@ public class TestLexerErrors extends BaseTest { "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" + "STRING : '\"' ('\\\"' | .)*? '\"';\n" + "WS : [ \\t\\r\\n]+ -> skip;"; - String found = execLexer("L.g4", grammar, "L", "[\"foo]"); + String found = execLexer("L.g4", grammar, "L", "[\"foo]", false); assertEquals("[@0,6:5='',<-1>,1:6]\n", found); assertEquals("line 1:0 token recognition error at: '[\"foo]'\n", this.stderrDuringParse); } @@ -42,7 +42,7 @@ public class TestLexerErrors extends BaseTest { String grammar = "lexer grammar L;\n" + "ACTION : '{' (ACTION | ~[{}])* '}';\n" + "WS : [ \\r\\n\\t]+ -> skip;"; - String found = execLexer("L.g4", grammar, "L", "{ { } }"); + String found = execLexer("L.g4", grammar, "L", "{ { } }", false); assertEquals("[@0,0:6='{ { } }',<1>,1:0]\n" + "[@1,7:6='',<-1>,1:7]\n", found); assertNull(this.stderrDuringParse); @@ -53,7 +53,7 @@ public class TestLexerErrors extends BaseTest { String grammar = "lexer grammar L;\n" + "ACTION : '{' (ACTION | ~[{}])* '}';\n" + "WS : [ \\r\\n\\t]+ -> skip;"; - String found = execLexer("L.g4", grammar, "L", "{ { }"); + String found = execLexer("L.g4", grammar, "L", "{ { }", false); assertEquals("[@0,5:4='',<-1>,1:5]\n", found); assertEquals("line 1:0 token recognition error at: '{ { }'\n", this.stderrDuringParse); } @@ -62,7 +62,7 @@ public class TestLexerErrors extends BaseTest { public void testInvalidCharAtStartAfterDFACache() throws Exception { String grammar = "lexer grammar L;\n" + "A : 'a' 'b' ;"; - String found = execLexer("L.g4", grammar, "L", "abx"); + String found = execLexer("L.g4", grammar, "L", "abx", false); assertEquals("[@0,0:1='ab',<1>,1:0]\n" + "[@1,3:2='',<-1>,1:3]\n", found); assertEquals("line 1:2 token recognition error at: 'x'\n", this.stderrDuringParse); @@ -72,7 +72,7 @@ public class TestLexerErrors extends BaseTest { public void testInvalidCharInToken() throws Exception { String grammar = "lexer grammar L;\n" + "A : 'a' 'b' ;"; - String found = execLexer("L.g4", grammar, "L", "ax"); + String found = execLexer("L.g4", grammar, "L", "ax", false); assertEquals("[@0,2:1='',<-1>,1:2]\n", found); assertEquals("line 1:0 token recognition error at: 'ax'\n", this.stderrDuringParse); } @@ -81,7 +81,7 @@ public class TestLexerErrors extends BaseTest { public void testInvalidCharInTokenAfterDFACache() throws Exception { String grammar = "lexer grammar L;\n" + "A : 'a' 'b' ;"; - String found = execLexer("L.g4", grammar, "L", "abax"); + String found = execLexer("L.g4", grammar, "L", "abax", false); assertEquals("[@0,0:1='ab',<1>,1:0]\n" + "[@1,4:3='',<-1>,1:4]\n", found); assertEquals("line 1:2 token recognition error at: 'ax'\n", this.stderrDuringParse); @@ -92,7 +92,7 @@ public class TestLexerErrors extends BaseTest { String grammar = "lexer grammar L;\n" + "A : 'ab' ;\n" + "B : 'abc' ;"; - String found = execLexer("L.g4", grammar, "L", "ababx"); + String found = execLexer("L.g4", grammar, "L", "ababx", false); assertEquals("[@0,0:1='ab',<1>,1:0]\n" + "[@1,2:3='ab',<1>,1:2]\n" + "[@2,5:4='',<-1>,1:5]\n", found); @@ -105,7 +105,7 @@ public class TestLexerErrors extends BaseTest { "A : 'ab' ;\n" + "B : 'abc' ;\n" + "C : 'abcd' ;"; - String found = execLexer("L.g4", grammar, "L", "ababcx"); + String found = 
execLexer("L.g4", grammar, "L", "ababcx", false); assertEquals("[@0,0:1='ab',<1>,1:0]\n" + "[@1,2:4='abc',<2>,1:2]\n" + "[@2,6:5='',<-1>,1:6]\n", found); @@ -116,7 +116,7 @@ public class TestLexerErrors extends BaseTest { public void testErrorInMiddle() throws Exception { String grammar = "lexer grammar L;\n" + "A : 'abc' ;"; - String found = execLexer("L.g4", grammar, "L", "abx"); + String found = execLexer("L.g4", grammar, "L", "abx", false); assertEquals("[@0,3:2='',<-1>,1:3]\n", found); assertEquals("line 1:0 token recognition error at: 'abx'\n", this.stderrDuringParse); } @@ -128,7 +128,7 @@ public class TestLexerErrors extends BaseTest { "expr : primary expr? {} | expr '->' ID;\n" + "primary : ID;\n" + "ID : [a-z]+;"; - String found = execLexer("L.g4", grammar, "LLexer", "x : x"); + String found = execLexer("L.g4", grammar, "LLexer", "x : x", false); assertEquals("[@0,0:0='x',<3>,1:0]\n" + "[@1,2:2=':',<1>,1:2]\n" + "[@2,4:4='x',<3>,1:4]\n" + diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java index 9e33810c5..51114b219 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java @@ -9,7 +9,7 @@ public class TestLexerExec extends BaseTest { public void testQuoteTranslation() throws Exception { String grammar = "lexer grammar L;\n" + "QUOTE : '\"' ; // make sure this compiles"; - String found = execLexer("L.g4", grammar, "L", "\""); + String found = execLexer("L.g4", grammar, "L", "\"", false); assertEquals("[@0,0:0='\"',<1>,1:0]\n" + "[@1,1:0='',<-1>,1:1]\n", found); assertNull(this.stderrDuringParse); @@ -21,7 +21,7 @@ public class TestLexerExec extends BaseTest { "A : '-' I ;\n" + "I : '0'..'9'+ ;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 -21 3"); + String found = execLexer("L.g4", grammar, "L", "34 -21 3", false); assertEquals("[@0,0:1='34',<2>,1:0]\n" + "[@1,3:5='-21',<1>,1:3]\n" + "[@2,7:7='3',<2>,1:7]\n" + @@ -37,7 +37,7 @@ public class TestLexerExec extends BaseTest { "Vee : '\\\\/';\n" + "Wedge : '/\\\\';\n" + "WS : [ \\t] -> skip;"; - String found = execLexer("L.g4", grammar, "L", "\\ / \\/ /\\"); + String found = execLexer("L.g4", grammar, "L", "\\ / \\/ /\\", false); assertEquals("[@0,0:0='\\',<1>,1:0]\n" + "[@1,2:2='/',<2>,1:2]\n" + "[@2,4:5='\\/',<3>,1:4]\n" + @@ -55,7 +55,7 @@ public class TestLexerExec extends BaseTest { "fragment L_A: '.-';\n" + "fragment L_K: '-.-';\n" + "SEPARATOR: '!';"; - String found = execLexer("L.g4", grammar, "L", "-.-.-!"); + String found = execLexer("L.g4", grammar, "L", "-.-.-!", false); assertEquals("[@0,0:4='-.-.-',<1>,1:0]\n" + "[@1,5:5='!',<3>,1:5]\n" + "[@2,6:5='',<-1>,1:6]\n", found); @@ -66,7 +66,7 @@ public class TestLexerExec extends BaseTest { public void testNonGreedyTermination1() throws Exception { String grammar = "lexer grammar L;\n" + "STRING : '\"' ('\"\"' | .)*? '\"';"; - String found = execLexer("L.g4", grammar, "L", "\"hi\"\"mom\""); + String found = execLexer("L.g4", grammar, "L", "\"hi\"\"mom\"", false); assertEquals("[@0,0:3='\"hi\"',<1>,1:0]\n" + "[@1,4:8='\"mom\"',<1>,1:4]\n" + "[@2,9:8='',<-1>,1:9]\n", found); @@ -77,7 +77,7 @@ public class TestLexerExec extends BaseTest { public void testNonGreedyTermination2() throws Exception { String grammar = "lexer grammar L;\n" + "STRING : '\"' ('\"\"' | .)+? 
'\"';"; - String found = execLexer("L.g4", grammar, "L", "\"\"\"mom\""); + String found = execLexer("L.g4", grammar, "L", "\"\"\"mom\"", false); assertEquals("[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" + "[@1,7:6='',<-1>,1:7]\n", found); assertNull(this.stderrDuringParse); @@ -88,7 +88,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '//' .*? '\\n' CMT?;\n" + "WS : (' '|'\\t')+;"; - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n", false); assertEquals("[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + "[@1,14:13='',<-1>,3:14]\n", found); assertNull(this.stderrDuringParse); @@ -99,7 +99,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '//' .*? '\\n' CMT??;\n" + "WS : (' '|'\\t')+;"; - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n", false); assertEquals("[@0,0:6='//blah\\n',<1>,1:0]\n" + "[@1,7:13='//blah\\n',<1>,2:0]\n" + "[@2,14:13='',<-1>,3:7]\n", found); @@ -111,7 +111,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '//' .*? '\\n' CMT*;\n" + "WS : (' '|'\\t')+;"; - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n", false); assertEquals("[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + "[@1,14:13='',<-1>,3:14]\n", found); assertNull(this.stderrDuringParse); @@ -122,7 +122,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '//' .*? '\\n' CMT*?;\n" + "WS : (' '|'\\t')+;"; - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n", false); assertEquals("[@0,0:6='//blah\\n',<1>,1:0]\n" + "[@1,7:13='//blah\\n',<1>,2:0]\n" + "[@2,14:13='',<-1>,3:7]\n", found); @@ -134,7 +134,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : ('//' .*? '\\n')+;\n" + "WS : (' '|'\\t')+;"; - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n", false); assertEquals("[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + "[@1,14:13='',<-1>,3:14]\n", found); assertNull(this.stderrDuringParse); @@ -145,7 +145,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : ('//' .*? '\\n')+?;\n" + "WS : (' '|'\\t')+;"; - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); + String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n", false); assertEquals("[@0,0:6='//blah\\n',<1>,1:0]\n" + "[@1,7:13='//blah\\n',<1>,2:0]\n" + "[@2,14:13='',<-1>,3:7]\n", found); @@ -157,7 +157,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '/*' (CMT | .)*? '*/' ;\n" + "WS : (' '|'\\n')+;"; - String found = execLexer("L.g4", grammar, "L", "/* ick */\n/* /* */\n/* /*nested*/ */\n"); + String found = execLexer("L.g4", grammar, "L", "/* ick */\n/* /* */\n/* /*nested*/ */\n", false); assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + "[@1,9:9='\\n',<2>,1:9]\n" + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + @@ -171,7 +171,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '/*' (CMT | .)*? 
'*/' ;\n" + "WS : (' '|'\\n')+;"; - String found = execLexer("L.g4", grammar, "L", "/* ick */x\n/* /* */x\n/* /*nested*/ */x\n"); + String found = execLexer("L.g4", grammar, "L", "/* ick */x\n/* /* */x\n/* /*nested*/ */x\n", false); assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + "[@1,10:10='\\n',<2>,1:10]\n" + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + @@ -185,7 +185,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '/*' (CMT | .)+? '*/' ;\n" + "WS : (' '|'\\n')+;"; - String found = execLexer("L.g4", grammar, "L", "/* ick */\n/* /* */\n/* /*nested*/ */\n"); + String found = execLexer("L.g4", grammar, "L", "/* ick */\n/* /* */\n/* /*nested*/ */\n", false); assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + "[@1,9:9='\\n',<2>,1:9]\n" + "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + @@ -199,7 +199,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "CMT : '/*' (CMT | .)+? '*/' ;\n" + "WS : (' '|'\\n')+;"; - String found = execLexer("L.g4", grammar, "L", "/* ick */x\n/* /* */x\n/* /*nested*/ */x\n"); + String found = execLexer("L.g4", grammar, "L", "/* ick */x\n/* /* */x\n/* /*nested*/ */x\n", false); assertEquals("[@0,0:8='/* ick */',<1>,1:0]\n" + "[@1,10:10='\\n',<2>,1:10]\n" + "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + @@ -218,7 +218,7 @@ public class TestLexerExec extends BaseTest { " {System.out.println(this.getText());} ;\n" + "WS : (' '|'\\n') -> skip ;\n" + "J : .;"; - String found = execLexer("L.g4", grammar, "L", "ab"); + String found = execLexer("L.g4", grammar, "L", "ab", false); assertEquals("stuff0: \n" + "stuff1: a\n" + "stuff2: ab\n" + @@ -234,7 +234,7 @@ public class TestLexerExec extends BaseTest { "I : ('a' | 'ab') {System.out.println(this.getText());} ;\n" + "WS : (' '|'\\n') -> skip ;\n" + "J : .;"; - String found = execLexer("L.g4", grammar, "L", "ab"); + String found = execLexer("L.g4", grammar, "L", "ab", false); assertEquals("ab\n" + "[@0,0:1='ab',<1>,1:0]\n" + "[@1,2:1='',<-1>,1:2]\n", found); @@ -247,7 +247,7 @@ public class TestLexerExec extends BaseTest { "I : .*? ('a' | 'ab') {System.out.println(this.getText());} ;\n" + "WS : (' '|'\\n') -> skip ;\n" + "J : . 
{System.out.println(this.getText());};"; - String found = execLexer("L.g4", grammar, "L", "ab"); + String found = execLexer("L.g4", grammar, "L", "ab", false); assertEquals("a\n" + "b\n" + "[@0,0:0='a',<1>,1:0]\n" + @@ -262,7 +262,7 @@ public class TestLexerExec extends BaseTest { "KEND : 'end' ; // has priority\n" + "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\n')+;"; - String found = execLexer("L.g4", grammar, "L", "end eend ending a"); + String found = execLexer("L.g4", grammar, "L", "end eend ending a", false); assertEquals("[@0,0:2='end',<1>,1:0]\n" + "[@1,3:3=' ',<3>,1:3]\n" + "[@2,4:7='eend',<2>,1:4]\n" + @@ -284,7 +284,7 @@ public class TestLexerExec extends BaseTest { "ID : 'a'..'z'+ ;\n" + "fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;\n" + "WS : (' '|'\\n')+;"; - String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l"); + String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l", false); assertEquals("[@0,0:0='x',<5>,1:0]\n" + "[@1,1:1=' ',<6>,1:1]\n" + "[@2,2:2='0',<2>,1:2]\n" + @@ -307,7 +307,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "DONE : EOF ;\n" + "A : 'a';"; - String found = execLexer("L.g4", grammar, "L", ""); + String found = execLexer("L.g4", grammar, "L", "", false); assertEquals("[@0,0:-1='',<1>,1:0]\n" + "[@1,0:-1='',<-1>,1:0]\n", found); assertNull(this.stderrDuringParse); @@ -319,7 +319,7 @@ public class TestLexerExec extends BaseTest { "A : 'a' EOF ;\n" + "B : 'a';\n" + "C : 'c';"; - String found = execLexer("L.g4", grammar, "L", ""); + String found = execLexer("L.g4", grammar, "L", "", false); assertEquals("[@0,0:-1='',<-1>,1:0]\n", found); assertNull(this.stderrDuringParse); } @@ -330,7 +330,7 @@ public class TestLexerExec extends BaseTest { "A : 'a' EOF ;\n" + "B : 'a';\n" + "C : 'c';"; - String found = execLexer("L.g4", grammar, "L", "a"); + String found = execLexer("L.g4", grammar, "L", "a", false); assertEquals("[@0,0:0='a',<1>,1:0]\n" + "[@1,1:0='',<-1>,1:1]\n", found); assertNull(this.stderrDuringParse); @@ -341,7 +341,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D] -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); + String found = execLexer("L.g4", grammar, "L", "34\r\n 34", false); assertEquals("I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + @@ -355,7 +355,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); + String found = execLexer("L.g4", grammar, "L", "34\r\n 34", false); assertEquals("I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + @@ -369,7 +369,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : ~[ab \\n] ~[ \\ncd]* {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "xaf"); + String found = execLexer("L.g4", grammar, "L", "xaf", false); assertEquals("I\n" + "[@0,0:2='xaf',<1>,1:0]\n" + "[@1,3:2='',<-1>,1:3]\n", found); @@ -382,7 +382,7 @@ public class TestLexerExec extends BaseTest { "I : (~[ab \\n]|'a') {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D]+ -> skip ;\n" + " "; - String found = execLexer("L.g4", grammar, "L", "a x"); + String found = execLexer("L.g4", grammar, "L", "a x", false); assertEquals("I\n" + "I\n" + "[@0,0:0='a',<1>,1:0]\n" + @@ -397,7 +397,7 
@@ public class TestLexerExec extends BaseTest { "I : [0-9]+ {System.out.println(\"I\");} ;\n" + "ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n" + "WS : [ \\n\\u0009\\r]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n "); + String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n ", false); assertEquals("I\n" + "I\n" + "ID\n" + @@ -415,7 +415,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : [0-]+ {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "00\r\n"); + String found = execLexer("L.g4", grammar, "L", "00\r\n", false); assertEquals("I\n" + "[@0,0:1='00',<1>,1:0]\n" + "[@1,4:3='',<-1>,2:0]\n", found); @@ -427,7 +427,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : [0-9]+ {System.out.println(\"I\");} ;\n" + "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 "); + String found = execLexer("L.g4", grammar, "L", "34 ", false); assertEquals("I\n" + "[@0,0:1='34',<1>,1:0]\n" + "[@1,3:2='',<-1>,1:3]\n", found); @@ -439,7 +439,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "DASHBRACK : [\\-\\]]+ {System.out.println(\"DASHBRACK\");} ;\n" + "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "- ] "); + String found = execLexer("L.g4", grammar, "L", "- ] ", false); assertEquals("DASHBRACK\n" + "DASHBRACK\n" + "[@0,0:0='-',<1>,1:0]\n" + @@ -453,7 +453,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "A : [z-a9]+ {System.out.println(\"A\");} ;\n" + "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "9"); + String found = execLexer("L.g4", grammar, "L", "9", false); assertEquals("A\n" + "[@0,0:0='9',<1>,1:0]\n" + "[@1,1:0='',<-1>,1:1]\n", found); @@ -465,7 +465,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "A : [\"a-z]+ {System.out.println(\"A\");} ;\n" + "WS : [ \\n\\t]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "b\"a"); + String found = execLexer("L.g4", grammar, "L", "b\"a", false); assertEquals("A\n" + "[@0,0:2='b\"a',<1>,1:0]\n" + "[@1,3:2='',<-1>,1:3]\n", found); @@ -477,7 +477,7 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "A : [\"\\ab]+ {System.out.println(\"A\");} ;\n" + "WS : [ \\n\\t]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "b\"\\a"); + String found = execLexer("L.g4", grammar, "L", "b\"\\a", false); assertEquals("A\n" + "[@0,0:3='b\"\\a',<1>,1:0]\n" + "[@1,4:3='',<-1>,1:4]\n", found); @@ -599,7 +599,7 @@ public class TestLexerExec extends BaseTest { "WS\n" + " : [ \\t]+ -> skip\n" + " ;"; - String found = execLexer("PositionAdjustingLexer.g4", grammar, "PositionAdjustingLexer", "tokens\ntokens {\nnotLabel\nlabel1 =\nlabel2 +=\nnotLabel\n"); + String found = execLexer("PositionAdjustingLexer.g4", grammar, "PositionAdjustingLexer", "tokens\ntokens {\nnotLabel\nlabel1 =\nlabel2 +=\nnotLabel\n", false); assertEquals("[@0,0:5='tokens',<6>,1:0]\n" + "[@1,7:12='tokens',<4>,2:0]\n" + "[@2,14:14='{',<3>,2:7]\n" + @@ -4617,7 +4617,7 @@ public class TestLexerExec extends BaseTest { "KW3997 : 'KW' '3997';\n" + "KW3998 : 'KW' '3998';\n" + "KW3999 : 'KW' '3999';"; - String found = execLexer("L.g4", grammar, "L", "KW400"); + String found = execLexer("L.g4", grammar, "L", "KW400", false); 
assertEquals("[@0,0:4='KW400',<402>,1:0]\n" + "[@1,5:4='',<-1>,1:5]\n", found); assertNull(this.stderrDuringParse); @@ -4634,7 +4634,7 @@ public class TestLexerExec extends BaseTest { " StringMode_Done : -> more, mode(EndStringMode);\n" + "mode EndStringMode; \n" + " EndString : '\\'' -> popMode;"; - String found = execLexer("L.g4", grammar, "L", "'xxx'"); + String found = execLexer("L.g4", grammar, "L", "'xxx'", false); assertEquals("[@0,0:4=''xxx'',<1>,1:0]\n" + "[@1,5:4='',<-1>,1:5]\n", found); assertNull(this.stderrDuringParse); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java index 1817a622b..3211cdf6a 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalLexer.java @@ -12,7 +12,7 @@ public class TestSemPredEvalLexer extends BaseTest { "E2 : 'enum' { true }? ; // winner not E1 or ID\n" + "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\n') -> skip;"; - String found = execLexer("L.g4", grammar, "L", "enum abc"); + String found = execLexer("L.g4", grammar, "L", "enum abc", true); assertEquals("[@0,0:3='enum',<2>,1:0]\n" + "[@1,5:7='abc',<3>,1:5]\n" + "[@2,8:7='',<-1>,1:8]\n" + @@ -32,7 +32,7 @@ public class TestSemPredEvalLexer extends BaseTest { "ENUM : 'enum' { false }? ;\n" + "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\n') -> skip;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum"); + String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); assertEquals("[@0,0:3='enum',<2>,1:0]\n" + "[@1,5:7='abc',<2>,1:5]\n" + "[@2,9:12='enum',<2>,1:9]\n" + @@ -53,7 +53,7 @@ public class TestSemPredEvalLexer extends BaseTest { "ENUM : [a-z]+ { false }? ;\n" + "ID : [a-z]+ ;\n" + "WS : (' '|'\\n') -> skip;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum"); + String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); assertEquals("[@0,0:3='enum',<2>,1:0]\n" + "[@1,5:7='abc',<2>,1:5]\n" + "[@2,9:12='enum',<2>,1:9]\n" + @@ -65,10 +65,10 @@ public class TestSemPredEvalLexer extends BaseTest { @Test public void testEnumNotID() throws Exception { String grammar = "lexer grammar L;\n" + - "ENUM : [a-z]+ { this.text===\"enum\" }? ;\n" + + "ENUM : [a-z]+ { this.getText().equals(\"enum\") }? ;\n" + "ID : [a-z]+ ;\n" + "WS : (' '|'\\n') -> skip;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum"); + String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); assertEquals("[@0,0:3='enum',<1>,1:0]\n" + "[@1,5:7='abc',<2>,1:5]\n" + "[@2,9:12='enum',<1>,1:9]\n" + @@ -81,11 +81,11 @@ public class TestSemPredEvalLexer extends BaseTest { public void testIndent() throws Exception { String grammar = "lexer grammar L;\n" + "ID : [a-z]+ ;\n" + - "INDENT : [ \\t]+ { this._tokenStartColumn===0 }? 
\\n\" +\n" + - " { System.out.println(\"INDENT\"); } ;\"+\n" + + "INDENT : [ \\t]+ { this._tokenStartCharPositionInLine==0 }?\n" + + " { System.out.println(\"INDENT\"); } ;\n" + "NL : '\\n';\n" + "WS : [ \\t]+ ;"; - String found = execLexer("L.g4", grammar, "L", "abc\n def \n"); + String found = execLexer("L.g4", grammar, "L", "abc\n def \n", true); assertEquals("INDENT\n" + "[@0,0:2='abc',<1>,1:0]\n" + "[@1,3:3='\\n',<3>,1:3]\n" + @@ -110,10 +110,10 @@ public class TestSemPredEvalLexer extends BaseTest { String grammar = "lexer grammar L;\n" + "WORD1 : ID1+ { System.out.println(this.getText()); } ;\n" + "WORD2 : ID2+ { System.out.println(this.getText()); } ;\n" + - "fragment ID1 : { this.column < 2 }? [a-zA-Z];\n" + - "fragment ID2 : { this.column >= 2 }? [a-zA-Z];\n" + + "fragment ID1 : { this.getCharPositionInLine() < 2 }? [a-zA-Z];\n" + + "fragment ID2 : { this.getCharPositionInLine() >= 2 }? [a-zA-Z];\n" + "WS : (' '|'\\n') -> skip;"; - String found = execLexer("L.g4", grammar, "L", "a cde\nabcde\n"); + String found = execLexer("L.g4", grammar, "L", "a cde\nabcde\n", true); assertEquals("a\n" + "cde\n" + "ab\n" + @@ -129,10 +129,10 @@ public class TestSemPredEvalLexer extends BaseTest { @Test public void testPredicatedKeywords() throws Exception { String grammar = "lexer grammar L;\n" + - "ENUM : [a-z]+ { this.text===\"enum\" }? { System.out.println(\"enum!\"); } ;\n" + - "ID : [a-z]+ { System.out.println(\"ID\" + this.getText()); } ;\n" + + "ENUM : [a-z]+ { this.getText().equals(\"enum\") }? { System.out.println(\"enum!\"); } ;\n" + + "ID : [a-z]+ { System.out.println(\"ID \" + this.getText()); } ;\n" + "WS : [ \\n] -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum enu a"); + String found = execLexer("L.g4", grammar, "L", "enum enu a", false); assertEquals("enum!\n" + "ID enu\n" + "ID a\n" + diff --git a/tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java b/tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java deleted file mode 100644 index 862871c3d..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestSemPredEvalLexer.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.test.tool; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestSemPredEvalLexer extends BaseTest { - - @Test public void testDisableRule() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "E1 : 'enum' {false}? ;\n" + - "E2 : 'enum' {true}? ;\n" + // winner not E1 or ID - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc", true); - String expecting = - "[@0,0:3='enum',<2>,1:0]\n" + - "[@1,5:7='abc',<3>,1:5]\n" + - "[@2,8:7='',<-1>,1:8]\n" + - "s0-' '->:s5=>4\n" + - "s0-'a'->:s6=>3\n" + - "s0-'e'->:s1=>3\n" + - ":s1=>3-'n'->:s2=>3\n" + - ":s2=>3-'u'->:s3=>3\n" + - ":s6=>3-'b'->:s6=>3\n" + - ":s6=>3-'c'->:s6=>3\n"; - assertEquals(expecting, found); - } - - @Test public void testIDvsEnum() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ENUM : 'enum' {false}? ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); - String expecting = - "[@0,0:3='enum',<2>,1:0]\n" + - "[@1,5:7='abc',<2>,1:5]\n" + - "[@2,9:12='enum',<2>,1:9]\n" + - "[@3,13:12='',<-1>,1:13]\n" + - "s0-' '->:s5=>3\n" + - "s0-'a'->:s4=>2\n" + - "s0-'e'->:s1=>2\n" + - ":s1=>2-'n'->:s2=>2\n" + - ":s2=>2-'u'->:s3=>2\n" + - ":s4=>2-'b'->:s4=>2\n" + - ":s4=>2-'c'->:s4=>2\n"; // no 'm'-> transition...conflicts with pred - assertEquals(expecting, found); - } - - @Test public void testIDnotEnum() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ENUM : [a-z]+ {false}? ;\n" + - "ID : [a-z]+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); - String expecting = - "[@0,0:3='enum',<2>,1:0]\n" + - "[@1,5:7='abc',<2>,1:5]\n" + - "[@2,9:12='enum',<2>,1:9]\n" + - "[@3,13:12='',<-1>,1:13]\n" + - "s0-' '->:s2=>3\n"; // no edges in DFA for enum/id. all paths lead to pred. - assertEquals(expecting, found); - } - - @Test public void testEnumNotID() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ENUM : [a-z]+ {getText().equals(\"enum\")}? ;\n" + - "ID : [a-z]+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); - String expecting = - "[@0,0:3='enum',<1>,1:0]\n" + - "[@1,5:7='abc',<2>,1:5]\n" + - "[@2,9:12='enum',<1>,1:9]\n" + - "[@3,13:12='',<-1>,1:13]\n" + - "s0-' '->:s3=>3\n"; // no edges in DFA for enum/id. all paths lead to pred. - assertEquals(expecting, found); - } - - @Test public void testIndent() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ID : [a-z]+ ;\n"+ - "INDENT : [ \\t]+ {_tokenStartCharPositionInLine==0}? 
\n" + - " {System.out.println(\"INDENT\");} ;"+ - "NL : '\\n' ;"+ - "WS : [ \\t]+ ;"; - String found = execLexer("L.g4", grammar, "L", "abc\n def \n", true); - String expecting = - "INDENT\n" + // action output - "[@0,0:2='abc',<1>,1:0]\n" + // ID - "[@1,3:3='\\n',<3>,1:3]\n" + // NL - "[@2,4:5=' ',<2>,2:0]\n" + // INDENT - "[@3,6:8='def',<1>,2:2]\n" + // ID - "[@4,9:10=' ',<4>,2:5]\n" + // WS - "[@5,11:11='\\n',<3>,2:7]\n" + - "[@6,12:11='',<-1>,3:8]\n" + - "s0-'\n" + - "'->:s2=>3\n" + - "s0-'a'->:s1=>1\n" + - "s0-'d'->:s1=>1\n" + - ":s1=>1-'b'->:s1=>1\n" + - ":s1=>1-'c'->:s1=>1\n" + - ":s1=>1-'e'->:s1=>1\n" + - ":s1=>1-'f'->:s1=>1\n"; - assertEquals(expecting, found); - } - - @Test public void testLexerInputPositionSensitivePredicates() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "WORD1 : ID1+ {System.out.println(getText());} ;\n"+ - "WORD2 : ID2+ {System.out.println(getText());} ;\n"+ - "fragment ID1 : {getCharPositionInLine()<2}? [a-zA-Z];\n"+ - "fragment ID2 : {getCharPositionInLine()>=2}? [a-zA-Z];\n"+ - "WS : (' '|'\\n') -> skip;\n"; - String found = execLexer("L.g4", grammar, "L", "a cde\nabcde\n"); - String expecting = - "a\n" + - "cde\n" + - "ab\n" + - "cde\n" + - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,2:4='cde',<2>,1:2]\n" + - "[@2,6:7='ab',<1>,2:0]\n" + - "[@3,8:10='cde',<2>,2:2]\n" + - "[@4,12:11='',<-1>,3:0]\n"; - assertEquals(expecting, found); - } - - @Test public void testPredicatedKeywords() { - String grammar = - "lexer grammar A;" + - "ENUM : [a-z]+ {getText().equals(\"enum\")}? {System.out.println(\"enum!\");} ;\n" + - "ID : [a-z]+ {System.out.println(\"ID \"+getText());} ;\n" + - "WS : [ \\n] -> skip ;"; - String found = execLexer("A.g4", grammar, "A", "enum enu a"); - String expecting = - "enum!\n" + - "ID enu\n" + - "ID a\n" + - "[@0,0:3='enum',<1>,1:0]\n" + - "[@1,5:7='enu',<2>,1:5]\n" + - "[@2,9:9='a',<2>,1:9]\n" + - "[@3,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); - } -} From f99acb7202a64d561b8b279f47600a257e00c357 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Fri, 24 Oct 2014 22:26:34 +0800 Subject: [PATCH 12/26] validated CompositeLexers --- .../LexerDelegatorInvokesDelegateRule_S.st | 2 +- .../LexerDelegatorRuleOverridesDelegate.st | 2 +- .../LexerDelegatorRuleOverridesDelegate_S.st | 4 ++-- .../org/antlr/v4/test/rt/java/Java.test.stg | 2 +- .../v4/test/rt/java/TestCompositeLexers.java | 8 +++---- .../v4/test/rt/java/TestCompositeParsers.java | 22 +++++++++---------- .../v4/test/rt/java/TestParserErrors.java | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st index 902a03a03..9d05c4c55 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorInvokesDelegateRule_S.st @@ -1,3 +1,3 @@ lexer grammar S; -A : 'a' {}; +A : 'a' {}; C : 'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st index 3db9d0e88..ed26569b1 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st +++ 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate.st @@ -1,4 +1,4 @@ lexer grammar M; import S; -A : 'a' B {}; +A : 'a' B {}; WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st index 272238968..a1ca69d83 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeLexers/LexerDelegatorRuleOverridesDelegate_S.st @@ -1,3 +1,3 @@ lexer grammar S; -A : 'a' {}; -B : 'b' {}; +A : 'a' {}; +B : 'b' {}; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index ce1a3f775..b800ffb53 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -93,7 +93,7 @@ public void test() throws Exception { writeln(s) ::= <);>> -write(s) ::= <;>> +write(s) ::= <);>> assert(s) ::= <);>> diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java index 375f38aad..115de5c57 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeLexers.java @@ -8,7 +8,7 @@ public class TestCompositeLexers extends BaseTest { @Test public void testLexerDelegatorInvokesDelegateRule() throws Exception { String slave_S = "lexer grammar S;\n" + - "A : 'a' {document.getElementById('output').value += \"S.a\";};\n" + + "A : 'a' {System.out.println(\"S.A\");};\n" + "C : 'c' ;"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); @@ -29,14 +29,14 @@ public class TestCompositeLexers extends BaseTest { @Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception { String slave_S = "lexer grammar S;\n" + - "A : 'a' {document.getElementById('output').value += \"S.A\";};\n" + - "B : 'b' {document.getElementById('output').value += \"S.B\";};"; + "A : 'a' {System.out.println(\"S.A\");};\n" + + "B : 'b' {System.out.println(\"S.B\");};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); String grammar = "lexer grammar M;\n" + "import S;\n" + - "A : 'a' B {document.getElementById('output').value += \"M.A\";};\n" + + "A : 'a' B {System.out.println(\"M.A\");};\n" + "WS : (' '|'\\n') -> skip ;"; String found = execLexer("M.g4", grammar, "M", "ab", false); assertEquals("M.A\n" + diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java index 96a4b7d3d..5f64d13ca 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java @@ -28,7 +28,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testBringInLiteralsFromDelegate() throws Exception { String slave_S = "parser grammar S;\n" + - "a : '=' 'a' {document.getElementById('output').value += \"S.a\";};"; + "a : '=' 'a' {System.out.print(\"S.a\");};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); @@ -44,7 +44,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception { String slave_S = "parser grammar S;\n" + - "a[int x] returns [int y] : B {document.getElementById('output').value += \"S.a\";;$y=1000;};"; + "a[int x] 
returns [int y] : B {System.out.print(\"S.a\");;$y=1000;};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); @@ -61,13 +61,13 @@ public class TestCompositeParsers extends BaseTest { @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { String slave_S = "parser grammar S;\n" + - "A : B {document.getElementById('output').value += \"S.a\";};"; + "A : B {System.out.print(\"S.a\");};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); String grammar = "grammar M;\n" + "import S;\n" + - "s : a {document.getElementById('output').value += $a.text;} ;\n" + + "s : a {System.out.print($a.text);} ;\n" + "B : 'b' ; // defines B from inherited token space\n" + "WS : (' '|'\\n') -> skip ;"; String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); @@ -121,13 +121,13 @@ public class TestCompositeParsers extends BaseTest { public void testDelegatesSeeSameTokenType() throws Exception { String slave_S = "parser grammar S;\n" + "tokens { A, B, C }\n" + - "x : A {document.getElementById('output').value += \"S.x\";};"; + "x : A {System.out.print(\"S.x\");};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); String slave_T = "parser grammar S;\n" + "tokens { C, B, A } // reverse order\n" + - "y : A {document.getElementById('output').value += \"T.y\";};"; + "y : A {System.out.print(\"T.y\");};"; mkdir(tmpdir); writeFile(tmpdir, "T.g4", slave_T); @@ -190,7 +190,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testDelegatorRuleOverridesDelegate() throws Exception { String slave_S = "parser grammar S;\n" + - "a : b {document.getElementById('output').value += \"S.a\";};\n" + + "a : b {System.out.print(\"S.a\");};\n" + "b : B ;"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); @@ -210,7 +210,7 @@ public class TestCompositeParsers extends BaseTest { "type_ : 'int' ;\n" + "decl : type_ ID ';'\n" + " | type_ ID init ';' {\n" + - " document.getElementById('output').value += \"decl: \" + $text;\n" + + " System.out.print(\"decl: \" + $text);\n" + " };\n" + "init : '=' INT;"; mkdir(tmpdir); @@ -231,7 +231,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testDelegatorRuleOverridesDelegates() throws Exception { String slave_S = "parser grammar S;\n" + - "a : b {document.getElementById('output').value += \"S.a\";};\n" + + "a : b {System.out.print(\"S.a\");};\n" + "b : 'b' ;\n" + " "; mkdir(tmpdir); @@ -239,13 +239,13 @@ public class TestCompositeParsers extends BaseTest { String slave_T = "parser grammar S;\n" + "tokens { A }\n" + - "b : 'b' {document.getElementById('output').value += \"T.b\";};"; + "b : 'b' {System.out.print(\"T.b\");};"; mkdir(tmpdir); writeFile(tmpdir, "T.g4", slave_T); String grammar = "grammar M;\n" + "import S, T;\n" + - "b : 'b'|'c' {document.getElementById('output').value += \"M.b\";}|B|A;\n" + + "b : 'b'|'c' {System.out.print(\"M.b\");}|B|A;\n" + "WS : (' '|'\\n') -> skip ;"; String found = execParser("M.g4", grammar, "MParser", "MLexer", "a", "c", false); assertEquals("M.b\nS.a\n", found); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java index 91bcb633f..34b6302f5 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java @@ -219,8 +219,8 @@ public class TestParserErrors extends BaseTest { " };\n" + "}\n" + "s : (a | b)+;\n" + - "a : 'a' {document.getElementById('output').value += 'a';};\n" + - "b : 'b' 
{document.getElementById('output').value += 'b';};\n" + + "a : 'a' {System.out.print('a');};\n" + + "b : 'b' {System.out.print('b');};\n" + ";"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abab", false); assertEquals("abab\n", found); From e03c7d44e5c05502c15654c39db36406bd21d19e Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Fri, 24 Oct 2014 22:46:12 +0800 Subject: [PATCH 13/26] validated Sets tests --- .../rt/gen/grammars/Sets/CharSetLiteral.st | 2 +- .../rt/gen/grammars/Sets/LexerOptionalSet.st | 2 +- .../test/rt/gen/grammars/Sets/LexerPlusSet.st | 2 +- .../test/rt/gen/grammars/Sets/LexerStarSet.st | 2 +- .../v4/test/rt/gen/grammars/Sets/NotChar.st | 2 +- .../test/rt/gen/grammars/Sets/NotCharSet.st | 2 +- .../gen/grammars/Sets/NotCharSetWithLabel.st | 2 +- .../grammars/Sets/NotCharSetWithRuleRef3.st | 2 +- .../Sets/OptionalLexerSingleElement.st | 2 +- .../grammars/Sets/OptionalSingleElement.st | 2 +- .../grammars/Sets/PlusLexerSingleElement.st | 2 +- .../grammars/Sets/StarLexerSingleElement.st | 2 +- .../org/antlr/v4/test/rt/java/TestSets.java | 24 +- .../org/antlr/v4/test/tool/TestErrorSets.java | 69 +++++ .../test/org/antlr/v4/test/tool/TestSets.java | 283 ------------------ 15 files changed, 93 insertions(+), 307 deletions(-) create mode 100644 tool/test/org/antlr/v4/test/tool/TestErrorSets.java delete mode 100644 tool/test/org/antlr/v4/test/tool/TestSets.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/CharSetLiteral.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/CharSetLiteral.st index b52c6dfc7..91f68f149 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/CharSetLiteral.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/CharSetLiteral.st @@ -1,4 +1,4 @@ grammar ; a : (A {})+ ; -a : [AaBb] ; +A : [AaBb] ; WS : (' '|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerOptionalSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerOptionalSet.st index f8f0db822..090b5c7e4 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerOptionalSet.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerOptionalSet.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : ('a'|'b')? 'c' ; +A : ('a'|'b')? 
'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerPlusSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerPlusSet.st index 48357285f..0f00f8416 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerPlusSet.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerPlusSet.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : ('a'|'b')+ 'c' ; +A : ('a'|'b')+ 'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerStarSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerStarSet.st index 17d4d2cd1..9407e8a03 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerStarSet.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/LexerStarSet.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : ('a'|'b')* 'c' ; +A : ('a'|'b')* 'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotChar.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotChar.st index 09644818b..1bbdffaf1 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotChar.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotChar.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : ~'b' ; +A : ~'b' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSet.st index a1e835e8c..878839aeb 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSet.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSet.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : ~('b'|'c') ; +A : ~('b'|'c') ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithLabel.st index a7b229bbe..9f2025cb0 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithLabel.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithLabel.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : h=~('b'|'c') ; +A : h=~('b'|'c') ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithRuleRef3.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithRuleRef3.st index 83aae7d73..107e6bb85 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithRuleRef3.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/NotCharSetWithRuleRef3.st @@ -1,5 +1,5 @@ grammar ; a : A {} ; -a : ('a'|B) ; // this doesn't collapse to set but works +A : ('a'|B) ; // this doesn't collapse to set but works fragment B : ~('a'|'c') ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalLexerSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalLexerSingleElement.st index 059b86b92..7f00dba67 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalLexerSingleElement.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalLexerSingleElement.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : 'b'? 'c' ; +A : 'b'? 'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSingleElement.st index 4a7ee5072..88b7bc673 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSingleElement.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/OptionalSingleElement.st @@ -1,3 +1,3 @@ grammar ; a : A? 
'c' {} ; -a : 'b' ; +A : 'b' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusLexerSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusLexerSingleElement.st index 51a16fa1d..a7e8bca8f 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusLexerSingleElement.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/PlusLexerSingleElement.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : 'b'+ 'c' ; +A : 'b'+ 'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarLexerSingleElement.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarLexerSingleElement.st index 69d12c81e..8811e8019 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarLexerSingleElement.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Sets/StarLexerSingleElement.st @@ -1,3 +1,3 @@ grammar ; a : A {} ; -a : 'b'* 'c' ; +A : 'b'* 'c' ; diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSets.java b/tool/test/org/antlr/v4/test/rt/java/TestSets.java index 2e7dd9e50..62c0a8957 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestSets.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestSets.java @@ -66,7 +66,7 @@ public class TestSets extends BaseTest { public void testNotChar() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println($A.text);} ;\n" + - "a : ~'b' ;"; + "A : ~'b' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); assertEquals("x\n", found); assertNull(this.stderrDuringParse); @@ -76,7 +76,7 @@ public class TestSets extends BaseTest { public void testOptionalSingleElement() throws Exception { String grammar = "grammar T;\n" + "a : A? 'c' {System.out.println(this._input.getText());} ;\n" + - "a : 'b' ;"; + "A : 'b' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "bc", false); assertEquals("bc\n", found); assertNull(this.stderrDuringParse); @@ -86,7 +86,7 @@ public class TestSets extends BaseTest { public void testOptionalLexerSingleElement() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println(this._input.getText());} ;\n" + - "a : 'b'? 'c' ;"; + "A : 'b'? 'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "bc", false); assertEquals("bc\n", found); assertNull(this.stderrDuringParse); @@ -95,7 +95,7 @@ public class TestSets extends BaseTest { String testStarLexerSingleElement(String input) throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println(this._input.getText());} ;\n" + - "a : 'b'* 'c' ;"; + "A : 'b'* 'c' ;"; return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); } @@ -117,7 +117,7 @@ public class TestSets extends BaseTest { public void testPlusLexerSingleElement() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println(this._input.getText());} ;\n" + - "a : 'b'+ 'c' ;"; + "A : 'b'+ 'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "bbbbc", false); assertEquals("bbbbc\n", found); assertNull(this.stderrDuringParse); @@ -154,7 +154,7 @@ public class TestSets extends BaseTest { public void testLexerOptionalSet() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println(this._input.getText());} ;\n" + - "a : ('a'|'b')? 'c' ;"; + "A : ('a'|'b')? 
'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); assertEquals("ac\n", found); assertNull(this.stderrDuringParse); @@ -164,7 +164,7 @@ public class TestSets extends BaseTest { public void testLexerStarSet() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println(this._input.getText());} ;\n" + - "a : ('a'|'b')* 'c' ;"; + "A : ('a'|'b')* 'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaac", false); assertEquals("abaac\n", found); assertNull(this.stderrDuringParse); @@ -174,7 +174,7 @@ public class TestSets extends BaseTest { public void testLexerPlusSet() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println(this._input.getText());} ;\n" + - "a : ('a'|'b')+ 'c' ;"; + "A : ('a'|'b')+ 'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaac", false); assertEquals("abaac\n", found); assertNull(this.stderrDuringParse); @@ -184,7 +184,7 @@ public class TestSets extends BaseTest { public void testNotCharSet() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println($A.text);} ;\n" + - "a : ~('b'|'c') ;"; + "A : ~('b'|'c') ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); assertEquals("x\n", found); assertNull(this.stderrDuringParse); @@ -194,7 +194,7 @@ public class TestSets extends BaseTest { public void testNotCharSetWithLabel() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println($A.text);} ;\n" + - "a : h=~('b'|'c') ;"; + "A : h=~('b'|'c') ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); assertEquals("x\n", found); assertNull(this.stderrDuringParse); @@ -204,7 +204,7 @@ public class TestSets extends BaseTest { public void testNotCharSetWithRuleRef3() throws Exception { String grammar = "grammar T;\n" + "a : A {System.out.println($A.text);} ;\n" + - "a : ('a'|B) ; // this doesn't collapse to set but works\n" + + "A : ('a'|B) ; // this doesn't collapse to set but works\n" + "fragment\n" + "B : ~('a'|'c') ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", false); @@ -216,7 +216,7 @@ public class TestSets extends BaseTest { public void testCharSetLiteral() throws Exception { String grammar = "grammar T;\n" + "a : (A {System.out.println($A.text);})+ ;\n" + - "a : [AaBb] ;\n" + + "A : [AaBb] ;\n" + "WS : (' '|'\\n')+ -> skip ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "A a B b", false); assertEquals("A\na\nB\nb\n", found); diff --git a/tool/test/org/antlr/v4/test/tool/TestErrorSets.java b/tool/test/org/antlr/v4/test/tool/TestErrorSets.java new file mode 100644 index 000000000..cc65158ca --- /dev/null +++ b/tool/test/org/antlr/v4/test/tool/TestErrorSets.java @@ -0,0 +1,69 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +package org.antlr.v4.test.tool; + +import org.antlr.v4.tool.ErrorType; +import org.junit.Test; + + +/** Test errors with the set stuff in lexer and parser */ +public class TestErrorSets extends BaseTest { + protected boolean debug = false; + + /** Public default constructor used by TestRig */ + public TestErrorSets() { + } + + @Test public void testNotCharSetWithRuleRef() throws Exception { + // might be a useful feature to add someday + String[] pair = new String[] { + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ~('a'|B) ;\n" + + "B : 'b' ;\n", + "error(" + ErrorType.UNSUPPORTED_REFERENCE_IN_LEXER_SET.code + "): T.g4:3:10: rule reference B is not currently supported in a set\n" + }; + super.testErrors(pair, true); + } + + @Test public void testNotCharSetWithString() throws Exception { + // might be a useful feature to add someday + String[] pair = new String[] { + "grammar T;\n" + + "a : A {System.out.println($A.text);} ;\n" + + "A : ~('a'|'aa') ;\n" + + "B : 'b' ;\n", + "error(" + ErrorType.INVALID_LITERAL_IN_LEXER_SET.code + "): T.g4:3:10: multi-character literals are not allowed in lexer sets: 'aa'\n" + }; + super.testErrors(pair, true); + } + + +} diff --git a/tool/test/org/antlr/v4/test/tool/TestSets.java b/tool/test/org/antlr/v4/test/tool/TestSets.java deleted file mode 100644 index 790d01497..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestSets.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.test.tool; - -import org.antlr.v4.tool.ErrorType; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** Test the set stuff in lexer and parser */ -public class TestSets extends BaseTest { - protected boolean debug = false; - - /** Public default constructor used by TestRig */ - public TestSets() { - } - - @Test public void testSeqDoesNotBecomeSet() throws Exception { - // this must return A not I to the parser; calling a nonfragment rule - // from a nonfragment rule does not set the overall token. - String grammar = - "grammar P;\n" + - "a : C {System.out.println(_input.getText());} ;\n" + - "fragment A : '1' | '2';\n" + - "fragment B : '3' '4';\n" + - "C : A | B;\n"; - String found = execParser("P.g4", grammar, "PParser", "PLexer", - "a", "34", debug); - assertEquals("34\n", found); - } - - @Test public void testParserSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : t=('x'|'y') {System.out.println($t.text);} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testParserNotSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "zz", debug); - assertEquals("z\n", found); - } - - @Test public void testParserNotToken() throws Exception { - String grammar = - "grammar T;\n" + - "a : ~'x' 'z' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "zz", debug); - assertEquals("zz\n", found); - } - - @Test public void testParserNotTokenWithLabel() throws Exception { - String grammar = - "grammar T;\n" + - "a : t=~'x' 'z' {System.out.println($t.text);} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "zz", debug); - assertEquals("z\n", found); - } - - @Test public void testRuleAsSet() throws Exception { - String grammar = - "grammar T;\n" + - "a @after {System.out.println(_input.getText());} : 'a' | 'b' |'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "b", debug); - assertEquals("b\n", found); - } - - @Test public void testNotChar() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~'b' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testOptionalSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A? 'c' {System.out.println(_input.getText());} ;\n" + - "A : 'b' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bc", debug); - assertEquals("bc\n", found); - } - - @Test public void testOptionalLexerSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : 'b'? 
'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bc", debug); - assertEquals("bc\n", found); - } - - @Test public void testStarLexerSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : 'b'* 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bbbbc", debug); - assertEquals("bbbbc\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "c", debug); - assertEquals("c\n", found); - } - - @Test public void testPlusLexerSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : 'b'+ 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bbbbc", debug); - assertEquals("bbbbc\n", found); - } - - @Test public void testOptionalSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : ('a'|'b')? 'c' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "ac", debug); - assertEquals("ac\n", found); - } - - @Test public void testStarSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : ('a'|'b')* 'c' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testPlusSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : ('a'|'b')+ 'c' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testLexerOptionalSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : ('a'|'b')? 
'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "ac", debug); - assertEquals("ac\n", found); - } - - @Test public void testLexerStarSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : ('a'|'b')* 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testLexerPlusSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : ('a'|'b')+ 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testNotCharSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~('b'|'c') ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testNotCharSetWithLabel() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : h=~('b'|'c') ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testNotCharSetWithRuleRef() throws Exception { - // might be a useful feature to add someday - String[] pair = new String[] { - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~('a'|B) ;\n" + - "B : 'b' ;\n", - "error(" + ErrorType.UNSUPPORTED_REFERENCE_IN_LEXER_SET.code + "): T.g4:3:10: rule reference B is not currently supported in a set\n" - }; - super.testErrors(pair, true); - } - - @Test public void testNotCharSetWithString() throws Exception { - // might be a useful feature to add someday - String[] pair = new String[] { - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~('a'|'aa') ;\n" + - "B : 'b' ;\n", - "error(" + ErrorType.INVALID_LITERAL_IN_LEXER_SET.code + "): T.g4:3:10: multi-character literals are not allowed in lexer sets: 'aa'\n" - }; - super.testErrors(pair, true); - } - - @Test public void testNotCharSetWithRuleRef3() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ('a'|B) ;\n" + // this doesn't collapse to set but works - "fragment\n" + - "B : ~('a'|'c') ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testCharSetLiteral() throws Exception { - String grammar = - "grammar T;\n" + - "a : (A {System.out.println($A.text);})+ ;\n" + - "A : [AaBb] ;\n" + - "WS : (' '|'\\n')+ -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "A a B b", debug); - assertEquals("A\n" + - "a\n" + - "B\n" + - "b\n", found); - } -} From 6067aee458eb74bac9b27f4dfb2d8ffe82c57f76 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 00:06:47 +0800 Subject: [PATCH 14/26] validated ParserExec tests --- .../rt/gen/CompositeParserTestMethod.java | 1 + .../org/antlr/v4/test/rt/gen/Generator.java | 29 +- .../grammars/ParserExec/AlternateQuotes.st | 6 + .../AlternateQuotes_ModeTagsLexer.st | 8 + .../gen/grammars/ParserExec/EOFInClosure.st | 3 + .../ParserExec/ListLabelForClosureContext.st | 2 +- .../ParserExec/MultipleEOFHandling.st | 2 + .../gen/grammars/ParserExec/ReferenceToATN.st | 5 + 
.../org/antlr/v4/test/rt/java/Java.test.stg | 12 +- .../test/rt/java/TestFullContextParsing.java | 2 +- .../v4/test/rt/java/TestLeftRecursion.java | 18 +- .../antlr/v4/test/rt/java/TestListeners.java | 10 +- .../antlr/v4/test/rt/java/TestParseTrees.java | 16 +- .../antlr/v4/test/rt/java/TestParserExec.java | 75 ++- .../test/rt/java/TestSemPredEvalParser.java | 2 +- .../antlr/v4/test/tool/TestParserExec.java | 467 +----------------- 16 files changed, 159 insertions(+), 499 deletions(-) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes_ModeTagsLexer.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/EOFInClosure.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/MultipleEOFHandling.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st diff --git a/tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java index 139c95221..9acfe1f99 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java +++ b/tool/test/org/antlr/v4/test/rt/gen/CompositeParserTestMethod.java @@ -7,6 +7,7 @@ import org.stringtemplate.v4.STGroup; public class CompositeParserTestMethod extends ParserTestMethod { public Grammar[] slaveGrammars; + public boolean slaveIsLexer = false; public CompositeParserTestMethod(String name, String grammarName, String startRule, String input, String expectedOutput, diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index cb3f4c15f..f07b6a2a7 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -1,5 +1,7 @@ package org.antlr.v4.test.rt.gen; +import static org.junit.Assert.assertEquals; + import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; @@ -13,6 +15,7 @@ import java.util.List; import java.util.Map; import org.antlr.v4.test.rt.java.BaseTest; +import org.junit.Test; import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.STGroupFile; @@ -1230,8 +1233,8 @@ public class Generator { private TestFile buildParserExec() throws Exception { TestFile file = new TestFile("ParserExec"); - file.addParserTest(input, "Labels", "T", "a", "abc 34", "", null); - file.addParserTest(input, "ListLabelsOnSet", "T", "a", "abc 34", "", null); + file.addParserTest(input, "Labels", "T", "a", "abc 34;", "", null); + file.addParserTest(input, "ListLabelsOnSet", "T", "a", "abc 34;", "", null); file.addParserTest(input, "AorB", "T", "a", "34", "alt 2\n", null); file.addParserTest(input, "Basic", "T", "a", "abc 34", "abc34\n", null); file.addParserTest(input, "APlus", "T", "a", "a b c", "abc\n", null); @@ -1268,6 +1271,28 @@ public class Generator { file.addParserTest(input, "LabelAliasingAcrossLabeledAlternatives", "T", "start", "xy", "x\ny\n", null); file.addParserTest(input, "PredictionIssue334", "T", "file_", "a", "(file_ (item a) )\n", null); file.addParserTest(input, "ListLabelForClosureContext", "T", "expression", "a", "", null); + /** + * This test ensures that {@link ParserATNSimulator} produces a correct + * result when the grammar contains multiple explicit references to + * {@code EOF} inside of parser rules. 
+ */ + file.addParserTest(input, "MultipleEOFHandling", "T", "prog", "x", "", null); + /** + * This test ensures that {@link ParserATNSimulator} does not produce a + * {@link StackOverflowError} when it encounters an {@code EOF} transition + * inside a closure. + */ + file.addParserTest(input, "EOFInClosure", "T", "prog", "x", "", null); + /** + * This is a regression test for antlr/antlr4#561 "Issue with parser + * generation in 4.2.2" + * https://github.com/antlr/antlr4/issues/561 + */ + file.addParserTests(input, "ReferenceToATN", "T", "a", + "", "\n", + "a 34 c", "a34c\n"); + CompositeParserTestMethod tm = file.addCompositeParserTest(input, "AlternateQuotes", "ModeTagsParser", "file_", "", "", null, "ModeTagsLexer"); + tm.slaveIsLexer = true; return file; } diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes.st new file mode 100644 index 000000000..6fd73cda0 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes.st @@ -0,0 +1,6 @@ +parser grammar ModeTagsParser; +options { tokenVocab=ModeTagsLexer; } // use tokens from ModeTagsLexer.g4 +file_: (tag | TEXT)* ; +tag : '«' ID '»' + | '«' '/' ID '»' + ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes_ModeTagsLexer.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes_ModeTagsLexer.st new file mode 100644 index 000000000..c5205bc12 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/AlternateQuotes_ModeTagsLexer.st @@ -0,0 +1,8 @@ +lexer grammar ModeTagsLexer; +// Default mode rules (the SEA) +OPEN : '«' -> mode(ISLAND) ; // switch to ISLAND mode +TEXT : ~'«'+ ; // clump all text together +mode ISLAND; +CLOSE : '»' -> mode(DEFAULT_MODE) ; // back to SEA mode +SLASH : '/' ; +ID : [a-zA-Z]+ ; // match/send ID in tag to parser diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/EOFInClosure.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/EOFInClosure.st new file mode 100644 index 000000000..7c5e1f6d1 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/EOFInClosure.st @@ -0,0 +1,3 @@ +grammar ; +prog : stat EOF; +stat : 'x' ('y' | EOF)*?; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st index 8ea3bfdb6..40dcddc4f 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st @@ -1,7 +1,7 @@ grammar ; ifStatement @after { -items = $ctx.elseIfStatement() + } : 'if' expression ( ( 'then' diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/MultipleEOFHandling.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/MultipleEOFHandling.st new file mode 100644 index 000000000..660c71623 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/MultipleEOFHandling.st @@ -0,0 +1,2 @@ +grammar ; +prog : ('x' | 'x' 'y') EOF EOF; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st new file mode 100644 index 000000000..6b20ad9af --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st @@ -0,0 +1,5 @@ +grammar ; +a : (ID|ATN)* ATN? 
{} ; +ID : 'a'..'z'+ ; +ATN : '0'..'9'+; +WS : (' '|'\n') -> skip ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index b800ffb53..026ba348e 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -49,12 +49,16 @@ ParserTestMethod(test) ::= << public void test() throws Exception { = };separator="\\n\" +\n", wrap, anchor>"; + + rawGenerateAndBuildRecognizer(".g4", slave_, null, ""); + mkdir(tmpdir); writeFile(tmpdir, ".g4", slave_); + };separator="\n", wrap, anchor> String grammar = };separator="\\n\" +\n", wrap, anchor>"; - String found = execParser(".g4", grammar, "Parser", "Lexer", "", "", false); + String found = execParser(".g4", grammar, "Parser", "Lexer", "", "", false); assertEquals("", found); assertEquals("", this.stderrDuringParse); @@ -97,7 +101,7 @@ write(s) ::= <);>> assert(s) ::= <);>> -LocalVar() ::= "var " +DeclareLocal(s) ::= "Object ;" InitMember(n,v) ::= <%this. = ;%> @@ -121,9 +125,9 @@ Pass() ::= "" BuildParseTrees() ::= "this.buildParseTrees = true;" -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> +BailErrorStrategy() ::= <%setErrorHandler(new BailErrorStrategy());%> -ToStringTree(s) ::= <%.toStringTree(null, this);%> +ToStringTree(s) ::= <%.toStringTree(this)%> Column() ::= "this.getCharPositionInLine()" diff --git a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java index de6bed7b3..9c91ed9fe 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java @@ -177,7 +177,7 @@ public class TestFullContextParsing extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;}\n" + - ": expr[0] {System.out.println($expr.ctx.toStringTree(null, this););};\n" + + ": expr[0] {System.out.println($expr.ctx.toStringTree(this));};\n" + " expr[int _p]\n" + " : ID \n" + " ( \n" + diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java index 7b2b4a756..7f3d38e9f 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java @@ -7,7 +7,7 @@ public class TestLeftRecursion extends BaseTest { String testSimple(String input) throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : a ;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + "a : a ID\n" + " | ID\n" + " ;\n" + @@ -39,7 +39,7 @@ public class TestLeftRecursion extends BaseTest { String testDirectCallToLeftRecursiveRule(String input) throws Exception { String grammar = "grammar T;\n" + - "a @after {System.out.println($ctx.toStringTree(null, this););} : a ID\n" + + "a @after {System.out.println($ctx.toStringTree(this));} : a ID\n" + " | ID\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + @@ -71,7 +71,7 @@ public class TestLeftRecursion extends BaseTest { @Test public void testSemPred() throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : a ;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + "a : a {true}? 
ID\n" + " | ID\n" + " ;\n" + @@ -84,7 +84,7 @@ public class TestLeftRecursion extends BaseTest { String testTernaryExpr(String input) throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : e EOF ; // must indicate EOF can follow or 'a' won't match\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ; // must indicate EOF can follow or 'a' won't match\n" + "e : e '*' e\n" + " | e '+' e\n" + " | e '?' e ':' e\n" + @@ -161,7 +161,7 @@ public class TestLeftRecursion extends BaseTest { String testExpressions(String input) throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : e EOF ; // must indicate EOF can follow\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ; // must indicate EOF can follow\n" + "e : e '.' ID\n" + " | e '.' 'this'\n" + " | '-' e\n" + @@ -227,7 +227,7 @@ public class TestLeftRecursion extends BaseTest { String testJavaExpressions(String input) throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : e EOF ; // must indicate EOF can follow\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ; // must indicate EOF can follow\n" + "expressionList\n" + " : e (',' e)*\n" + " ;\n" + @@ -371,7 +371,7 @@ public class TestLeftRecursion extends BaseTest { String testDeclarations(String input) throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : declarator EOF ; // must indicate EOF can follow\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : declarator EOF ; // must indicate EOF can follow\n" + "declarator\n" + " : declarator '[' e ']'\n" + " | declarator '[' ']'\n" + @@ -501,7 +501,7 @@ public class TestLeftRecursion extends BaseTest { String testLabelsOnOpSubrule(String input) throws Exception { String grammar = "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(null, this););} : e;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e;\n" + "e : a=e op=('*'|'/') b=e {}\n" + " | INT {}\n" + " | '(' x=e ')' {}\n" + @@ -792,7 +792,7 @@ public class TestLeftRecursion extends BaseTest { public void testPrecedenceFilterConsidersContext() throws Exception { String grammar = "grammar T;\n" + "prog \n" + - "@after {System.out.println($ctx.toStringTree(null, this););}\n" + + "@after {System.out.println($ctx.toStringTree(this));}\n" + ": statement* EOF {};\n" + "statement: letterA | statement letterA 'b' ;\n" + "letterA: 'a';"; diff --git a/tool/test/org/antlr/v4/test/rt/java/TestListeners.java b/tool/test/org/antlr/v4/test/rt/java/TestListeners.java index 955eca626..28b306a0c 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestListeners.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestListeners.java @@ -26,7 +26,7 @@ public class TestListeners extends BaseTest { "\n" + "s\n" + "@after {\n" + - "System.out.println($ctx.r.toStringTree(null, this););\n" + + "System.out.println($ctx.r.toStringTree(this));\n" + "var walker = new antlr4.tree.ParseTreeWalker();\n" + "walker.walk(new this.LeafListener(), $ctx.r);\n" + "\n" + @@ -71,7 +71,7 @@ public class TestListeners extends BaseTest { "\n" + "s\n" + "@after {\n" + - "System.out.println($ctx.r.toStringTree(null, this););\n" + + "System.out.println($ctx.r.toStringTree(this));\n" + "var walker = new antlr4.tree.ParseTreeWalker();\n" + "walker.walk(new this.LeafListener(), 
$ctx.r);\n" + "\n" + @@ -128,7 +128,7 @@ public class TestListeners extends BaseTest { "\n" + "s\n" + "@after {\n" + - "System.out.println($ctx.r.toStringTree(null, this););\n" + + "System.out.println($ctx.r.toStringTree(this));\n" + "var walker = new antlr4.tree.ParseTreeWalker();\n" + "walker.walk(new this.LeafListener(), $ctx.r);\n" + "\n" + @@ -187,7 +187,7 @@ public class TestListeners extends BaseTest { "\n" + "s\n" + "@after {\n" + - "System.out.println($ctx.r.toStringTree(null, this););\n" + + "System.out.println($ctx.r.toStringTree(this));\n" + "var walker = new antlr4.tree.ParseTreeWalker();\n" + "walker.walk(new this.LeafListener(), $ctx.r);\n" + "\n" + @@ -233,7 +233,7 @@ public class TestListeners extends BaseTest { "\n" + "s\n" + "@after {\n" + - "System.out.println($ctx.r.toStringTree(null, this););\n" + + "System.out.println($ctx.r.toStringTree(this));\n" + "var walker = new antlr4.tree.ParseTreeWalker();\n" + "walker.walk(new this.LeafListener(), $ctx.r);\n" + "\n" + diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java b/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java index d707d1f4a..a167df19a 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java @@ -13,7 +13,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' { \n" + @@ -32,7 +32,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' 'y'\n" + @@ -50,7 +50,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' | 'y'\n" + @@ -68,7 +68,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : ('x' | 'y')* 'z'\n" + @@ -86,7 +86,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : b 'x'\n" + @@ -106,7 +106,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' 'y'\n" + @@ -126,7 +126,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' | 'y'\n" + @@ -147,7 +147,7 @@ public class TestParseTrees extends BaseTest { "this.buildParseTrees = true;\n" + "}\n" + "@after {\n" + - "System.out.println($r.ctx.toStringTree(null, this););\n" + + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' 'y'* '!'\n" + 
diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java b/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java index 1b3cb8a32..a001a0b3b 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java @@ -13,7 +13,7 @@ public class TestParserExec extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34;", false); assertEquals("", found); assertNull(this.stderrDuringParse); } @@ -27,7 +27,7 @@ public class TestParserExec extends BaseTest { "INT : '0'..'9'+;\n" + "FLOAT : [0-9]+ '.' [0-9]+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abc 34;", false); assertEquals("", found); assertNull(this.stderrDuringParse); } @@ -331,10 +331,10 @@ public class TestParserExec extends BaseTest { public void testPredictionIssue334() throws Exception { String grammar = "grammar T;\n" + "file_ @init{\n" + - "this._errHandler = new antlr4.error.BailErrorStrategy();\n" + + "setErrorHandler(new BailErrorStrategy());\n" + "} \n" + "@after {\n" + - "System.out.println($ctx.toStringTree(null, this););\n" + + "System.out.println($ctx.toStringTree(this));\n" + "}\n" + " : item (SEMICOLON item)* SEMICOLON? EOF ;\n" + "item : A B?;\n" + @@ -352,7 +352,7 @@ public class TestParserExec extends BaseTest { String grammar = "grammar T;\n" + "ifStatement\n" + "@after {\n" + - "var items = $ctx.elseIfStatement() \n" + + "Object items = $ctx.elseIfStatement(); \n" + "}\n" + " : 'if' expression\n" + " ( ( 'then'\n" + @@ -374,5 +374,70 @@ public class TestParserExec extends BaseTest { assertNull(this.stderrDuringParse); } + @Test + public void testMultipleEOFHandling() throws Exception { + String grammar = "grammar T;\n" + + "prog : ('x' | 'x' 'y') EOF EOF;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testEOFInClosure() throws Exception { + String grammar = "grammar T;\n" + + "prog : stat EOF;\n" + + "stat : 'x' ('y' | EOF)*?;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "x", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + + String testReferenceToATN(String input) throws Exception { + String grammar = "grammar T;\n" + + "a : (ID|ATN)* ATN? 
{System.out.println($text);} ;\n" + + "ID : 'a'..'z'+ ;\n" + + "ATN : '0'..'9'+;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); + } + + @Test + public void testReferenceToATN_1() throws Exception { + String found = testReferenceToATN(""); + assertEquals("\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReferenceToATN_2() throws Exception { + String found = testReferenceToATN("a 34 c"); + assertEquals("a34c\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testAlternateQuotes() throws Exception { + String slave_ModeTagsLexer = "lexer grammar ModeTagsLexer;\n" + + "// Default mode rules (the SEA)\n" + + "OPEN : '«' -> mode(ISLAND) ; // switch to ISLAND mode\n" + + "TEXT : ~'«'+ ; // clump all text together\n" + + "mode ISLAND;\n" + + "CLOSE : '»' -> mode(DEFAULT_MODE) ; // back to SEA mode\n" + + "SLASH : '/' ;\n" + + "ID : [a-zA-Z]+ ; // match/send ID in tag to parser"; + rawGenerateAndBuildRecognizer("ModeTagsLexer.g4", slave_ModeTagsLexer, null, "ModeTagsLexer"); + + String grammar = "parser grammar ModeTagsParser;\n" + + "options { tokenVocab=ModeTagsLexer; } // use tokens from ModeTagsLexer.g4\n" + + "file_: (tag | TEXT)* ;\n" + + "tag : '«' ID '»'\n" + + " | '«' '/' ID '»'\n" + + " ;"; + String found = execParser("ModeTagsParser.g4", grammar, "ModeTagsParser", "ModeTagsLexer", "file_", "", false); + assertEquals("", found); + assertNull(this.stderrDuringParse); + } + } \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java index 602542aa4..2f072f756 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java @@ -398,7 +398,7 @@ public class TestSemPredEvalParser extends BaseTest { String testPredFromAltTestedInLoopBack(String input) throws Exception { String grammar = "grammar T;\n" + "file_\n" + - "@after {System.out.println($ctx.toStringTree(null, this););}\n" + + "@after {System.out.println($ctx.toStringTree(this));}\n" + " : para para EOF ;\n" + "para: paraContent NL NL ;\n" + "paraContent : ('s'|'x'|{this._input.LA(2)!=NL}? NL)+ ;\n" + diff --git a/tool/test/org/antlr/v4/test/tool/TestParserExec.java b/tool/test/org/antlr/v4/test/tool/TestParserExec.java index a935486d8..150f553f5 100644 --- a/tool/test/org/antlr/v4/test/tool/TestParserExec.java +++ b/tool/test/org/antlr/v4/test/tool/TestParserExec.java @@ -33,9 +33,7 @@ package org.antlr.v4.test.tool; import org.junit.Ignore; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; /** Test parser execution. * @@ -69,300 +67,14 @@ import static org.junit.Assert.assertTrue; * the remaining input to match. */ public class TestParserExec extends BaseTest { - @Test public void testLabels() throws Exception { - String grammar = - "grammar T;\n" + - "a : b1=b b2+=b* b3+=';' ;\n" + - "b : id=ID val+=INT*;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "abc 34;", false); - assertEquals("", found); - assertEquals(null, stderrDuringParse); - } - - /** - * This is a regression test for #270 "Fix operator += applied to a set of - * tokens". 
- * https://github.com/antlr/antlr4/issues/270 - */ - @Test public void testListLabelOnSet() { - String grammar = - "grammar T;\n" + - "a : b b* ';' ;\n" + - "b : ID val+=(INT | FLOAT)*;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "FLOAT : [0-9]+ '.' [0-9]+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "abc 34;", false); - assertEquals("", found); - assertEquals(null, stderrDuringParse); - } - - @Test public void testBasic() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID INT {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "abc 34", false); - assertEquals("abc34\n", found); - } - - @Test public void testAorB() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID {System.out.println(\" alt 1\");}" + - " | INT {System.out.println(\"alt 2\");}" + - ";\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "34", false); - assertEquals("alt 2\n", found); - } - - @Test public void testAPlus() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID+ {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - // force complex decision - @Test public void testAorAPlus() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|ID)+ {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - private static final String ifIfElseGrammarFormat = - "grammar T;\n" + - "start : statement+ ;\n" + - "statement : 'x' | ifStatement;\n" + - "ifStatement : 'if' 'y' statement %s {System.out.println($text);};\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> channel(HIDDEN);\n"; - - @Test public void testIfIfElseGreedyBinding1() throws Exception { - final String input = "if y if y x else x"; - final String expectedInnerBound = "if y x else x\nif y if y x else x\n"; - - String grammar = String.format(ifIfElseGrammarFormat, "('else' statement)?"); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedInnerBound, found); - - } - - @Test public void testIfIfElseGreedyBinding2() throws Exception { - final String input = "if y if y x else x"; - final String expectedInnerBound = "if y x else x\nif y if y x else x\n"; - - String grammar = String.format(ifIfElseGrammarFormat, "('else' statement|)"); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedInnerBound, found); - } - - @Test public void testIfIfElseNonGreedyBinding() throws Exception { - final String input = "if y if y x else x"; - final String expectedOuterBound = "if y x\nif y if y x else x\n"; - - String grammar = String.format(ifIfElseGrammarFormat, "('else' statement)??"); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedOuterBound, found); - - grammar = String.format(ifIfElseGrammarFormat, "(|'else' statement)"); - found = execParser("T.g4", grammar, "TParser", 
"TLexer", "start", input, false); - assertEquals(expectedOuterBound, found); - } - - @Test public void testAStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID* {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - @Test public void testLL1OptionalBlock() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|{}INT)? {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a", false); - assertEquals("a\n", found); - } - - // force complex decision - @Test public void testAorAStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|ID)* {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - @Test public void testAorBPlus() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|INT{;})+ {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a 34 c", false); - assertEquals("a34c\n", found); - } - - @Test public void testAorBStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|INT{;})* {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a 34 c", false); - assertEquals("a34c\n", found); - } - - - /** - * This test is meant to detect regressions of bug antlr/antlr4#41. 
- * https://github.com/antlr/antlr4/issues/41 - */ - @Test - public void testOptional1() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - @Test - public void testOptional2() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if x else x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - @Test - public void testOptional3() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - @Test - public void testOptional4() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if if x else x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - /** - * This test is meant to test the expected solution to antlr/antlr4#42. - * https://github.com/antlr/antlr4/issues/42 - */ - @Test - public void testPredicatedIfIfElse() throws Exception { - String grammar = - "grammar T;\n" + - "s : stmt EOF ;\n" + - "stmt : ifStmt | ID;\n" + - "ifStmt : 'if' ID stmt ('else' stmt | {_input.LA(1) != ELSE}?);\n" + - "ELSE : 'else';\n" + - "ID : [a-zA-Z]+;\n" + - "WS : [ \\n\\t]+ -> skip;\n" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "if x if x a else b", true); - String expecting = ""; - assertEquals(expecting, found); - assertNull(this.stderrDuringParse); - } /** * This is a regression test for antlr/antlr4#118. * https://github.com/antlr/antlr4/issues/118 */ @Ignore("Performance impact of passing this test may not be worthwhile") + // TODO: port to test framework (not ported because test currently fails) @Test public void testStartRuleWithoutEOF() { String grammar = "grammar T;\n"+ @@ -382,146 +94,12 @@ public class TestParserExec extends BaseTest { assertNull(this.stderrDuringParse); } - /** - * This is a regression test for antlr/antlr4#195 "label 'label' type - * mismatch with previous definition: TOKEN_LABEL!=RULE_LABEL" - * https://github.com/antlr/antlr4/issues/195 - */ - @Test public void testLabelAliasingAcrossLabeledAlternatives() throws Exception { - String grammar = - "grammar T;\n" + - "start : a* EOF;\n" + - "a\n" + - " : label=subrule {System.out.println($label.text);} #One\n" + - " | label='y' {System.out.println($label.text);} #Two\n" + - " ;\n" + - "subrule : 'x';\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", - "xy", false); - assertEquals("x\ny\n", found); - } - - /** - * This is a regression test for antlr/antlr4#334 "BailErrorStrategy: bails - * out on proper input". 
- * https://github.com/antlr/antlr4/issues/334 - */ - @Test public void testPredictionIssue334() { - String grammar = - "grammar T;\n" + - "\n" + - "file @init{setErrorHandler(new BailErrorStrategy());} \n" + - "@after {System.out.println($ctx.toStringTree(this));}\n" + - " : item (SEMICOLON item)* SEMICOLON? EOF ;\n" + - "item : A B?;\n" + - "\n" + - "\n" + - "\n" + - "SEMICOLON: ';';\n" + - "\n" + - "A : 'a'|'A';\n" + - "B : 'b'|'B';\n" + - "\n" + - "WS : [ \\r\\t\\n]+ -> skip;\n"; - - String input = "a"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "file", input, false); - assertEquals("(file (item a) )\n", found); - assertNull(stderrDuringParse); - } - - /** - * This is a regressino test for antlr/antlr4#299 "Repeating subtree not - * accessible in visitor". - * https://github.com/antlr/antlr4/issues/299 - */ - @Test public void testListLabelForClosureContext() throws Exception { - String grammar = - "grammar T;\n" + - "ifStatement\n" + - "@after { List items = $ctx.elseIfStatement(); }\n" + - " : 'if' expression\n" + - " ( ( 'then'\n" + - " executableStatement*\n" + - " elseIfStatement* // <--- problem is here\n" + - " elseStatement?\n" + - " 'end' 'if'\n" + - " ) | executableStatement )\n" + - " ;\n" + - "\n" + - "elseIfStatement\n" + - " : 'else' 'if' expression 'then' executableStatement*\n" + - " ;\n" - + "expression : 'a' ;\n" - + "executableStatement : 'a' ;\n" - + "elseStatement : 'a' ;\n"; - String input = "a"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "expression", input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This test ensures that {@link ParserATNSimulator} produces a correct - * result when the grammar contains multiple explicit references to - * {@code EOF} inside of parser rules. - */ - @Test - public void testMultipleEOFHandling() throws Exception { - String grammar = - "grammar T;\n" + - "prog : ('x' | 'x' 'y') EOF EOF;\n"; - String input = "x"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This test ensures that {@link ParserATNSimulator} does not produce a - * {@link StackOverflowError} when it encounters an {@code EOF} transition - * inside a closure. - */ - @Test - public void testEOFInClosure() throws Exception { - String grammar = - "grammar T;\n" + - "prog : stat EOF;\n" + - "stat : 'x' ('y' | EOF)*?;\n"; - String input = "x"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#561 "Issue with parser - * generation in 4.2.2" - * https://github.com/antlr/antlr4/issues/561 - */ - @Test public void testReferenceToATN() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|ATN)* ATN? {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "ATN : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a 34 c", false); - assertEquals("a34c\n", found); - } - /** * This is a regression test for antlr/antlr4#588 "ClassCastException during * semantic predicate handling". * https://github.com/antlr/antlr4/issues/588 */ + // TODO: port to test framework (can we simplify the Psl grammar?) 
@Test public void testFailedPredicateExceptionState() throws Exception { String grammar = load("Psl.g4", "UTF-8"); String found = execParser("Psl.g4", grammar, "PslParser", "PslLexer", "floating_constant", " . 234", false); @@ -529,50 +107,13 @@ public class TestParserExec extends BaseTest { assertEquals("line 1:6 rule floating_constant DEC:A floating-point constant cannot have internal white space\n", stderrDuringParse); } - /** - * This is a regression test for antlr/antlr4#563 "Inconsistent token - * handling in ANTLR4". - * https://github.com/antlr/antlr4/issues/563 - */ - @Test public void testAlternateQuotes() throws Exception { - String lexerGrammar = - "lexer grammar ModeTagsLexer;\n" + - "\n" + - "// Default mode rules (the SEA)\n" + - "OPEN : '«' -> mode(ISLAND) ; // switch to ISLAND mode\n" + - "TEXT : ~'«'+ ; // clump all text together\n" + - "\n" + - "mode ISLAND;\n" + - "CLOSE : '»' -> mode(DEFAULT_MODE) ; // back to SEA mode \n" + - "SLASH : '/' ;\n" + - "ID : [a-zA-Z]+ ; // match/send ID in tag to parser\n"; - String parserGrammar = - "parser grammar ModeTagsParser;\n" + - "\n" + - "options { tokenVocab=ModeTagsLexer; } // use tokens from ModeTagsLexer.g4\n" + - "\n" + - "file: (tag | TEXT)* ;\n" + - "\n" + - "tag : '«' ID '»'\n" + - " | '«' '/' ID '»'\n" + - " ;"; - - boolean success = rawGenerateAndBuildRecognizer("ModeTagsLexer.g4", - lexerGrammar, - null, - "ModeTagsLexer"); - assertTrue(success); - - String found = execParser("ModeTagsParser.g4", parserGrammar, "ModeTagsParser", "ModeTagsLexer", "file", "", false); - assertEquals("", found); - assertNull(stderrDuringParse); - } /** * This is a regression test for antlr/antlr4#672 "Initialization failed in * locals". * https://github.com/antlr/antlr4/issues/672 */ + // TODO: port to test framework (missing templates) @Test public void testAttributeValueInitialization() throws Exception { String grammar = "grammar Data; \n" + From 0c112e97c81f917e550e35a4da4c977f4d66d20c Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 00:30:20 +0800 Subject: [PATCH 15/26] validate ParserErrors tests --- .../org/antlr/v4/test/rt/gen/Generator.java | 8 +- .../ParserErrors/ContextListGetters.st | 1 - .../DuplicatedLeftRecursiveCall.st | 1 - .../ParserErrors/InvalidATNStateRemoval.st | 1 - .../ParserErrors/InvalidEmptyInput.st | 1 - .../org/antlr/v4/test/rt/java/Java.test.stg | 12 +- .../v4/test/rt/java/TestParserErrors.java | 32 +- .../antlr/v4/test/tool/TestParseErrors.java | 376 ------------------ 8 files changed, 24 insertions(+), 408 deletions(-) delete mode 100644 tool/test/org/antlr/v4/test/tool/TestParseErrors.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index f07b6a2a7..52ea201ad 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -536,16 +536,16 @@ public class Generator { "line 1:1 missing 'b' at 'c'\n"); file.addParserTest(input, "ConjuringUpToken", "T", "a", "ac", - "conjured=[@-1,-1:-1='',<1>,1:1]\n", - null); + "conjured=[@-1,-1:-1='',<2>,1:1]\n", + "line 1:1 missing 'b' at 'c'\n"); file.addParserTest(input, "SingleSetInsertion", "T", "a", "ad", "", "line 1:1 missing {'b', 'c'} at 'd'\n"); file.addParserTest(input, "ConjuringUpTokenFromSet", "T", "a", "ad", - "conjured=[@-1,-1:-1='',<1>,1:1]\n", - null); + "conjured=[@-1,-1:-1='',<2>,1:1]\n", + "line 1:1 missing {'b', 'c'} at 'd'\n"); file.addParserTest(input, "LL2", "T", "a", "ae", "", diff --git 
a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ContextListGetters.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ContextListGetters.st index 6114db743..225cb5d9b 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ContextListGetters.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ContextListGetters.st @@ -5,4 +5,3 @@ grammar ; s : (a | b)+; a : 'a' {}; b : 'b' {}; -; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st index 04f71f413..b0f163958 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/DuplicatedLeftRecursiveCall.st @@ -3,4 +3,3 @@ start : expr EOF; expr : 'x' | expr expr ; -; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidATNStateRemoval.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidATNStateRemoval.st index 75fd25550..bd9e2f93f 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidATNStateRemoval.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidATNStateRemoval.st @@ -3,4 +3,3 @@ start : ID ':' expr; expr : primary expr? {} | expr '->' ID; primary : ID; ID : [a-z]+; -; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidEmptyInput.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidEmptyInput.st index 65551a8c6..7e4e111ee 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidEmptyInput.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/InvalidEmptyInput.st @@ -1,4 +1,3 @@ grammar ; start : ID+; ID : [a-z]+; -; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index 026ba348e..f0cdc0ab7 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -149,7 +149,7 @@ TokenStartColumnEquals(i) ::= <%this._tokenStartCharPositionInLine==%> ImportListener(X) ::= <Listener = require('./Listener').Listener;>> -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames));" +GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)" WriteRuleInvocationStack() ::= "document.getElementById('output').value += antlr4.Utils.arrayToString(this.getRuleInvocationStack()) + '\\n';" @@ -330,11 +330,11 @@ this.LeafListener.prototype.constructor = this.LeafListener; >> DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; +void foo() { + SContext s = null; + List\ a = s.a(); + List\ b = s.b(); +} >> Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo\\n'};" diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java index 34b6302f5..f9c081c50 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java @@ -46,8 +46,8 @@ public class TestParserErrors extends BaseTest { String grammar = "grammar T;\n" + "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); - assertEquals("conjured=[@-1,-1:-1='',<1>,1:1]\n", found); 
- assertNull(this.stderrDuringParse); + assertEquals("conjured=[@-1,-1:-1='',<2>,1:1]\n", found); + assertEquals("line 1:1 missing 'b' at 'c'\n", this.stderrDuringParse); } @Test @@ -64,8 +64,8 @@ public class TestParserErrors extends BaseTest { String grammar = "grammar T;\n" + "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); - assertEquals("conjured=[@-1,-1:-1='',<1>,1:1]\n", found); - assertNull(this.stderrDuringParse); + assertEquals("conjured=[@-1,-1:-1='',<2>,1:1]\n", found); + assertEquals("line 1:1 missing {'b', 'c'} at 'd'\n", this.stderrDuringParse); } @Test @@ -190,7 +190,7 @@ public class TestParserErrors extends BaseTest { "WS : ' ' -> skip ;\n" + "acClass\n" + "@init\n" + - "{System.out.println(this.getExpectedTokens().toString(this.tokenNames)););}\n" + + "{System.out.println(this.getExpectedTokens().toString(this.tokenNames));}\n" + " : ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "dog and software", false); assertEquals("{'hardware', 'software'}\n", found); @@ -201,8 +201,7 @@ public class TestParserErrors extends BaseTest { public void testInvalidEmptyInput() throws Exception { String grammar = "grammar T;\n" + "start : ID+;\n" + - "ID : [a-z]+;\n" + - ";"; + "ID : [a-z]+;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "", false); assertEquals("", found); assertEquals("line 1:0 missing ID at ''\n", this.stderrDuringParse); @@ -212,16 +211,15 @@ public class TestParserErrors extends BaseTest { public void testContextListGetters() throws Exception { String grammar = "grammar T;\n" + "@parser::members{\n" + - " function foo() {\n" + - " var s = new SContext();\n" + - " var a = s.a();\n" + - " var b = s.b();\n" + - " };\n" + + "void foo() {\n" + + " SContext s = null;\n" + + " List a = s.a();\n" + + " List b = s.b();\n" + + "}\n" + "}\n" + "s : (a | b)+;\n" + "a : 'a' {System.out.print('a');};\n" + - "b : 'b' {System.out.print('b');};\n" + - ";"; + "b : 'b' {System.out.print('b');};"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abab", false); assertEquals("abab\n", found); assertNull(this.stderrDuringParse); @@ -232,8 +230,7 @@ public class TestParserErrors extends BaseTest { "start : expr EOF;\n" + "expr : 'x'\n" + " | expr expr\n" + - " ;\n" + - ";"; + " ;"; return execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); } @@ -264,8 +261,7 @@ public class TestParserErrors extends BaseTest { "start : ID ':' expr;\n" + "expr : primary expr? {} | expr '->' ID;\n" + "primary : ID;\n" + - "ID : [a-z]+;\n" + - ";"; + "ID : [a-z]+;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x:x", false); assertEquals("", found); assertNull(this.stderrDuringParse); diff --git a/tool/test/org/antlr/v4/test/tool/TestParseErrors.java b/tool/test/org/antlr/v4/test/tool/TestParseErrors.java deleted file mode 100644 index 523969b16..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestParseErrors.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.test.tool; - -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** test runtime parse errors */ -public class TestParseErrors extends BaseTest { - @Test public void testTokenMismatch() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aa", false); - String expecting = "line 1:1 mismatched input 'a' expecting 'b'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletion() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); - String expecting = "line 1:1 extraneous input 'a' expecting 'b'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionExpectingSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'c') ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); - String expecting = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenInsertion() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b' 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); - String expecting = "line 1:1 missing 'b' at 'c'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testConjuringUpToken() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); - String expecting = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; - assertEquals(expecting, result); - } - - @Test public void testSingleSetInsertion() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'c') 'd' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); - String expecting = "line 1:1 missing {'b', 'c'} at 'd'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testConjuringUpTokenFromSet() throws Exception { - 
String grammar = - "grammar T;\n" + - "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); - String expecting = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; - assertEquals(expecting, result); - } - - @Test public void testLL2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'" + - " | 'a' 'c'" + - ";\n" + - "q : 'e' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ae", false); - String expecting = "line 1:1 no viable alternative at input 'ae'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testLL3() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c'" + - " | 'a' 'b' 'd'" + - " ;\n" + - "q : 'e' ;\n"; - System.out.println(grammar); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abe", false); - String expecting = "line 1:2 no viable alternative at input 'abe'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testLLStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a'+ 'b'" + - " | 'a'+ 'c'" + - ";\n" + - "q : 'e' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aaae", false); - String expecting = "line 1:3 no viable alternative at input 'aaae'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionBeforeLoop() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'*;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); - String expecting = "line 1:1 extraneous input 'a' expecting {, 'b'}\n" + - "line 1:3 token recognition error at: 'c'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionBeforeLoop() throws Exception { - // can only delete 1 before loop - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c';"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); - String expecting = - "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionDuringLoop() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); - String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionDuringLoop() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); - String expecting = - "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n" + - "line 1:6 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - // ------ - - @Test public void testSingleTokenDeletionBeforeLoop2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})*;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); - String expecting = "line 1:1 extraneous input 'a' expecting {, 'b', 'z'}\n" + - "line 1:3 token recognition error at: 'c'\n"; - String result = 
stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionBeforeLoop2() throws Exception { - // can only delete 1 before loop - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})* 'c';"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); - String expecting = - "line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionDuringLoop2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); - String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionDuringLoop2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); - String expecting = - "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n" + - "line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testLL1ErrorInfo() throws Exception { - String grammar = - "grammar T;\n" + - "start : animal (AND acClass)? service EOF;\n" + - "animal : (DOG | CAT );\n" + - "service : (HARDWARE | SOFTWARE) ;\n" + - "AND : 'and';\n" + - "DOG : 'dog';\n" + - "CAT : 'cat';\n" + - "HARDWARE: 'hardware';\n" + - "SOFTWARE: 'software';\n" + - "WS : ' ' -> skip ;" + - "acClass\n" + - "@init\n" + - "{ System.out.println(getExpectedTokens().toString(tokenNames)); }\n" + - " : ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "dog and software", false); - String expecting = "{'hardware', 'software'}\n"; - assertEquals(expecting, result); - } - - /** - * This is a regression test for #6 "NullPointerException in getMissingSymbol". - * https://github.com/antlr/antlr4/issues/6 - */ - @Test - public void testInvalidEmptyInput() throws Exception { - String grammar = - "grammar T;\n" + - "start : ID+;\n" + - "ID : [a-z]+;\n" + - "\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "", true); - String expecting = ""; - assertEquals(expecting, result); - assertEquals("line 1:0 missing ID at ''\n", this.stderrDuringParse); - } - - /** - * Regression test for "Getter for context is not a list when it should be". - * https://github.com/antlr/antlr4/issues/19 - */ - @Test - public void testContextListGetters() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members{\n" + - " void foo() {\n" + - " SContext s = null;\n" + - " List a = s.a();\n" + - " List b = s.b();\n" + - " }\n" + - "}\n" + - "s : (a | b)+;\n" + - "a : 'a' {System.out.print('a');};\n" + - "b : 'b' {System.out.print('b');};\n" + - ""; - String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abab", true); - String expecting = "abab\n"; - assertEquals(expecting, result); - assertNull(this.stderrDuringParse); - } - - /** - * This is a regression test for #26 "an exception upon simple rule with double recursion in an alternative". 
- * https://github.com/antlr/antlr4/issues/26 - */ - @Test - public void testDuplicatedLeftRecursiveCall() throws Exception { - String grammar = - "grammar T;\n" + - "start : expr EOF;\n" + - "expr : 'x'\n" + - " | expr expr\n" + - " ;\n" + - "\n"; - - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xx", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxx", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxxx", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - } - - /** - * This is a regression test for #45 "NullPointerException in ATNConfig.hashCode". - * https://github.com/antlr/antlr4/issues/45 - *

- * The original cause of this issue was an error in the tool's ATN state optimization, - * which is now detected early in {@link ATNSerializer} by ensuring that all - * serialized transitions point to states which were not removed. - */ - @Test - public void testInvalidATNStateRemoval() throws Exception { - String grammar = - "grammar T;\n" + - "start : ID ':' expr;\n" + - "expr : primary expr? {} | expr '->' ID;\n" + - "primary : ID;\n" + - "ID : [a-z]+;\n" + - "\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x:x", true); - String expecting = ""; - assertEquals(expecting, result); - assertNull(this.stderrDuringParse); - } - - @Test public void testNoViableAltAvoidance() throws Exception { - // "a." matches 'a' to rule e but then realizes '.' won't match. - // previously would cause noviablealt. now prediction pretends to - // have "a' predict 2nd alt of e. Will get syntax error later so - // let it get farther. - String grammar = - "grammar T;\n" + - "s : e '!' ;\n" + - "e : 'a' 'b'\n" + - " | 'a'\n" + - " ;\n" + - "DOT : '.' ;\n" + - "WS : [ \\t\\r\\n]+ -> skip;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a.", false); - String expecting = - "line 1:1 mismatched input '.' expecting '!'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } -} From e39ac5a197c825d82b9831742abfcb078373434e Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 00:56:08 +0800 Subject: [PATCH 16/26] validated CompositeParsers tests --- .../org/antlr/v4/test/rt/gen/Generator.java | 3 +- .../DelegatesSeeSameTokenType_S.st | 2 +- .../DelegatesSeeSameTokenType_T.st | 2 +- ...orInvokesDelegateRuleWithReturnStruct_S.st | 2 +- .../DelegatorRuleOverridesDelegates.st | 2 +- .../DelegatorRuleOverridesDelegates_S.st | 2 +- .../DelegatorRuleOverridesDelegates_T.st | 2 +- ...gatorRuleOverridesLookaheadInDelegate_S.st | 2 +- .../ImportedRuleWithAction_S.st | 2 +- .../org/antlr/v4/test/rt/java/Java.test.stg | 2 +- .../v4/test/rt/java/TestCompositeParsers.java | 20 +- .../v4/test/tool/TestCompositeGrammars.java | 418 +----------------- 12 files changed, 22 insertions(+), 437 deletions(-) diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index 52ea201ad..55a6ad029 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -15,7 +15,6 @@ import java.util.List; import java.util.Map; import org.antlr.v4.test.rt.java.BaseTest; -import org.junit.Test; import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.STGroupFile; @@ -1199,7 +1198,7 @@ public class Generator { file.importGrammar = true; file.addCompositeParserTest(input, "DelegatorInvokesDelegateRule", "M", "s", "b", "S.a\n", null, "S"); file.addCompositeParserTest(input, "BringInLiteralsFromDelegate", "M", "s", "=a", "S.a\n", null, "S"); - file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithArgs", "M", "s", "a", "S.a1000\n", null, "S"); + file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithArgs", "M", "s", "b", "S.a1000\n", null, "S"); file.addCompositeParserTest(input, "DelegatorInvokesDelegateRuleWithReturnStruct", "M", "s", "b", "S.ab\n", null, "S"); file.addCompositeParserTest(input, "DelegatorAccessesDelegateMembers", "M", "s", "b", "foo\n", null, "S"); file.addCompositeParserTest(input, "DelegatorInvokesFirstVersionOfDelegateRule", "M", "s", "b", "S.a\n", null, "S", 
"T"); diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st index 4ed0581fe..b67e54223 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_S.st @@ -1,3 +1,3 @@ parser grammar S; tokens { A, B, C } -x : A {}; +x : A {}; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st index 817f5ccf2..06817ea66 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatesSeeSameTokenType_T.st @@ -1,3 +1,3 @@ parser grammar S; tokens { C, B, A } // reverse order -y : A {}; +y : A {}; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st index 9a2e9c2d0..58deba919 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct_S.st @@ -1,2 +1,2 @@ parser grammar S; -A : B {}; +a : B {}; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st index d00bb36a3..f79357b0c 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates.st @@ -1,4 +1,4 @@ grammar M; import S, T; -b : 'b'|'c' {}|B|A; +b : 'b'|'c' {}|B|A; WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st index 2ac580483..22e1ee03f 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_S.st @@ -1,4 +1,4 @@ parser grammar S; -a : b {}; +a : b {}; b : 'b' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st index 57893ca2e..69f759d51 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesDelegates_T.st @@ -1,3 +1,3 @@ parser grammar S; tokens { A } -b : 'b' {}; +b : 'b' {}; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st index 8c59175f8..c60a2288d 100644 --- 
a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st @@ -2,6 +2,6 @@ parser grammar S; type_ : 'int' ; decl : type_ ID ';' | type_ ID init ';' { - + }; init : '=' INT; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction_S.st index e17380340..2a20ae0dd 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/ImportedRuleWithAction_S.st @@ -1,2 +1,2 @@ parser grammar S; -a : @after {} : B; +a @after {} : B; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index f0cdc0ab7..90b3b44f8 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -337,7 +337,7 @@ void foo() { } >> -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo\\n'};" +Declare_foo() ::= <> Invoke_foo() ::= "this.foo();" diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java index 5f64d13ca..7c4c99f84 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java @@ -53,7 +53,7 @@ public class TestCompositeParsers extends BaseTest { "s : label=a[3] {System.out.println($label.y);} ;\n" + "B : 'b' ; // defines B from inherited token space\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "a", false); + String found = execParser("M.g4", grammar, "MParser", "MLexer", "s", "b", false); assertEquals("S.a1000\n", found); assertNull(this.stderrDuringParse); } @@ -61,7 +61,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { String slave_S = "parser grammar S;\n" + - "A : B {System.out.print(\"S.a\");};"; + "a : B {System.out.print(\"S.a\");};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); @@ -79,7 +79,7 @@ public class TestCompositeParsers extends BaseTest { public void testDelegatorAccessesDelegateMembers() throws Exception { String slave_S = "parser grammar S;\n" + "@members {\n" + - "this.foo = function() {document.getElementById('output').value += 'foo\\n'};\n" + + "public void foo() {System.out.println(\"foo\");}\n" + "}\n" + "a : B;"; mkdir(tmpdir); @@ -121,13 +121,13 @@ public class TestCompositeParsers extends BaseTest { public void testDelegatesSeeSameTokenType() throws Exception { String slave_S = "parser grammar S;\n" + "tokens { A, B, C }\n" + - "x : A {System.out.print(\"S.x\");};"; + "x : A {System.out.println(\"S.x\");};"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); String slave_T = "parser grammar S;\n" + "tokens { C, B, A } // reverse order\n" + - "y : A {System.out.print(\"T.y\");};"; + "y : A {System.out.println(\"T.y\");};"; mkdir(tmpdir); writeFile(tmpdir, "T.g4", slave_T); @@ -210,7 +210,7 @@ public class TestCompositeParsers extends BaseTest { "type_ : 'int' ;\n" + "decl : type_ ID ';'\n" + " | type_ ID init ';' {\n" + - " System.out.print(\"decl: \" + $text);\n" + + " System.out.print(\"Decl: \" + $text);\n" + " };\n" 
+ "init : '=' INT;"; mkdir(tmpdir); @@ -231,7 +231,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testDelegatorRuleOverridesDelegates() throws Exception { String slave_S = "parser grammar S;\n" + - "a : b {System.out.print(\"S.a\");};\n" + + "a : b {System.out.println(\"S.a\");};\n" + "b : 'b' ;\n" + " "; mkdir(tmpdir); @@ -239,13 +239,13 @@ public class TestCompositeParsers extends BaseTest { String slave_T = "parser grammar S;\n" + "tokens { A }\n" + - "b : 'b' {System.out.print(\"T.b\");};"; + "b : 'b' {System.out.println(\"T.b\");};"; mkdir(tmpdir); writeFile(tmpdir, "T.g4", slave_T); String grammar = "grammar M;\n" + "import S, T;\n" + - "b : 'b'|'c' {System.out.print(\"M.b\");}|B|A;\n" + + "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A;\n" + "WS : (' '|'\\n') -> skip ;"; String found = execParser("M.g4", grammar, "MParser", "MLexer", "a", "c", false); assertEquals("M.b\nS.a\n", found); @@ -272,7 +272,7 @@ public class TestCompositeParsers extends BaseTest { @Test public void testImportedRuleWithAction() throws Exception { String slave_S = "parser grammar S;\n" + - "a : @after {} : B;"; + "a @after {} : B;"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); diff --git a/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java b/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java index f703336f3..85b284008 100644 --- a/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java +++ b/tool/test/org/antlr/v4/test/tool/TestCompositeGrammars.java @@ -127,224 +127,7 @@ public class TestCompositeGrammars extends BaseTest { assertEquals(0, equeue.size()); } - @Test public void testDelegatorInvokesDelegateRule() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.a\n", found); - } - @Test public void testBringInLiteralsFromDelegate() throws Exception { - String slave = - "parser grammar S;\n" + - "a : '=' 'a' {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "=a", debug); - assertEquals("S.a\n", found); - } - - @Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception { - // must generate something like: - // public int a(int x) throws RecognitionException { return gS.a(x); } - // in M. - String slave = - "parser grammar S;\n" + - "a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : label=a[3] {System.out.println($label.y);} ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.a1000\n", found); - } - - @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { - // must generate something like: - // public int a(int x) throws RecognitionException { return gS.a(x); } - // in M. 
- String slave = - "parser grammar S;\n" + - "a : B {System.out.print(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a {System.out.println($a.text);} ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.ab\n", found); - } - - @Test public void testDelegatorAccessesDelegateMembers() throws Exception { - String slave = - "parser grammar S;\n" + - "@parser::members {\n" + - " public void foo() {System.out.println(\"foo\");}\n" + - "}\n" + - "a : B ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + // uses no rules from the import - "import S;\n" + - "s : 'b' {foo();} ;\n" + // gS is import pointer - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("foo\n", found); - } - - @Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception { - String slave = - "parser grammar S;\n" + - "a : b {System.out.println(\"S.a\");} ;\n" + - "b : B ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String slave2 = - "parser grammar T;\n" + - "a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a - writeFile(tmpdir, "T.g4", slave2); - String master = - "grammar M;\n" + - "import S,T;\n" + - "s : a ;\n" + - "B : 'b' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.a\n", found); - } - - @Test public void testDelegatesSeeSameTokenType() throws Exception { - String slave = - "parser grammar S;\n" + // A, B, C token type order - "tokens { A, B, C }\n" + - "x : A {System.out.println(\"S.x\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String slave2 = - "parser grammar T;\n" + - "tokens { C, B, A }\n" + // reverse order - "y : A {System.out.println(\"T.y\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave2); - // The lexer will create rules to match letters a, b, c. - // The associated token types A, B, C must have the same value - // and all import'd parsers. 
Since ANTLR regenerates all imports - // for use with the delegator M, it can generate the same token type - // mapping in each parser: - // public static final int C=6; - // public static final int EOF=-1; - // public static final int B=5; - // public static final int WS=7; - // public static final int A=4; - - String master = - "grammar M;\n" + - "import S,T;\n" + - "s : x y ;\n" + // matches AA, which should be "aa" - "B : 'b' ;\n" + // another order: B, A, C - "A : 'a' ;\n" + - "C : 'c' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "aa", debug); - assertEquals("S.x\n" + - "T.y\n", found); - } - - @Test public void testDelegatesSeeSameTokenType2() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar S;\n" + // A, B, C token type order - "tokens { A, B, C }\n" + - "x : A {System.out.println(\"S.x\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String slave2 = - "parser grammar T;\n" + - "tokens { C, B, A }\n" + // reverse order - "y : A {System.out.println(\"T.y\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave2); - - String master = - "grammar M;\n" + - "import S,T;\n" + - "s : x y ;\n" + // matches AA, which should be "aa" - "B : 'b' ;\n" + // another order: B, A, C - "A : 'a' ;\n" + - "C : 'c' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - String expectedTokenIDToTypeMap = "{EOF=-1, B=1, A=2, C=3, WS=4}"; - String expectedStringLiteralToTypeMap = "{'a'=2, 'b'=1, 'c'=3}"; - String expectedTypeToTokenList = "[B, A, C, WS]"; - - assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString()); - assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "aa", debug); - assertEquals("S.x\n" + - "T.y\n", found); - } - - @Test public void testCombinedImportsCombined() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "grammar S;\n" + // A, B, C token type order - "tokens { A, B, C }\n" + - "x : 'x' INT {System.out.println(\"S.x\");} ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - String master = - "grammar M;\n" + - "import S;\n" + - "s : x INT ;\n"; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "x 34 9", debug); - assertEquals("S.x\n", found); - } @Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception { ErrorQueue equeue = new ErrorQueue(); @@ -388,145 +171,11 @@ public class TestCompositeGrammars extends BaseTest { "s : x ;\n" + "WS : (' '|'\\n') -> skip ;\n" ; writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); + /*Grammar g =*/ new Grammar(tmpdir+"/M.g4", master, equeue); assertEquals(ErrorType.SYNTAX_ERROR, equeue.errors.get(0).getErrorType()); } - @Test public void testDelegatorRuleOverridesDelegate() throws Exception { - String slave = - "parser grammar S;\n" + - "a : b {System.out.println(\"S.a\");} ;\n" + - "b : B ;\n" ; - 
mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "b : 'b'|'c' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "a", "c", debug); - assertEquals("S.a\n", found); - } - - @Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception { - String slave = - "parser grammar JavaDecl;\n" + - "type : 'int' ;\n" + - "decl : type ID ';'\n" + - " | type ID init ';' {System.out.println(\"JavaDecl: \"+$text);}\n" + - " ;\n" + - "init : '=' INT ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "JavaDecl.g4", slave); - String master = - "grammar Java;\n" + - "import JavaDecl;\n" + - "prog : decl ;\n" + - "type : 'int' | 'float' ;\n" + - "\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - // for float to work in decl, type must be overridden - String found = execParser("Java.g4", master, "JavaParser", "JavaLexer", - "prog", "float x = 3;", debug); - assertEquals("JavaDecl: floatx=3;\n", found); - } - - @Test public void testDelegatorRuleOverridesDelegates() throws Exception { - String slave = - "parser grammar S;\n" + - "a : b {System.out.println(\"S.a\");} ;\n" + - "b : 'b' ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - String slave2 = - "parser grammar T;\n" + - "tokens { A }\n" + - "b : 'b' {System.out.println(\"T.b\");} ;\n"; - writeFile(tmpdir, "T.g4", slave2); - - String master = - "grammar M;\n" + - "import S, T;\n" + - "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "a", "c", debug); - assertEquals("M.b\n" + - "S.a\n", found); - } - // LEXER INHERITANCE - - @Test public void testLexerDelegatorInvokesDelegateRule() throws Exception { - String slave = - "lexer grammar S;\n" + - "A : 'a' {System.out.println(\"S.A\");} ;\n" + - "C : 'c' ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "lexer grammar M;\n" + - "import S;\n" + - "B : 'b' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String expecting = - "S.A\n" + - "[@0,0:0='a',<3>,1:0]\n" + - "[@1,1:1='b',<1>,1:1]\n" + - "[@2,2:2='c',<4>,1:2]\n" + - "[@3,3:2='',<-1>,1:3]\n"; - String found = execLexer("M.g4", master, "M", "abc", debug); - assertEquals(expecting, found); - } - - @Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception { - String slave = - "lexer grammar S;\n" + - "A : 'a' {System.out.println(\"S.A\");} ;\n" + - "B : 'b' {System.out.println(\"S.B\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "lexer grammar M;\n" + - "import S;\n" + - "A : 'a' B {System.out.println(\"M.A\");} ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execLexer("M.g4", master, "M", "ab", debug); - assertEquals("M.A\n" + - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:1='',<-1>,1:2]\n", found); - } - - @Test public void testKeywordVSIDOrder() throws Exception { - // rules in lexer are imported at END so rules in master override - // *and* get priority over imported rules. 
So importing ID doesn't - // mess up keywords in master grammar - ErrorQueue equeue = new ErrorQueue(); - String slave = - "lexer grammar S;\n" + - "ID : 'a'..'z'+ ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "a : A {System.out.println(\"M.a: \"+$A);} ;\n" + - "A : 'abc' {System.out.println(\"M.A\");} ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "a", "abc", debug); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size()); - - assertEquals("M.A\n" + - "M.a: [@0,0:2='abc',<1>,1:0]\n", found); - } - // Make sure that M can import S that imports T. @Test public void test3LevelImport() throws Exception { ErrorQueue equeue = new ErrorQueue(); @@ -733,76 +382,13 @@ public class TestCompositeGrammars extends BaseTest { assertEquals(expecting, equeue.errors.size()); } - @Test public void testImportedRuleWithAction() throws Exception { - // wasn't terminating. @after was injected into M as if it were @members - String slave = - "parser grammar S;\n" + - "a @after {int x;} : B ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("", found); - } - - @Test public void testImportedGrammarWithEmptyOptions() throws Exception { - String slave = - "parser grammar S;\n" + - "options {}\n" + - "a : B ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("", found); - } - - /** - * This is a regression test for antlr/antlr4#248 "Including grammar with - * only fragments breaks generated lexer". - * https://github.com/antlr/antlr4/issues/248 - */ - @Test public void testImportLexerWithOnlyFragmentRules() { - String slave = - "lexer grammar Unicode;\n" + - "\n" + - "fragment\n" + - "UNICODE_CLASS_Zs : '\\u0020' | '\\u00A0' | '\\u1680' | '\\u180E'\n" + - " | '\\u2000'..'\\u200A'\n" + - " | '\\u202F' | '\\u205F' | '\\u3000'\n" + - " ;\n"; - String master = - "grammar Test;\n" + - "import Unicode;\n" + - "\n" + - "program : 'test' 'test' ;\n" + - "\n" + - "WS : (UNICODE_CLASS_Zs)+ -> skip;\n"; - - mkdir(tmpdir); - writeFile(tmpdir, "Unicode.g4", slave); - String found = execParser("Test.g4", master, "TestParser", "TestLexer", "program", "test test", debug); - assertEquals("", found); - assertNull(stderrDuringParse); - } /** * This is a regression test for antlr/antlr4#670 "exception when importing * grammar". 
* https://github.com/antlr/antlr4/issues/670 */ + // TODO: migrate to test framework @Test public void testImportLargeGrammar() throws Exception { String slave = load("Java.g4", "UTF-8"); From 31812721c858c1468e54b00f714c4342d48e1501 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 01:07:31 +0800 Subject: [PATCH 17/26] validate FullContextParsing tests --- .../org/antlr/v4/test/rt/gen/Generator.java | 26 +- .../org/antlr/v4/test/rt/gen/TestMethod.java | 1 + .../org/antlr/v4/test/rt/java/Java.test.stg | 4 +- .../test/rt/java/TestFullContextParsing.java | 16 +- .../v4/test/tool/TestFullContextParsing.java | 356 ------------------ 5 files changed, 27 insertions(+), 376 deletions(-) delete mode 100644 tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index 55a6ad029..308d55ca5 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -1,7 +1,5 @@ package org.antlr.v4.test.rt.gen; -import static org.junit.Assert.assertEquals; - import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; @@ -846,11 +844,12 @@ public class Generator { private TestFile buildFullContextParsing() throws Exception { TestFile file = new TestFile("FullContextParsing"); - file.addParserTest(input, "AmbigYieldsCtxSensitiveDFA", "T", "s", "abc", + TestMethod tm = file.addParserTest(input, "AmbigYieldsCtxSensitiveDFA", "T", "s", "abc", "Decision 0:\n" + "s0-ID->:s1^=>1\n", "line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n"); - file.addParserTestsWithErrors(input, "CtxSensitiveDFA", "T", "s", + tm.debug = true; + tm = file.addParserTestsWithErrors(input, "CtxSensitiveDFA", "T", "s", "$ 34 abc", "Decision 1:\n" + "s0-INT->s1\n" + @@ -863,7 +862,8 @@ public class Generator { "s1-ID->:s2^=>1\n", "line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n"); - file.addParserTest(input, "CtxSensitiveDFATwoDiffInput", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "CtxSensitiveDFATwoDiffInput", "T", "s", "$ 34 abc @ 34 abc", "Decision 2:\n" + "s0-INT->s1\n" + @@ -872,14 +872,16 @@ public class Generator { "line 1:2 reportContextSensitivity d=2 (e), input='34'\n" + "line 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\n" + "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n"); - file.addParserTest(input, "SLLSeesEOFInLLGrammar", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "SLLSeesEOFInLLGrammar", "T", "s", "34 abc", "Decision 0:\n" + "s0-INT->s1\n" + "s1-ID->:s2^=>1\n", "line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" + "line 1:0 reportContextSensitivity d=0 (e), input='34'\n"); - file.addParserTestsWithErrors(input, "FullContextIF_THEN_ELSEParse", "T", "s", + tm.debug = true; + tm = file.addParserTestsWithErrors(input, "FullContextIF_THEN_ELSEParse", "T", "s", "{ if x then return }", "Decision 1:\n" + "s0-'}'->:s1=>2\n", @@ -923,19 +925,22 @@ public class Generator { "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n"); - file.addParserTest(input, "LoopsSimulateTailRecursion", "T", "prog", + tm.debug = true; + tm = file.addParserTest(input, "LoopsSimulateTailRecursion", "T", "prog", "a(i)<-x", 
"pass: a(i)<-x\n", "line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" + "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n"); - file.addParserTest(input, "AmbiguityNoLoop", "T", "prog", + tm.debug = true; + tm = file.addParserTest(input, "AmbiguityNoLoop", "T", "prog", "a@", "alt 1\n", "line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" + "line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" + "line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" + "line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n"); - file.addParserTestsWithErrors(input, "ExprAmbiguity", "T", "s", + tm.debug = true; + tm = file.addParserTestsWithErrors(input, "ExprAmbiguity", "T", "s", "a+b", "(expr a + (expr b))\n", "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + @@ -946,6 +951,7 @@ public class Generator { "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n"); + tm.debug = true; return file; } diff --git a/tool/test/org/antlr/v4/test/rt/gen/TestMethod.java b/tool/test/org/antlr/v4/test/rt/gen/TestMethod.java index 7fdac4075..6cdfd8410 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/TestMethod.java +++ b/tool/test/org/antlr/v4/test/rt/gen/TestMethod.java @@ -12,6 +12,7 @@ public abstract class TestMethod { public String input; public String expectedOutput; public String expectedErrors; + public boolean debug = false; protected TestMethod(String name, String grammarName, String input, String expectedOutput, String expectedErrors, Integer index) { diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index 90b3b44f8..cb5ba186b 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -58,7 +58,7 @@ public void test() throws Exception { };separator="\n", wrap, anchor> String grammar = };separator="\\n\" +\n", wrap, anchor>"; - String found = execParser(".g4", grammar, "Parser", "Lexer", "", "", false); + String found = execParser(".g4", grammar, "Parser", "Lexer", "", "", ); assertEquals("", found); assertEquals("", this.stderrDuringParse); @@ -76,7 +76,7 @@ CompositeParserTestMethod(test) ::= << AbstractParserTestMethod(test) ::= << String test(String input) throws Exception { String grammar = };separator="\\n\" +\n", wrap, anchor>"; - return execParser(".g4", grammar, "Parser", "Lexer", "", input, false); + return execParser(".g4", grammar, "Parser", "Lexer", "", input, ); } >> diff --git a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java index 9c91ed9fe..2af3e6830 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java @@ -12,7 +12,7 @@ public class TestFullContextParsing extends BaseTest { " : ID | ID {} ;\n" + "ID : 'a'..'z'+;\n" + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", true); assertEquals("Decision 0:\ns0-ID->:s1^=>1\n", found); assertEquals("line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", this.stderrDuringParse); } @@ -27,7 +27,7 @@ public class TestFullContextParsing extends BaseTest { "ID : 
'a'..'z'+ ;\n" + "INT : '0'..'9'+ ;\n" + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; - return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true); } @Test @@ -55,7 +55,7 @@ public class TestFullContextParsing extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+ ;\n" + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "$ 34 abc @ 34 abc", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "$ 34 abc @ 34 abc", true); assertEquals("Decision 2:\ns0-INT->s1\ns1-ID->:s2^=>1\n", found); assertEquals("line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\nline 1:2 reportContextSensitivity d=2 (e), input='34'\nline 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\nline 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", this.stderrDuringParse); } @@ -71,7 +71,7 @@ public class TestFullContextParsing extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+ ;\n" + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "34 abc", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "34 abc", true); assertEquals("Decision 0:\ns0-INT->s1\ns1-ID->:s2^=>1\n", found); assertEquals("line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\nline 1:0 reportContextSensitivity d=0 (e), input='34'\n", this.stderrDuringParse); } @@ -87,7 +87,7 @@ public class TestFullContextParsing extends BaseTest { " ;\n" + "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; - return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true); } @Test @@ -149,7 +149,7 @@ public class TestFullContextParsing extends BaseTest { " | ID\n" + " ;\n" + "ID : [a-z]+ ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", true); assertEquals("pass: a(i)<-x\n", found); assertEquals("line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\nline 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n", this.stderrDuringParse); } @@ -168,7 +168,7 @@ public class TestFullContextParsing extends BaseTest { " ;\n" + "ID : [a-z]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a@", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a@", true); assertEquals("alt 1\n", found); assertEquals("line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\nline 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\nline 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\nline 1:2 reportContextSensitivity d=1 (expr), input='a@'\n", this.stderrDuringParse); } @@ -187,7 +187,7 @@ public class TestFullContextParsing extends BaseTest { " ;\n" + "ID : [a-zA-Z]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;"; - return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true); } @Test diff --git a/tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java b/tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java deleted file mode 100644 index 817032b21..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestFullContextParsing.java +++ /dev/null @@ 
-1,356 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.test.tool; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/* - cover these cases: - dead end - single alt - single alt + preds - conflict - conflict + preds - - */ -public class TestFullContextParsing extends BaseTest { - @Test public void testAmbigYieldsCtxSensitiveDFA() { - String grammar = - "grammar T;\n"+ - "s" + - "@after {dumpDFA();}\n" + - " : ID | ID {;} ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "abc", true); - String expecting = - "Decision 0:\n" + - "s0-ID->:s1^=>1\n"; // ctx sensitive - assertEquals(expecting, result); - assertEquals("line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", - this.stderrDuringParse); - } - - public String testCtxSensitiveDFA(String input) { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : '$' a | '@' b ;\n" + - "a : e ID ;\n" + - "b : e INT ID ;\n" + - "e : INT | ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true); - } - - @Test - public void testCtxSensitiveDFA1() { - String result = testCtxSensitiveDFA("$ 34 abc"); - String expecting = - "Decision 1:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + - "line 1:2 reportContextSensitivity d=1 (e), input='34'\n", - this.stderrDuringParse); - } - - @Test - public void testCtxSensitiveDFA2() { - String result = testCtxSensitiveDFA("@ 34 abc"); - String expecting = - "Decision 1:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + - "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", - this.stderrDuringParse); - } - - @Test public void 
testCtxSensitiveDFATwoDiffInput() { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : ('$' a | '@' b)+ ;\n" + - "a : e ID ;\n" + - "b : e INT ID ;\n" + - "e : INT | ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "$ 34 abc @ 34 abc", true); - String expecting = - "Decision 2:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" + - "line 1:2 reportContextSensitivity d=2 (e), input='34'\n" + - "line 1:14 reportAttemptingFullContext d=2 (e), input='34abc'\n" + - "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", - this.stderrDuringParse); - } - - @Test - public void testSLLSeesEOFInLLGrammar() { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : a ;\n" + - "a : e ID ;\n" + - "b : e INT ID ;\n" + - "e : INT | ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "34 abc", true); - String expecting = - "Decision 0:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; // Must point at accept state - assertEquals(expecting, result); - assertEquals("line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" + - "line 1:0 reportContextSensitivity d=0 (e), input='34'\n", - this.stderrDuringParse); - } - - @Test public void testFullContextIF_THEN_ELSEParse() { - String grammar = - "grammar T;\n"+ - "s" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - "@after {dumpDFA();}\n" + - " : '{' stat* '}'" + - " ;\n" + - "stat: 'if' ID 'then' stat ('else' ID)?\n" + - " | 'return'\n" + - " ;" + - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String input = "{ if x then return }"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - String expecting = - "Decision 1:\n" + - "s0-'}'->:s1=>2\n"; - assertEquals(expecting, result); - assertEquals(null, this.stderrDuringParse); - - input = "{ if x then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - // Technically, this input sequence is not ambiguous because else - // uniquely predicts going into the optional subrule. else cannot - // be matched by exiting stat since that would only match '}' or - // the start of a stat. But, we are using the theory that - // SLL(1)=LL(1) and so we are avoiding full context parsing - // by declaring all else clause parsing to be ambiguous. - assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", - this.stderrDuringParse); - - input = - "{ if x then if y then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'}'->:s2=>2\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", - this.stderrDuringParse); - - // should not be ambiguous because the second 'else bar' clearly - // indicates that the first else should match to the innermost if. 
- // LL_EXACT_AMBIG_DETECTION makes us keep going to resolve - - input = - "{ if x then if y then return else foo else bar }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" + - "line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", - this.stderrDuringParse); - - input = - "{ if x then return else foo\n" + - "if x then if y then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'}'->:s2=>2\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + - "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", - this.stderrDuringParse); - - input = - "{ if x then return else foo\n" + - "if x then if y then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'}'->:s2=>2\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + - "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", - this.stderrDuringParse); - } - - /** - * Tests predictions for the following case involving closures. - * http://www.antlr.org/wiki/display/~admin/2011/12/29/Flaw+in+ANTLR+v3+LL(*)+analysis+algorithm - */ - @Test - public void testLoopsSimulateTailRecursion() throws Exception { - String grammar = - "grammar T;\n" + - "prog\n" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " : expr_or_assign*;\n" + - "expr_or_assign\n" + - " : expr '++' {System.out.println(\"fail.\");}\n" + - " | expr {System.out.println(\"pass: \"+$expr.text);}\n" + - " ;\n" + - "expr: expr_primary ('<-' ID)? 
;\n" + - "expr_primary\n" + - " : '(' ID ')'\n" + - " | ID '(' ID ')'\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" + - ""; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", true); - assertEquals("pass: a(i)<-x\n", found); - - String expecting = - "line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" + - "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n"; - assertEquals(expecting, this.stderrDuringParse); - } - - @Test - public void testAmbiguityNoLoop() throws Exception { - // simpler version of testLoopsSimulateTailRecursion, no loops - String grammar = - "grammar T;\n" + - "prog\n" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " : expr expr {System.out.println(\"alt 1\");}\n" + - " | expr\n" + - " ;\n" + - "expr: '@'\n" + - " | ID '@'\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a@", true); - assertEquals("alt 1\n", found); - - String expecting = - "line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" + - "line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" + - "line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n"; - assertEquals(expecting, this.stderrDuringParse); - } - - @Test - public void testExprAmbiguity() throws Exception { - // translated left-recursive expr rule to test ambig detection - String grammar = - "grammar T;\n" + - "s\n" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " : expr[0] {System.out.println($expr.ctx.toStringTree(this));} ;\n" + - "\n" + - "expr[int _p]\n" + - " : ID\n" + - " ( {5 >= $_p}? '*' expr[6]\n" + - " | {4 >= $_p}? 
'+' expr[5]\n" + - " )*\n" + - " ;\n" + - "\n" + - "ID : [a-zA-Z]+ ; // match identifiers\n" + - "WS : [ \\t\\r\\n]+ -> skip ; // toss out whitespace\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a+b", true); - assertEquals("(expr a + (expr b))\n", found); - - String expecting = - "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n"; - assertEquals(expecting, this.stderrDuringParse); - - found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a+b*c", true); - assertEquals("(expr a + (expr b * (expr c)))\n", found); - - expecting = - "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + - "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + - "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n"; - assertEquals(expecting, this.stderrDuringParse); - } - -} From 740c30d21d4700b8dc4fc01a2134bba7832cf558 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 02:21:41 +0800 Subject: [PATCH 18/26] validated LeftRecursion tests --- .../org/antlr/v4/test/rt/gen/Generator.java | 175 +++-- .../grammars/LeftRecursion/MultipleActions.st | 8 + .../MultipleActionsPredicatesOptions.st | 9 + .../MultipleAlternativesWithCommonLabel.st | 10 +- .../PrefixOpWithActionAndLabel.st | 4 +- .../LeftRecursion/ReturnValueAndActions.st | 2 +- .../ReturnValueAndActionsList1.st | 13 + .../ReturnValueAndActionsList2.st | 12 + .../LeftRecursion/SemPredFailOption.st | 7 + .../TernaryExprExplicitAssociativity.st | 10 + .../LeftRecursion/WhitespaceInfluence.st | 2 +- .../ParserExec/ListLabelForClosureContext.st | 2 +- .../org/antlr/v4/test/rt/java/Java.test.stg | 12 +- .../v4/test/rt/java/TestLeftRecursion.java | 377 ++++++++-- .../antlr/v4/test/tool/TestLeftRecursion.java | 665 ------------------ 15 files changed, 510 insertions(+), 798 deletions(-) create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActions.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList1.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList2.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st create mode 100644 tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/TernaryExprExplicitAssociativity.st diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index 308d55ca5..e03046038 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -747,61 +747,61 @@ public class Generator { file.addParserTest(input, "SemPred", "T", "s", "x y z", "(s (a (a (a x) y) z))\n", null); file.addParserTests(input, "TernaryExpr", "T", "s", - "a", "(s (e a) )", - "a+b", "(s (e (e a) + (e b)) )", - "a*b", "(s (e (e a) * (e b)) )", - "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", - "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", - "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", - "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? 
(e d) : (e e))) )"); + "a", "(s (e a) )\n", + "a+b", "(s (e (e a) + (e b)) )\n", + "a*b", "(s (e (e a) * (e b)) )\n", + "a?b:c", "(s (e (e a) ? (e b) : (e c)) )\n", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )\n", + "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )\n", + "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )\n", + "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )\n", + "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )\n"); file.addParserTests(input, "Expressions", "T", "s", - "a", "(s (e a) )", - "1", "(s (e 1) )", - "a-1", "(s (e (e a) - (e 1)) )", - "a.b", "(s (e (e a) . b) )", - "a.this", "(s (e (e a) . this) )", - "-a", "(s (e - (e a)) )", - "-a+b", "(s (e (e - (e a)) + (e b)) )"); + "a", "(s (e a) )\n", + "1", "(s (e 1) )\n", + "a-1", "(s (e (e a) - (e 1)) )\n", + "a.b", "(s (e (e a) . b) )\n", + "a.this", "(s (e (e a) . this) )\n", + "-a", "(s (e - (e a)) )\n", + "-a+b", "(s (e (e - (e a)) + (e b)) )\n"); file.addParserTests(input, "JavaExpressions", "T", "s", - "a|b&c", "(s (e (e a) | (e (e b) & (e c))) )", - "(a|b)&c", "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", - "a > b", "(s (e (e a) > (e b)) )", - "a >> b", "(s (e (e a) >> (e b)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )", - "(T)x", "(s (e ( (type T) ) (e x)) )", - "new A().b", "(s (e (e new (type A) ( )) . b) )", - "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", - "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", - "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )", - "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )"); + "a|b&c", "(s (e (e a) | (e (e b) & (e c))) )\n", + "(a|b)&c", "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )\n", + "a > b", "(s (e (e a) > (e b)) )\n", + "a >> b", "(s (e (e a) >> (e b)) )\n", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )\n", + "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )\n", + "(T)x", "(s (e ( (type T) ) (e x)) )\n", + "new A().b", "(s (e (e new (type A) ( )) . b) )\n", + "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )\n", + "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )\n", + "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . 
g) ( (expressionList (e x) , (e 1)) )) )\n", + "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n"); file.addParserTests(input, "Declarations", "T", "s", - "a", "(s (declarator a) )", - "*a", "(s (declarator * (declarator a)) )", - "**a", "(s (declarator * (declarator * (declarator a))) )", - "a[3]", "(s (declarator (declarator a) [ (e 3) ]) )", - "b[]", "(s (declarator (declarator b) [ ]) )", - "(a)", "(s (declarator ( (declarator a) )) )", - "a[]()", "(s (declarator (declarator (declarator a) [ ]) ( )) )", - "a[][]", "(s (declarator (declarator (declarator a) [ ]) [ ]) )", - "*a[]", "(s (declarator * (declarator (declarator a) [ ])) )", - "(*a)[]", "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )"); + "a", "(s (declarator a) )\n", + "*a", "(s (declarator * (declarator a)) )\n", + "**a", "(s (declarator * (declarator * (declarator a))) )\n", + "a[3]", "(s (declarator (declarator a) [ (e 3) ]) )\n", + "b[]", "(s (declarator (declarator b) [ ]) )\n", + "(a)", "(s (declarator ( (declarator a) )) )\n", + "a[]()", "(s (declarator (declarator (declarator a) [ ]) ( )) )\n", + "a[][]", "(s (declarator (declarator (declarator a) [ ]) [ ]) )\n", + "*a[]", "(s (declarator * (declarator (declarator a) [ ])) )\n", + "(*a)[]", "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )\n"); file.addParserTests(input, "ReturnValueAndActions", "T", "s", - "4", "4", - "1+2", "3", - "1+2*3", "7", - "(1+2)*3", "9"); + "4", "4\n", + "1+2", "3\n", + "1+2*3", "7\n", + "(1+2)*3", "9\n"); file.addParserTests(input, "LabelsOnOpSubrule", "T", "s", - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))"); + "4", "(s (e 4))\n", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))\n", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n"); file.addParserTests(input, "ReturnValueAndActionsAndLabels", "T", "s", - "4", "4", - "1+2", "3", - "1+2*3", "7", - "i++*3", "12"); + "4", "4\n", + "1+2", "3\n", + "1+2*3", "7\n", + "i++*3", "12\n"); /** * This is a regression test for antlr/antlr4#433 "Not all context accessor * methods are generated when an alternative rule label is used for multiple @@ -809,14 +809,14 @@ public class Generator { * https://github.com/antlr/antlr4/issues/433 */ file.addParserTests(input, "MultipleAlternativesWithCommonLabel", "T", "s", - "4", "4", - "1+2", "3", - "1+2*3", "7", - "i++*3", "12"); + "4", "4\n", + "1+2", "3\n", + "1+2*3", "7\n", + "i++*3", "12\n"); file.addParserTests(input, "PrefixOpWithActionAndLabel", "T", "s", - "a", "a", - "a+b", "(a+b)", - "a=b+c", "((a=b)+c)"); + "a", "a\n", + "a+b", "(a+b)\n", + "a=b+c", "((a=b)+c)\n"); file.addParserTests(input, "AmbigLR", "Expr", "prog", "1\n", "", "a = 5\n", "", @@ -839,6 +839,69 @@ public class Generator { file.addParserTest(input, "PrecedenceFilterConsidersContext", "T", "prog", "aa", "(prog (statement (letterA a)) (statement (letterA a)) )\n", null); + /** + * This is a regression test for antlr/antlr4#625 "Duplicate action breaks + * operator precedence" + * https://github.com/antlr/antlr4/issues/625 + */ + file.addParserTests(input, "MultipleActions", "T", "s", + "4", "(s (e 4))\n", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))\n", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n"); + /** + * This is a regression test for antlr/antlr4#625 "Duplicate action breaks + * operator precedence" + * https://github.com/antlr/antlr4/issues/625 + */ + file.addParserTests(input, 
"MultipleActionsPredicatesOptions", "T", "s", + "4", "(s (e 4))\n", + "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))\n", + "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n"); + file.addParserTest(input, "SemPredFailOption", "T", "s", + "x y z", + "(s (a (a x) y z))\n", + "line 1:4 rule a custom message\n"); + /** + * This is a regression test for antlr/antlr4#542 "First alternative cannot + * be right-associative". + * https://github.com/antlr/antlr4/issues/542 + */ + file.addParserTests(input, "TernaryExprExplicitAssociativity", "T", "s", + "a", "(s (e a) )\n", + "a+b", "(s (e (e a) + (e b)) )\n", + "a*b", "(s (e (e a) * (e b)) )\n", + "a?b:c", "(s (e (e a) ? (e b) : (e c)) )\n", + "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )\n", + "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )\n", + "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )\n", + "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )\n", + "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )\n"); + /** + * This is a regression test for antlr/antlr4#677 "labels not working in + * grammar file". + * https://github.com/antlr/antlr4/issues/677 + * + *
<p>This test treats {@code ,} and {@code >>} as part of a single compound + * operator (similar to a ternary operator).</p>
+ */ + file.addParserTests(input, "ReturnValueAndActionsList1", "T", "s", + "a*b", "(s (expr (expr a) * (expr b)) )\n", + "a,c>>x", "(s (expr (expr a) , (expr c) >> (expr x)) )\n", + "x", "(s (expr x) )\n", + "a*b,c,x*y>>r", "(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) )\n"); + + /** + * This is a regression test for antlr/antlr4#677 "labels not working in + * grammar file". + * https://github.com/antlr/antlr4/issues/677 + * + *
<p>This test treats the {@code ,} and {@code >>} operators separately.</p>
+ */ + file.addParserTests(input, "ReturnValueAndActionsList2", "T", "s", + "a*b", "(s (expr (expr a) * (expr b)) )\n", + "a,c>>x", "(s (expr (expr (expr a) , (expr c)) >> (expr x)) )\n", + "x", "(s (expr x) )\n", + "a*b,c,x*y>>r", "(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) )\n"); return file; } diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActions.st new file mode 100644 index 000000000..a96434467 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActions.st @@ -0,0 +1,8 @@ +grammar ; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{} + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st new file mode 100644 index 000000000..91a8b0a65 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st @@ -0,0 +1,9 @@ +grammar ; +s @after {} : e ; +e : a=e op=('*'|'/') b=e {}{true}? + | a=e op=('+'|'-') b=e {}\{true}?\ + | INT {}{} + | '(' x=e ')' {}{} + ; +INT : '0'..'9'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st index e4cf19d9d..8c540c2d5 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleAlternativesWithCommonLabel.st @@ -1,13 +1,13 @@ grammar ; s : e {}; e returns [int v] - : e '*' e {$v = $ctx.e(0).v * $ctx.e(1).v;} # binary - | e '+' e {$v = $ctx.e(0).v + $ctx.e(1).v;} # binary + : e '*' e {$v = .e(0).v * .e(1).v;} # binary + | e '+' e {$v = .e(0).v + .e(1).v;} # binary | INT {$v = $INT.int;} # anInt | '(' e ')' {$v = $e.v;} # parens - | left=e INC {;$v = $left.v + 1;} # unary - | left=e DEC {;$v = $left.v - 1;} # unary - | ID {$v = 3} # anID + | left=e INC {$v = $left.v + 1;} # unary + | left=e DEC {$v = $left.v - 1;} # unary + | ID {} # anID ; ID : 'a'..'z'+ ; INT : '0'..'9'+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st index 73d7ea48b..d78cbe37d 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/PrefixOpWithActionAndLabel.st @@ -1,9 +1,9 @@ grammar ; s : e {} ; e returns [String result] - : ID '=' e1=e {$result = \"(\" + $ID.text + \"=\" + $e1.result + \")\";} + : ID '=' e1=e {$result = "(" + $ID.text + "=" + $e1.result + ")";} | ID {$result = $ID.text;} - | e1=e '+' e2=e {$result = \"(\" + $e1.result + \"+\" + $e2.result + \")\";} + | e1=e '+' e2=e {$result = "(" + $e1.result + "+" + $e2.result + ")";} ; ID : 'a'..'z'+ ; INT : '0'..'9'+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActions.st index 37aca6e6a..88ee82c58 100644 --- 
a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActions.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActions.st @@ -1,6 +1,6 @@ grammar ; s : e {}; -e returns [int v, list ignored] +e returns [int v, ignored] : a=e '*' b=e {$v = $a.v * $b.v;} | a=e '+' b=e {$v = $a.v + $b.v;} | INT {$v = $INT.int;} diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList1.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList1.st new file mode 100644 index 000000000..9ac58c4a7 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList1.st @@ -0,0 +1,13 @@ +grammar ; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr (',' b+=expr)* '>>' c=expr #Send + | ID #JustId //semantic check on modifiers +; + +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; + +WS : [ \t\n]+ -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList2.st new file mode 100644 index 000000000..eebe5a5ca --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/ReturnValueAndActionsList2.st @@ -0,0 +1,12 @@ +grammar ; +s @after {} : expr EOF; +expr: + a=expr '*' a=expr #Factor + | b+=expr ',' b+=expr #Comma + | b+=expr '>>' c=expr #Send + | ID #JustId //semantic check on modifiers + ; +ID : ('a'..'z'|'A'..'Z'|'_') + ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* +; +WS : [ \t\n]+ -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st new file mode 100644 index 000000000..aac2fc7f1 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st @@ -0,0 +1,7 @@ +grammar ; +s @after {} : a ; +a : a ID {false}?\ + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/TernaryExprExplicitAssociativity.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/TernaryExprExplicitAssociativity.st new file mode 100644 index 000000000..d893ed739 --- /dev/null +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/TernaryExprExplicitAssociativity.st @@ -0,0 +1,10 @@ +grammar ; +s @after {} : e EOF; // must indicate EOF can follow or 'a\' won't match +e :\ e '*' e + |\ e '+' e + |\ e '?' e ':' e + |\ e '=' e + | ID + ; +ID : 'a'..'z'+ ; +WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/WhitespaceInfluence.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/WhitespaceInfluence.st index 6551c1f7d..ba95b894d 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/WhitespaceInfluence.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/WhitespaceInfluence.st @@ -46,4 +46,4 @@ DATE : '\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT ( SQ_STRING : '\'' ('\'\'' | ~'\'')* '\''; DQ_STRING : '\"' ('\\\"' | ~'\"')* '\"'; WS : [ \t\n\r]+ -> skip ; -COMMENTS : ('/*' .*? '*/' | '//' ~'\n'* '\n' ) -> skip;\n"; +COMMENTS : ('/*' .*? 
'*/' | '//' ~'\n'* '\n' ) -> skip; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st index 40dcddc4f..71f8cbdc7 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ListLabelForClosureContext.st @@ -1,7 +1,7 @@ grammar ; ifStatement @after { - + } : 'if' expression ( ( 'then' diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index cb5ba186b..23c83fe1e 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -99,9 +99,15 @@ writeln(s) ::= <);>> write(s) ::= <);>> -assert(s) ::= <);>> +Assert(s) ::= <);>> -DeclareLocal(s) ::= "Object ;" +Cast(t,v) ::= "(())" + +Concat(a,b) ::= "
" + +DeclareLocal(s,v) ::= "Object = ;" + +AssignLocal(s,v) ::= " = ;" InitMember(n,v) ::= <%this. = ;%> @@ -123,6 +129,8 @@ DumpDFA() ::= "this.dumpDFA();" Pass() ::= "" +StringList() ::= "List\" + BuildParseTrees() ::= "this.buildParseTrees = true;" BailErrorStrategy() ::= <%setErrorHandler(new BailErrorStrategy());%> diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java index 7f3d38e9f..6bac42fa3 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java @@ -99,63 +99,63 @@ public class TestLeftRecursion extends BaseTest { @Test public void testTernaryExpr_1() throws Exception { String found = testTernaryExpr("a"); - assertEquals("(s (e a) )", found); + assertEquals("(s (e a) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_2() throws Exception { String found = testTernaryExpr("a+b"); - assertEquals("(s (e (e a) + (e b)) )", found); + assertEquals("(s (e (e a) + (e b)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_3() throws Exception { String found = testTernaryExpr("a*b"); - assertEquals("(s (e (e a) * (e b)) )", found); + assertEquals("(s (e (e a) * (e b)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_4() throws Exception { String found = testTernaryExpr("a?b:c"); - assertEquals("(s (e (e a) ? (e b) : (e c)) )", found); + assertEquals("(s (e (e a) ? (e b) : (e c)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_5() throws Exception { String found = testTernaryExpr("a=b=c"); - assertEquals("(s (e (e a) = (e (e b) = (e c))) )", found); + assertEquals("(s (e (e a) = (e (e b) = (e c))) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_6() throws Exception { String found = testTernaryExpr("a?b+c:d"); - assertEquals("(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", found); + assertEquals("(s (e (e a) ? (e (e b) + (e c)) : (e d)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_7() throws Exception { String found = testTernaryExpr("a?b=c:d"); - assertEquals("(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", found); + assertEquals("(s (e (e a) ? (e (e b) = (e c)) : (e d)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_8() throws Exception { String found = testTernaryExpr("a? b?c:d : e"); - assertEquals("(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", found); + assertEquals("(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testTernaryExpr_9() throws Exception { String found = testTernaryExpr("a?b: c?d:e"); - assertEquals("(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", found); + assertEquals("(s (e (e a) ? (e b) : (e (e c) ? 
(e d) : (e e))) )\n", found); assertNull(this.stderrDuringParse); } @@ -179,49 +179,49 @@ public class TestLeftRecursion extends BaseTest { @Test public void testExpressions_1() throws Exception { String found = testExpressions("a"); - assertEquals("(s (e a) )", found); + assertEquals("(s (e a) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testExpressions_2() throws Exception { String found = testExpressions("1"); - assertEquals("(s (e 1) )", found); + assertEquals("(s (e 1) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testExpressions_3() throws Exception { String found = testExpressions("a-1"); - assertEquals("(s (e (e a) - (e 1)) )", found); + assertEquals("(s (e (e a) - (e 1)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testExpressions_4() throws Exception { String found = testExpressions("a.b"); - assertEquals("(s (e (e a) . b) )", found); + assertEquals("(s (e (e a) . b) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testExpressions_5() throws Exception { String found = testExpressions("a.this"); - assertEquals("(s (e (e a) . this) )", found); + assertEquals("(s (e (e a) . this) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testExpressions_6() throws Exception { String found = testExpressions("-a"); - assertEquals("(s (e - (e a)) )", found); + assertEquals("(s (e - (e a)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testExpressions_7() throws Exception { String found = testExpressions("-a+b"); - assertEquals("(s (e (e - (e a)) + (e b)) )", found); + assertEquals("(s (e (e - (e a)) + (e b)) )\n", found); assertNull(this.stderrDuringParse); } @@ -288,84 +288,84 @@ public class TestLeftRecursion extends BaseTest { @Test public void testJavaExpressions_1() throws Exception { String found = testJavaExpressions("a|b&c"); - assertEquals("(s (e (e a) | (e (e b) & (e c))) )", found); + assertEquals("(s (e (e a) | (e (e b) & (e c))) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_2() throws Exception { String found = testJavaExpressions("(a|b)&c"); - assertEquals("(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", found); + assertEquals("(s (e (e ( (e (e a) | (e b)) )) & (e c)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_3() throws Exception { String found = testJavaExpressions("a > b"); - assertEquals("(s (e (e a) > (e b)) )", found); + assertEquals("(s (e (e a) > (e b)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_4() throws Exception { String found = testJavaExpressions("a >> b"); - assertEquals("(s (e (e a) >> (e b)) )", found); + assertEquals("(s (e (e a) >> (e b)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_5() throws Exception { String found = testJavaExpressions("a=b=c"); - assertEquals("(s (e (e a) = (e (e b) = (e c))) )", found); + assertEquals("(s (e (e a) = (e (e b) = (e c))) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_6() throws Exception { String found = testJavaExpressions("a^b^c"); - assertEquals("(s (e (e a) ^ (e (e b) ^ (e c))) )", found); + assertEquals("(s (e (e a) ^ (e (e b) ^ (e c))) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_7() throws Exception { String found = testJavaExpressions("(T)x"); - assertEquals("(s (e ( (type T) ) (e x)) )", found); + assertEquals("(s (e ( 
(type T) ) (e x)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_8() throws Exception { String found = testJavaExpressions("new A().b"); - assertEquals("(s (e (e new (type A) ( )) . b) )", found); + assertEquals("(s (e (e new (type A) ( )) . b) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_9() throws Exception { String found = testJavaExpressions("(T)t.f()"); - assertEquals("(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", found); + assertEquals("(s (e (e ( (type T) ) (e (e t) . f)) ( )) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_10() throws Exception { String found = testJavaExpressions("a.f(x)==T.c"); - assertEquals("(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", found); + assertEquals("(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_11() throws Exception { String found = testJavaExpressions("a.f().g(x,1)"); - assertEquals("(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )", found); + assertEquals("(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_12() throws Exception { String found = testJavaExpressions("new T[((n-1) * x) + 1]"); - assertEquals("(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )", found); + assertEquals("(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n", found); assertNull(this.stderrDuringParse); } @@ -390,77 +390,77 @@ public class TestLeftRecursion extends BaseTest { @Test public void testDeclarations_1() throws Exception { String found = testDeclarations("a"); - assertEquals("(s (declarator a) )", found); + assertEquals("(s (declarator a) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_2() throws Exception { String found = testDeclarations("*a"); - assertEquals("(s (declarator * (declarator a)) )", found); + assertEquals("(s (declarator * (declarator a)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_3() throws Exception { String found = testDeclarations("**a"); - assertEquals("(s (declarator * (declarator * (declarator a))) )", found); + assertEquals("(s (declarator * (declarator * (declarator a))) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_4() throws Exception { String found = testDeclarations("a[3]"); - assertEquals("(s (declarator (declarator a) [ (e 3) ]) )", found); + assertEquals("(s (declarator (declarator a) [ (e 3) ]) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_5() throws Exception { String found = testDeclarations("b[]"); - assertEquals("(s (declarator (declarator b) [ ]) )", found); + assertEquals("(s (declarator (declarator b) [ ]) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_6() throws Exception { String found = testDeclarations("(a)"); - assertEquals("(s (declarator ( (declarator a) )) )", found); + assertEquals("(s (declarator ( (declarator a) )) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_7() throws Exception { String found = testDeclarations("a[]()"); - assertEquals("(s (declarator (declarator (declarator a) [ ]) ( )) )", found); + 
assertEquals("(s (declarator (declarator (declarator a) [ ]) ( )) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_8() throws Exception { String found = testDeclarations("a[][]"); - assertEquals("(s (declarator (declarator (declarator a) [ ]) [ ]) )", found); + assertEquals("(s (declarator (declarator (declarator a) [ ]) [ ]) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_9() throws Exception { String found = testDeclarations("*a[]"); - assertEquals("(s (declarator * (declarator (declarator a) [ ])) )", found); + assertEquals("(s (declarator * (declarator (declarator a) [ ])) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testDeclarations_10() throws Exception { String found = testDeclarations("(*a)[]"); - assertEquals("(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )", found); + assertEquals("(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )\n", found); assertNull(this.stderrDuringParse); } String testReturnValueAndActions(String input) throws Exception { String grammar = "grammar T;\n" + "s : e {System.out.println($e.v);}; \n" + - "e returns [int v, list ignored]\n" + + "e returns [int v, List ignored]\n" + " : a=e '*' b=e {$v = $a.v * $b.v;}\n" + " | a=e '+' b=e {$v = $a.v + $b.v;}\n" + " | INT {$v = $INT.int;}\n" + @@ -474,28 +474,28 @@ public class TestLeftRecursion extends BaseTest { @Test public void testReturnValueAndActions_1() throws Exception { String found = testReturnValueAndActions("4"); - assertEquals("4", found); + assertEquals("4\n", found); assertNull(this.stderrDuringParse); } @Test public void testReturnValueAndActions_2() throws Exception { String found = testReturnValueAndActions("1+2"); - assertEquals("3", found); + assertEquals("3\n", found); assertNull(this.stderrDuringParse); } @Test public void testReturnValueAndActions_3() throws Exception { String found = testReturnValueAndActions("1+2*3"); - assertEquals("7", found); + assertEquals("7\n", found); assertNull(this.stderrDuringParse); } @Test public void testReturnValueAndActions_4() throws Exception { String found = testReturnValueAndActions("(1+2)*3"); - assertEquals("9", found); + assertEquals("9\n", found); assertNull(this.stderrDuringParse); } @@ -514,21 +514,21 @@ public class TestLeftRecursion extends BaseTest { @Test public void testLabelsOnOpSubrule_1() throws Exception { String found = testLabelsOnOpSubrule("4"); - assertEquals("(s (e 4))", found); + assertEquals("(s (e 4))\n", found); assertNull(this.stderrDuringParse); } @Test public void testLabelsOnOpSubrule_2() throws Exception { String found = testLabelsOnOpSubrule("1*2/3"); - assertEquals("(s (e (e (e 1) * (e 2)) / (e 3)))", found); + assertEquals("(s (e (e (e 1) * (e 2)) / (e 3)))\n", found); assertNull(this.stderrDuringParse); } @Test public void testLabelsOnOpSubrule_3() throws Exception { String found = testLabelsOnOpSubrule("(1/2)*3"); - assertEquals("(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", found); + assertEquals("(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n", found); assertNull(this.stderrDuringParse); } @@ -553,28 +553,28 @@ public class TestLeftRecursion extends BaseTest { @Test public void testReturnValueAndActionsAndLabels_1() throws Exception { String found = testReturnValueAndActionsAndLabels("4"); - assertEquals("4", found); + assertEquals("4\n", found); assertNull(this.stderrDuringParse); } @Test public void testReturnValueAndActionsAndLabels_2() throws Exception { String found = 
testReturnValueAndActionsAndLabels("1+2"); - assertEquals("3", found); + assertEquals("3\n", found); assertNull(this.stderrDuringParse); } @Test public void testReturnValueAndActionsAndLabels_3() throws Exception { String found = testReturnValueAndActionsAndLabels("1+2*3"); - assertEquals("7", found); + assertEquals("7\n", found); assertNull(this.stderrDuringParse); } @Test public void testReturnValueAndActionsAndLabels_4() throws Exception { String found = testReturnValueAndActionsAndLabels("i++*3"); - assertEquals("12", found); + assertEquals("12\n", found); assertNull(this.stderrDuringParse); } @@ -582,13 +582,13 @@ public class TestLeftRecursion extends BaseTest { String grammar = "grammar T;\n" + "s : e {System.out.println($e.v);}; \n" + "e returns [int v]\n" + - " : e '*' e {$v = $ctx.e(0).v * $ctx.e(1).v;} # binary\n" + - " | e '+' e {$v = $ctx.e(0).v + $ctx.e(1).v;} # binary\n" + + " : e '*' e {$v = ((BinaryContext)$ctx).e(0).v * ((BinaryContext)$ctx).e(1).v;} # binary\n" + + " | e '+' e {$v = ((BinaryContext)$ctx).e(0).v + ((BinaryContext)$ctx).e(1).v;} # binary\n" + " | INT {$v = $INT.int;} # anInt\n" + " | '(' e ')' {$v = $e.v;} # parens\n" + - " | left=e INC {console.assert($ctx.INC() !== null);;$v = $left.v + 1;} # unary\n" + - " | left=e DEC {console.assert($ctx.DEC() !== null);;$v = $left.v - 1;} # unary\n" + - " | ID {$v = 3} # anID\n" + + " | left=e INC {assert(((UnaryContext)$ctx).INC() != null);$v = $left.v + 1;} # unary\n" + + " | left=e DEC {assert(((UnaryContext)$ctx).DEC() != null);$v = $left.v - 1;} # unary\n" + + " | ID {$v = 3;} # anID\n" + " ; \n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+ ;\n" + @@ -601,28 +601,28 @@ public class TestLeftRecursion extends BaseTest { @Test public void testMultipleAlternativesWithCommonLabel_1() throws Exception { String found = testMultipleAlternativesWithCommonLabel("4"); - assertEquals("4", found); + assertEquals("4\n", found); assertNull(this.stderrDuringParse); } @Test public void testMultipleAlternativesWithCommonLabel_2() throws Exception { String found = testMultipleAlternativesWithCommonLabel("1+2"); - assertEquals("3", found); + assertEquals("3\n", found); assertNull(this.stderrDuringParse); } @Test public void testMultipleAlternativesWithCommonLabel_3() throws Exception { String found = testMultipleAlternativesWithCommonLabel("1+2*3"); - assertEquals("7", found); + assertEquals("7\n", found); assertNull(this.stderrDuringParse); } @Test public void testMultipleAlternativesWithCommonLabel_4() throws Exception { String found = testMultipleAlternativesWithCommonLabel("i++*3"); - assertEquals("12", found); + assertEquals("12\n", found); assertNull(this.stderrDuringParse); } @@ -630,9 +630,9 @@ public class TestLeftRecursion extends BaseTest { String grammar = "grammar T;\n" + "s : e {System.out.println($e.result);} ;\n" + "e returns [String result]\n" + - " : ID '=' e1=e {$result = \\\"(\\\" + $ID.text + \\\"=\\\" + $e1.result + \\\")\\\";}\n" + + " : ID '=' e1=e {$result = \"(\" + $ID.text + \"=\" + $e1.result + \")\";}\n" + " | ID {$result = $ID.text;}\n" + - " | e1=e '+' e2=e {$result = \\\"(\\\" + $e1.result + \\\"+\\\" + $e2.result + \\\")\\\";}\n" + + " | e1=e '+' e2=e {$result = \"(\" + $e1.result + \"+\" + $e2.result + \")\";}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+ ;\n" + @@ -643,21 +643,21 @@ public class TestLeftRecursion extends BaseTest { @Test public void testPrefixOpWithActionAndLabel_1() throws Exception { String found = testPrefixOpWithActionAndLabel("a"); - assertEquals("a", found); + 
assertEquals("a\n", found); assertNull(this.stderrDuringParse); } @Test public void testPrefixOpWithActionAndLabel_2() throws Exception { String found = testPrefixOpWithActionAndLabel("a+b"); - assertEquals("(a+b)", found); + assertEquals("(a+b)\n", found); assertNull(this.stderrDuringParse); } @Test public void testPrefixOpWithActionAndLabel_3() throws Exception { String found = testPrefixOpWithActionAndLabel("a=b+c"); - assertEquals("((a=b)+c)", found); + assertEquals("((a=b)+c)\n", found); assertNull(this.stderrDuringParse); } @@ -770,7 +770,7 @@ public class TestLeftRecursion extends BaseTest { "SQ_STRING : '\\'' ('\\'\\'' | ~'\\'')* '\\'';\n" + "DQ_STRING : '\\\"' ('\\\\\"' | ~'\\\"')* '\\\"';\n" + "WS : [ \\t\\n\\r]+ -> skip ;\n" + - "COMMENTS : ('/*' .*? '*/' | '//' ~'\\n'* '\\n' ) -> skip;\\n\";"; + "COMMENTS : ('/*' .*? '*/' | '//' ~'\\n'* '\\n' ) -> skip;"; return execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", input, false); } @@ -801,5 +801,252 @@ public class TestLeftRecursion extends BaseTest { assertNull(this.stderrDuringParse); } + String testMultipleActions(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + + "e : a=e op=('*'|'/') b=e {}{}\n" + + " | INT {}{}\n" + + " | '(' x=e ')' {}{}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testMultipleActions_1() throws Exception { + String found = testMultipleActions("4"); + assertEquals("(s (e 4))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleActions_2() throws Exception { + String found = testMultipleActions("1*2/3"); + assertEquals("(s (e (e (e 1) * (e 2)) / (e 3)))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleActions_3() throws Exception { + String found = testMultipleActions("(1/2)*3"); + assertEquals("(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n", found); + assertNull(this.stderrDuringParse); + } + + String testMultipleActionsPredicatesOptions(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + + "e : a=e op=('*'|'/') b=e {}{true}?\n" + + " | a=e op=('+'|'-') b=e {}{true}?\n" + + " | INT {}{}\n" + + " | '(' x=e ')' {}{}\n" + + " ;\n" + + "INT : '0'..'9'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testMultipleActionsPredicatesOptions_1() throws Exception { + String found = testMultipleActionsPredicatesOptions("4"); + assertEquals("(s (e 4))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleActionsPredicatesOptions_2() throws Exception { + String found = testMultipleActionsPredicatesOptions("1*2/3"); + assertEquals("(s (e (e (e 1) * (e 2)) / (e 3)))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testMultipleActionsPredicatesOptions_3() throws Exception { + String found = testMultipleActionsPredicatesOptions("(1/2)*3"); + assertEquals("(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testSemPredFailOption() throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + + "a : a ID {false}?\n" + + " | ID\n" + + " ;\n" + 
+ "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x y z", false); + assertEquals("(s (a (a x) y z))\n", found); + assertEquals("line 1:4 rule a custom message\n", this.stderrDuringParse); + } + + String testTernaryExprExplicitAssociativity(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : e EOF; // must indicate EOF can follow or 'a' won't match\n" + + "e : e '*' e\n" + + " | e '+' e\n" + + " | e '?' e ':' e\n" + + " | e '=' e\n" + + " | ID\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n" + + "WS : (' '|'\\n') -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testTernaryExprExplicitAssociativity_1() throws Exception { + String found = testTernaryExprExplicitAssociativity("a"); + assertEquals("(s (e a) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_2() throws Exception { + String found = testTernaryExprExplicitAssociativity("a+b"); + assertEquals("(s (e (e a) + (e b)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_3() throws Exception { + String found = testTernaryExprExplicitAssociativity("a*b"); + assertEquals("(s (e (e a) * (e b)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_4() throws Exception { + String found = testTernaryExprExplicitAssociativity("a?b:c"); + assertEquals("(s (e (e a) ? (e b) : (e c)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_5() throws Exception { + String found = testTernaryExprExplicitAssociativity("a=b=c"); + assertEquals("(s (e (e a) = (e (e b) = (e c))) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_6() throws Exception { + String found = testTernaryExprExplicitAssociativity("a?b+c:d"); + assertEquals("(s (e (e a) ? (e (e b) + (e c)) : (e d)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_7() throws Exception { + String found = testTernaryExprExplicitAssociativity("a?b=c:d"); + assertEquals("(s (e (e a) ? (e (e b) = (e c)) : (e d)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_8() throws Exception { + String found = testTernaryExprExplicitAssociativity("a? b?c:d : e"); + assertEquals("(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testTernaryExprExplicitAssociativity_9() throws Exception { + String found = testTernaryExprExplicitAssociativity("a?b: c?d:e"); + assertEquals("(s (e (e a) ? (e b) : (e (e c) ? 
(e d) : (e e))) )\n", found); + assertNull(this.stderrDuringParse); + } + + String testReturnValueAndActionsList1(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + + "expr:\n" + + " a=expr '*' a=expr #Factor\n" + + " | b+=expr (',' b+=expr)* '>>' c=expr #Send\n" + + " | ID #JustId //semantic check on modifiers\n" + + ";\n" + + "\n" + + "ID : ('a'..'z'|'A'..'Z'|'_')\n" + + " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + + ";\n" + + "\n" + + "WS : [ \\t\\n]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testReturnValueAndActionsList1_1() throws Exception { + String found = testReturnValueAndActionsList1("a*b"); + assertEquals("(s (expr (expr a) * (expr b)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsList1_2() throws Exception { + String found = testReturnValueAndActionsList1("a,c>>x"); + assertEquals("(s (expr (expr a) , (expr c) >> (expr x)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsList1_3() throws Exception { + String found = testReturnValueAndActionsList1("x"); + assertEquals("(s (expr x) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsList1_4() throws Exception { + String found = testReturnValueAndActionsList1("a*b,c,x*y>>r"); + assertEquals("(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) )\n", found); + assertNull(this.stderrDuringParse); + } + + String testReturnValueAndActionsList2(String input) throws Exception { + String grammar = "grammar T;\n" + + "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + + "expr:\n" + + " a=expr '*' a=expr #Factor\n" + + " | b+=expr ',' b+=expr #Comma\n" + + " | b+=expr '>>' c=expr #Send\n" + + " | ID #JustId //semantic check on modifiers\n" + + " ;\n" + + "ID : ('a'..'z'|'A'..'Z'|'_')\n" + + " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + + ";\n" + + "WS : [ \\t\\n]+ -> skip ;"; + return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false); + } + + @Test + public void testReturnValueAndActionsList2_1() throws Exception { + String found = testReturnValueAndActionsList2("a*b"); + assertEquals("(s (expr (expr a) * (expr b)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsList2_2() throws Exception { + String found = testReturnValueAndActionsList2("a,c>>x"); + assertEquals("(s (expr (expr (expr a) , (expr c)) >> (expr x)) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsList2_3() throws Exception { + String found = testReturnValueAndActionsList2("x"); + assertEquals("(s (expr x) )\n", found); + assertNull(this.stderrDuringParse); + } + + @Test + public void testReturnValueAndActionsList2_4() throws Exception { + String found = testReturnValueAndActionsList2("a*b,c,x*y>>r"); + assertEquals("(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) )\n", found); + assertNull(this.stderrDuringParse); + } + } \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java b/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java index a14c0f8a3..f13b99bea 100644 --- a/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java +++ 
b/tool/test/org/antlr/v4/test/tool/TestLeftRecursion.java @@ -33,520 +33,10 @@ package org.antlr.v4.test.tool; import org.antlr.v4.tool.ErrorType; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - /** */ public class TestLeftRecursion extends BaseTest { protected boolean debug = false; - @Test public void testSimple() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a ID\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x", debug); - String expecting = "(s (a x))\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y", debug); - expecting = "(s (a (a x) y))\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y z", debug); - expecting = "(s (a (a (a x) y) z))\n"; - assertEquals(expecting, found); - } - - /** - * This is a regression test for "Support direct calls to left-recursive - * rules". - * https://github.com/antlr/antlr4/issues/161 - */ - @Test public void testDirectCallToLeftRecursiveRule() throws Exception { - String grammar = - "grammar T;\n" + - "a @after {System.out.println($ctx.toStringTree(this));} : a ID\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - String expecting = "(a x)\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x y", debug); - expecting = "(a (a x) y)\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x y z", debug); - expecting = "(a (a (a x) y) z)\n"; - assertEquals(expecting, found); - } - - @Test public void testSemPred() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a {true}? ID\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y z", debug); - String expecting = "(s (a (a (a x) y) z))\n"; - assertEquals(expecting, found); - } - - @Test - public void testSemPredFailOption() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a ID {false}?\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y z", debug); - String expecting = "(s (a (a x) y z))\n"; - assertEquals(expecting, found); - assertEquals("line 1:4 rule a custom message\n", stderrDuringParse); - } - - @Test public void testTernaryExpr() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow or 'a' won't match - "e : e '*' e" + - " | e '+' e" + - " | e '?' e ':' e" + - " | e '=' e" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (e a) )", - "a+b", "(s (e (e a) + (e b)) )", - "a*b", "(s (e (e a) * (e b)) )", - "a?b:c", "(s (e (e a) ? 
(e b) : (e c)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", - "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", - "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", - "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#542 "First alternative cannot - * be right-associative". - * https://github.com/antlr/antlr4/issues/542 - */ - @Test public void testTernaryExprExplicitAssociativity() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow or 'a' won't match - "e : e '*' e" + - " | e '+' e" + - " | e '?' e ':' e" + - " | e '=' e" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (e a) )", - "a+b", "(s (e (e a) + (e b)) )", - "a*b", "(s (e (e a) * (e b)) )", - "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", - "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", - "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", - "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testExpressions() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow - "e : e '.' ID\n" + - " | e '.' 'this'\n" + - " | '-' e\n" + - " | e '*' e\n" + - " | e ('+'|'-') e\n" + - " | INT\n" + - " | ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (e a) )", - "1", "(s (e 1) )", - "a-1", "(s (e (e a) - (e 1)) )", - "a.b", "(s (e (e a) . b) )", - "a.this", "(s (e (e a) . this) )", - "-a", "(s (e - (e a)) )", - "-a+b", "(s (e (e - (e a)) + (e b)) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testJavaExpressions() throws Exception { - // Generates about 7k in bytecodes for generated e_ rule; - // Well within the 64k method limit. e_primary compiles - // to about 2k in bytecodes. - // this is simplified from real java - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow - "expressionList\n" + - " : e (',' e)*\n" + - " ;\n" + - "e : '(' e ')'\n" + - " | 'this' \n" + - " | 'super'\n" + - " | INT\n" + - " | ID\n" + - " | type '.' 'class'\n" + - " | e '.' ID\n" + - " | e '.' 'this'\n" + - " | e '.' 'super' '(' expressionList? ')'\n" + - " | e '.' 'new' ID '(' expressionList? ')'\n" + - " | 'new' type ( '(' expressionList? ')' | ('[' e ']')+)\n" + - " | e '[' e ']'\n" + - " | '(' type ')' e\n" + - " | e ('++' | '--')\n" + - " | e '(' expressionList? ')'\n" + - " | ('+'|'-'|'++'|'--') e\n" + - " | ('~'|'!') e\n" + - " | e ('*'|'/'|'%') e\n" + - " | e ('+'|'-') e\n" + - " | e ('<<' | '>>>' | '>>') e\n" + - " | e ('<=' | '>=' | '>' | '<') e\n" + - " | e 'instanceof' e\n" + - " | e ('==' | '!=') e\n" + - " | e '&' e\n" + - " | e '^' e\n" + - " | e '|' e\n" + - " | e '&&' e\n" + - " | e '||' e\n" + - " | e '?' 
e ':' e\n" + - " |" + - " e ('='\n" + - " |'+='\n" + - " |'-='\n" + - " |'*='\n" + - " |'/='\n" + - " |'&='\n" + - " |'|='\n" + - " |'^='\n" + - " |'>>='\n" + - " |'>>>='\n" + - " |'<<='\n" + - " |'%=') e\n" + - " ;\n" + - "type: ID \n" + - " | ID '[' ']'\n" + - " | 'int'\n" + - " | 'int' '[' ']' \n" + - " ;\n" + - "ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a|b&c", "(s (e (e a) | (e (e b) & (e c))) )", - "(a|b)&c", "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", - "a > b", "(s (e (e a) > (e b)) )", - "a >> b", "(s (e (e a) >> (e b)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )", - "(T)x", "(s (e ( (type T) ) (e x)) )", - "new A().b", "(s (e (e new (type A) ( )) . b) )", - "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", - "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", - "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )", - "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testDeclarations() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : declarator EOF ;\n" + // must indicate EOF can follow - "declarator\n" + - " : declarator '[' e ']'\n" + - " | declarator '[' ']'\n" + - " | declarator '(' ')'\n" + - " | '*' declarator\n" + // binds less tight than suffixes - " | '(' declarator ')'\n" + - " | ID\n" + - " ;\n" + - "e : INT ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (declarator a) )", - "*a", "(s (declarator * (declarator a)) )", - "**a", "(s (declarator * (declarator * (declarator a))) )", - "a[3]", "(s (declarator (declarator a) [ (e 3) ]) )", - "b[]", "(s (declarator (declarator b) [ ]) )", - "(a)", "(s (declarator ( (declarator a) )) )", - "a[]()", "(s (declarator (declarator (declarator a) [ ]) ( )) )", - "a[][]", "(s (declarator (declarator (declarator a) [ ]) [ ]) )", - "*a[]", "(s (declarator * (declarator (declarator a) [ ])) )", - "(*a)[]", "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testReturnValueAndActions() throws Exception { - String grammar = - "grammar T;\n" + - "s : e {System.out.println($e.v);} ;\n" + - "e returns [int v, List ignored]\n" + - " : a=e '*' b=e {$v = $a.v * $b.v;}\n" + - " | a=e '+' b=e {$v = $a.v + $b.v;}\n" + - " | INT {$v = $INT.int;}\n" + - " | '(' x=e ')' {$v = $x.v;}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "4", - "1+2", "3", - "1+2*3", "7", - "(1+2)*3", "9", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#677 "labels not working in - * grammar file". - * https://github.com/antlr/antlr4/issues/677 - * - *
<p>This test treats {@code ,} and {@code >>} as part of a single compound - * operator (similar to a ternary operator).</p>
- */ - @Test public void testReturnValueAndActionsList1() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + - "expr:\n" + - " a=expr '*' a=expr #Factor\n" + - " | b+=expr (',' b+=expr)* '>>' c=expr #Send\n" + - " | ID #JustId //semantic check on modifiers\n" + - ";\n" + - "\n" + - "ID : ('a'..'z'|'A'..'Z'|'_')\n" + - " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + - ";\n" + - "\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String[] tests = { - "a*b", "(s (expr (expr a) * (expr b)) )", - "a,c>>x", "(s (expr (expr a) , (expr c) >> (expr x)) )", - "x", "(s (expr x) )", - "a*b,c,x*y>>r", "(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) )", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#677 "labels not working in - * grammar file". - * https://github.com/antlr/antlr4/issues/677 - * - *
<p>This test treats the {@code ,} and {@code >>} operators separately.</p>
- */ - @Test public void testReturnValueAndActionsList2() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + - "expr:\n" + - " a=expr '*' a=expr #Factor\n" + - " | b+=expr ',' b+=expr #Comma\n" + - " | b+=expr '>>' c=expr #Send\n" + - " | ID #JustId //semantic check on modifiers\n" + - ";\n" + - "\n" + - "ID : ('a'..'z'|'A'..'Z'|'_')\n" + - " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + - ";\n" + - "\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String[] tests = { - "a*b", "(s (expr (expr a) * (expr b)) )", - "a,c>>x", "(s (expr (expr (expr a) , (expr c)) >> (expr x)) )", - "x", "(s (expr x) )", - "a*b,c,x*y>>r", "(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testLabelsOnOpSubrule() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + - "e : a=e op=('*'|'/') b=e {}\n" + - " | INT {}\n" + - " | '(' x=e ')' {}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testReturnValueAndActionsAndLabels() throws Exception { - String grammar = - "grammar T;\n" + - "s : q=e {System.out.println($e.v);} ;\n" + - "\n" + - "e returns [int v]\n" + - " : a=e op='*' b=e {$v = $a.v * $b.v;} # mult\n" + - " | a=e '+' b=e {$v = $a.v + $b.v;} # add\n" + - " | INT {$v = $INT.int;} # anInt\n" + - " | '(' x=e ')' {$v = $x.v;} # parens\n" + - " | x=e '++' {$v = $x.v+1;} # inc\n" + - " | e '--' # dec\n" + - " | ID {$v = 3;} # anID\n" + - " ; \n" + - "\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "4", - "1+2", "3", - "1+2*3", "7", - "i++*3", "12", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#433 "Not all context accessor - * methods are generated when an alternative rule label is used for multiple - * alternatives". 
- * https://github.com/antlr/antlr4/issues/433 - */ - @Test public void testMultipleAlternativesWithCommonLabel() throws Exception { - String grammar = - "grammar T;\n" + - "s : e {System.out.println($e.v);} ;\n" + - "\n" + - "e returns [int v]\n" + - " : e '*' e {$v = ((BinaryContext)$ctx).e(0).v * ((BinaryContext)$ctx).e(1).v;} # binary\n" + - " | e '+' e {$v = ((BinaryContext)$ctx).e(0).v + ((BinaryContext)$ctx).e(1).v;} # binary\n" + - " | INT {$v = $INT.int;} # anInt\n" + - " | '(' e ')' {$v = $e.v;} # parens\n" + - " | left=e INC {assert(((UnaryContext)$ctx).INC() != null); $v = $left.v + 1;} # unary\n" + - " | left=e DEC {assert(((UnaryContext)$ctx).DEC() != null); $v = $left.v - 1;} # unary\n" + - " | ID {$v = 3;} # anID\n" + - " ; \n" + - "\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "INC : '++' ;\n" + - "DEC : '--' ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "4", - "1+2", "3", - "1+2*3", "7", - "i++*3", "12", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testPrefixOpWithActionAndLabel() throws Exception { - String grammar = - "grammar T;\n" + - "s : e {System.out.println($e.result);} ;\n" + - "\n" + - "e returns [String result]\n" + - " : ID '=' e1=e { $result = \"(\" + $ID.getText() + \"=\" + $e1.result + \")\"; }\n" + - " | ID { $result = $ID.getText(); }\n" + - " | e1=e '+' e2=e { $result = \"(\" + $e1.result + \"+\" + $e2.result + \")\"; }\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "a", - "a+b", "(a+b)", - "a=b+c", "((a=b)+c)", - }; - runTests(grammar, tests, "s"); - } - - @Test - public void testAmbigLR() throws Exception { - String grammar = - "grammar Expr;\n" + - "prog: stat ;\n" + - "stat: expr NEWLINE # printExpr\n" + - " | ID '=' expr NEWLINE # assign\n" + - " | NEWLINE # blank\n" + - " ;\n" + - "expr: expr ('*'|'/') expr # MulDiv\n" + - " | expr ('+'|'-') expr # AddSub\n" + - " | INT # int\n" + - " | ID # id\n" + - " | '(' expr ')' # parens\n" + - " ;\n" + - "\n" + - "MUL : '*' ; // assigns token name to '*' used above in grammar\n" + - "DIV : '/' ;\n" + - "ADD : '+' ;\n" + - "SUB : '-' ;\n" + - "ID : [a-zA-Z]+ ; // match identifiers\n" + - "INT : [0-9]+ ; // match integers\n" + - "NEWLINE:'\\r'? '\\n' ; // return newlines to parser (is end-statement signal)\n" + - "WS : [ \\t]+ -> skip ; // toss out whitespace\n"; - String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "1\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a = 5\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "b = 6\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a+b*2\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "(1+2)*3\n", true); - assertNull(stderrDuringParse); - } - @Test public void testCheckForNonLeftRecursiveRule() throws Exception { String grammar = "grammar T;\n" + @@ -574,159 +64,4 @@ public class TestLeftRecursion extends BaseTest { testErrors(new String[] { grammar, expected }, false); } - /** - * This is a regression test for #239 "recoursive parser using implicit - * tokens ignore white space lexer rule". 
- * https://github.com/antlr/antlr4/issues/239 - */ - @Test public void testWhitespaceInfluence() { - String grammar = - "grammar Expr;\n" + - "prog : expression EOF;\n" + - "expression\n" + - " : ID '(' expression (',' expression)* ')' # doFunction\n" + - " | '(' expression ')' # doParenthesis\n" + - " | '!' expression # doNot\n" + - " | '-' expression # doNegate\n" + - " | '+' expression # doPositiv\n" + - " | expression '^' expression # doPower\n" + - " | expression '*' expression # doMultipy\n" + - " | expression '/' expression # doDivide\n" + - " | expression '%' expression # doModulo\n" + - " | expression '-' expression # doMinus\n" + - " | expression '+' expression # doPlus\n" + - " | expression '=' expression # doEqual\n" + - " | expression '!=' expression # doNotEqual\n" + - " | expression '>' expression # doGreather\n" + - " | expression '>=' expression # doGreatherEqual\n" + - " | expression '<' expression # doLesser\n" + - " | expression '<=' expression # doLesserEqual\n" + - " | expression K_IN '(' expression (',' expression)* ')' # doIn\n" + - " | expression ( '&' | K_AND) expression # doAnd\n" + - " | expression ( '|' | K_OR) expression # doOr\n" + - " | '[' expression (',' expression)* ']' # newArray\n" + - " | K_TRUE # newTrueBoolean\n" + - " | K_FALSE # newFalseBoolean\n" + - " | NUMBER # newNumber\n" + - " | DATE # newDateTime\n" + - " | ID # newIdentifier\n" + - " | SQ_STRING # newString\n" + - " | K_NULL # newNull\n" + - " ;\n" + - "\n" + - "// Fragments\n" + - "fragment DIGIT : '0' .. '9'; \n" + - "fragment UPPER : 'A' .. 'Z';\n" + - "fragment LOWER : 'a' .. 'z';\n" + - "fragment LETTER : LOWER | UPPER;\n" + - "fragment WORD : LETTER | '_' | '$' | '#' | '.';\n" + - "fragment ALPHANUM : WORD | DIGIT; \n" + - "\n" + - "// Tokens\n" + - "ID : LETTER ALPHANUM*;\n" + - "NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?;\n" + - "DATE : '\\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\\'';\n" + - "SQ_STRING : '\\'' ('\\'\\'' | ~'\\'')* '\\'';\n" + - "DQ_STRING : '\"' ('\\\\\"' | ~'\"')* '\"';\n" + - "WS : [ \\t\\n\\r]+ -> skip ;\n" + - "COMMENTS : ('/*' .*? '*/' | '//' ~'\\n'* '\\n' ) -> skip;\n"; - - String expected = - ""; - String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "Test(1,3)", false); - assertEquals(expected, result); - assertNull(stderrDuringParse); - - expected = - ""; - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "Test(1, 3)", false); - assertEquals(expected, result); - assertNull(stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#509 "Incorrect rule chosen in - * unambiguous grammar". 
- * https://github.com/antlr/antlr4/issues/509 - */ - @Test public void testPrecedenceFilterConsidersContext() throws Exception { - String grammar = - "grammar T;\n" + - "prog\n" + - "@after {System.out.println($ctx.toStringTree(this));}\n" + - ": statement* EOF {};\n" + - "statement: letterA | statement letterA 'b' ;\n" + - "letterA: 'a';\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", - "aa", false); - assertEquals("(prog (statement (letterA a)) (statement (letterA a)) )\n", found); - } - - /** - * This is a regression test for antlr/antlr4#625 "Duplicate action breaks - * operator precedence" - * https://github.com/antlr/antlr4/issues/625 - */ - @Test public void testMultipleActions() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + - "e : a=e op=('*'|'/') b=e {}{}\n" + - " | INT {}{}\n" + - " | '(' x=e ')' {}{}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#625 "Duplicate action breaks - * operator precedence" - * https://github.com/antlr/antlr4/issues/625 - */ - @Test public void testMultipleActionsPredicatesOptions() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + - "e : a=e op=('*'|'/') b=e {}{true}?\n" + - " | a=e op=('+'|'-') b=e {}{true}?\n" + - " | INT {}{}\n" + - " | '(' x=e ')' {}{}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", - }; - runTests(grammar, tests, "s"); - } - - public void runTests(String grammar, String[] tests, String startRule) { - boolean success = rawGenerateAndBuildRecognizer("T.g4", grammar, "TParser", "TLexer"); - assertTrue(success); - writeRecognizerAndCompile("TParser", - "TLexer", - startRule, - debug, - false); - - for (int i=0; i "+found); - assertEquals(expecting, found); - } - } - } From 5ee6990f8cd79bde93ce2ca23dc9374df45601b8 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 02:39:39 +0800 Subject: [PATCH 19/26] validated Listeners tests --- .../test/rt/gen/grammars/Listeners/Basic.st | 2 +- .../v4/test/rt/gen/grammars/Listeners/LR.st | 2 +- .../rt/gen/grammars/Listeners/LRWithLabels.st | 2 +- .../rt/gen/grammars/Listeners/RuleGetters.st | 2 +- .../rt/gen/grammars/Listeners/TokenGetters.st | 2 +- .../org/antlr/v4/test/rt/java/Java.test.stg | 117 ++++----- .../antlr/v4/test/rt/java/TestListeners.java | 138 ++++------- .../org/antlr/v4/test/tool/TestListeners.java | 226 ------------------ 8 files changed, 99 insertions(+), 392 deletions(-) delete mode 100644 tool/test/org/antlr/v4/test/tool/TestListeners.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/Basic.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/Basic.st index 9446ce77e..589fbd7f7 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/Basic.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/Basic.st @@ -10,7 +10,7 @@ grammar ; s @after { - + } : r=a ; a : INT INT diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LR.st 
b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LR.st index 0bf8e3148..03bec4568 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LR.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LR.st @@ -10,7 +10,7 @@ grammar ; s @after { - + } : r=e ; e : e op='*' e diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LRWithLabels.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LRWithLabels.st index c548f899c..635088bd2 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LRWithLabels.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/LRWithLabels.st @@ -10,7 +10,7 @@ grammar ; s @after { - + } : r=e ; e : e '(' eList ')' # Call diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/RuleGetters.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/RuleGetters.st index 663a9e1d5..3eff8165d 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/RuleGetters.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/RuleGetters.st @@ -10,7 +10,7 @@ grammar ; s @after { - + } : r=a ; a : b b // forces list diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/TokenGetters.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/TokenGetters.st index fd653073a..3d89d2b2e 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/TokenGetters.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/Listeners/TokenGetters.st @@ -10,7 +10,7 @@ grammar ; s @after { - + } : r=a ; a : INT INT diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index 23c83fe1e..ba38248cc 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -155,7 +155,7 @@ LANotEquals(i, v) ::= <%this._input.LA()!=%> TokenStartColumnEquals(i) ::= <%this._tokenStartCharPositionInLine==%> -ImportListener(X) ::= <Listener = require('./Listener').Listener;>> +ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)" @@ -248,93 +248,64 @@ protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimula >> BasicListener(X) ::= << -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - +public static class LeafListener extends TBaseListener { + public void visitTerminal(TerminalNode node) { + System.out.println(node.getSymbol().getText()); + } +} >> -walkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); - +WalkListener(s) ::= << +ParseTreeWalker walker = new ParseTreeWalker(); +walker.walk(new LeafListener(), ); >> TokenGetterListener(X) ::= << -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype);\n" + -this.LeafListener.prototype.constructor = this.LeafListener;\n" + - +public static class LeafListener extends TBaseListener { + public void exitA(TParser.AContext ctx) { 
+ if (ctx.getChildCount()==2) + System.out.printf("%s %s %s",ctx.INT(0).getSymbol().getText(), + ctx.INT(1).getSymbol().getText(),ctx.INT()); + else + System.out.println(ctx.ID().getSymbol()); + } +} >> RuleGetterListener(X) ::= << -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype);\n" + -this.LeafListener.prototype.constructor = this.LeafListener;\n" + - +public static class LeafListener extends TBaseListener { + public void exitA(TParser.AContext ctx) { + if (ctx.getChildCount()==2) { + System.out.printf("%s %s %s",ctx.b(0).start.getText(), + ctx.b(1).start.getText(),ctx.b().get(0).start.getText()); + } else + System.out.println(ctx.b(0).start.getText()); + } +} >> LRListener(X) ::= << -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - +public static class LeafListener extends TBaseListener { + public void exitE(TParser.EContext ctx) { + if (ctx.getChildCount()==3) { + System.out.printf("%s %s %s\n",ctx.e(0).start.getText(), + ctx.e(1).start.getText(), ctx.e().get(0).start.getText()); + } else + System.out.println(ctx.INT().getSymbol().getText()); + } +} >> LRWithLabelsListener(X) ::= << -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - +public static class LeafListener extends TBaseListener { + public void exitCall(TParser.CallContext ctx) { + System.out.printf("%s %s",ctx.e().start.getText(),ctx.eList()); + } + public void exitInt(TParser.IntContext ctx) { + System.out.println(ctx.INT().getSymbol().getText()); + } +} >> DeclareContextListGettersFunction() ::= << diff --git a/tool/test/org/antlr/v4/test/rt/java/TestListeners.java b/tool/test/org/antlr/v4/test/rt/java/TestListeners.java index 28b306a0c..7cb58764a 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestListeners.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestListeners.java @@ -9,27 +9,21 @@ public class TestListeners extends BaseTest { public void testBasic() throws Exception { String grammar = "grammar T;\n" + "@parser::header {\n" + - "var TListener = require('./TListener').TListener;\n" + "}\n" + "\n" + "@parser::members {\n" + - "this.LeafListener = function() {\n" + - " this.visitTerminal = function(node) {\n" + - " document.getElementById('output').value += node.symbol.text + '\\n';\n" + - " };\n" + - " return this;\n" + - "};\n" + - "this.LeafListener.prototype = Object.create(TListener.prototype);\n" + - "this.LeafListener.prototype.constructor = this.LeafListener;\n" + - "\n" + + 
"public static class LeafListener extends TBaseListener {\n" + + " public void visitTerminal(TerminalNode node) {\n" + + " System.out.println(node.getSymbol().getText());\n" + + " }\n" + + "}\n" + "}\n" + "\n" + "s\n" + "@after {\n" + "System.out.println($ctx.r.toStringTree(this));\n" + - "var walker = new antlr4.tree.ParseTreeWalker();\n" + - "walker.walk(new this.LeafListener(), $ctx.r);\n" + - "\n" + + "ParseTreeWalker walker = new ParseTreeWalker();\n" + + "walker.walk(new LeafListener(), $ctx.r);\n" + "}\n" + " : r=a ;\n" + "a : INT INT\n" + @@ -48,33 +42,25 @@ public class TestListeners extends BaseTest { String testTokenGetters(String input) throws Exception { String grammar = "grammar T;\n" + "@parser::header {\n" + - "var TListener = require('./TListener').TListener;\n" + "}\n" + "\n" + "@parser::members {\n" + - "this.LeafListener = function() {\n" + - " this.exitA = function(ctx) {\n" + - " var str;\n" + - " if(ctx.getChildCount()===2) {\n" + - " str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());\n" + - " } else {\n" + - " str = ctx.ID().symbol.toString();\n" + - " }\n" + - " document.getElementById('output').value += str + '\\n';\n" + - " };\n" + - " return this;\n" + - "};\n" + - "this.LeafListener.prototype = Object.create(TListener.prototype);\\n\" +\n" + - "this.LeafListener.prototype.constructor = this.LeafListener;\\n\" +\n" + - "\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitA(TParser.AContext ctx) {\n" + + " if (ctx.getChildCount()==2) \n" + + " System.out.printf(\"%s %s %s\",ctx.INT(0).getSymbol().getText(),\n" + + " ctx.INT(1).getSymbol().getText(),ctx.INT());\n" + + " else\n" + + " System.out.println(ctx.ID().getSymbol());\n" + + " }\n" + + "}\n" + "}\n" + "\n" + "s\n" + "@after {\n" + "System.out.println($ctx.r.toStringTree(this));\n" + - "var walker = new antlr4.tree.ParseTreeWalker();\n" + - "walker.walk(new this.LeafListener(), $ctx.r);\n" + - "\n" + + "ParseTreeWalker walker = new ParseTreeWalker();\n" + + "walker.walk(new LeafListener(), $ctx.r);\n" + "}\n" + " : r=a ;\n" + "a : INT INT\n" + @@ -105,33 +91,25 @@ public class TestListeners extends BaseTest { String testRuleGetters(String input) throws Exception { String grammar = "grammar T;\n" + "@parser::header {\n" + - "var TListener = require('./TListener').TListener;\n" + "}\n" + "\n" + "@parser::members {\n" + - "this.LeafListener = function() {\n" + - " this.exitA = function(ctx) {\n" + - " var str;\n" + - " if(ctx.getChildCount()===2) {\n" + - " str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;\n" + - " } else {\n" + - " str = ctx.b(0).start.text;\n" + - " }\n" + - " document.getElementById('output').value += str + '\\n';\n" + - " };\n" + - " return this;\n" + - "};\n" + - "this.LeafListener.prototype = Object.create(TListener.prototype);\\n\" +\n" + - "this.LeafListener.prototype.constructor = this.LeafListener;\\n\" +\n" + - "\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitA(TParser.AContext ctx) {\n" + + " if (ctx.getChildCount()==2) {\n" + + " System.out.printf(\"%s %s %s\",ctx.b(0).start.getText(),\n" + + " ctx.b(1).start.getText(),ctx.b().get(0).start.getText());\n" + + " } else \n" + + " System.out.println(ctx.b(0).start.getText());\n" + + " }\n" + + "}\n" + "}\n" + "\n" + "s\n" + "@after {\n" + "System.out.println($ctx.r.toStringTree(this));\n" + - "var walker = new antlr4.tree.ParseTreeWalker();\n" + - "walker.walk(new 
this.LeafListener(), $ctx.r);\n" + - "\n" + + "ParseTreeWalker walker = new ParseTreeWalker();\n" + + "walker.walk(new LeafListener(), $ctx.r);\n" + "}\n" + " : r=a ;\n" + "a : b b // forces list\n" + @@ -164,33 +142,25 @@ public class TestListeners extends BaseTest { public void testLR() throws Exception { String grammar = "grammar T;\n" + "@parser::header {\n" + - "var TListener = require('./TListener').TListener;\n" + "}\n" + "\n" + "@parser::members {\n" + - "this.LeafListener = function() {\n" + - " this.exitE = function(ctx) {\n" + - " var str;\n" + - " if(ctx.getChildCount()===3) {\n" + - " str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;\n" + - " } else {\n" + - " str = ctx.INT().symbol.text;\n" + - " }\n" + - " document.getElementById('output').value += str + '\\n';\n" + - " };\n" + - " return this;\n" + - "};\n" + - "this.LeafListener.prototype = Object.create(TListener.prototype);\n" + - "this.LeafListener.prototype.constructor = this.LeafListener;\n" + - "\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitE(TParser.EContext ctx) {\n" + + " if (ctx.getChildCount()==3) {\n" + + " System.out.printf(\"%s %s %s\\n\",ctx.e(0).start.getText(),\n" + + " ctx.e(1).start.getText(), ctx.e().get(0).start.getText());\n" + + " } else \n" + + " System.out.println(ctx.INT().getSymbol().getText());\n" + + " }\n" + + "}\n" + "}\n" + "\n" + "s\n" + "@after {\n" + "System.out.println($ctx.r.toStringTree(this));\n" + - "var walker = new antlr4.tree.ParseTreeWalker();\n" + - "walker.walk(new this.LeafListener(), $ctx.r);\n" + - "\n" + + "ParseTreeWalker walker = new ParseTreeWalker();\n" + + "walker.walk(new LeafListener(), $ctx.r);\n" + "}\n" + " : r=e ;\n" + "e : e op='*' e\n" + @@ -211,32 +181,24 @@ public class TestListeners extends BaseTest { public void testLRWithLabels() throws Exception { String grammar = "grammar T;\n" + "@parser::header {\n" + - "var TListener = require('./TListener').TListener;\n" + "}\n" + "\n" + "@parser::members {\n" + - "this.LeafListener = function() {\n" + - " this.exitCall = function(ctx) {\n" + - " var str = ctx.e().start.text + ' ' + ctx.eList();\n" + - " document.getElementById('output').value += str + '\\n';\n" + - " };\n" + - " this.exitInt = function(ctx) {\n" + - " var str = ctx.INT().symbol.text;\n" + - " document.getElementById('output').value += str + '\\n';\n" + - " };\n" + - " return this;\n" + - "};\n" + - "this.LeafListener.prototype = Object.create(TListener.prototype);\n" + - "this.LeafListener.prototype.constructor = this.LeafListener;\n" + - "\n" + + "public static class LeafListener extends TBaseListener {\n" + + " public void exitCall(TParser.CallContext ctx) {\n" + + " System.out.printf(\"%s %s\",ctx.e().start.getText(),ctx.eList());\n" + + " }\n" + + " public void exitInt(TParser.IntContext ctx) {\n" + + " System.out.println(ctx.INT().getSymbol().getText());\n" + + " }\n" + + "}\n" + "}\n" + "\n" + "s\n" + "@after {\n" + "System.out.println($ctx.r.toStringTree(this));\n" + - "var walker = new antlr4.tree.ParseTreeWalker();\n" + - "walker.walk(new this.LeafListener(), $ctx.r);\n" + - "\n" + + "ParseTreeWalker walker = new ParseTreeWalker();\n" + + "walker.walk(new LeafListener(), $ctx.r);\n" + "}\n" + " : r=e ;\n" + "e : e '(' eList ')' # Call\n" + diff --git a/tool/test/org/antlr/v4/test/tool/TestListeners.java b/tool/test/org/antlr/v4/test/tool/TestListeners.java deleted file mode 100644 index 94dd6793a..000000000 --- 
a/tool/test/org/antlr/v4/test/tool/TestListeners.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.test.tool; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestListeners extends BaseTest { - @Test public void testBasic() throws Exception { - String grammar = - "grammar T;\n" + - "@header {import org.antlr.v4.runtime.tree.*;}\n"+ - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void visitTerminal(TerminalNode node) {\n" + - " System.out.println(node.getSymbol().getText());\n" + - " }\n" + - " }}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=a ;\n" + - "a : INT INT" + - " | ID" + - " ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); - String expecting = "(a 1 2)\n" + - "1\n" + - "2\n"; - assertEquals(expecting, result); - } - - @Test public void testTokenGetters() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void exitA(TParser.AContext ctx) {\n" + - " if (ctx.getChildCount()==2) System.out.printf(\"%s %s %s\",ctx.INT(0).getSymbol().getText(),ctx.INT(1).getSymbol().getText(),ctx.INT());\n" + - " else System.out.println(ctx.ID().getSymbol());\n" + - " }\n" + - " }}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=a ;\n" + - "a : INT INT" + - " | ID" + - " ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = 
execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); - String expecting = - "(a 1 2)\n" + - "1 2 [1, 2]\n"; - assertEquals(expecting, result); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); - expecting = "(a abc)\n" + - "[@0,0:2='abc',<4>,1:0]\n"; - assertEquals(expecting, result); - } - - @Test public void testRuleGetters() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void exitA(TParser.AContext ctx) {\n" + - " if (ctx.getChildCount()==2) {\n" + - " System.out.printf(\"%s %s %s\",ctx.b(0).start.getText(),\n" + - " ctx.b(1).start.getText(),ctx.b().get(0).start.getText());\n" + - " }\n" + - " else System.out.println(ctx.b(0).start.getText());\n" + - " }\n" + - " }}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=a ;\n" + - "a : b b" + // forces list - " | b" + // a list still - " ;\n" + - "b : ID | INT ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); - String expecting = "(a (b 1) (b 2))\n" + - "1 2 1\n"; - assertEquals(expecting, result); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); - expecting = "(a (b abc))\n" + - "abc\n"; - assertEquals(expecting, result); - } - - @Test public void testLR() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void exitE(TParser.EContext ctx) {\n" + - " if (ctx.getChildCount()==3) {\n" + - " System.out.printf(\"%s %s %s\\n\",ctx.e(0).start.getText(),\n" + - " ctx.e(1).start.getText()," + - " ctx.e().get(0).start.getText());\n" + - " }\n" + - " else System.out.println(ctx.INT().getSymbol().getText());\n" + - " }\n" + - " }" + - "}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=e ;\n" + - "e : e op='*' e\n" + - " | e op='+' e\n" + - " | INT\n" + - " ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1+2*3", false); - String expecting = - "(e (e 1) + (e (e 2) * (e 3)))\n" + - "1\n" + - "2\n" + - "3\n" + - "2 3 2\n" + - "1 2 1\n"; - assertEquals(expecting, result); - } - - @Test public void testLRWithLabels() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - " public static class LeafListener extends TBaseListener {\n" + - " public void exitCall(TParser.CallContext ctx) {\n" + - " System.out.printf(\"%s %s\",ctx.e().start.getText(),\n" + - " ctx.eList());\n" + - " }\n" + - " public void exitInt(TParser.IntContext ctx) {\n" + - " System.out.println(ctx.INT().getSymbol().getText());\n" + - " }\n" + - " }\n" + - "}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=e ;\n" + - "e : e '(' eList ')' # Call\n" + - " | INT # Int\n" + - " ; \n" + - "eList : e (',' e)* ;\n" + - 
"MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1(2,3)", false); - String expecting = - "(e (e 1) ( (eList (e 2) , (e 3)) ))\n" + - "1\n" + - "2\n" + - "3\n" + - "1 [13 6]\n"; - assertEquals(expecting, result); - } -} From 2716bbd62d1c266ae8bc17137e42598efb10a430 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 02:49:22 +0800 Subject: [PATCH 20/26] validated ParseTrees tests --- .../org/antlr/v4/test/rt/gen/Generator.java | 13 +- .../ParseTrees/{test2Alts.st => 2Alts.st} | 0 .../org/antlr/v4/test/rt/java/Java.test.stg | 4 +- .../antlr/v4/test/rt/java/TestParseTrees.java | 26 +-- .../antlr/v4/test/tool/TestParseTrees.java | 154 ------------------ 5 files changed, 21 insertions(+), 176 deletions(-) rename tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/{test2Alts.st => 2Alts.st} (100%) delete mode 100644 tool/test/org/antlr/v4/test/tool/TestParseTrees.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index e03046038..e9e070a23 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -485,7 +485,7 @@ public class Generator { "xy", "(a x y)\n", null); - file.addParserTest(input, "test2Alts", "T", "s", + file.addParserTest(input, "2Alts", "T", "s", "y", "(a y)\n", null); @@ -496,20 +496,19 @@ public class Generator { file.addParserTest(input, "RuleRef", "T", "s", "yx", "(a (b y) x)\n", - null); - // ERRORs not shown. z is colored red in tree view + null); file.addParserTest(input, "ExtraToken", "T", "s", "xzy", - "(a x z y)\n", - null); + "(a x z y)\n", // ERRORs not shown. z is colored red in tree view + "line 1:1 extraneous input 'z' expecting 'y'\n"); file.addParserTest(input, "NoViableAlt", "T", "s", "z", "(a z)\n", - null); + "line 1:0 mismatched input 'z' expecting {'x', 'y'}\n"); file.addParserTest(input, "Sync", "T", "s", "xzyy!", "(a x z y y !)\n", - null); + "line 1:1 extraneous input 'z' expecting {'y', '!'}\n"); return file; } diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/test2Alts.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/2Alts.st similarity index 100% rename from tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/test2Alts.st rename to tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/2Alts.st diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index ba38248cc..0a569e96a 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -131,7 +131,7 @@ Pass() ::= "" StringList() ::= "List\" -BuildParseTrees() ::= "this.buildParseTrees = true;" +BuildParseTrees() ::= "setBuildParseTree(true);" BailErrorStrategy() ::= <%setErrorHandler(new BailErrorStrategy());%> @@ -159,7 +159,7 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)" -WriteRuleInvocationStack() ::= "document.getElementById('output').value += antlr4.Utils.arrayToString(this.getRuleInvocationStack()) + '\\n';" +WriteRuleInvocationStack() ::= "System.out.println(getRuleInvocationStack());" LL_EXACT_AMBIG_DETECTION() ::= <> diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java b/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java index a167df19a..6b5ac2bba 100644 --- 
a/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParseTrees.java @@ -10,14 +10,14 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + "}\n" + " : r=a ;\n" + "a : 'x' { \n" + - "document.getElementById('output').value += antlr4.Utils.arrayToString(this.getRuleInvocationStack()) + '\\n';\n" + + "System.out.println(getRuleInvocationStack());\n" + "} ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); assertEquals("[a, s]\n(a x)\n", found); @@ -29,7 +29,7 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -43,11 +43,11 @@ public class TestParseTrees extends BaseTest { } @Test - public void testtest2Alts() throws Exception { + public void test2Alts() throws Exception { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -65,7 +65,7 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -83,7 +83,7 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -103,7 +103,7 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -115,7 +115,7 @@ public class TestParseTrees extends BaseTest { " ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzy", false); assertEquals("(a x z y)\n", found); - assertNull(this.stderrDuringParse); + assertEquals("line 1:1 extraneous input 'z' expecting 'y'\n", this.stderrDuringParse); } @Test @@ -123,7 +123,7 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -136,7 +136,7 @@ public class TestParseTrees extends BaseTest { " "; String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "z", false); assertEquals("(a z)\n", found); - assertNull(this.stderrDuringParse); + assertEquals("line 1:0 mismatched input 'z' expecting {'x', 'y'}\n", this.stderrDuringParse); } @Test @@ -144,7 +144,7 @@ public class TestParseTrees extends BaseTest { String grammar = "grammar T;\n" + "s\n" + "@init {\n" + - "this.buildParseTrees = true;\n" + + "setBuildParseTree(true);\n" + "}\n" + "@after {\n" + "System.out.println($r.ctx.toStringTree(this));\n" + @@ -156,7 +156,7 @@ public class TestParseTrees extends BaseTest { " ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzyy!", false); assertEquals("(a x z y y 
!)\n", found); - assertNull(this.stderrDuringParse); + assertEquals("line 1:1 extraneous input 'z' expecting {'y', '!'}\n", this.stderrDuringParse); } diff --git a/tool/test/org/antlr/v4/test/tool/TestParseTrees.java b/tool/test/org/antlr/v4/test/tool/TestParseTrees.java deleted file mode 100644 index d9fd5b9ac..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestParseTrees.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.test.tool; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestParseTrees extends BaseTest { - @Test public void testTokenAndRuleContextString() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : 'x' {System.out.println(getRuleInvocationStack());} ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); - String expecting = "[a, s]\n(a x)\n"; - assertEquals(expecting, result); - } - - @Test public void testToken2() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : 'x' 'y'\n" + - " ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xy", false); - String expecting = "(a x y)\n"; - assertEquals(expecting, result); - } - - @Test public void test2Alts() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : 'x' | 'y'\n" + - " ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y", false); - String expecting = "(a y)\n"; - assertEquals(expecting, result); - } - - @Test public void test2AltLoop() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : ('x' | 'y')* 'z'\n" + - " ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xyyxyxz", false); - String expecting = "(a x y y x y x z)\n"; - assertEquals(expecting, result); - } - - @Test public void testRuleRef() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : b 'x'\n" + - " ;\n" + - "b : 'y' ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "yx", false); - String expecting = "(a (b y) x)\n"; - assertEquals(expecting, result); - } - - // ERRORS - - @Test public void testExtraToken() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : 'x' 'y'\n" + - " ;\n" + - "Z : 'z'; \n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzy", false); - String expecting = "(a x z y)\n"; // ERRORs not shown. 
z is colored red in tree view - assertEquals(expecting, result); - } - - @Test public void testNoViableAlt() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : 'x' | 'y'\n" + - " ;\n" + - "Z : 'z'; \n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "z", false); - String expecting = "(a z)\n"; - assertEquals(expecting, result); - } - - @Test public void testSync() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : 'x' 'y'* '!'\n" + - " ;\n" + - "Z : 'z'; \n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzyy!", false); - String expecting = "(a x z y y !)\n"; - assertEquals(expecting, result); - } -} From ba76b053a7642ddb6dcbf6360645b617be37bef2 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 03:40:35 +0800 Subject: [PATCH 21/26] validate SemPredEvalParser tests --- .../org/antlr/v4/test/rt/gen/Generator.java | 72 +- .../2UnpredicatedAltsAndOneOrthogonalAlt.st | 2 +- .../SemPredEvalParser/ActionHidesPreds.st | 2 +- .../ActionsHidePredsInGlobalFOLLOW.st | 1 - ...pendentPredNotInOuterCtxShouldBeIgnored.st | 2 +- .../PredTestedEvenWhenUnAmbig.st | 2 +- .../PredicateDependentOnArg.st | 6 +- .../PredicateDependentOnArg2.st | 8 +- .../ToLeftWithVaryingPredicate.st | 5 +- .../org/antlr/v4/test/rt/java/Java.test.stg | 22 +- .../test/rt/java/TestFullContextParsing.java | 8 +- .../test/rt/java/TestSemPredEvalParser.java | 116 ++-- .../v4/test/tool/TestSemPredEvalParser.java | 626 ------------------ 13 files changed, 134 insertions(+), 738 deletions(-) delete mode 100644 tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index e9e070a23..16f7e0669 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -224,36 +224,40 @@ public class Generator { private TestFile buildSemPredEvalParser() throws Exception { TestFile file = new TestFile("SemPredEvalParser"); - file.addParserTest(input, "SimpleValidate", "T", "s", + TestMethod tm = file.addParserTest(input, "SimpleValidate", "T", "s", "x", "", "line 1:0 no viable alternative at input 'x'\n"); - file.addParserTest(input, "SimpleValidate2", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "SimpleValidate2", "T", "s", "3 4 x", "alt 2\n" + "alt 2\n", "line 1:4 no viable alternative at input 'x'\n"); + tm.debug = true; file.addParserTest(input, "AtomWithClosureInTranslatedLRRule", "T", "start", "a+b+a", "", null); - file.addParserTest(input, "ValidateInDFA", "T", "s", + tm = file.addParserTest(input, "ValidateInDFA", "T", "s", "x ; y", "", "line 1:0 no viable alternative at input 'x'\n" + "line 1:4 no viable alternative at input 'y'\n"); - file.addParserTest(input, "Simple", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "Simple", "T", "s", "x y 3", "alt 2\n" + "alt 2\n" + "alt 3\n", null); // Under new predicate ordering rules (see antlr/antlr4#29), the first // alt with an acceptable config (unpredicated, or predicated and evaluates // to true) is chosen. 
+ tm.debug = true; file.addParserTest(input, "Order", "T", "s", "x y", "alt 1\n" + "alt 1\n", null); // We have n-2 predicates for n alternatives. pick first alt - file.addParserTest(input, "2UnpredicatedAlts", "T", "s", + tm = file.addParserTest(input, "2UnpredicatedAlts", "T", "s", "x; y", "alt 1\n" + "alt 1\n", @@ -261,7 +265,8 @@ public class Generator { "line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\n" + "line 1:3 reportAttemptingFullContext d=0 (a), input='y'\n" + "line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n"); - file.addParserTest(input, "2UnpredicatedAltsAndOneOrthogonalAlt", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "2UnpredicatedAltsAndOneOrthogonalAlt", "T", "s", "34; x; y", "alt 1\n" + "alt 2\n" + "alt 2\n", "line 1:4 reportAttemptingFullContext d=0 (a), input='x'\n" + @@ -271,24 +276,28 @@ public class Generator { // The parser consumes ID and moves to the 2nd token INT. // To properly evaluate the predicates after matching ID INT, // we must correctly see come back to starting index so LT(1) works - file.addParserTest(input, "RewindBeforePredEval", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "RewindBeforePredEval", "T", "s", "y 3 x 4", "alt 2\n" + "alt 1\n", null); // checks that we throw exception if all alts // are covered with a predicate and none succeeds + tm.debug = true; file.addParserTest(input, "NoTruePredsThrowsNoViableAlt", "T", "s", "y 3 x 4", "", "line 1:0 no viable alternative at input 'y'\n"); - file.addParserTest(input, "ToLeft", "T", "s", + tm = file.addParserTest(input, "ToLeft", "T", "s", "x x y", "alt 2\n" + "alt 2\n" + "alt 2\n", null); - file.addParserTest(input, "UnpredicatedPathsInAlt", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "UnpredicatedPathsInAlt", "T", "s", "x 4", "alt 1\n", null); + tm.debug = true; file.addParserTest(input, "ActionHidesPreds", "T", "s", "x x y", "alt 1\n" + "alt 1\n" + "alt 1\n", @@ -298,20 +307,22 @@ public class Generator { * the predicates assuming that all necessary information is available. * The i++ action is done outside of the prediction and so it is executed. */ - file.addParserTest(input, "ToLeftWithVaryingPredicate", "T", "s", + tm = file.addParserTest(input, "ToLeftWithVaryingPredicate", "T", "s", "x x y", "i=1\n" + "alt 2\n" + "i=2\n" + "alt 1\n" + "i=3\n" + "alt 2\n", null); + tm.debug = true; /** * In this case, we're passing a parameter into a rule that uses that * information to predict the alternatives. This is the special case * where we know exactly which context we are in. The context stack * is empty and we have not dipped into the outer context to make a decision. */ - file.addParserTest(input, "PredicateDependentOnArg", "T", "s", + tm = file.addParserTest(input, "PredicateDependentOnArg", "T", "s", "a b", "alt 2\n" + "alt 1\n", null); + tm.debug = true; /** In this case, we have to ensure that the predicates are not tested during the closure after recognizing the 1st ID. The closure will fall off the end of 'a' 1st time and reach into the @@ -322,61 +333,70 @@ public class Generator { simulation doesn't crash with context object issues when it encounters preds during FOLLOW. */ - file.addParserTest(input, "PredicateDependentOnArg2", "T", "s", + tm = file.addParserTest(input, "PredicateDependentOnArg2", "T", "s", "a b", "", null); - // uses ID ';' or ID '.' lookahead to solve s. preds not tested. 
- file.addParserTest(input, "DependentPredNotInOuterCtxShouldBeIgnored", "T", "s", + tm.debug = true; + // uses ID ';' or ID '.' lookahead to solve s. preds not tested. + tm = file.addParserTest(input, "DependentPredNotInOuterCtxShouldBeIgnored", "T", "s", "a;", "alt 2\n", null); - file.addParserTest(input, "IndependentPredNotPassedOuterCtxToAvoidCastException", "T", "s", + tm.debug = true; + tm = file.addParserTest(input, "IndependentPredNotPassedOuterCtxToAvoidCastException", "T", "s", "a;", "alt 2\n", null); - /** During a global follow operation, we still collect semantic + tm.debug = true; + /** During a global follow operation, we still collect semantic * predicates as long as they are not dependent on local context */ - file.addParserTest(input, "PredsInGlobalFOLLOW", "T", "s", + tm = file.addParserTest(input, "PredsInGlobalFOLLOW", "T", "s", "a!", "eval=true\n" + /* now we are parsing */ "parse\n", null); - /** We cannot collect predicates that are dependent on local context if + tm.debug = true; + /** We cannot collect predicates that are dependent on local context if * we are doing a global follow. They appear as if they were not there at all. */ - file.addParserTest(input, "DepedentPredsInGlobalFOLLOW","T", "s", + tm = file.addParserTest(input, "DepedentPredsInGlobalFOLLOW","T", "s", "a!", "eval=true\n" + "parse\n", null); - /** Regular non-forced actions can create side effects used by semantic + tm.debug = true; + /** Regular non-forced actions can create side effects used by semantic * predicates and so we cannot evaluate any semantic predicate * encountered after having seen a regular action. This includes * during global follow operations. */ - file.addParserTest(input, "ActionsHidePredsInGlobalFOLLOW", "T", "s", + tm = file.addParserTest(input, "ActionsHidePredsInGlobalFOLLOW", "T", "s", "a!", "eval=true\n" + "parse\n", null); - file.addParserTestsWithErrors(input, "PredTestedEvenWhenUnAmbig", "T", "primary", + tm.debug = true; + tm = file.addParserTestsWithErrors(input, "PredTestedEvenWhenUnAmbig", "T", "primary", "abc", "ID abc\n", null, "enum", "", "line 1:0 no viable alternative at input 'enum'\n"); + tm.debug = true; /** * This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". 
* https://github.com/antlr/antlr4/issues/218 */ - file.addParserTest(input, "DisabledAlternative", "T", "cppCompilationUnit", + tm = file.addParserTest(input, "DisabledAlternative", "T", "cppCompilationUnit", "hello", "", null); + tm.debug = true; /** Loopback doesn't eval predicate at start of alt */ - file.addParserTestsWithErrors(input, "PredFromAltTestedInLoopBack", "T", "file_", + tm = file.addParserTestsWithErrors(input, "PredFromAltTestedInLoopBack", "T", "file_", "s\n\n\nx\n", - "(file_ (para (paraContent s) \n \n) (para (paraContent \n x \n)) )\n", + "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n", "line 5:2 mismatched input '' expecting '\n'\n", "s\n\n\nx\n\n", - "(file_ (para (paraContent s) \n \n) (para (paraContent \n x) \n \n) )\n", + "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) )\n", null); + tm.debug = true; return file; } diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st index 2eb5d7a60..47bc99281 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st @@ -3,7 +3,7 @@ s : {} a ';' a ';' a; a : INT {} | ID {} // must pick this one for ID since pred is false | ID {} - | {}? ID {console.log(\"alt 4\");} + | {}? ID {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionHidesPreds.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionHidesPreds.st index 6a8bc7495..90c052f6e 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionHidesPreds.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionHidesPreds.st @@ -1,5 +1,5 @@ grammar ; -@members {} +@members {} s : a+ ; a : {} ID {}? {} | {} ID {}? {} diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st index 503397837..8c4ced294 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st @@ -1,6 +1,5 @@ grammar ; @members { -this.p = function(v) { } s : e {} {}? {} '!' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st index e17248d61..266a67208 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DependentPredNotInOuterCtxShouldBeIgnored.st @@ -1,7 +1,7 @@ grammar ; s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a; b[int i] : a[i] ; -a[int i]" + +a[int i] : {}? ID {} | {}? 
ID {} ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st index dea5c35b0..8840f393b 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st @@ -1,5 +1,5 @@ grammar ; -@members {} +@members {} primary : ID {} | {!}? 'enum' {} diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg.st index 3a05644d7..8a91793ab 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg.st @@ -1,8 +1,8 @@ grammar ; -@members {i=0} +@members {} s : a[2] a[1]; -"a[int i]" + -" : {}? ID {} +a[int i] + : {}? ID {} | {}? ID {} ; ID : 'a'..'z'+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st index 355d0f458..df1142e63 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredicateDependentOnArg2.st @@ -1,9 +1,9 @@ grammar ; -@members {i=0} +@members {} s : a[2] a[1]; -a[int i]" + - : {}? ID {} - | {}? ID {} +a[int i] + : {}? ID + | {}? ID ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st index 2e9e6d10d..be00d9913 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st @@ -1,6 +1,7 @@ grammar ; -@members {this.i=0} -s : ({\n} a)+ ; +@members {} +s : ({ + } a)+ ; a : {}? ID {} | {}? ID {} ; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index 0a569e96a..edd2f23f1 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -109,7 +109,9 @@ DeclareLocal(s,v) ::= "Object = ;" AssignLocal(s,v) ::= " = ;" -InitMember(n,v) ::= <%this. = ;%> +InitIntMember(n,v) ::= <%int = ;%> + +InitBooleanMember(n,v) ::= <%boolean = ;%> GetMember(n) ::= <%this.%> @@ -119,11 +121,11 @@ AddMember(n,v) ::= <%this. += ;%> PlusMember(v,n) ::= <% + this.%> -MemberEquals(n,v) ::= <%this. === %> +MemberEquals(n,v) ::= <%this. == %> -ModMemberEquals(n,m,v) ::= <%this. % m === %> +ModMemberEquals(n,m,v) ::= <%this. % == %> -ModMemberNotEquals(n,m,v) ::= <%this. % m != %> +ModMemberNotEquals(n,m,v) ::= <%this. % != %> DumpDFA() ::= "this.dumpDFA();" @@ -141,7 +143,7 @@ Column() ::= "this.getCharPositionInLine()" Text() ::= "this.getText()" -ValEquals(a,b) ::= <%
===%> +ValEquals(a,b) ::= <%==%> TextEquals(a) ::= <%this.getText().equals("")%> @@ -149,7 +151,7 @@ PlusText(a) ::= <%"" + this.getText()%> InputText() ::= "this._input.getText()" -LTEquals(i, v) ::= <%this._input.LT().text===%> +LTEquals(i, v) ::= <%this._input.LT().getText().equals()%> LANotEquals(i, v) ::= <%this._input.LA()!=%> @@ -161,7 +163,7 @@ GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)" WriteRuleInvocationStack() ::= "System.out.println(getRuleInvocationStack());" -LL_EXACT_AMBIG_DETECTION() ::= <> +LL_EXACT_AMBIG_DETECTION() ::= <<_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);>> PositionAdjustingLexer() ::= << @@ -320,10 +322,10 @@ Declare_foo() ::= <> Invoke_foo() ::= "this.foo();" -Declare_pred() ::= < skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", true); assertEquals("", found); assertEquals("line 1:0 no viable alternative at input 'x'\n", this.stderrDuringParse); } @@ -30,7 +30,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "3 4 x", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "3 4 x", true); assertEquals("alt 2\nalt 2\n", found); assertEquals("line 1:4 no viable alternative at input 'x'\n", this.stderrDuringParse); } @@ -60,7 +60,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x ; y", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x ; y", true); assertEquals("", found); assertEquals("line 1:0 no viable alternative at input 'x'\nline 1:4 no viable alternative at input 'y'\n", this.stderrDuringParse); } @@ -76,7 +76,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x y 3", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x y 3", true); assertEquals("alt 2\nalt 2\nalt 3\n", found); assertNull(this.stderrDuringParse); } @@ -101,7 +101,7 @@ public class TestSemPredEvalParser extends BaseTest { @Test public void test2UnpredicatedAlts() throws Exception { String grammar = "grammar T;\n" + - "s : {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;} a ';' a; // do 2x: once in ATN, next in DFA\n" + + "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);} a ';' a; // do 2x: once in ATN, next in DFA\n" + "a : ID {System.out.println(\"alt 1\");}\n" + " | ID {System.out.println(\"alt 2\");}\n" + " | {false}? 
ID {System.out.println(\"alt 3\");}\n" + @@ -109,7 +109,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x; y", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x; y", true); assertEquals("alt 1\nalt 1\n", found); assertEquals("line 1:0 reportAttemptingFullContext d=0 (a), input='x'\nline 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\nline 1:3 reportAttemptingFullContext d=0 (a), input='y'\nline 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n", this.stderrDuringParse); } @@ -117,16 +117,16 @@ public class TestSemPredEvalParser extends BaseTest { @Test public void test2UnpredicatedAltsAndOneOrthogonalAlt() throws Exception { String grammar = "grammar T;\n" + - "s : {this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;} a ';' a ';' a;\n" + + "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);} a ';' a ';' a;\n" + "a : INT {System.out.println(\"alt 1\");}\n" + " | ID {System.out.println(\"alt 2\");} // must pick this one for ID since pred is false\n" + " | ID {System.out.println(\"alt 3\");}\n" + - " | {false}? ID {console.log(\\\"alt 4\\\");}\n" + + " | {false}? ID {System.out.println(\"alt 4\");}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "34; x; y", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "34; x; y", true); assertEquals("alt 1\nalt 2\nalt 2\n", found); assertEquals("line 1:4 reportAttemptingFullContext d=0 (a), input='x'\nline 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\nline 1:7 reportAttemptingFullContext d=0 (a), input='y'\nline 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n", this.stderrDuringParse); } @@ -135,13 +135,13 @@ public class TestSemPredEvalParser extends BaseTest { public void testRewindBeforePredEval() throws Exception { String grammar = "grammar T;\n" + "s : a a;\n" + - "a : {this._input.LT(1).text===\"x\"}? ID INT {System.out.println(\"alt 1\");}\n" + - " | {this._input.LT(1).text===\"y\"}? ID INT {System.out.println(\"alt 2\");}\n" + + "a : {this._input.LT(1).getText().equals(\"x\")}? ID INT {System.out.println(\"alt 1\");}\n" + + " | {this._input.LT(1).getText().equals(\"y\")}? 
ID INT {System.out.println(\"alt 2\");}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y 3 x 4", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y 3 x 4", true); assertEquals("alt 2\nalt 1\n", found); assertNull(this.stderrDuringParse); } @@ -171,7 +171,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", true); assertEquals("alt 2\nalt 2\nalt 2\n", found); assertNull(this.stderrDuringParse); } @@ -190,7 +190,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x 4", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x 4", true); assertEquals("alt 1\n", found); assertNull(this.stderrDuringParse); } @@ -198,10 +198,10 @@ public class TestSemPredEvalParser extends BaseTest { @Test public void testActionHidesPreds() throws Exception { String grammar = "grammar T;\n" + - "@members {this.i = 0;}\n" + + "@members {int i = 0;}\n" + "s : a+ ;\n" + - "a : {this.i = 1;} ID {this.i === 1}? {System.out.println(\"alt 1\");}\n" + - " | {this.i = 2;} ID {this.i === 2}? {System.out.println(\"alt 2\");}\n" + + "a : {this.i = 1;} ID {this.i == 1}? {System.out.println(\"alt 1\");}\n" + + " | {this.i = 2;} ID {this.i == 2}? {System.out.println(\"alt 2\");}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -214,15 +214,16 @@ public class TestSemPredEvalParser extends BaseTest { @Test public void testToLeftWithVaryingPredicate() throws Exception { String grammar = "grammar T;\n" + - "@members {this.i=0}\n" + - "s : ({this.i += 1;\\nSystem.out.println(\"i=\" + this.i);} a)+ ;\n" + - "a : {this.i % m === 0}? ID {System.out.println(\"alt 1\");}\n" + - " | {this.i % m != 0}? ID {System.out.println(\"alt 2\");}\n" + + "@members {int i = 0;}\n" + + "s : ({this.i += 1;\n" + + " System.out.println(\"i=\" + this.i);} a)+ ;\n" + + "a : {this.i % 2 == 0}? ID {System.out.println(\"alt 1\");}\n" + + " | {this.i % 2 != 0}? ID {System.out.println(\"alt 2\");}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x x y", true); assertEquals("i=1\nalt 2\ni=2\nalt 1\ni=3\nalt 2\n", found); assertNull(this.stderrDuringParse); } @@ -230,16 +231,16 @@ public class TestSemPredEvalParser extends BaseTest { @Test public void testPredicateDependentOnArg() throws Exception { String grammar = "grammar T;\n" + - "@members {i=0}\n" + + "@members {int i = 0;}\n" + "s : a[2] a[1];\n" + - "\"a[int i]\" +\n" + - "\" : {$i===1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i===2}? ID {System.out.println(\"alt 2\");}\n" + + "a[int i]\n" + + " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i==2}? 
ID {System.out.println(\"alt 2\");}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a b", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a b", true); assertEquals("alt 2\nalt 1\n", found); assertNull(this.stderrDuringParse); } @@ -247,16 +248,16 @@ public class TestSemPredEvalParser extends BaseTest { @Test public void testPredicateDependentOnArg2() throws Exception { String grammar = "grammar T;\n" + - "@members {i=0}\n" + + "@members {int i = 0;}\n" + "s : a[2] a[1];\n" + - "a[int i]\" +\n" + - " : {$i===1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i===2}? ID {System.out.println(\"alt 2\");}\n" + + "a[int i]\n" + + " : {$i==1}? ID \n" + + " | {$i==2}? ID \n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a b", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a b", true); assertEquals("", found); assertNull(this.stderrDuringParse); } @@ -266,14 +267,14 @@ public class TestSemPredEvalParser extends BaseTest { String grammar = "grammar T;\n" + "s : b[2] ';' | b[2] '.' ; // decision in s drills down to ctx-dependent pred in a;\n" + "b[int i] : a[i] ;\n" + - "a[int i]\" +\n" + - " : {$i===1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i===2}? ID {System.out.println(\"alt 2\");}\n" + + "a[int i]\n" + + " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + + " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a;", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a;", true); assertEquals("alt 2\n", found); assertNull(this.stderrDuringParse); } @@ -290,7 +291,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a;", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a;", true); assertEquals("alt 2\n", found); assertNull(this.stderrDuringParse); } @@ -299,10 +300,10 @@ public class TestSemPredEvalParser extends BaseTest { public void testPredsInGlobalFOLLOW() throws Exception { String grammar = "grammar T;\n" + "@members {\n" + - "this.pred = function(v) {\n" + - " document.getElementById('output').value += 'eval=\" + v.toString() + '\\n';\n" + + "boolean pred(boolean v) {\n" + + " System.out.println(\"eval=\"+v);\n" + " return v;\n" + - "};\n" + + "}\n" + "}\n" + "s : e {this.pred(true)}? {System.out.println(\"parse\");} '!' ;\n" + "t : e {this.pred(false)}? 
ID ;\n" + @@ -310,7 +311,7 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", true); assertEquals("eval=true\nparse\n", found); assertNull(this.stderrDuringParse); } @@ -319,19 +320,19 @@ public class TestSemPredEvalParser extends BaseTest { public void testDepedentPredsInGlobalFOLLOW() throws Exception { String grammar = "grammar T;\n" + "@members {\n" + - "this.pred = function(v) {\n" + - " document.getElementById('output').value += 'eval=\" + v.toString() + '\\n';\n" + + "boolean pred(boolean v) {\n" + + " System.out.println(\"eval=\"+v);\n" + " return v;\n" + - "};\n" + + "}\n" + "}\n" + "s : a[99] ;\n" + - "a[int i] : e {this.pred($i===99)}? {System.out.println(\"parse\");} '!' ;\n" + - "b[int i] : e {this.pred($i===99)}? ID ;\n" + + "a[int i] : e {this.pred($i==99)}? {System.out.println(\"parse\");} '!' ;\n" + + "b[int i] : e {this.pred($i==99)}? ID ;\n" + "e : ID | ; // non-LL(1) so we use ATN\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", true); assertEquals("eval=true\nparse\n", found); assertNull(this.stderrDuringParse); } @@ -340,11 +341,10 @@ public class TestSemPredEvalParser extends BaseTest { public void testActionsHidePredsInGlobalFOLLOW() throws Exception { String grammar = "grammar T;\n" + "@members {\n" + - "this.p = function(v) {\n" + - "this.pred = function(v) {\n" + - " document.getElementById('output').value += 'eval=\" + v.toString() + '\\n';\n" + + "boolean pred(boolean v) {\n" + + " System.out.println(\"eval=\"+v);\n" + " return v;\n" + - "};\n" + + "}\n" + "}\n" + "s : e {} {this.pred(true)}? {System.out.println(\"parse\");} '!' ;\n" + "t : e {} {this.pred(false)}? ID ;\n" + @@ -352,21 +352,21 @@ public class TestSemPredEvalParser extends BaseTest { "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a!", true); assertEquals("eval=true\nparse\n", found); assertNull(this.stderrDuringParse); } String testPredTestedEvenWhenUnAmbig(String input) throws Exception { String grammar = "grammar T;\n" + - "@members {this.enumKeyword = true;}\n" + + "@members {boolean enumKeyword = true;}\n" + "primary\n" + " : ID {System.out.println(\"ID \"+$ID.text);}\n" + " | {!this.enumKeyword}? 'enum' {System.out.println(\"enum\");}\n" + " ;\n" + "ID : [a-z]+ ;\n" + "WS : [ \\t\\n\\r]+ -> skip ;"; - return execParser("T.g4", grammar, "TParser", "TLexer", "primary", input, false); + return execParser("T.g4", grammar, "TParser", "TLexer", "primary", input, true); } @Test @@ -390,7 +390,7 @@ public class TestSemPredEvalParser extends BaseTest { "content: anything | {false}? 
.;\n" + "anything: ANY_CHAR;\n" + "ANY_CHAR: [_a-zA-Z0-9];"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "cppCompilationUnit", "hello", false); + String found = execParser("T.g4", grammar, "TParser", "TLexer", "cppCompilationUnit", "hello", true); assertEquals("", found); assertNull(this.stderrDuringParse); } @@ -405,20 +405,20 @@ public class TestSemPredEvalParser extends BaseTest { "NL : '\\n' ;\n" + "s : 's' ;\n" + "X : 'x' ;"; - return execParser("T.g4", grammar, "TParser", "TLexer", "file_", input, false); + return execParser("T.g4", grammar, "TParser", "TLexer", "file_", input, true); } @Test public void testPredFromAltTestedInLoopBack_1() throws Exception { String found = testPredFromAltTestedInLoopBack("s\n\n\nx\n"); - assertEquals("(file_ (para (paraContent s) \n \n) (para (paraContent \n x \n)) )\n", found); + assertEquals("(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n", found); assertEquals("line 5:2 mismatched input '' expecting '\n'\n", this.stderrDuringParse); } @Test public void testPredFromAltTestedInLoopBack_2() throws Exception { String found = testPredFromAltTestedInLoopBack("s\n\n\nx\n\n"); - assertEquals("(file_ (para (paraContent s) \n \n) (para (paraContent \n x) \n \n) )\n", found); + assertEquals("(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) )\n", found); assertNull(this.stderrDuringParse); } diff --git a/tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java deleted file mode 100644 index 1be6d0699..000000000 --- a/tool/test/org/antlr/v4/test/tool/TestSemPredEvalParser.java +++ /dev/null @@ -1,626 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.test.tool; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class TestSemPredEvalParser extends BaseTest { - // TEST VALIDATING PREDS - - @Test public void testSimpleValidate() throws Exception { - String grammar = - "grammar T;\n" + - "s : a ;\n" + - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - /*String found = */execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x", false); - - String expecting = "line 1:0 no viable alternative at input 'x'\n"; - assertEquals(expecting, stderrDuringParse); - } - - @Test public void testSimpleValidate2() throws Exception { - String grammar = - "grammar T;\n" + - "s : a a a;\n" + - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "3 4 x", false); - String expecting = - "alt 2\n" + - "alt 2\n"; - assertEquals(expecting, found); - - expecting = "line 1:4 no viable alternative at input 'x'\n"; - assertEquals(expecting, stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#196 - * "element+ in expression grammar doesn't parse properly" - * https://github.com/antlr/antlr4/issues/196 - */ - @Test public void testAtomWithClosureInTranslatedLRRule() throws Exception { - String grammar = - "grammar T;\n" + - "start : e[0] EOF;\n" + - "e[int _p]\n" + - " : ( 'a'\n" + - " | 'b'+\n" + - " )\n" + - " ( {3 >= $_p}? '+' e[4]\n" + - " )*\n" + - " ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", - "a+b+a", false); - String expecting = ""; - assertEquals(expecting, found); - assertNull(stderrDuringParse); - } - - @Test public void testValidateInDFA() throws Exception { - String grammar = - "grammar T;\n" + - "s : a ';' a;\n" + - // ';' helps us to resynchronize without consuming - // 2nd 'a' reference. We our testing that the DFA also - // throws an exception if the validating predicate fails - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x ; y", false); - String expecting = ""; - assertEquals(expecting, found); - - expecting = - "line 1:0 no viable alternative at input 'x'\n" + - "line 1:4 no viable alternative at input 'y'\n"; - assertEquals(expecting, stderrDuringParse); - } - - // TEST DISAMBIG PREDS - - @Test public void testSimple() throws Exception { - String grammar = - "grammar T;\n" + - "s : a a a;\n" + // do 3x: once in ATN, next in DFA then INT in ATN - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? 
ID {System.out.println(\"alt 2\");}\n" + - " | INT {System.out.println(\"alt 3\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x y 3", false); - String expecting = - "alt 2\n" + - "alt 2\n" + - "alt 3\n"; - assertEquals(expecting, found); - } - - @Test public void testOrder() throws Exception { - // Under new predicate ordering rules (see antlr/antlr4#29), the first - // alt with an acceptable config (unpredicated, or predicated and evaluates - // to true) is chosen. - String grammar = - "grammar T;\n" + - "s : a {} a;\n" + // do 2x: once in ATN, next in DFA; - // action blocks lookahead from falling off of 'a' - // and looking into 2nd 'a' ref. !ctx dependent pred - "a : ID {System.out.println(\"alt 1\");}\n" + - " | {true}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x y", false); - String expecting = - "alt 1\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - @Test public void test2UnpredicatedAlts() throws Exception { - // We have n-2 predicates for n alternatives. pick first alt - String grammar = - "grammar T;\n" + - "@header {" + - "import java.util.*;" + - "}" + - "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " a ';' a;\n" + // do 2x: once in ATN, next in DFA - "a : ID {System.out.println(\"alt 1\");}\n" + - " | ID {System.out.println(\"alt 2\");}\n" + - " | {false}? ID {System.out.println(\"alt 3\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x; y", true); - String expecting = - "alt 1\n" + - "alt 1\n"; - assertEquals(expecting, found); - assertEquals("line 1:0 reportAttemptingFullContext d=0 (a), input='x'\n" + - "line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\n" + - "line 1:3 reportAttemptingFullContext d=0 (a), input='y'\n" + - "line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n", - this.stderrDuringParse); - } - - @Test public void test2UnpredicatedAltsAndOneOrthogonalAlt() throws Exception { - String grammar = - "grammar T;\n" + - "@header {" + - "import java.util.*;" + - "}" + - "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " a ';' a ';' a;\n" + - "a : INT {System.out.println(\"alt 1\");}\n" + - " | ID {System.out.println(\"alt 2\");}\n" + // must pick this one for ID since pred is false - " | ID {System.out.println(\"alt 3\");}\n" + - " | {false}? ID {System.out.println(\"alt 4\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "34; x; y", true); - String expecting = - "alt 1\n" + - "alt 2\n" + - "alt 2\n"; - assertEquals(expecting, found); - assertEquals("line 1:4 reportAttemptingFullContext d=0 (a), input='x'\n" + - "line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\n" + - "line 1:7 reportAttemptingFullContext d=0 (a), input='y'\n" + - "line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n", - this.stderrDuringParse); - } - - @Test public void testRewindBeforePredEval() throws Exception { - // The parser consumes ID and moves to the 2nd token INT. 
- // To properly evaluate the predicates after matching ID INT, - // we must correctly see come back to starting index so LT(1) works - String grammar = - "grammar T;\n" + - "s : a a;\n" + - "a : {_input.LT(1).getText().equals(\"x\")}? ID INT {System.out.println(\"alt 1\");}\n" + - " | {_input.LT(1).getText().equals(\"y\")}? ID INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "y 3 x 4", false); - String expecting = - "alt 2\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - @Test public void testNoTruePredsThrowsNoViableAlt() throws Exception { - // checks that we throw exception if all alts - // are covered with a predicate and none succeeds - String grammar = - "grammar T;\n" + - "s : a a;\n" + - "a : {false}? ID INT {System.out.println(\"alt 1\");}\n" + - " | {false}? ID INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - execParser("T.g4", grammar, "TParser", "TLexer", "s", - "y 3 x 4", false); - String expecting = "line 1:0 no viable alternative at input 'y'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testToLeft() throws Exception { - String grammar = - "grammar T;\n" + - "s : a+ ;\n" + - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x x y", false); - String expecting = - "alt 2\n" + - "alt 2\n" + - "alt 2\n"; - assertEquals(expecting, found); - } - - @Test - public void testUnpredicatedPathsInAlt() throws Exception{ - String grammar = - "grammar T;\n" + - "s : a {System.out.println(\"alt 1\");}\n" + - " | b {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "a : {false}? ID INT\n" + - " | ID INT\n" + - " ;\n" + - "b : ID ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x 4", false); - String expecting = - "alt 1\n"; - assertEquals(expecting, found); - - expecting = null; - assertEquals(expecting, stderrDuringParse); - } - - @Test public void testActionHidesPreds() throws Exception { - // can't see preds, resolves to first alt found (1 in this case) - String grammar = - "grammar T;\n" + - "@parser::members {int i;}\n" + - "s : a+ ;\n" + - "a : {i=1;} ID {i==1}? {System.out.println(\"alt 1\");}\n" + - " | {i=2;} ID {i==2}? {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x x y", false); - String expecting = - "alt 1\n" + - "alt 1\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - /** In this case, we use predicates that depend on global information - * like we would do for a symbol table. We simply execute - * the predicates assuming that all necessary information is available. - * The i++ action is done outside of the prediction and so it is executed. 
- */ - @Test public void testToLeftWithVaryingPredicate() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {int i=0;}\n" + - "s : ({i++; System.out.println(\"i=\"+i);} a)+ ;\n" + - "a : {i % 2 == 0}? ID {System.out.println(\"alt 1\");}\n" + - " | {i % 2 != 0}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x x y", false); - String expecting = - "i=1\n" + - "alt 2\n" + - "i=2\n" + - "alt 1\n" + - "i=3\n" + - "alt 2\n"; - assertEquals(expecting, found); - } - - /** - * In this case, we're passing a parameter into a rule that uses that - * information to predict the alternatives. This is the special case - * where we know exactly which context we are in. The context stack - * is empty and we have not dipped into the outer context to make a decision. - */ - @Test public void testPredicateDependentOnArg() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {int i=0;}\n" + - "s : a[2] a[1];\n" + - "a[int i]" + - " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a b", false); - String expecting = - "alt 2\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - /** In this case, we have to ensure that the predicates are not - tested during the closure after recognizing the 1st ID. The - closure will fall off the end of 'a' 1st time and reach into the - a[1] rule invocation. It should not execute predicates because it - does not know what the parameter is. The context stack will not - be empty and so they should be ignored. It will not affect - recognition, however. We are really making sure the ATN - simulation doesn't crash with context object issues when it - encounters preds during FOLLOW. - */ - @Test public void testPredicateDependentOnArg2() throws Exception { - String grammar = - "grammar T;\n" + - "s : a[2] a[1];\n" + - "a[int i]" + - " : {$i==1}? ID\n" + - " | {$i==2}? ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a b", false); - String expecting = - ""; - assertEquals(expecting, found); - } - - @Test public void testDependentPredNotInOuterCtxShouldBeIgnored() throws Exception { - // uses ID ';' or ID '.' lookahead to solve s. preds not tested. - String grammar = - "grammar T;\n" + - "s : b[2] ';' | b[2] '.' ;\n" + // decision in s drills down to ctx-dependent pred in a; - "b[int i] : a[i] ;\n" + - "a[int i]" + - " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + - " ;" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a;", false); - String expecting = - "alt 2\n"; - assertEquals(expecting, found); - } - - @Test public void testIndependentPredNotPassedOuterCtxToAvoidCastException() throws Exception { - String grammar = - "grammar T;\n" + - "s : b ';' | b '.' ;\n" + - "b : a ;\n" + - "a" + - " : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? 
ID {System.out.println(\"alt 2\");}\n" + - " ;" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a;", false); - String expecting = - "alt 2\n"; - assertEquals(expecting, found); - } - - /** During a global follow operation, we still collect semantic - * predicates as long as they are not dependent on local context - */ - @Test public void testPredsInGlobalFOLLOW() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {" + - "void f(Object s) {System.out.println(s);}\n" + - "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + - "}\n" + - "s : e {p(true)}? {f(\"parse\");} '!' ;\n" + - "t : e {p(false)}? ID ;\n" + - "e : ID | ;\n" + // non-LL(1) so we use ATN - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a!", false); - String expecting = - "eval=true\n" + // now we are parsing - "parse\n"; - assertEquals(expecting, found); - } - - /** We cannot collect predicates that are dependent on local context if - * we are doing a global follow. They appear as if they were not there at all. - */ - @Test public void testDepedentPredsInGlobalFOLLOW() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {" + - "void f(Object s) {System.out.println(s);}\n" + - "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + - "}\n" + - "s : a[99] ;\n" + - "a[int i] : e {p($i==99)}? {f(\"parse\");} '!' ;\n" + - "b[int i] : e {p($i==99)}? ID ;\n" + - "e : ID | ;\n" + // non-LL(1) so we use ATN - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a!", false); - String expecting = - "eval=true\n" + - "parse\n"; - assertEquals(expecting, found); - } - - /** Regular non-forced actions can create side effects used by semantic - * predicates and so we cannot evaluate any semantic predicate - * encountered after having seen a regular action. This includes - * during global follow operations. - */ - @Test public void testActionsHidePredsInGlobalFOLLOW() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {" + - "void f(Object s) {System.out.println(s);}\n" + - "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + - "}\n" + - "s : e {} {p(true)}? {f(\"parse\");} '!' ;\n" + - "t : e {} {p(false)}? ID ;\n" + - "e : ID | ;\n" + // non-LL(1) so we use ATN - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a!", false); - String expecting = - "eval=true\n" + - "parse\n"; - assertEquals(expecting, found); - } - - @Test public void testPredTestedEvenWhenUnAmbig() throws Exception { - String grammar = - "grammar T;\n" + - "\n" + - "@parser::members {boolean enumKeyword = true;}\n" + - "\n" + - "primary\n" + - " : ID {System.out.println(\"ID \"+$ID.text);}\n" + - " | {!enumKeyword}? 
'enum' {System.out.println(\"enum\");}\n" + - " ;\n" + - "\n" + - "ID : [a-z]+ ;\n" + - "\n" + - "WS : [ \\t\\n\\r]+ -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "primary", - "abc", false); - assertEquals("ID abc\n", found); - - execParser("T.g4", grammar, "TParser", "TLexer", "primary", - "enum", false); - assertEquals("line 1:0 no viable alternative at input 'enum'\n", stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". - * https://github.com/antlr/antlr4/issues/218 - */ - @Test public void testDisabledAlternative() { - String grammar = - "grammar AnnotProcessor;\n" + - "\n" + - "cppCompilationUnit : content+ EOF;\n" + - "\n" + - "content: anything | {false}? .;\n" + - "\n" + - "anything: ANY_CHAR;\n" + - "\n" + - "ANY_CHAR: [_a-zA-Z0-9];\n"; - - String input = "hello"; - String found = execParser("AnnotProcessor.g4", grammar, "AnnotProcessorParser", "AnnotProcessorLexer", "cppCompilationUnit", - input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** Loopback doesn't eval predicate at start of alt */ - @Test public void testPredFromAltTestedInLoopBack() { - String grammar = - "grammar T2;\n" + - "\n" + - "file\n" + - "@after {System.out.println($ctx.toStringTree(this));}\n" + - " : para para EOF ;" + - "para: paraContent NL NL ;\n"+ - "paraContent : ('s'|'x'|{_input.LA(2)!=NL}? NL)+ ;\n"+ - "NL : '\\n' ;\n"+ - "S : 's' ;\n"+ - "X : 'x' ;\n"; - - String input = "s\n\n\nx\n"; - String found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file", - input, true); - assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n", found); - assertEquals(stderrDuringParse, "line 5:2 mismatched input '' expecting '\n'\n"); - - input = "s\n\n\nx\n\n"; - found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file", - input, true); - assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) )\n", found); - - assertNull(stderrDuringParse); - } -} From 129cbbbeced3c8f2d9bb73875c61514e33e22c5f Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 25 Oct 2014 04:07:49 +0800 Subject: [PATCH 22/26] java tests validated --- .../org/antlr/v4/test/rt/gen/Generator.java | 12 +- .../FullContextIF_THEN_ELSEParse.st | 2 +- .../test/rt/java/TestFullContextParsing.java | 8 +- .../v4/test/tool/TestATNConstruction.java | 18 +- .../v4/test/tool/TestActionTranslation.java | 1 + .../v4/test/tool/TestCodeGeneration.java | 2 +- .../v4/test/tool/TestCommonTokenStream.java | 4 +- .../v4/test/tool/TestParserProfiler.java | 1 + .../antlr/v4/test/tool/TestPerformance.java | 3 +- .../antlr/v4/test/tool/TestScopeParsing.java | 1 - .../test/tool/TestUnbufferedCharStream.java | 1 + .../test/tool/TestUnbufferedTokenStream.java | 1 + tool/test/org/antlr/v4/xtest/BaseTest.java | 1339 ----------- tool/test/org/antlr/v4/xtest/ErrorQueue.java | 108 - tool/test/org/antlr/v4/xtest/Java-LR.g4 | 1248 ---------- tool/test/org/antlr/v4/xtest/Java.g4 | 1332 ----------- .../v4/xtest/JavaUnicodeInputStream.java | 267 --- .../v4/xtest/ParserInterpreterForTesting.java | 132 -- .../antlr/v4/xtest/PositionAdjustingLexer.g4 | 141 -- tool/test/org/antlr/v4/xtest/Psl.g4 | 348 --- .../org/antlr/v4/xtest/TestASTStructure.gunit | 155 -- .../org/antlr/v4/xtest/TestASTStructure.java | 406 ---- .../antlr/v4/xtest/TestATNConstruction.java | 981 -------- .../v4/xtest/TestATNDeserialization.java | 189 -- .../antlr/v4/xtest/TestATNInterpreter.java | 409 ---- 
.../v4/xtest/TestATNLexerInterpreter.java | 325 --- .../v4/xtest/TestATNParserPrediction.java | 531 ----- .../antlr/v4/xtest/TestATNSerialization.java | 737 ------ .../antlr/v4/xtest/TestActionSplitter.java | 82 - .../antlr/v4/xtest/TestActionTranslation.java | 424 ---- .../antlr/v4/xtest/TestAttributeChecks.java | 273 --- .../v4/xtest/TestBasicSemanticErrors.java | 117 - .../v4/xtest/TestBufferedTokenStream.java | 180 -- .../antlr/v4/xtest/TestCodeGeneration.java | 162 -- .../antlr/v4/xtest/TestCommonTokenStream.java | 309 --- .../antlr/v4/xtest/TestCompositeGrammars.java | 820 ------- .../org/antlr/v4/xtest/TestFastQueue.java | 134 -- .../v4/xtest/TestFullContextParsing.java | 356 --- .../org/antlr/v4/xtest/TestGraphNodes.java | 906 -------- .../org/antlr/v4/xtest/TestIntervalSet.java | 453 ---- .../org/antlr/v4/xtest/TestLeftRecursion.java | 732 ------ .../org/antlr/v4/xtest/TestLexerActions.java | 283 --- .../org/antlr/v4/xtest/TestLexerErrors.java | 213 -- .../org/antlr/v4/xtest/TestLexerExec.java | 690 ------ .../org/antlr/v4/xtest/TestListeners.java | 226 -- .../org/antlr/v4/xtest/TestParseErrors.java | 376 --- .../antlr/v4/xtest/TestParseTreeMatcher.java | 464 ---- .../org/antlr/v4/xtest/TestParseTrees.java | 154 -- .../org/antlr/v4/xtest/TestParserExec.java | 597 ----- .../antlr/v4/xtest/TestParserInterpreter.java | 235 -- .../antlr/v4/xtest/TestParserProfiler.java | 280 --- .../org/antlr/v4/xtest/TestPerformance.java | 2031 ----------------- .../org/antlr/v4/xtest/TestScopeParsing.java | 68 - .../antlr/v4/xtest/TestSemPredEvalLexer.java | 183 -- .../antlr/v4/xtest/TestSemPredEvalParser.java | 626 ----- tool/test/org/antlr/v4/xtest/TestSets.java | 283 --- .../org/antlr/v4/xtest/TestSymbolIssues.java | 171 -- .../v4/xtest/TestTokenPositionOptions.java | 179 -- .../v4/xtest/TestTokenStreamRewriter.java | 884 ------- .../v4/xtest/TestTokenTypeAssignment.java | 214 -- .../antlr/v4/xtest/TestToolSyntaxErrors.java | 656 ------ .../antlr/v4/xtest/TestTopologicalSort.java | 117 - .../v4/xtest/TestUnbufferedCharStream.java | 367 --- .../v4/xtest/TestUnbufferedTokenStream.java | 223 -- .../org/antlr/v4/xtest/TestVocabulary.java | 79 - tool/test/org/antlr/v4/xtest/TestXPath.java | 228 -- 66 files changed, 29 insertions(+), 23448 deletions(-) delete mode 100644 tool/test/org/antlr/v4/xtest/BaseTest.java delete mode 100644 tool/test/org/antlr/v4/xtest/ErrorQueue.java delete mode 100644 tool/test/org/antlr/v4/xtest/Java-LR.g4 delete mode 100644 tool/test/org/antlr/v4/xtest/Java.g4 delete mode 100644 tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java delete mode 100644 tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java delete mode 100644 tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 delete mode 100644 tool/test/org/antlr/v4/xtest/Psl.g4 delete mode 100644 tool/test/org/antlr/v4/xtest/TestASTStructure.gunit delete mode 100644 tool/test/org/antlr/v4/xtest/TestASTStructure.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestATNConstruction.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestATNDeserialization.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestATNInterpreter.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestATNSerialization.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestActionSplitter.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestActionTranslation.java delete 
mode 100644 tool/test/org/antlr/v4/xtest/TestAttributeChecks.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestCodeGeneration.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestFastQueue.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestFullContextParsing.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestGraphNodes.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestIntervalSet.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestLeftRecursion.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestLexerActions.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestLexerErrors.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestLexerExec.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestListeners.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestParseErrors.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestParseTrees.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestParserExec.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestParserInterpreter.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestParserProfiler.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestPerformance.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestScopeParsing.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestSets.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestSymbolIssues.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestTopologicalSort.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestVocabulary.java delete mode 100644 tool/test/org/antlr/v4/xtest/TestXPath.java diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index 16f7e0669..b6e062876 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -975,8 +975,8 @@ public class Generator { "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", "{ if x then if y then return else foo }", "Decision 1:\n" + - "s0-'else'->:s1^=>1\n" + - "s0-'}'->:s2=>2\n", + "s0-'}'->:s2=>2\n" + + "s0-'else'->:s1^=>1\n", "line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", // should not be ambiguous because the second 'else bar' clearly @@ -992,8 +992,8 @@ public class Generator { "{ if x then return else foo\n" + "if x then if y then return else foo }", "Decision 1:\n" + - "s0-'else'->:s1^=>1\n" + - "s0-'}'->:s2=>2\n", + "s0-'}'->:s2=>2\n" + + 
"s0-'else'->:s1^=>1\n", "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + @@ -1001,8 +1001,8 @@ public class Generator { "{ if x then return else foo\n" + "if x then if y then return else foo }", "Decision 1:\n" + - "s0-'else'->:s1^=>1\n" + - "s0-'}'->:s2=>2\n", + "s0-'}'->:s2=>2\n" + + "s0-'else'->:s1^=>1\n", "line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st index a7b8047fa..1dade2a7e 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/FullContextParsing/FullContextIF_THEN_ELSEParse.st @@ -4,7 +4,7 @@ s @after {} : '{' stat* '}' ; stat: 'if' ID 'then' stat ('else' ID)? - | 'return + | 'return' ; ID : 'a'..'z'+ ; WS : (' '|'\t'|'\n')+ -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java index ab232c31b..6467a0a17 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestFullContextParsing.java @@ -83,7 +83,7 @@ public class TestFullContextParsing extends BaseTest { "@after {this.dumpDFA();}\n" + " : '{' stat* '}' ;\n" + "stat: 'if' ID 'then' stat ('else' ID)?\n" + - " | 'return\n" + + " | 'return'\n" + " ;\n" + "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\t'|'\\n')+ -> skip ;"; @@ -107,7 +107,7 @@ public class TestFullContextParsing extends BaseTest { @Test public void testFullContextIF_THEN_ELSEParse_3() throws Exception { String found = testFullContextIF_THEN_ELSEParse("{ if x then if y then return else foo }"); - assertEquals("Decision 1:\ns0-'else'->:s1^=>1\ns0-'}'->:s2=>2\n", found); + assertEquals("Decision 1:\ns0-'}'->:s2=>2\ns0-'else'->:s1^=>1\n", found); assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse); } @@ -121,14 +121,14 @@ public class TestFullContextParsing extends BaseTest { @Test public void testFullContextIF_THEN_ELSEParse_5() throws Exception { String found = testFullContextIF_THEN_ELSEParse("{ if x then return else foo\nif x then if y then return else foo }"); - assertEquals("Decision 1:\ns0-'else'->:s1^=>1\ns0-'}'->:s2=>2\n", found); + assertEquals("Decision 1:\ns0-'}'->:s2=>2\ns0-'else'->:s1^=>1\n", found); assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\nline 1:19 reportContextSensitivity d=1 (stat), input='else'\nline 2:27 reportAttemptingFullContext d=1 (stat), input='else'\nline 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse); } @Test public void testFullContextIF_THEN_ELSEParse_6() throws Exception { String found = testFullContextIF_THEN_ELSEParse("{ if x then return else foo\nif x then if y then return else foo }"); - assertEquals("Decision 1:\ns0-'else'->:s1^=>1\ns0-'}'->:s2=>2\n", found); + assertEquals("Decision 1:\ns0-'}'->:s2=>2\ns0-'else'->:s1^=>1\n", found); assertEquals("line 1:19 
reportAttemptingFullContext d=1 (stat), input='else'\nline 1:19 reportContextSensitivity d=1 (stat), input='else'\nline 2:27 reportAttemptingFullContext d=1 (stat), input='else'\nline 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", this.stderrDuringParse); } diff --git a/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java b/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java index 3afc2edf0..1affc18b5 100644 --- a/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java +++ b/tool/test/org/antlr/v4/test/tool/TestATNConstruction.java @@ -29,6 +29,14 @@ */ package org.antlr.v4.test.tool; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + import org.antlr.v4.Tool; import org.antlr.v4.automata.ATNPrinter; import org.antlr.v4.automata.LexerATNFactory; @@ -44,16 +52,6 @@ import org.antlr.v4.tool.ast.GrammarRootAST; import org.antlr.v4.tool.ast.RuleAST; import org.junit.Test; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - public class TestATNConstruction extends BaseTest { @Test public void testA() throws Exception { diff --git a/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java b/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java index ed7542706..847cf515c 100644 --- a/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java +++ b/tool/test/org/antlr/v4/test/tool/TestActionTranslation.java @@ -33,6 +33,7 @@ package org.antlr.v4.test.tool; import org.junit.Test; /** */ +@SuppressWarnings("unused") public class TestActionTranslation extends BaseTest { String attributeTemplate = "attributeTemplate(members,init,inline,finally,inline2) ::= <<\n" + diff --git a/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java b/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java index 32f76266c..d5443110e 100644 --- a/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java +++ b/tool/test/org/antlr/v4/test/tool/TestCodeGeneration.java @@ -56,7 +56,7 @@ import static org.junit.Assert.assertFalse; public class TestCodeGeneration extends BaseTest { @Test public void testArgDecl() throws Exception { // should use template not string - ErrorQueue equeue = new ErrorQueue(); + /*ErrorQueue equeue = */new ErrorQueue(); String g = "grammar T;\n" + "a[int xyz] : 'a' ;\n"; diff --git a/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java b/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java index 02f3688ee..ee28804fe 100644 --- a/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java +++ b/tool/test/org/antlr/v4/test/tool/TestCommonTokenStream.java @@ -55,7 +55,8 @@ public class TestCommonTokenStream extends TestBufferedTokenStream { TokenSource lexer = // simulate input " x =34 ;\n" new TokenSource() { int i = 0; - WritableToken[] tokens = { + @SuppressWarnings("serial") + WritableToken[] tokens = { new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, new CommonToken(1,"x"), new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, @@ -125,6 +126,7 @@ public class TestCommonTokenStream extends TestBufferedTokenStream { // token indexes 01234 56789 new TokenSource() { int i = 0; + @SuppressWarnings("serial") WritableToken[] tokens = { new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 0 
new CommonToken(1,"x"), // 1 diff --git a/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java b/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java index 09d9e4a12..f5068d7c7 100644 --- a/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java +++ b/tool/test/org/antlr/v4/test/tool/TestParserProfiler.java @@ -46,6 +46,7 @@ import java.util.Arrays; import static org.junit.Assert.assertEquals; +@SuppressWarnings("unused") public class TestParserProfiler extends BaseTest { LexerGrammar lg; diff --git a/tool/test/org/antlr/v4/test/tool/TestPerformance.java b/tool/test/org/antlr/v4/test/tool/TestPerformance.java index d87df1095..8b4f0a38b 100644 --- a/tool/test/org/antlr/v4/test/tool/TestPerformance.java +++ b/tool/test/org/antlr/v4/test/tool/TestPerformance.java @@ -107,6 +107,7 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +@SuppressWarnings("unused") public class TestPerformance extends BaseTest { /** * Parse all java files under this package within the JDK_SOURCE_ROOT @@ -790,7 +791,6 @@ public class TestPerformance extends BaseTest { int configOutputSize = 0; - @SuppressWarnings("unused") protected void parseSources(final int currentPass, final ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { if (shuffleSources) { List sourcesList = new ArrayList(sources); @@ -1168,6 +1168,7 @@ public class TestPerformance extends BaseTest { parserCtor.newInstance(new CommonTokenStream(tokenSource)); return new ParserFactory() { + @Override public FileParseResult parseFile(CharStream input, int currentPass, int thread) { final Checksum checksum = new CRC32(); diff --git a/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java b/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java index f284cecfa..814add063 100644 --- a/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java +++ b/tool/test/org/antlr/v4/test/tool/TestScopeParsing.java @@ -31,7 +31,6 @@ package org.antlr.v4.test.tool; import org.antlr.v4.parse.ScopeParser; -import org.antlr.v4.tool.ErrorManager; import org.antlr.v4.tool.Grammar; import org.junit.Test; diff --git a/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java b/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java index f23ab3316..4eea763ec 100644 --- a/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java +++ b/tool/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java @@ -45,6 +45,7 @@ import java.io.StringReader; import static org.junit.Assert.assertEquals; +@SuppressWarnings("unused") public class TestUnbufferedCharStream extends BaseTest { @Test public void testNoChar() throws Exception { CharStream input = createStream(""); diff --git a/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java b/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java index 7fb3e773d..1dea48c61 100644 --- a/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java +++ b/tool/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java @@ -47,6 +47,7 @@ import java.util.List; import static org.junit.Assert.assertEquals; +@SuppressWarnings("unused") public class TestUnbufferedTokenStream extends BaseTest { @Test public void testLookahead() throws Exception { LexerGrammar g = new LexerGrammar( diff --git a/tool/test/org/antlr/v4/xtest/BaseTest.java b/tool/test/org/antlr/v4/xtest/BaseTest.java deleted file mode 100644 index 6db87bea7..000000000 --- a/tool/test/org/antlr/v4/xtest/BaseTest.java +++ 
/dev/null @@ -1,1339 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.xtest; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.DecisionState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.runtime.misc.NotNull; -import org.antlr.v4.runtime.misc.Nullable; -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.DefaultToolListener; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.junit.Before; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import 
org.stringtemplate.v4.STGroupString; - -import javax.tools.JavaCompiler; -import javax.tools.JavaFileObject; -import javax.tools.StandardJavaFileManager; -import javax.tools.ToolProvider; -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.PipedInputStream; -import java.io.PipedOutputStream; -import java.io.PrintStream; -import java.io.StringReader; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.URLClassLoader; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.logging.Level; -import java.util.logging.Logger; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public abstract class BaseTest { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); - - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess")); - - /** - * Build up the full classpath we need, including the surefire path (if present) - */ - public static final String CLASSPATH = System.getProperty("java.class.path"); - - public String tmpdir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - @org.junit.Rule - public final TestRule testWatcher = new TestWatcher() { - - @Override - protected void succeeded(Description description) { - // remove tmpdir if no error. 
- eraseTempDir(); - } - - }; - - @Before - public void setUp() throws Exception { - // new output dir for each test - tmpdir = new File(System.getProperty("java.io.tmpdir"), - getClass().getSimpleName()+"-"+System.currentTimeMillis()).getAbsolutePath(); -// tmpdir = "/tmp"; - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if ( g.isLexer() ) { - f = new LexerATNFactory((LexerGrammar)g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - public DFA createDFA(Grammar g, DecisionState s) { -// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); -// DFA dfa = conv.createDFA(); -// conv.issueAmbiguityWarnings(); -// System.out.print("DFA="+dfa); -// return dfa; - return null; - } - -// public void minimizeDFA(DFA dfa) { -// DFAMinimizer dmin = new DFAMinimizer(dfa); -// dfa.minimized = dmin.minimize(); -// } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if ( expecting!=null && !expecting.trim().isEmpty() ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) - { - LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype == Token.EOF ) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if ( t==IntStream.EOF ) { - hitEOF = true; - } - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - List checkRuleDFA(String gtext, String ruleName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; - if ( s==null ) { - System.err.println("no such rule: "+ruleName); - return 
null; - } - ATNState t = s.transition(0).target; - if ( !(t instanceof DecisionState) ) { - System.out.println(ruleName+" has no decision"); - return null; - } - DecisionState blk = (DecisionState)t; - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - List checkRuleDFA(String gtext, int decision, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(gtext, equeue); - ATN atn = createATN(g, false); - DecisionState blk = atn.decisionToState.get(decision); - checkRuleDFA(g, blk, expecting); - return equeue.all; - } - - void checkRuleDFA(Grammar g, DecisionState blk, String expecting) - throws Exception - { - DFA dfa = createDFA(g, blk); - String result = null; - if ( dfa!=null ) result = dfa.toString(); - assertEquals(expecting, result); - } - - List checkLexerDFA(String gtext, String expecting) - throws Exception - { - return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); - } - - List checkLexerDFA(String gtext, String modeName, String expecting) - throws Exception - { - ErrorQueue equeue = new ErrorQueue(); - LexerGrammar g = new LexerGrammar(gtext, equeue); - g.atn = createATN(g, false); -// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); -// DFA dfa = conv.createDFA(modeName); -// g.setLookaheadDFA(0, dfa); // only one decision to worry about -// -// String result = null; -// if ( dfa!=null ) result = dfa.toString(); -// assertEquals(expecting, result); -// -// return equeue.all; - return null; - } - - protected String load(String fileName, @Nullable String encoding) - throws IOException - { - if ( fileName==null ) { - return null; - } - - String fullFileName = getClass().getPackage().getName().replace('.', '/') + '/' + fileName; - int size = 65000; - InputStreamReader isr; - InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); - if ( encoding!=null ) { - isr = new InputStreamReader(fis, encoding); - } - else { - isr = new InputStreamReader(fis); - } - try { - char[] data = new char[size]; - int n = isr.read(data); - return new String(data, 0, n); - } - finally { - isr.close(); - } - } - - /** Wow! much faster than compiling outside of VM. Finicky though. - * Had rules called r and modulo. Wouldn't compile til I changed to 'a'. - */ - protected boolean compile(String... 
fileNames) { - List files = new ArrayList(); - for (String fileName : fileNames) { - File f = new File(tmpdir, fileName); - files.add(f); - } - - JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); -// DiagnosticCollector diagnostics = -// new DiagnosticCollector(); - - StandardJavaFileManager fileManager = - compiler.getStandardFileManager(null, null, null); - - Iterable compilationUnits = - fileManager.getJavaFileObjectsFromFiles(files); - - Iterable compileOptions = - Arrays.asList("-g", "-source", "1.6", "-target", "1.6", "-implicit:class", "-Xlint:-options", "-d", tmpdir, "-cp", tmpdir+pathSep+CLASSPATH); - - JavaCompiler.CompilationTask task = - compiler.getTask(null, fileManager, null, compileOptions, null, - compilationUnits); - boolean ok = task.call(); - - try { - fileManager.close(); - } - catch (IOException ioe) { - ioe.printStackTrace(System.err); - } - -// List errors = new ArrayList(); -// for (Diagnostic diagnostic : diagnostics.getDiagnostics()) { -// errors.add( -// String.valueOf(diagnostic.getLineNumber())+ -// ": " + diagnostic.getMessage(null)); -// } -// if ( errors.size()>0 ) { -// System.err.println("compile stderr from: "+cmdLine); -// System.err.println(errors); -// return false; -// } - return ok; - - /* - File outputDir = new File(tmpdir); - try { - Process process = - Runtime.getRuntime().exec(args, null, outputDir); - StreamVacuum stdout = new StreamVacuum(process.getInputStream()); - StreamVacuum stderr = new StreamVacuum(process.getErrorStream()); - stdout.start(); - stderr.start(); - process.waitFor(); - stdout.join(); - stderr.join(); - if ( stdout.toString().length()>0 ) { - System.err.println("compile stdout from: "+cmdLine); - System.err.println(stdout); - } - if ( stderr.toString().length()>0 ) { - System.err.println("compile stderr from: "+cmdLine); - System.err.println(stderr); - } - int ret = process.exitValue(); - return ret==0; - } - catch (Exception e) { - System.err.println("can't exec compilation"); - e.printStackTrace(System.err); - return false; - } - */ - } - - protected ErrorQueue antlr(String grammarFileName, boolean defaultListener, String... 
extraOptions) { - final List options = new ArrayList(); - Collections.addAll(options, extraOptions); - if ( !options.contains("-o") ) { - options.add("-o"); - options.add(tmpdir); - } - if ( !options.contains("-lib") ) { - options.add("-lib"); - options.add(tmpdir); - } - if ( !options.contains("-encoding") ) { - options.add("-encoding"); - options.add("UTF-8"); - } - options.add(new File(tmpdir,grammarFileName).toString()); - - final String[] optionsA = new String[options.size()]; - options.toArray(optionsA); - Tool antlr = newTool(optionsA); - ErrorQueue equeue = new ErrorQueue(antlr); - antlr.addListener(equeue); - if (defaultListener) { - antlr.addListener(new DefaultToolListener(antlr)); - } - antlr.processGrammarsOnCommandLine(); - - if ( !defaultListener && !equeue.errors.isEmpty() ) { - System.err.println("antlr reports errors from "+options); - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage msg = equeue.errors.get(i); - System.err.println(msg); - } - System.out.println("!!!\ngrammar:"); - try { - System.out.println(new String(Utils.readFile(tmpdir+"/"+grammarFileName))); - } - catch (IOException ioe) { - System.err.println(ioe.toString()); - } - System.out.println("###"); - } - if ( !defaultListener && !equeue.warnings.isEmpty() ) { - System.err.println("antlr reports warnings from "+options); - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage msg = equeue.warnings.get(i); - System.err.println(msg); - } - } - - return equeue; - } - - protected ErrorQueue antlr(String grammarFileName, String grammarStr, boolean defaultListener, String... extraOptions) { - System.out.println("dir "+tmpdir); - mkdir(tmpdir); - writeFile(tmpdir, grammarFileName, grammarStr); - return antlr(grammarFileName, defaultListener, extraOptions); - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) - { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - compile("Test.java"); - String output = execClass("Test"); - if ( stderrDuringParse!=null && stderrDuringParse.length()>0 ) { - System.err.println(stderrDuringParse); - } - return output; - } - - public ParseTree execParser(String startRuleName, String input, - String parserName, String lexerName) - throws Exception - { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - return execStartRule(startRuleName, parser); - } - - public ParseTree execStartRule(String startRuleName, Parser parser) - throws IllegalAccessException, InvocationTargetException, - NoSuchMethodException - { - Method startRule = null; - Object[] args = null; - try { - startRule = parser.getClass().getMethod(startRuleName); - } - catch (NoSuchMethodException nsme) { - // try with int _p arg for recursive func - startRule = parser.getClass().getMethod(startRuleName, int.class); - args = new Integer[] {0}; - } - ParseTree result = (ParseTree)startRule.invoke(parser, args); -// System.out.println("parse tree = "+result.toStringTree(parser)); - return result; - } - - public Pair getParserAndLexer(String input, - String parserName, String lexerName) - throws Exception - { - final Class lexerClass = 
loadLexerClassFromTempDir(lexerName); - final Class parserClass = loadParserClassFromTempDir(parserName); - - ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); - - Class c = lexerClass.asSubclass(Lexer.class); - Constructor ctor = c.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance(in); - - Class pc = parserClass.asSubclass(Parser.class); - Constructor pctor = pc.getConstructor(TokenStream.class); - CommonTokenStream tokens = new CommonTokenStream(lexer); - Parser parser = pctor.newInstance(tokens); - return new Pair(parser, lexer); - } - - public Class loadClassFromTempDir(String name) throws Exception { - ClassLoader loader = - new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, - ClassLoader.getSystemClassLoader()); - return loader.loadClass(name); - } - - public Class loadLexerClassFromTempDir(String name) throws Exception { - return loadClassFromTempDir(name).asSubclass(Lexer.class); - } - - public Class loadParserClassFromTempDir(String name) throws Exception { - return loadClassFromTempDir(name).asSubclass(Parser.class); - } - - protected String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String startRuleName, - String input, boolean debug) - { - return execParser(grammarFileName, grammarStr, parserName, - lexerName, startRuleName, input, debug, false); - } - - protected String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String startRuleName, - String input, boolean debug, - boolean profile) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(tmpdir, "input", input); - return rawExecRecognizer(parserName, - lexerName, - startRuleName, - debug, - profile); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - @Nullable String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - @Nullable String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) - { - ErrorQueue equeue = - antlr(grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".java"); - } - if ( parserName!=null ) { - files.add(parserName+".java"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseListener.java"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"BaseVisitor.java"); - } - } - boolean allIsWell = compile(files.toArray(new String[files.size()])); - return allIsWell; - } - - protected String rawExecRecognizer(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - compile("Test.java"); - return execClass("Test"); - } - - public String execRecognizer() { - return execClass("Test"); - } - - public String execClass(String className) { - if (TEST_IN_SAME_PROCESS) { - try { - ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader()); - final Class mainClass = (Class)loader.loadClass(className); - final Method mainMethod = mainClass.getDeclaredMethod("main", String[].class); - PipedInputStream stdoutIn = new PipedInputStream(); - PipedInputStream stderrIn = new PipedInputStream(); - PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn); - PipedOutputStream stderrOut = new PipedOutputStream(stderrIn); - StreamVacuum stdoutVacuum = new StreamVacuum(stdoutIn); - StreamVacuum stderrVacuum = new StreamVacuum(stderrIn); - - PrintStream originalOut = System.out; - System.setOut(new PrintStream(stdoutOut)); - try { - PrintStream originalErr = System.err; - try { - System.setErr(new PrintStream(stderrOut)); - stdoutVacuum.start(); - stderrVacuum.start(); - mainMethod.invoke(null, (Object)new String[] { new File(tmpdir, "input").getAbsolutePath() }); - } - finally { - System.setErr(originalErr); - } - } - finally { - System.setOut(originalOut); - } - - stdoutOut.close(); - stderrOut.close(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - System.err.println("exec stderrVacuum: "+ stderrVacuum); - } - return output; - } catch (MalformedURLException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (IOException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (InterruptedException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (IllegalAccessException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (IllegalArgumentException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (InvocationTargetException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (NoSuchMethodException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } catch (SecurityException ex) { - LOGGER.log(Level.SEVERE, null, ex); - 
throw new RuntimeException(ex); - } catch (ClassNotFoundException ex) { - LOGGER.log(Level.SEVERE, null, ex); - throw new RuntimeException(ex); - } - } - - try { - String[] args = new String[] { - "java", "-classpath", tmpdir+pathSep+CLASSPATH, - className, new File(tmpdir, "input").getAbsolutePath() - }; - //String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath(); - //System.out.println("execParser: "+cmdLine); - Process process = - Runtime.getRuntime().exec(args, null, new File(tmpdir)); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( stderrVacuum.toString().length()>0 ) { - this.stderrDuringParse = stderrVacuum.toString(); - System.err.println("exec stderrVacuum: "+ stderrVacuum); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - public void testErrors(String[] pairs, boolean printTree) { - for (int i = 0; i < pairs.length; i+=2) { - String input = pairs[i]; - String expect = pairs[i+1]; - - String[] lines = input.split("\n"); - String fileName = getFilenameFromFirstLineOfGrammar(lines[0]); - ErrorQueue equeue = antlr(fileName, input, false); - - String actual = equeue.toString(true); - actual = actual.replace(tmpdir + File.separator, ""); - System.err.println(actual); - String msg = input; - msg = msg.replace("\n","\\n"); - msg = msg.replace("\r","\\r"); - msg = msg.replace("\t","\\t"); - - assertEquals("error in: "+msg,expect,actual); - } - } - - public String getFilenameFromFirstLineOfGrammar(String line) { - String fileName = "A" + Tool.GRAMMAR_EXTENSION; - int grIndex = line.lastIndexOf("grammar"); - int semi = line.lastIndexOf(';'); - if ( grIndex>=0 && semi>=0 ) { - int space = line.indexOf(' ', grIndex); - fileName = line.substring(space+1, semi)+Tool.GRAMMAR_EXTENSION; - } - if ( fileName.length()==Tool.GRAMMAR_EXTENSION.length() ) fileName = "A" + Tool.GRAMMAR_EXTENSION; - return fileName; - } - -// void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) -// throws Exception -// { -// ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); -// } - -// void ambig(List msgs, int i, int[] expectedAmbigAlts, String expectedAmbigInput) -// throws Exception -// { -// List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); -// AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); -// if ( a==null ) assertNull(expectedAmbigAlts); -// else { -// assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); -// } -// assertEquals(expectedAmbigInput, a.input); -// } - -// void unreachable(List msgs, int[] expectedUnreachableAlts) -// throws Exception -// { -// unreachable(msgs, 0, expectedUnreachableAlts); -// } - -// void unreachable(List msgs, int i, int[] expectedUnreachableAlts) -// throws Exception -// { -// List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); -// UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); -// if ( u==null ) assertNull(expectedUnreachableAlts); -// else { -// assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); -// } -// } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) 
{ - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - DOTGenerator dot = new DOTGenerator(g); - System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = g.atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString()); - } - } - - public static class StreamVacuum implements Runnable { - StringBuilder buf = new StringBuilder(); - BufferedReader in; - Thread sucker; - public StreamVacuum(InputStream in) { - this.in = new BufferedReader( new InputStreamReader(in) ); - } - public void start() { - sucker = new Thread(this); - sucker.start(); - } - @Override - public void run() { - try { - String line = in.readLine(); - while (line!=null) { - buf.append(line); - buf.append('\n'); - line = in.readLine(); - } - } - catch (IOException ioe) { - System.err.println("can't read output from process"); - } - } - /** wait for the thread to finish */ - public void join() throws InterruptedException { - sucker.join(); - } - @Override - public String toString() { - return buf.toString(); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; 
"+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, - ANTLRMessage expectedMessage) - throws Exception - { - //System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertTrue("no error; "+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); - assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); - assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); - /* - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - public static void writeFile(String dir, String fileName, String content) { - try { - Utils.writeFile(dir+"/"+fileName, content, "UTF-8"); - } - catch (IOException ioe) { - System.err.println("can't write file"); - ioe.printStackTrace(System.err); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - ST outputFileST = new ST( - "import org.antlr.v4.runtime.*;\n" + - "import org.antlr.v4.runtime.tree.*;\n" + - "import org.antlr.v4.runtime.atn.*;\n" + - "import java.util.Arrays;\n"+ - "\n" + - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " \n"+ - " parser.setBuildParseTree(true);\n" + - " \n"+ - " ParserRuleContext tree = parser.();\n" + - " System.out.println(Arrays.toString(profiler.getDecisionInfo()));\n" + - " ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" + - " }\n" + - "\n" + - " static class TreeShapeListener implements ParseTreeListener {\n" + - " @Override public void visitTerminal(TerminalNode node) { }\n" + - " @Override public void visitErrorNode(ErrorNode node) { }\n" + - " @Override public void exitEveryRule(ParserRuleContext ctx) { }\n" + - "\n" + - " @Override\n" + - " public void enterEveryRule(ParserRuleContext ctx) {\n" + - " for (int i = 0; i \\< ctx.getChildCount(); i++) {\n" + - " ParseTree parent = ctx.getChild(i).getParent();\n" + - " if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) {\n" + - " throw new IllegalStateException(\"Invalid parse tree shape detected.\");\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - ST createParserST = new ST(" parser = new 
(tokens);\n"); - if ( debug ) { - createParserST = - new ST( - " parser = new (tokens);\n" + - " parser.addErrorListener(new DiagnosticErrorListener());\n"); - } - if ( profile ) { - outputFileST.add("profile", - "ProfilingATNSimulator profiler = new ProfilingATNSimulator(parser);\n" + - "parser.setInterpreter(profiler);"); - } - else { - outputFileST.add("profile", new ArrayList()); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.java", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import org.antlr.v4.runtime.*;\n" + - "\n" + - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = new ANTLRFileStream(args[0]);\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " tokens.fill();\n" + - " for (Object t : tokens.getTokens()) System.out.println(t);\n" + - (showDFA?"System.out.print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString());\n":"")+ - " }\n" + - "}" - ); - - outputFileST.add("lexerName", lexerName); - writeFile(tmpdir, "Test.java", outputFileST.render()); - } - - public void writeRecognizerAndCompile(String parserName, String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) { - if ( parserName==null ) { - writeLexerTestFile(lexerName, debug); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - compile("Test.java"); - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles() { - if (tmpdir == null) { - return; - } - - File tmpdirF = new File(tmpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(tmpdir+"/"+files[i]).delete(); - } - } - - protected void eraseTempDir() { - File tmpdirF = new File(tmpdir); - if ( tmpdirF.exists() ) { - eraseFiles(); - tmpdirF.delete(); - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. - * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. 
- */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p=0; - public IntTokenStream(IntegerList types) { this.types = types; } - - @Override - public void consume() { p++; } - - @Override - public int LA(int i) { return LT(i).getType(); } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { return p; } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return UNKNOWN_SOURCE_NAME; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); - else t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - @NotNull - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - @NotNull - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - @NotNull - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - @NotNull - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/tool/test/org/antlr/v4/xtest/ErrorQueue.java b/tool/test/org/antlr/v4/xtest/ErrorQueue.java deleted file mode 100644 index 8cc5aba04..000000000 --- a/tool/test/org/antlr/v4/xtest/ErrorQueue.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.xtest; - -import org.antlr.v4.Tool; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.ANTLRToolListener; -import org.antlr.v4.tool.ToolMessage; -import org.stringtemplate.v4.ST; - -import java.util.ArrayList; -import java.util.List; - -public class ErrorQueue implements ANTLRToolListener { - public final Tool tool; - public final List infos = new ArrayList(); - public final List errors = new ArrayList(); - public final List warnings = new ArrayList(); - public final List all = new ArrayList(); - - public ErrorQueue() { - this(null); - } - - public ErrorQueue(Tool tool) { - this.tool = tool; - } - - @Override - public void info(String msg) { - infos.add(msg); - } - - @Override - public void error(ANTLRMessage msg) { - errors.add(msg); - all.add(msg); - } - - @Override - public void warning(ANTLRMessage msg) { - warnings.add(msg); - all.add(msg); - } - - public void error(ToolMessage msg) { - errors.add(msg); - all.add(msg); - } - - public int size() { - return all.size() + infos.size(); - } - - @Override - public String toString() { - return toString(false); - } - - public String toString(boolean rendered) { - if (!rendered) { - return Utils.join(all.iterator(), "\n"); - } - - if (tool == null) { - throw new IllegalStateException(String.format("No %s instance is available.", Tool.class.getName())); - } - - StringBuilder buf = new StringBuilder(); - for (ANTLRMessage m : all) { - ST st = tool.errMgr.getMessageTemplate(m); - buf.append(st.render()); - buf.append("\n"); - } - - return buf.toString(); - } - -} - diff --git a/tool/test/org/antlr/v4/xtest/Java-LR.g4 b/tool/test/org/antlr/v4/xtest/Java-LR.g4 deleted file mode 100644 index 9d38d029b..000000000 --- a/tool/test/org/antlr/v4/xtest/Java-LR.g4 +++ /dev/null @@ -1,1248 +0,0 @@ -/* - [The "BSD licence"] - Copyright (c) 2007-2008 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -/** A Java 1.5 grammar for ANTLR v3 derived from the spec - * - * This is a very close representation of the spec; the changes - * are comestic (remove left recursion) and also fixes (the spec - * isn't exactly perfect). I have run this on the 1.4.2 source - * and some nasty looking enums from 1.5, but have not really - * tested for 1.5 compatibility. - * - * I built this with: java -Xmx100M org.antlr.Tool java.g - * and got two errors that are ok (for now): - * java.g:691:9: Decision can match input such as - * "'0'..'9'{'E', 'e'}{'+', '-'}'0'..'9'{'D', 'F', 'd', 'f'}" - * using multiple alternatives: 3, 4 - * As a result, alternative(s) 4 were disabled for that input - * java.g:734:35: Decision can match input such as "{'$', 'A'..'Z', - * '_', 'a'..'z', '\u00C0'..'\u00D6', '\u00D8'..'\u00F6', - * '\u00F8'..'\u1FFF', '\u3040'..'\u318F', '\u3300'..'\u337F', - * '\u3400'..'\u3D2D', '\u4E00'..'\u9FFF', '\uF900'..'\uFAFF'}" - * using multiple alternatives: 1, 2 - * As a result, alternative(s) 2 were disabled for that input - * - * You can turn enum on/off as a keyword :) - * - * Version 1.0 -- initial release July 5, 2006 (requires 3.0b2 or higher) - * - * Primary author: Terence Parr, July 2006 - * - * Version 1.0.1 -- corrections by Koen Vanderkimpen & Marko van Dooren, - * October 25, 2006; - * fixed normalInterfaceDeclaration: now uses typeParameters instead - * of typeParameter (according to JLS, 3rd edition) - * fixed castExpression: no longer allows expression next to type - * (according to semantics in JLS, in contrast with syntax in JLS) - * - * Version 1.0.2 -- Terence Parr, Nov 27, 2006 - * java spec I built this from had some bizarre for-loop control. - * Looked weird and so I looked elsewhere...Yep, it's messed up. - * simplified. - * - * Version 1.0.3 -- Chris Hogue, Feb 26, 2007 - * Factored out an annotationName rule and used it in the annotation rule. - * Not sure why, but typeName wasn't recognizing references to inner - * annotations (e.g. @InterfaceName.InnerAnnotation()) - * Factored out the elementValue section of an annotation reference. Created - * elementValuePair and elementValuePairs rules, then used them in the - * annotation rule. Allows it to recognize annotation references with - * multiple, comma separated attributes. - * Updated elementValueArrayInitializer so that it allows multiple elements. - * (It was only allowing 0 or 1 element). - * Updated localVariableDeclaration to allow annotations. Interestingly the JLS - * doesn't appear to indicate this is legal, but it does work as of at least - * JDK 1.5.0_06. - * Moved the Identifier portion of annotationTypeElementRest to annotationMethodRest. 
- * Because annotationConstantRest already references variableDeclarator which - * has the Identifier portion in it, the parser would fail on constants in - * annotation definitions because it expected two identifiers. - * Added optional trailing ';' to the alternatives in annotationTypeElementRest. - * Wouldn't handle an inner interface that has a trailing ';'. - * Swapped the expression and type rule reference order in castExpression to - * make it check for genericized casts first. It was failing to recognize a - * statement like "Class TYPE = (Class)...;" because it was seeing - * 'Class'. - * Changed createdName to use typeArguments instead of nonWildcardTypeArguments. - * Changed the 'this' alternative in primary to allow 'identifierSuffix' rather than - * just 'arguments'. The case it couldn't handle was a call to an explicit - * generic method invocation (e.g. this.doSomething()). Using identifierSuffix - * may be overly aggressive--perhaps should create a more constrained thisSuffix rule? - * - * Version 1.0.4 -- Hiroaki Nakamura, May 3, 2007 - * - * Fixed formalParameterDecls, localVariableDeclaration, forInit, - * and forVarControl to use variableModifier* not 'final'? (annotation)? - * - * Version 1.0.5 -- Terence, June 21, 2007 - * --a[i].foo didn't work. Fixed unaryExpression - * - * Version 1.0.6 -- John Ridgway, March 17, 2008 - * Made "assert" a switchable keyword like "enum". - * Fixed compilationUnit to disallow "annotation importDeclaration ...". - * Changed "Identifier ('.' Identifier)*" to "qualifiedName" in more - * places. - * Changed modifier* and/or variableModifier* to classOrInterfaceModifiers, - * modifiers or variableModifiers, as appropriate. - * Renamed "bound" to "typeBound" to better match language in the JLS. - * Added "memberDeclaration" which rewrites to methodDeclaration or - * fieldDeclaration and pulled type into memberDeclaration. So we parse - * type and then move on to decide whether we're dealing with a field - * or a method. - * Modified "constructorDeclaration" to use "constructorBody" instead of - * "methodBody". constructorBody starts with explicitConstructorInvocation, - * then goes on to blockStatement*. Pulling explicitConstructorInvocation - * out of expressions allowed me to simplify "primary". - * Changed variableDeclarator to simplify it. - * Changed type to use classOrInterfaceType, thus simplifying it; of course - * I then had to add classOrInterfaceType, but it is used in several - * places. - * Fixed annotations, old version allowed "@X(y,z)", which is illegal. - * Added optional comma to end of "elementValueArrayInitializer"; as per JLS. - * Changed annotationTypeElementRest to use normalClassDeclaration and - * normalInterfaceDeclaration rather than classDeclaration and - * interfaceDeclaration, thus getting rid of a couple of grammar ambiguities. - * Split localVariableDeclaration into localVariableDeclarationStatement - * (includes the terminating semi-colon) and localVariableDeclaration. - * This allowed me to use localVariableDeclaration in "forInit" clauses, - * simplifying them. - * Changed switchBlockStatementGroup to use multiple labels. This adds an - * ambiguity, but if one uses appropriately greedy parsing it yields the - * parse that is closest to the meaning of the switch statement. - * Renamed "forVarControl" to "enhancedForControl" -- JLS language. - * Added semantic predicates to test for shift operations rather than other - * things. 
Thus, for instance, the string "< <" will never be treated - * as a left-shift operator. - * In "creator" we rule out "nonWildcardTypeArguments" on arrayCreation, - * which are illegal. - * Moved "nonWildcardTypeArguments into innerCreator. - * Removed 'super' superSuffix from explicitGenericInvocation, since that - * is only used in explicitConstructorInvocation at the beginning of a - * constructorBody. (This is part of the simplification of expressions - * mentioned earlier.) - * Simplified primary (got rid of those things that are only used in - * explicitConstructorInvocation). - * Lexer -- removed "Exponent?" from FloatingPointLiteral choice 4, since it - * led to an ambiguity. - * - * This grammar successfully parses every .java file in the JDK 1.5 source - * tree (excluding those whose file names include '-', which are not - * valid Java compilation units). - * - * June 26, 2008 - * - * conditionalExpression had wrong precedence x?y:z. - * - * February 26, 2011 - * added left-recursive expression rule - * - * Known remaining problems: - * "Letter" and "JavaIDDigit" are wrong. The actual specification of - * "Letter" should be "a character for which the method - * Character.isJavaIdentifierStart(int) returns true." A "Java - * letter-or-digit is a character for which the method - * Character.isJavaIdentifierPart(int) returns true." - */ -grammar Java; - -// starting point for parsing a java file -/* The annotations are separated out to make parsing faster, but must be associated with - a packageDeclaration or a typeDeclaration (and not an empty one). */ -compilationUnit - : annotations - ( packageDeclaration importDeclaration* typeDeclaration* - | classOrInterfaceDeclaration typeDeclaration* - ) - EOF - | packageDeclaration? importDeclaration* typeDeclaration* - EOF - ; - -packageDeclaration - : 'package' qualifiedName ';' - ; - -importDeclaration - : 'import' 'static'? qualifiedName ('.' '*')? ';' - ; - -typeDeclaration - : classOrInterfaceDeclaration - | ';' - ; - -classOrInterfaceDeclaration - : classOrInterfaceModifiers (classDeclaration | interfaceDeclaration) - ; - -classOrInterfaceModifiers - : classOrInterfaceModifier* - ; - -classOrInterfaceModifier - : annotation // class or interface - | ( 'public' // class or interface - | 'protected' // class or interface - | 'private' // class or interface - | 'abstract' // class or interface - | 'static' // class or interface - | 'final' // class only -- does not apply to interfaces - | 'strictfp' // class or interface - ) - ; - -modifiers - : modifier* - ; - -classDeclaration - : normalClassDeclaration - | enumDeclaration - ; - -normalClassDeclaration - : 'class' Identifier typeParameters? - ('extends' type)? - ('implements' typeList)? - classBody - ; - -typeParameters - : '<' typeParameter (',' typeParameter)* '>' - ; - -typeParameter - : Identifier ('extends' typeBound)? - ; - -typeBound - : type ('&' type)* - ; - -enumDeclaration - : ENUM Identifier ('implements' typeList)? enumBody - ; - -enumBody - : '{' enumConstants? ','? enumBodyDeclarations? '}' - ; - -enumConstants - : enumConstant (',' enumConstant)* - ; - -enumConstant - : annotations? Identifier arguments? classBody? - ; - -enumBodyDeclarations - : ';' (classBodyDeclaration)* - ; - -interfaceDeclaration - : normalInterfaceDeclaration - | annotationTypeDeclaration - ; - -normalInterfaceDeclaration - : 'interface' Identifier typeParameters? ('extends' typeList)? 
interfaceBody - ; - -typeList - : type (',' type)* - ; - -classBody - : '{' classBodyDeclaration* '}' - ; - -interfaceBody - : '{' interfaceBodyDeclaration* '}' - ; - -classBodyDeclaration - : ';' - | 'static'? block - | modifiers memberDecl - ; - -memberDecl - : genericMethodOrConstructorDecl - | memberDeclaration - | 'void' Identifier voidMethodDeclaratorRest - | Identifier constructorDeclaratorRest - | interfaceDeclaration - | classDeclaration - ; - -memberDeclaration - : type (methodDeclaration | fieldDeclaration) - ; - -genericMethodOrConstructorDecl - : typeParameters genericMethodOrConstructorRest - ; - -genericMethodOrConstructorRest - : (type | 'void') Identifier methodDeclaratorRest - | Identifier constructorDeclaratorRest - ; - -methodDeclaration - : Identifier methodDeclaratorRest - ; - -fieldDeclaration - : variableDeclarators ';' - ; - -interfaceBodyDeclaration - : modifiers interfaceMemberDecl - | ';' - ; - -interfaceMemberDecl - : interfaceMethodOrFieldDecl - | interfaceGenericMethodDecl - | 'void' Identifier voidInterfaceMethodDeclaratorRest - | interfaceDeclaration - | classDeclaration - ; - -interfaceMethodOrFieldDecl - : type Identifier interfaceMethodOrFieldRest - ; - -interfaceMethodOrFieldRest - : constantDeclaratorsRest ';' - | interfaceMethodDeclaratorRest - ; - -methodDeclaratorRest - : formalParameters ('[' ']')* - ('throws' qualifiedNameList)? - ( methodBody - | ';' - ) - ; - -voidMethodDeclaratorRest - : formalParameters ('throws' qualifiedNameList)? - ( methodBody - | ';' - ) - ; - -interfaceMethodDeclaratorRest - : formalParameters ('[' ']')* ('throws' qualifiedNameList)? ';' - ; - -interfaceGenericMethodDecl - : typeParameters (type | 'void') Identifier - interfaceMethodDeclaratorRest - ; - -voidInterfaceMethodDeclaratorRest - : formalParameters ('throws' qualifiedNameList)? ';' - ; - -constructorDeclaratorRest - : formalParameters ('throws' qualifiedNameList)? constructorBody - ; - -constantDeclarator - : Identifier constantDeclaratorRest - ; - -variableDeclarators - : variableDeclarator (',' variableDeclarator)* - ; - -variableDeclarator - : variableDeclaratorId ('=' variableInitializer)? - ; - -constantDeclaratorsRest - : constantDeclaratorRest (',' constantDeclarator)* - ; - -constantDeclaratorRest - : ('[' ']')* '=' variableInitializer - ; - -variableDeclaratorId - : Identifier ('[' ']')* - ; - -variableInitializer - : arrayInitializer - | expression - ; - -arrayInitializer - : '{' (variableInitializer (',' variableInitializer)* (',')? )? '}' - ; - -modifier - : annotation - | ( 'public' - | 'protected' - | 'private' - | 'static' - | 'abstract' - | 'final' - | 'native' - | 'synchronized' - | 'transient' - | 'volatile' - | 'strictfp' - ) - ; - -packageOrTypeName - : qualifiedName - ; - -enumConstantName - : Identifier - ; - -typeName - : qualifiedName - ; - -type - : classOrInterfaceType ('[' ']')* - | primitiveType ('[' ']')* - ; - -classOrInterfaceType - : Identifier typeArguments? ('.' Identifier typeArguments? )* - ; - -primitiveType - : 'boolean' - | 'char' - | 'byte' - | 'short' - | 'int' - | 'long' - | 'float' - | 'double' - ; - -variableModifier - : 'final' - | annotation - ; - -typeArguments - : '<' typeArgument (',' typeArgument)* '>' - ; - -typeArgument - : type - | '?' (('extends' | 'super') type)? - ; - -qualifiedNameList - : qualifiedName (',' qualifiedName)* - ; - -formalParameters - : '(' formalParameterDecls? 
')' - ; - -formalParameterDecls - : variableModifiers type formalParameterDeclsRest - ; - -formalParameterDeclsRest - : variableDeclaratorId (',' formalParameterDecls)? - | '...' variableDeclaratorId - ; - -methodBody - : block - ; - -constructorBody - : block - ; - -qualifiedName - : Identifier ('.' Identifier)* - ; - -literal - : IntegerLiteral - | FloatingPointLiteral - | CharacterLiteral - | StringLiteral - | BooleanLiteral - | 'null' - ; - -// ANNOTATIONS - -annotations - : annotation+ - ; - -annotation - : '@' annotationName ( '(' ( elementValuePairs | elementValue )? ')' )? - ; - -annotationName - : Identifier ('.' Identifier)* - ; - -elementValuePairs - : elementValuePair (',' elementValuePair)* - ; - -elementValuePair - : Identifier '=' elementValue - ; - -elementValue - : expression - | annotation - | elementValueArrayInitializer - ; - -elementValueArrayInitializer - : '{' (elementValue (',' elementValue)*)? (',')? '}' - ; - -annotationTypeDeclaration - : '@' 'interface' Identifier annotationTypeBody - ; - -annotationTypeBody - : '{' (annotationTypeElementDeclaration)* '}' - ; - -annotationTypeElementDeclaration - : modifiers annotationTypeElementRest - | ';' // this is not allowed by the grammar, but apparently allowed by the actual compiler - ; - -annotationTypeElementRest - : type annotationMethodOrConstantRest ';' - | normalClassDeclaration ';'? - | normalInterfaceDeclaration ';'? - | enumDeclaration ';'? - | annotationTypeDeclaration ';'? - ; - -annotationMethodOrConstantRest - : annotationMethodRest - | annotationConstantRest - ; - -annotationMethodRest - : Identifier '(' ')' defaultValue? - ; - -annotationConstantRest - : variableDeclarators - ; - -defaultValue - : 'default' elementValue - ; - -// STATEMENTS / BLOCKS - -block - : '{' blockStatement* '}' - ; - -blockStatement - : localVariableDeclarationStatement - | classOrInterfaceDeclaration - | statement - ; - -localVariableDeclarationStatement - : localVariableDeclaration ';' - ; - -localVariableDeclaration - : variableModifiers type variableDeclarators - ; - -variableModifiers - : variableModifier* - ; - -statement - : block - | ASSERT expression (':' expression)? ';' - | 'if' parExpression statement ('else' statement)? - | 'for' '(' forControl ')' statement - | 'while' parExpression statement - | 'do' statement 'while' parExpression ';' - | 'try' block (catches finallyBlock? | finallyBlock) - | 'try' resourceSpecification block catches? finallyBlock? - | 'switch' parExpression '{' switchBlockStatementGroups '}' - | 'synchronized' parExpression block - | 'return' expression? ';' - | 'throw' expression ';' - | 'break' Identifier? ';' - | 'continue' Identifier? ';' - | ';' - | statementExpression ';' - | Identifier ':' statement - ; - -catches - : catchClause+ - ; - -catchClause - : 'catch' '(' variableModifiers catchType Identifier ')' block - ; - -catchType - : qualifiedName ('|' qualifiedName)* - ; - -finallyBlock - : 'finally' block - ; - -resourceSpecification - : '(' resources ';'? ')' - ; - -resources - : resource (';' resource)* - ; - -resource - : variableModifiers classOrInterfaceType variableDeclaratorId '=' expression - ; - -formalParameter - : variableModifiers type variableDeclaratorId - ; - -switchBlockStatementGroups - : (switchBlockStatementGroup)* - ; - -/* The change here (switchLabel -> switchLabel+) technically makes this grammar - ambiguous; but with appropriately greedy parsing it yields the most - appropriate AST, one in which each group, except possibly the last one, has - labels and statements. 
*/ -switchBlockStatementGroup - : switchLabel+ blockStatement* - ; - -switchLabel - : 'case' constantExpression ':' - | 'case' enumConstantName ':' - | 'default' ':' - ; - -forControl - : enhancedForControl - | forInit? ';' expression? ';' forUpdate? - ; - -forInit - : localVariableDeclaration - | expressionList - ; - -enhancedForControl - : variableModifiers type Identifier ':' expression - ; - -forUpdate - : expressionList - ; - -// EXPRESSIONS - -parExpression - : '(' expression ')' - ; - -expressionList - : expression (',' expression)* - ; - -statementExpression - : expression - ; - -constantExpression - : expression - ; - -expression - : primary - | expression '.' Identifier - | expression '.' 'this' - | expression '.' 'new' nonWildcardTypeArguments? innerCreator - | expression '.' 'super' superSuffix - | expression '.' explicitGenericInvocation - | 'new' creator - | expression '[' expression ']' - | '(' type ')' expression - | expression ('++' | '--') - | expression '(' expressionList? ')' - | ('+'|'-'|'++'|'--') expression - | ('~'|'!') expression - | expression ('*'|'/'|'%') expression - | expression ('+'|'-') expression - | expression ('<' '<' | '>' '>' '>' | '>' '>') expression - | expression ('<=' | '>=' | '>' | '<') expression - | expression 'instanceof' type - | expression ('==' | '!=') expression - | expression '&' expression - | expression '^' expression - | expression '|' expression - | expression '&&' expression - | expression '||' expression - | expression '?' expression ':' expression - | expression - ( '=' - | '+=' - | '-=' - | '*=' - | '/=' - | '&=' - | '|=' - | '^=' - | '>>=' - | '>>>=' - | '<<=' - | '%=' - ) - expression - ; - -primary - : '(' expression ')' - | 'this' - | 'super' - | literal - | Identifier - | type '.' 'class' - | 'void' '.' 'class' - | nonWildcardTypeArguments (explicitGenericInvocationSuffix | 'this' arguments) - ; - -creator - : nonWildcardTypeArguments createdName classCreatorRest - | createdName (arrayCreatorRest | classCreatorRest) - ; - -createdName - : Identifier typeArgumentsOrDiamond? ('.' Identifier typeArgumentsOrDiamond?)* - | primitiveType - ; - -innerCreator - : Identifier nonWildcardTypeArgumentsOrDiamond? classCreatorRest - ; - -arrayCreatorRest - : '[' - ( ']' ('[' ']')* arrayInitializer - | expression ']' ('[' expression ']')* ('[' ']')* - ) - ; - -classCreatorRest - : arguments classBody? - ; - -explicitGenericInvocation - : nonWildcardTypeArguments explicitGenericInvocationSuffix - ; - -nonWildcardTypeArguments - : '<' typeList '>' - ; - -typeArgumentsOrDiamond - : '<' '>' - | typeArguments - ; - -nonWildcardTypeArgumentsOrDiamond - : '<' '>' - | nonWildcardTypeArguments - ; - -superSuffix - : arguments - | '.' Identifier arguments? - ; - -explicitGenericInvocationSuffix - : 'super' superSuffix - | Identifier arguments - ; - -arguments - : '(' expressionList? 
')' - ; - -// LEXER - -// §3.9 Keywords - -ABSTRACT : 'abstract'; -ASSERT : 'assert'; -BOOLEAN : 'boolean'; -BREAK : 'break'; -BYTE : 'byte'; -CASE : 'case'; -CATCH : 'catch'; -CHAR : 'char'; -CLASS : 'class'; -CONST : 'const'; -CONTINUE : 'continue'; -DEFAULT : 'default'; -DO : 'do'; -DOUBLE : 'double'; -ELSE : 'else'; -ENUM : 'enum'; -EXTENDS : 'extends'; -FINAL : 'final'; -FINALLY : 'finally'; -FLOAT : 'float'; -FOR : 'for'; -IF : 'if'; -GOTO : 'goto'; -IMPLEMENTS : 'implements'; -IMPORT : 'import'; -INSTANCEOF : 'instanceof'; -INT : 'int'; -INTERFACE : 'interface'; -LONG : 'long'; -NATIVE : 'native'; -NEW : 'new'; -PACKAGE : 'package'; -PRIVATE : 'private'; -PROTECTED : 'protected'; -PUBLIC : 'public'; -RETURN : 'return'; -SHORT : 'short'; -STATIC : 'static'; -STRICTFP : 'strictfp'; -SUPER : 'super'; -SWITCH : 'switch'; -SYNCHRONIZED : 'synchronized'; -THIS : 'this'; -THROW : 'throw'; -THROWS : 'throws'; -TRANSIENT : 'transient'; -TRY : 'try'; -VOID : 'void'; -VOLATILE : 'volatile'; -WHILE : 'while'; - -// §3.10.1 Integer Literals - -IntegerLiteral - : DecimalIntegerLiteral - | HexIntegerLiteral - | OctalIntegerLiteral - | BinaryIntegerLiteral - ; - -fragment -DecimalIntegerLiteral - : DecimalNumeral IntegerTypeSuffix? - ; - -fragment -HexIntegerLiteral - : HexNumeral IntegerTypeSuffix? - ; - -fragment -OctalIntegerLiteral - : OctalNumeral IntegerTypeSuffix? - ; - -fragment -BinaryIntegerLiteral - : BinaryNumeral IntegerTypeSuffix? - ; - -fragment -IntegerTypeSuffix - : [lL] - ; - -fragment -DecimalNumeral - : '0' - | NonZeroDigit (Digits? | Underscores Digits) - ; - -fragment -Digits - : Digit (DigitsAndUnderscores? Digit)? - ; - -fragment -Digit - : '0' - | NonZeroDigit - ; - -fragment -NonZeroDigit - : [1-9] - ; - -fragment -DigitsAndUnderscores - : DigitOrUnderscore+ - ; - -fragment -DigitOrUnderscore - : Digit - | '_' - ; - -fragment -Underscores - : '_'+ - ; - -fragment -HexNumeral - : '0' [xX] HexDigits - ; - -fragment -HexDigits - : HexDigit (HexDigitsAndUnderscores? HexDigit)? - ; - -fragment -HexDigit - : [0-9a-fA-F] - ; - -fragment -HexDigitsAndUnderscores - : HexDigitOrUnderscore+ - ; - -fragment -HexDigitOrUnderscore - : HexDigit - | '_' - ; - -fragment -OctalNumeral - : '0' Underscores? OctalDigits - ; - -fragment -OctalDigits - : OctalDigit (OctalDigitsAndUnderscores? OctalDigit)? - ; - -fragment -OctalDigit - : [0-7] - ; - -fragment -OctalDigitsAndUnderscores - : OctalDigitOrUnderscore+ - ; - -fragment -OctalDigitOrUnderscore - : OctalDigit - | '_' - ; - -fragment -BinaryNumeral - : '0' [bB] BinaryDigits - ; - -fragment -BinaryDigits - : BinaryDigit (BinaryDigitsAndUnderscores? BinaryDigit)? - ; - -fragment -BinaryDigit - : [01] - ; - -fragment -BinaryDigitsAndUnderscores - : BinaryDigitOrUnderscore+ - ; - -fragment -BinaryDigitOrUnderscore - : BinaryDigit - | '_' - ; - -// §3.10.2 Floating-Point Literals - -FloatingPointLiteral - : DecimalFloatingPointLiteral - | HexadecimalFloatingPointLiteral - ; - -fragment -DecimalFloatingPointLiteral - : Digits '.' Digits? ExponentPart? FloatTypeSuffix? - | '.' Digits ExponentPart? FloatTypeSuffix? - | Digits ExponentPart FloatTypeSuffix? - | Digits FloatTypeSuffix - ; - -fragment -ExponentPart - : ExponentIndicator SignedInteger - ; - -fragment -ExponentIndicator - : [eE] - ; - -fragment -SignedInteger - : Sign? Digits - ; - -fragment -Sign - : [+-] - ; - -fragment -FloatTypeSuffix - : [fFdD] - ; - -fragment -HexadecimalFloatingPointLiteral - : HexSignificand BinaryExponent FloatTypeSuffix? 
- ; - -fragment -HexSignificand - : HexNumeral '.'? - | '0' [xX] HexDigits? '.' HexDigits - ; - -fragment -BinaryExponent - : BinaryExponentIndicator SignedInteger - ; - -fragment -BinaryExponentIndicator - : [pP] - ; - -// §3.10.3 Boolean Literals - -BooleanLiteral - : 'true' - | 'false' - ; - -// §3.10.4 Character Literals - -CharacterLiteral - : '\'' SingleCharacter '\'' - | '\'' EscapeSequence '\'' - ; - -fragment -SingleCharacter - : ~['\\] - ; - -// §3.10.5 String Literals - -StringLiteral - : '"' StringCharacters? '"' - ; - -fragment -StringCharacters - : StringCharacter+ - ; - -fragment -StringCharacter - : ~["\\] - | EscapeSequence - ; - -// §3.10.6 Escape Sequences for Character and String Literals - -fragment -EscapeSequence - : '\\' [btnfr"'\\] - | OctalEscape - ; - -fragment -OctalEscape - : '\\' OctalDigit - | '\\' OctalDigit OctalDigit - | '\\' ZeroToThree OctalDigit OctalDigit - ; - -fragment -ZeroToThree - : [0-3] - ; - -// §3.10.7 The Null Literal - -NullLiteral - : 'null' - ; - -// §3.11 Separators - -LPAREN : '('; -RPAREN : ')'; -LBRACE : '{'; -RBRACE : '}'; -LBRACK : '['; -RBRACK : ']'; -SEMI : ';'; -COMMA : ','; -DOT : '.'; - -// §3.12 Operators - -ASSIGN : '='; -GT : '>'; -LT : '<'; -BANG : '!'; -TILDE : '~'; -QUESTION : '?'; -COLON : ':'; -EQUAL : '=='; -LE : '<='; -GE : '>='; -NOTEQUAL : '!='; -AND : '&&'; -OR : '||'; -INC : '++'; -DEC : '--'; -ADD : '+'; -SUB : '-'; -MUL : '*'; -DIV : '/'; -BITAND : '&'; -BITOR : '|'; -CARET : '^'; -MOD : '%'; - -ADD_ASSIGN : '+='; -SUB_ASSIGN : '-='; -MUL_ASSIGN : '*='; -DIV_ASSIGN : '/='; -AND_ASSIGN : '&='; -OR_ASSIGN : '|='; -XOR_ASSIGN : '^='; -MOD_ASSIGN : '%='; -LSHIFT_ASSIGN : '<<='; -RSHIFT_ASSIGN : '>>='; -URSHIFT_ASSIGN : '>>>='; - -// §3.8 Identifiers (must appear after all keywords in the grammar) - -Identifier - : JavaLetter JavaLetterOrDigit* - ; - -fragment -JavaLetter - : [a-zA-Z$_] // these are the "java letters" below 0xFF - | // covers all characters above 0xFF which are not a surrogate - ~[\u0000-\u00FF\uD800-\uDBFF] - {Character.isJavaIdentifierStart(_input.LA(-1))}? - | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF - [\uD800-\uDBFF] [\uDC00-\uDFFF] - {Character.isJavaIdentifierStart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? - ; - -fragment -JavaLetterOrDigit - : [a-zA-Z0-9$_] // these are the "java letters or digits" below 0xFF - | // covers all characters above 0xFF which are not a surrogate - ~[\u0000-\u00FF\uD800-\uDBFF] - {Character.isJavaIdentifierPart(_input.LA(-1))}? - | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF - [\uD800-\uDBFF] [\uDC00-\uDFFF] - {Character.isJavaIdentifierPart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? - ; - -// -// Additional symbols not defined in the lexical specification -// - -AT : '@'; -ELLIPSIS : '...'; - -// -// Whitespace and comments -// - -WS : [ \t\r\n\u000C]+ -> skip - ; - -COMMENT - : '/*' .*? '*/' -> skip - ; - -LINE_COMMENT - : '//' ~[\r\n]* -> skip - ; diff --git a/tool/test/org/antlr/v4/xtest/Java.g4 b/tool/test/org/antlr/v4/xtest/Java.g4 deleted file mode 100644 index e3e39f679..000000000 --- a/tool/test/org/antlr/v4/xtest/Java.g4 +++ /dev/null @@ -1,1332 +0,0 @@ -/* - [The "BSD licence"] - Copyright (c) 2007-2008 Terence Parr - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. 
Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -/** A Java 1.5 grammar for ANTLR v3 derived from the spec - * - * This is a very close representation of the spec; the changes - * are comestic (remove left recursion) and also fixes (the spec - * isn't exactly perfect). I have run this on the 1.4.2 source - * and some nasty looking enums from 1.5, but have not really - * tested for 1.5 compatibility. - * - * I built this with: java -Xmx100M org.antlr.Tool java.g - * and got two errors that are ok (for now): - * java.g:691:9: Decision can match input such as - * "'0'..'9'{'E', 'e'}{'+', '-'}'0'..'9'{'D', 'F', 'd', 'f'}" - * using multiple alternatives: 3, 4 - * As a result, alternative(s) 4 were disabled for that input - * java.g:734:35: Decision can match input such as "{'$', 'A'..'Z', - * '_', 'a'..'z', '\u00C0'..'\u00D6', '\u00D8'..'\u00F6', - * '\u00F8'..'\u1FFF', '\u3040'..'\u318F', '\u3300'..'\u337F', - * '\u3400'..'\u3D2D', '\u4E00'..'\u9FFF', '\uF900'..'\uFAFF'}" - * using multiple alternatives: 1, 2 - * As a result, alternative(s) 2 were disabled for that input - * - * You can turn enum on/off as a keyword :) - * - * Version 1.0 -- initial release July 5, 2006 (requires 3.0b2 or higher) - * - * Primary author: Terence Parr, July 2006 - * - * Version 1.0.1 -- corrections by Koen Vanderkimpen & Marko van Dooren, - * October 25, 2006; - * fixed normalInterfaceDeclaration: now uses typeParameters instead - * of typeParameter (according to JLS, 3rd edition) - * fixed castExpression: no longer allows expression next to type - * (according to semantics in JLS, in contrast with syntax in JLS) - * - * Version 1.0.2 -- Terence Parr, Nov 27, 2006 - * java spec I built this from had some bizarre for-loop control. - * Looked weird and so I looked elsewhere...Yep, it's messed up. - * simplified. - * - * Version 1.0.3 -- Chris Hogue, Feb 26, 2007 - * Factored out an annotationName rule and used it in the annotation rule. - * Not sure why, but typeName wasn't recognizing references to inner - * annotations (e.g. @InterfaceName.InnerAnnotation()) - * Factored out the elementValue section of an annotation reference. Created - * elementValuePair and elementValuePairs rules, then used them in the - * annotation rule. Allows it to recognize annotation references with - * multiple, comma separated attributes. 
- * Updated elementValueArrayInitializer so that it allows multiple elements. - * (It was only allowing 0 or 1 element). - * Updated localVariableDeclaration to allow annotations. Interestingly the JLS - * doesn't appear to indicate this is legal, but it does work as of at least - * JDK 1.5.0_06. - * Moved the Identifier portion of annotationTypeElementRest to annotationMethodRest. - * Because annotationConstantRest already references variableDeclarator which - * has the Identifier portion in it, the parser would fail on constants in - * annotation definitions because it expected two identifiers. - * Added optional trailing ';' to the alternatives in annotationTypeElementRest. - * Wouldn't handle an inner interface that has a trailing ';'. - * Swapped the expression and type rule reference order in castExpression to - * make it check for genericized casts first. It was failing to recognize a - * statement like "Class TYPE = (Class)...;" because it was seeing - * 'Class'. - * Changed createdName to use typeArguments instead of nonWildcardTypeArguments. - * Changed the 'this' alternative in primary to allow 'identifierSuffix' rather than - * just 'arguments'. The case it couldn't handle was a call to an explicit - * generic method invocation (e.g. this.doSomething()). Using identifierSuffix - * may be overly aggressive--perhaps should create a more constrained thisSuffix rule? - * - * Version 1.0.4 -- Hiroaki Nakamura, May 3, 2007 - * - * Fixed formalParameterDecls, localVariableDeclaration, forInit, - * and forVarControl to use variableModifier* not 'final'? (annotation)? - * - * Version 1.0.5 -- Terence, June 21, 2007 - * --a[i].foo didn't work. Fixed unaryExpression - * - * Version 1.0.6 -- John Ridgway, March 17, 2008 - * Made "assert" a switchable keyword like "enum". - * Fixed compilationUnit to disallow "annotation importDeclaration ...". - * Changed "Identifier ('.' Identifier)*" to "qualifiedName" in more - * places. - * Changed modifier* and/or variableModifier* to classOrInterfaceModifiers, - * modifiers or variableModifiers, as appropriate. - * Renamed "bound" to "typeBound" to better match language in the JLS. - * Added "memberDeclaration" which rewrites to methodDeclaration or - * fieldDeclaration and pulled type into memberDeclaration. So we parse - * type and then move on to decide whether we're dealing with a field - * or a method. - * Modified "constructorDeclaration" to use "constructorBody" instead of - * "methodBody". constructorBody starts with explicitConstructorInvocation, - * then goes on to blockStatement*. Pulling explicitConstructorInvocation - * out of expressions allowed me to simplify "primary". - * Changed variableDeclarator to simplify it. - * Changed type to use classOrInterfaceType, thus simplifying it; of course - * I then had to add classOrInterfaceType, but it is used in several - * places. - * Fixed annotations, old version allowed "@X(y,z)", which is illegal. - * Added optional comma to end of "elementValueArrayInitializer"; as per JLS. - * Changed annotationTypeElementRest to use normalClassDeclaration and - * normalInterfaceDeclaration rather than classDeclaration and - * interfaceDeclaration, thus getting rid of a couple of grammar ambiguities. - * Split localVariableDeclaration into localVariableDeclarationStatement - * (includes the terminating semi-colon) and localVariableDeclaration. - * This allowed me to use localVariableDeclaration in "forInit" clauses, - * simplifying them. - * Changed switchBlockStatementGroup to use multiple labels. 
This adds an - * ambiguity, but if one uses appropriately greedy parsing it yields the - * parse that is closest to the meaning of the switch statement. - * Renamed "forVarControl" to "enhancedForControl" -- JLS language. - * Added semantic predicates to test for shift operations rather than other - * things. Thus, for instance, the string "< <" will never be treated - * as a left-shift operator. - * In "creator" we rule out "nonWildcardTypeArguments" on arrayCreation, - * which are illegal. - * Moved "nonWildcardTypeArguments into innerCreator. - * Removed 'super' superSuffix from explicitGenericInvocation, since that - * is only used in explicitConstructorInvocation at the beginning of a - * constructorBody. (This is part of the simplification of expressions - * mentioned earlier.) - * Simplified primary (got rid of those things that are only used in - * explicitConstructorInvocation). - * Lexer -- removed "Exponent?" from FloatingPointLiteral choice 4, since it - * led to an ambiguity. - * - * This grammar successfully parses every .java file in the JDK 1.5 source - * tree (excluding those whose file names include '-', which are not - * valid Java compilation units). - * - * June 26, 2008 - * - * conditionalExpression had wrong precedence x?y:z. - * - * Known remaining problems: - * "Letter" and "JavaIDDigit" are wrong. The actual specification of - * "Letter" should be "a character for which the method - * Character.isJavaIdentifierStart(int) returns true." A "Java - * letter-or-digit is a character for which the method - * Character.isJavaIdentifierPart(int) returns true." - */ -grammar Java; - -// starting point for parsing a java file -/* The annotations are separated out to make parsing faster, but must be associated with - a packageDeclaration or a typeDeclaration (and not an empty one). */ -compilationUnit - : annotations - ( packageDeclaration importDeclaration* typeDeclaration* - | classOrInterfaceDeclaration typeDeclaration* - ) - EOF - | packageDeclaration? importDeclaration* typeDeclaration* - EOF - ; - -packageDeclaration - : 'package' qualifiedName ';' - ; - -importDeclaration - : 'import' 'static'? qualifiedName ('.' '*')? ';' - ; - -typeDeclaration - : classOrInterfaceDeclaration - | ';' - ; - -classOrInterfaceDeclaration - : classOrInterfaceModifiers (classDeclaration | interfaceDeclaration) - ; - -classOrInterfaceModifiers - : classOrInterfaceModifier* - ; - -classOrInterfaceModifier - : annotation // class or interface - | ( 'public' // class or interface - | 'protected' // class or interface - | 'private' // class or interface - | 'abstract' // class or interface - | 'static' // class or interface - | 'final' // class only -- does not apply to interfaces - | 'strictfp' // class or interface - ) - ; - -modifiers - : modifier* - ; - -classDeclaration - : normalClassDeclaration - | enumDeclaration - ; - -normalClassDeclaration - : 'class' Identifier typeParameters? - ('extends' type)? - ('implements' typeList)? - classBody - ; - -typeParameters - : '<' typeParameter (',' typeParameter)* '>' - ; - -typeParameter - : Identifier ('extends' typeBound)? - ; - -typeBound - : type ('&' type)* - ; - -enumDeclaration - : ENUM Identifier ('implements' typeList)? enumBody - ; - -enumBody - : '{' enumConstants? ','? enumBodyDeclarations? '}' - ; - -enumConstants - : enumConstant (',' enumConstant)* - ; - -enumConstant - : annotations? Identifier arguments? classBody? 
- ; - -enumBodyDeclarations - : ';' (classBodyDeclaration)* - ; - -interfaceDeclaration - : normalInterfaceDeclaration - | annotationTypeDeclaration - ; - -normalInterfaceDeclaration - : 'interface' Identifier typeParameters? ('extends' typeList)? interfaceBody - ; - -typeList - : type (',' type)* - ; - -classBody - : '{' classBodyDeclaration* '}' - ; - -interfaceBody - : '{' interfaceBodyDeclaration* '}' - ; - -classBodyDeclaration - : ';' - | 'static'? block - | modifiers memberDecl - ; - -memberDecl - : genericMethodOrConstructorDecl - | memberDeclaration - | 'void' Identifier voidMethodDeclaratorRest - | Identifier constructorDeclaratorRest - | interfaceDeclaration - | classDeclaration - ; - -memberDeclaration - : type (methodDeclaration | fieldDeclaration) - ; - -genericMethodOrConstructorDecl - : typeParameters genericMethodOrConstructorRest - ; - -genericMethodOrConstructorRest - : (type | 'void') Identifier methodDeclaratorRest - | Identifier constructorDeclaratorRest - ; - -methodDeclaration - : Identifier methodDeclaratorRest - ; - -fieldDeclaration - : variableDeclarators ';' - ; - -interfaceBodyDeclaration - : modifiers interfaceMemberDecl - | ';' - ; - -interfaceMemberDecl - : interfaceMethodOrFieldDecl - | interfaceGenericMethodDecl - | 'void' Identifier voidInterfaceMethodDeclaratorRest - | interfaceDeclaration - | classDeclaration - ; - -interfaceMethodOrFieldDecl - : type Identifier interfaceMethodOrFieldRest - ; - -interfaceMethodOrFieldRest - : constantDeclaratorsRest ';' - | interfaceMethodDeclaratorRest - ; - -methodDeclaratorRest - : formalParameters ('[' ']')* - ('throws' qualifiedNameList)? - ( methodBody - | ';' - ) - ; - -voidMethodDeclaratorRest - : formalParameters ('throws' qualifiedNameList)? - ( methodBody - | ';' - ) - ; - -interfaceMethodDeclaratorRest - : formalParameters ('[' ']')* ('throws' qualifiedNameList)? ';' - ; - -interfaceGenericMethodDecl - : typeParameters (type | 'void') Identifier - interfaceMethodDeclaratorRest - ; - -voidInterfaceMethodDeclaratorRest - : formalParameters ('throws' qualifiedNameList)? ';' - ; - -constructorDeclaratorRest - : formalParameters ('throws' qualifiedNameList)? constructorBody - ; - -constantDeclarator - : Identifier constantDeclaratorRest - ; - -variableDeclarators - : variableDeclarator (',' variableDeclarator)* - ; - -variableDeclarator - : variableDeclaratorId ('=' variableInitializer)? - ; - -constantDeclaratorsRest - : constantDeclaratorRest (',' constantDeclarator)* - ; - -constantDeclaratorRest - : ('[' ']')* '=' variableInitializer - ; - -variableDeclaratorId - : Identifier ('[' ']')* - ; - -variableInitializer - : arrayInitializer - | expression - ; - -arrayInitializer - : '{' (variableInitializer (',' variableInitializer)* (',')? )? '}' - ; - -modifier - : annotation - | ( 'public' - | 'protected' - | 'private' - | 'static' - | 'abstract' - | 'final' - | 'native' - | 'synchronized' - | 'transient' - | 'volatile' - | 'strictfp' - ) - ; - -packageOrTypeName - : qualifiedName - ; - -enumConstantName - : Identifier - ; - -typeName - : qualifiedName - ; - -type - : classOrInterfaceType ('[' ']')* - | primitiveType ('[' ']')* - ; - -classOrInterfaceType - : Identifier typeArguments? ('.' Identifier typeArguments? )* - ; - -primitiveType - : 'boolean' - | 'char' - | 'byte' - | 'short' - | 'int' - | 'long' - | 'float' - | 'double' - ; - -variableModifier - : 'final' - | annotation - ; - -typeArguments - : '<' typeArgument (',' typeArgument)* '>' - ; - -typeArgument - : type - | '?' (('extends' | 'super') type)? 
- ; - -qualifiedNameList - : qualifiedName (',' qualifiedName)* - ; - -formalParameters - : '(' formalParameterDecls? ')' - ; - -formalParameterDecls - : variableModifiers type formalParameterDeclsRest - ; - -formalParameterDeclsRest - : variableDeclaratorId (',' formalParameterDecls)? - | '...' variableDeclaratorId - ; - -methodBody - : block - ; - -constructorBody - : block - ; - -qualifiedName - : Identifier ('.' Identifier)* - ; - -literal - : IntegerLiteral - | FloatingPointLiteral - | CharacterLiteral - | StringLiteral - | BooleanLiteral - | 'null' - ; - -// ANNOTATIONS - -annotations - : annotation+ - ; - -annotation - : '@' annotationName ( '(' ( elementValuePairs | elementValue )? ')' )? - ; - -annotationName - : Identifier ('.' Identifier)* - ; - -elementValuePairs - : elementValuePair (',' elementValuePair)* - ; - -elementValuePair - : Identifier '=' elementValue - ; - -elementValue - : conditionalExpression - | annotation - | elementValueArrayInitializer - ; - -elementValueArrayInitializer - : '{' (elementValue (',' elementValue)*)? (',')? '}' - ; - -annotationTypeDeclaration - : '@' 'interface' Identifier annotationTypeBody - ; - -annotationTypeBody - : '{' (annotationTypeElementDeclaration)* '}' - ; - -annotationTypeElementDeclaration - : modifiers annotationTypeElementRest - | ';' // this is not allowed by the grammar, but apparently allowed by the actual compiler - ; - -annotationTypeElementRest - : type annotationMethodOrConstantRest ';' - | normalClassDeclaration ';'? - | normalInterfaceDeclaration ';'? - | enumDeclaration ';'? - | annotationTypeDeclaration ';'? - ; - -annotationMethodOrConstantRest - : annotationMethodRest - | annotationConstantRest - ; - -annotationMethodRest - : Identifier '(' ')' defaultValue? - ; - -annotationConstantRest - : variableDeclarators - ; - -defaultValue - : 'default' elementValue - ; - -// STATEMENTS / BLOCKS - -block - : '{' blockStatement* '}' - ; - -blockStatement - : localVariableDeclarationStatement - | classOrInterfaceDeclaration - | statement - ; - -localVariableDeclarationStatement - : localVariableDeclaration ';' - ; - -localVariableDeclaration - : variableModifiers type variableDeclarators - ; - -variableModifiers - : variableModifier* - ; - -statement - : block - | ASSERT expression (':' expression)? ';' - | 'if' parExpression statement ('else' statement)? - | 'for' '(' forControl ')' statement - | 'while' parExpression statement - | 'do' statement 'while' parExpression ';' - | 'try' block (catches finallyBlock? | finallyBlock) - | 'try' resourceSpecification block catches? finallyBlock? - | 'switch' parExpression '{' switchBlockStatementGroups '}' - | 'synchronized' parExpression block - | 'return' expression? ';' - | 'throw' expression ';' - | 'break' Identifier? ';' - | 'continue' Identifier? ';' - | ';' - | statementExpression ';' - | Identifier ':' statement - ; - -catches - : catchClause+ - ; - -catchClause - : 'catch' '(' variableModifiers catchType Identifier ')' block - ; - -catchType - : qualifiedName ('|' qualifiedName)* - ; - -finallyBlock - : 'finally' block - ; - -resourceSpecification - : '(' resources ';'? 
')' - ; - -resources - : resource (';' resource)* - ; - -resource - : variableModifiers classOrInterfaceType variableDeclaratorId '=' expression - ; - -formalParameter - : variableModifiers type variableDeclaratorId - ; - -switchBlockStatementGroups - : (switchBlockStatementGroup)* - ; - -/* The change here (switchLabel -> switchLabel+) technically makes this grammar - ambiguous; but with appropriately greedy parsing it yields the most - appropriate AST, one in which each group, except possibly the last one, has - labels and statements. */ -switchBlockStatementGroup - : switchLabel+ blockStatement* - ; - -switchLabel - : 'case' constantExpression ':' - | 'case' enumConstantName ':' - | 'default' ':' - ; - -forControl - : enhancedForControl - | forInit? ';' expression? ';' forUpdate? - ; - -forInit - : localVariableDeclaration - | expressionList - ; - -enhancedForControl - : variableModifiers type Identifier ':' expression - ; - -forUpdate - : expressionList - ; - -// EXPRESSIONS - -parExpression - : '(' expression ')' - ; - -expressionList - : expression (',' expression)* - ; - -statementExpression - : expression - ; - -constantExpression - : expression - ; - -expression - : conditionalExpression (assignmentOperator expression)? - ; - -assignmentOperator - : '=' - | '+=' - | '-=' - | '*=' - | '/=' - | '&=' - | '|=' - | '^=' - | '%=' - | '<<=' - | '>>=' - | '>>>=' - ; - -conditionalExpression - : conditionalOrExpression ( '?' expression ':' conditionalExpression )? - ; - -conditionalOrExpression - : conditionalAndExpression ( '||' conditionalAndExpression )* - ; - -conditionalAndExpression - : inclusiveOrExpression ( '&&' inclusiveOrExpression )* - ; - -inclusiveOrExpression - : exclusiveOrExpression ( '|' exclusiveOrExpression )* - ; - -exclusiveOrExpression - : andExpression ( '^' andExpression )* - ; - -andExpression - : equalityExpression ( '&' equalityExpression )* - ; - -equalityExpression - : instanceOfExpression ( ('==' | '!=') instanceOfExpression )* - ; - -instanceOfExpression - : relationalExpression ('instanceof' type)? - ; - -relationalExpression - : shiftExpression ( relationalOp shiftExpression )* - ; - -relationalOp - : '<=' - | '>=' - | '<' - | '>' - ; - -shiftExpression - : additiveExpression ( shiftOp additiveExpression )* - ; - -shiftOp - : t1='<' t2='<' -// { $t1.getLine() == $t2.getLine() && -// $t1.getCharPositionInLine() + 1 == $t2.getCharPositionInLine() }? - | t1='>' t2='>' t3='>' -// { $t1.getLine() == $t2.getLine() && -// $t1.getCharPositionInLine() + 1 == $t2.getCharPositionInLine() && -// $t2.getLine() == $t3.getLine() && -// $t2.getCharPositionInLine() + 1 == $t3.getCharPositionInLine() }? - | t1='>' t2='>' -// { $t1.getLine() == $t2.getLine() && -// $t1.getCharPositionInLine() + 1 == $t2.getCharPositionInLine() }? - ; - - -additiveExpression - : multiplicativeExpression ( ('+' | '-') multiplicativeExpression )* - ; - -multiplicativeExpression - : unaryExpression ( ( '*' | '/' | '%' ) unaryExpression )* - ; - -unaryExpression - : '+' unaryExpression - | '-' unaryExpression - | '++' unaryExpression - | '--' unaryExpression - | unaryExpressionNotPlusMinus - ; - -unaryExpressionNotPlusMinus - : '~' unaryExpression - | '!' unaryExpression - | castExpression - | primary selector* ('++'|'--')? - ; - -castExpression - : '(' primitiveType ')' unaryExpression - | '(' (type | expression) ')' unaryExpressionNotPlusMinus - ; - -primary - : parExpression - | 'this' arguments? 
- | 'super' superSuffix - | literal - | 'new' creator - | nonWildcardTypeArguments (explicitGenericInvocationSuffix | 'this' arguments) - | Identifier ('.' Identifier)* identifierSuffix? - | primitiveType ('[' ']')* '.' 'class' - | 'void' '.' 'class' - ; - -identifierSuffix - : ('[' ']')+ '.' 'class' - | '[' expression ']' - | arguments - | '.' 'class' - | '.' explicitGenericInvocation - | '.' 'this' - | '.' 'super' arguments - | '.' 'new' nonWildcardTypeArguments? innerCreator - ; - -creator - : nonWildcardTypeArguments createdName classCreatorRest - | createdName (arrayCreatorRest | classCreatorRest) - ; - -createdName - : Identifier typeArgumentsOrDiamond? ('.' Identifier typeArgumentsOrDiamond?)* - | primitiveType - ; - -innerCreator - : Identifier nonWildcardTypeArgumentsOrDiamond? classCreatorRest - ; - -arrayCreatorRest - : '[' - ( ']' ('[' ']')* arrayInitializer - | expression ']' ('[' expression ']')* ('[' ']')* - ) - ; - -classCreatorRest - : arguments classBody? - ; - -explicitGenericInvocation - : nonWildcardTypeArguments explicitGenericInvocationSuffix - ; - -nonWildcardTypeArguments - : '<' typeList '>' - ; - -typeArgumentsOrDiamond - : '<' '>' - | typeArguments - ; - -nonWildcardTypeArgumentsOrDiamond - : '<' '>' - | nonWildcardTypeArguments - ; - -selector - : '.' Identifier arguments? - | '.' explicitGenericInvocation - | '.' 'this' - | '.' 'super' superSuffix - | '.' 'new' nonWildcardTypeArguments? innerCreator - | '[' expression ']' - ; - -superSuffix - : arguments - | '.' Identifier arguments? - ; - -explicitGenericInvocationSuffix - : 'super' superSuffix - | Identifier arguments - ; - -arguments - : '(' expressionList? ')' - ; - -// LEXER - -// §3.9 Keywords - -ABSTRACT : 'abstract'; -ASSERT : 'assert'; -BOOLEAN : 'boolean'; -BREAK : 'break'; -BYTE : 'byte'; -CASE : 'case'; -CATCH : 'catch'; -CHAR : 'char'; -CLASS : 'class'; -CONST : 'const'; -CONTINUE : 'continue'; -DEFAULT : 'default'; -DO : 'do'; -DOUBLE : 'double'; -ELSE : 'else'; -ENUM : 'enum'; -EXTENDS : 'extends'; -FINAL : 'final'; -FINALLY : 'finally'; -FLOAT : 'float'; -FOR : 'for'; -IF : 'if'; -GOTO : 'goto'; -IMPLEMENTS : 'implements'; -IMPORT : 'import'; -INSTANCEOF : 'instanceof'; -INT : 'int'; -INTERFACE : 'interface'; -LONG : 'long'; -NATIVE : 'native'; -NEW : 'new'; -PACKAGE : 'package'; -PRIVATE : 'private'; -PROTECTED : 'protected'; -PUBLIC : 'public'; -RETURN : 'return'; -SHORT : 'short'; -STATIC : 'static'; -STRICTFP : 'strictfp'; -SUPER : 'super'; -SWITCH : 'switch'; -SYNCHRONIZED : 'synchronized'; -THIS : 'this'; -THROW : 'throw'; -THROWS : 'throws'; -TRANSIENT : 'transient'; -TRY : 'try'; -VOID : 'void'; -VOLATILE : 'volatile'; -WHILE : 'while'; - -// §3.10.1 Integer Literals - -IntegerLiteral - : DecimalIntegerLiteral - | HexIntegerLiteral - | OctalIntegerLiteral - | BinaryIntegerLiteral - ; - -fragment -DecimalIntegerLiteral - : DecimalNumeral IntegerTypeSuffix? - ; - -fragment -HexIntegerLiteral - : HexNumeral IntegerTypeSuffix? - ; - -fragment -OctalIntegerLiteral - : OctalNumeral IntegerTypeSuffix? - ; - -fragment -BinaryIntegerLiteral - : BinaryNumeral IntegerTypeSuffix? - ; - -fragment -IntegerTypeSuffix - : [lL] - ; - -fragment -DecimalNumeral - : '0' - | NonZeroDigit (Digits? | Underscores Digits) - ; - -fragment -Digits - : Digit (DigitsAndUnderscores? Digit)? 
- ; - -fragment -Digit - : '0' - | NonZeroDigit - ; - -fragment -NonZeroDigit - : [1-9] - ; - -fragment -DigitsAndUnderscores - : DigitOrUnderscore+ - ; - -fragment -DigitOrUnderscore - : Digit - | '_' - ; - -fragment -Underscores - : '_'+ - ; - -fragment -HexNumeral - : '0' [xX] HexDigits - ; - -fragment -HexDigits - : HexDigit (HexDigitsAndUnderscores? HexDigit)? - ; - -fragment -HexDigit - : [0-9a-fA-F] - ; - -fragment -HexDigitsAndUnderscores - : HexDigitOrUnderscore+ - ; - -fragment -HexDigitOrUnderscore - : HexDigit - | '_' - ; - -fragment -OctalNumeral - : '0' Underscores? OctalDigits - ; - -fragment -OctalDigits - : OctalDigit (OctalDigitsAndUnderscores? OctalDigit)? - ; - -fragment -OctalDigit - : [0-7] - ; - -fragment -OctalDigitsAndUnderscores - : OctalDigitOrUnderscore+ - ; - -fragment -OctalDigitOrUnderscore - : OctalDigit - | '_' - ; - -fragment -BinaryNumeral - : '0' [bB] BinaryDigits - ; - -fragment -BinaryDigits - : BinaryDigit (BinaryDigitsAndUnderscores? BinaryDigit)? - ; - -fragment -BinaryDigit - : [01] - ; - -fragment -BinaryDigitsAndUnderscores - : BinaryDigitOrUnderscore+ - ; - -fragment -BinaryDigitOrUnderscore - : BinaryDigit - | '_' - ; - -// §3.10.2 Floating-Point Literals - -FloatingPointLiteral - : DecimalFloatingPointLiteral - | HexadecimalFloatingPointLiteral - ; - -fragment -DecimalFloatingPointLiteral - : Digits '.' Digits? ExponentPart? FloatTypeSuffix? - | '.' Digits ExponentPart? FloatTypeSuffix? - | Digits ExponentPart FloatTypeSuffix? - | Digits FloatTypeSuffix - ; - -fragment -ExponentPart - : ExponentIndicator SignedInteger - ; - -fragment -ExponentIndicator - : [eE] - ; - -fragment -SignedInteger - : Sign? Digits - ; - -fragment -Sign - : [+-] - ; - -fragment -FloatTypeSuffix - : [fFdD] - ; - -fragment -HexadecimalFloatingPointLiteral - : HexSignificand BinaryExponent FloatTypeSuffix? - ; - -fragment -HexSignificand - : HexNumeral '.'? - | '0' [xX] HexDigits? '.' HexDigits - ; - -fragment -BinaryExponent - : BinaryExponentIndicator SignedInteger - ; - -fragment -BinaryExponentIndicator - : [pP] - ; - -// §3.10.3 Boolean Literals - -BooleanLiteral - : 'true' - | 'false' - ; - -// §3.10.4 Character Literals - -CharacterLiteral - : '\'' SingleCharacter '\'' - | '\'' EscapeSequence '\'' - ; - -fragment -SingleCharacter - : ~['\\] - ; - -// §3.10.5 String Literals - -StringLiteral - : '"' StringCharacters? 
'"' - ; - -fragment -StringCharacters - : StringCharacter+ - ; - -fragment -StringCharacter - : ~["\\] - | EscapeSequence - ; - -// §3.10.6 Escape Sequences for Character and String Literals - -fragment -EscapeSequence - : '\\' [btnfr"'\\] - | OctalEscape - ; - -fragment -OctalEscape - : '\\' OctalDigit - | '\\' OctalDigit OctalDigit - | '\\' ZeroToThree OctalDigit OctalDigit - ; - -fragment -ZeroToThree - : [0-3] - ; - -// §3.10.7 The Null Literal - -NullLiteral - : 'null' - ; - -// §3.11 Separators - -LPAREN : '('; -RPAREN : ')'; -LBRACE : '{'; -RBRACE : '}'; -LBRACK : '['; -RBRACK : ']'; -SEMI : ';'; -COMMA : ','; -DOT : '.'; - -// §3.12 Operators - -ASSIGN : '='; -GT : '>'; -LT : '<'; -BANG : '!'; -TILDE : '~'; -QUESTION : '?'; -COLON : ':'; -EQUAL : '=='; -LE : '<='; -GE : '>='; -NOTEQUAL : '!='; -AND : '&&'; -OR : '||'; -INC : '++'; -DEC : '--'; -ADD : '+'; -SUB : '-'; -MUL : '*'; -DIV : '/'; -BITAND : '&'; -BITOR : '|'; -CARET : '^'; -MOD : '%'; - -ADD_ASSIGN : '+='; -SUB_ASSIGN : '-='; -MUL_ASSIGN : '*='; -DIV_ASSIGN : '/='; -AND_ASSIGN : '&='; -OR_ASSIGN : '|='; -XOR_ASSIGN : '^='; -MOD_ASSIGN : '%='; -LSHIFT_ASSIGN : '<<='; -RSHIFT_ASSIGN : '>>='; -URSHIFT_ASSIGN : '>>>='; - -// §3.8 Identifiers (must appear after all keywords in the grammar) - -Identifier - : JavaLetter JavaLetterOrDigit* - ; - -fragment -JavaLetter - : [a-zA-Z$_] // these are the "java letters" below 0xFF - | // covers all characters above 0xFF which are not a surrogate - ~[\u0000-\u00FF\uD800-\uDBFF] - {Character.isJavaIdentifierStart(_input.LA(-1))}? - | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF - [\uD800-\uDBFF] [\uDC00-\uDFFF] - {Character.isJavaIdentifierStart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? - ; - -fragment -JavaLetterOrDigit - : [a-zA-Z0-9$_] // these are the "java letters or digits" below 0xFF - | // covers all characters above 0xFF which are not a surrogate - ~[\u0000-\u00FF\uD800-\uDBFF] - {Character.isJavaIdentifierPart(_input.LA(-1))}? - | // covers UTF-16 surrogate pairs encodings for U+10000 to U+10FFFF - [\uD800-\uDBFF] [\uDC00-\uDFFF] - {Character.isJavaIdentifierPart(Character.toCodePoint((char)_input.LA(-2), (char)_input.LA(-1)))}? - ; - -// -// Additional symbols not defined in the lexical specification -// - -AT : '@'; -ELLIPSIS : '...'; - -// -// Whitespace and comments -// - -WS : [ \t\r\n\u000C]+ -> skip - ; - -COMMENT - : '/*' .*? '*/' -> skip - ; - -LINE_COMMENT - : '//' ~[\r\n]* -> skip - ; diff --git a/tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java b/tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java deleted file mode 100644 index d28e615d6..000000000 --- a/tool/test/org/antlr/v4/xtest/JavaUnicodeInputStream.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2013 Terence Parr - * Copyright (c) 2013 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.runtime.misc.NotNull; - -/** - * - * @author Sam Harwell - */ -public class JavaUnicodeInputStream implements CharStream { - @NotNull - private final CharStream source; - private final IntegerList escapeIndexes = new IntegerList(); - private final IntegerList escapeCharacters = new IntegerList(); - private final IntegerList escapeIndirectionLevels = new IntegerList(); - - private int escapeListIndex; - private int range; - private int slashCount; - - private int la1; - - public JavaUnicodeInputStream(@NotNull CharStream source) { - if (source == null) { - throw new NullPointerException("source"); - } - - this.source = source; - this.la1 = source.LA(1); - } - - @Override - public int size() { - return source.size(); - } - - @Override - public int index() { - return source.index(); - } - - @Override - public String getSourceName() { - return source.getSourceName(); - } - - @Override - public String getText(Interval interval) { - return source.getText(interval); - } - - @Override - public void consume() { - if (la1 != '\\') { - source.consume(); - la1 = source.LA(1); - range = Math.max(range, source.index()); - slashCount = 0; - return; - } - - // make sure the next character has been processed - this.LA(1); - - if (escapeListIndex >= escapeIndexes.size() || escapeIndexes.get(escapeListIndex) != index()) { - source.consume(); - slashCount++; - } - else { - int indirectionLevel = escapeIndirectionLevels.get(escapeListIndex); - for (int i = 0; i < 6 + indirectionLevel; i++) { - source.consume(); - } - - escapeListIndex++; - slashCount = 0; - } - - la1 = source.LA(1); - assert range >= index(); - } - - @Override - public int LA(int i) { - if (i == 1 && la1 != '\\') { - return la1; - } - - if (i <= 0) { - int desiredIndex = index() + i; - for (int j = escapeListIndex - 1; j >= 0; j--) { - if (escapeIndexes.get(j) + 6 + escapeIndirectionLevels.get(j) > desiredIndex) { - desiredIndex -= 5 + escapeIndirectionLevels.get(j); - } - - if (escapeIndexes.get(j) == desiredIndex) { - return escapeCharacters.get(j); - } - } - - return source.LA(desiredIndex - index()); - } - else { - int desiredIndex = index() + i - 1; - for (int j = escapeListIndex; j < escapeIndexes.size(); j++) { - if (escapeIndexes.get(j) == desiredIndex) { - return escapeCharacters.get(j); - } - else if (escapeIndexes.get(j) < desiredIndex) { - desiredIndex += 5 + escapeIndirectionLevels.get(j); - } - else { - return source.LA(desiredIndex - index() + 1); - } - } - - int[] 
currentIndex = { index() }; - int[] slashCountPtr = { slashCount }; - int[] indirectionLevelPtr = { 0 }; - for (int j = 0; j < i; j++) { - int previousIndex = currentIndex[0]; - int c = readCharAt(currentIndex, slashCountPtr, indirectionLevelPtr); - if (currentIndex[0] > range) { - if (currentIndex[0] - previousIndex > 1) { - escapeIndexes.add(previousIndex); - escapeCharacters.add(c); - escapeIndirectionLevels.add(indirectionLevelPtr[0]); - } - - range = currentIndex[0]; - } - - if (j == i - 1) { - return c; - } - } - - throw new IllegalStateException("shouldn't be reachable"); - } - } - - @Override - public int mark() { - return source.mark(); - } - - @Override - public void release(int marker) { - source.release(marker); - } - - @Override - public void seek(int index) { - if (index > range) { - throw new UnsupportedOperationException(); - } - - source.seek(index); - la1 = source.LA(1); - - slashCount = 0; - while (source.LA(-slashCount - 1) == '\\') { - slashCount++; - } - - escapeListIndex = escapeIndexes.binarySearch(source.index()); - if (escapeListIndex < 0) { - escapeListIndex = -escapeListIndex - 1; - } - } - - private static boolean isHexDigit(int c) { - return c >= '0' && c <= '9' - || c >= 'a' && c <= 'f' - || c >= 'A' && c <= 'F'; - } - - private static int hexValue(int c) { - if (c >= '0' && c <= '9') { - return c - '0'; - } - - if (c >= 'a' && c <= 'f') { - return c - 'a' + 10; - } - - if (c >= 'A' && c <= 'F') { - return c - 'A' + 10; - } - - throw new IllegalArgumentException("c"); - } - - private int readCharAt(int[] nextIndexPtr, int[] slashCountPtr, int[] indirectionLevelPtr) { - assert nextIndexPtr != null && nextIndexPtr.length == 1; - assert slashCountPtr != null && slashCountPtr.length == 1; - assert indirectionLevelPtr != null && indirectionLevelPtr.length == 1; - - boolean blockUnicodeEscape = (slashCountPtr[0] % 2) != 0; - - int c0 = source.LA(nextIndexPtr[0] - index() + 1); - if (c0 == '\\') { - slashCountPtr[0]++; - - if (!blockUnicodeEscape) { - int c1 = source.LA(nextIndexPtr[0] - index() + 2); - if (c1 == 'u') { - int c2 = source.LA(nextIndexPtr[0] - index() + 3); - indirectionLevelPtr[0] = 0; - while (c2 == 'u') { - indirectionLevelPtr[0]++; - c2 = source.LA(nextIndexPtr[0] - index() + 3 + indirectionLevelPtr[0]); - } - - int c3 = source.LA(nextIndexPtr[0] - index() + 4 + indirectionLevelPtr[0]); - int c4 = source.LA(nextIndexPtr[0] - index() + 5 + indirectionLevelPtr[0]); - int c5 = source.LA(nextIndexPtr[0] - index() + 6 + indirectionLevelPtr[0]); - if (isHexDigit(c2) && isHexDigit(c3) && isHexDigit(c4) && isHexDigit(c5)) { - int value = hexValue(c2); - value = (value << 4) + hexValue(c3); - value = (value << 4) + hexValue(c4); - value = (value << 4) + hexValue(c5); - - nextIndexPtr[0] += 6 + indirectionLevelPtr[0]; - slashCountPtr[0] = 0; - return value; - } - } - } - } - - nextIndexPtr[0]++; - return c0; - } -} diff --git a/tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java b/tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java deleted file mode 100644 index fcf0d2e20..000000000 --- a/tool/test/org/antlr/v4/xtest/ParserInterpreterForTesting.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.Tool; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.DecisionState; -import org.antlr.v4.runtime.atn.ParserATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.NotNull; -import org.antlr.v4.runtime.misc.Nullable; -import org.antlr.v4.tool.Grammar; - -public class ParserInterpreterForTesting { - public static class DummyParser extends Parser { - public final ATN atn; - public final DFA[] decisionToDFA; // not shared for interp - public final PredictionContextCache sharedContextCache = - new PredictionContextCache(); - - public Grammar g; - public DummyParser(Grammar g, ATN atn, TokenStream input) { - super(input); - this.g = g; - this.atn = atn; - this.decisionToDFA = new DFA[atn.getNumberOfDecisions()]; - for (int i = 0; i < decisionToDFA.length; i++) { - decisionToDFA[i] = new DFA(atn.getDecisionState(i), i); - } - } - - @Override - public String getGrammarFileName() { - throw new UnsupportedOperationException("not implemented"); - } - - @Override - public String[] getRuleNames() { - return g.rules.keySet().toArray(new String[g.rules.size()]); - } - - @Override - @Deprecated - public String[] getTokenNames() { - return g.getTokenNames(); - } - - @Override - public ATN getATN() { - return atn; - } - } - - protected Grammar g; - public DummyParser parser; - protected ParserATNSimulator atnSimulator; - protected TokenStream input; - - public ParserInterpreterForTesting(@NotNull Grammar g) { - this.g = g; - } - - public ParserInterpreterForTesting(@NotNull Grammar g, @NotNull TokenStream input) { - Tool antlr = new Tool(); - antlr.process(g,false); - parser = new DummyParser(g, g.atn, input); - atnSimulator = - new ParserATNSimulator(parser, g.atn, parser.decisionToDFA, - parser.sharedContextCache); - } - - public int adaptivePredict(@NotNull TokenStream input, int decision, - @Nullable ParserRuleContext outerContext) - { - return atnSimulator.adaptivePredict(input, decision, outerContext); - } - - public int matchATN(@NotNull 
TokenStream input, - @NotNull ATNState startState) - { - if (startState.getNumberOfTransitions() == 1) { - return 1; - } - else if (startState instanceof DecisionState) { - return atnSimulator.adaptivePredict(input, ((DecisionState)startState).decision, null); - } - else if (startState.getNumberOfTransitions() > 0) { - return 1; - } - else { - return -1; - } - } - - public ParserATNSimulator getATNSimulator() { - return atnSimulator; - } - -} diff --git a/tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 b/tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 deleted file mode 100644 index 4d52dfc29..000000000 --- a/tool/test/org/antlr/v4/xtest/PositionAdjustingLexer.g4 +++ /dev/null @@ -1,141 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
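The PositionAdjustingLexer grammar that follows matches trailing context in its TOKENS and LABEL rules ('{' after 'tokens', '=' or '+=' after an identifier) only to decide the token type; the emit() override then rewinds the accept position so the emitted token covers just the keyword or identifier, and the trailing characters are lexed again as ordinary tokens. As a rough illustration of the intended behaviour (a sketch, not recorded output), an input such as

    tokens { x = y

should come out as something like

    TOKENS:"tokens"  LCURLY:"{"  LABEL:"x"  ASSIGN:"="  IDENTIFIER:"y"

with the whitespace skipped, rather than as a TOKENS token that swallows the '{' or a LABEL that swallows the '='.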
- */ -lexer grammar PositionAdjustingLexer; - -@members { - @Override - public Token nextToken() { - if (!(_interp instanceof PositionAdjustingLexerATNSimulator)) { - _interp = new PositionAdjustingLexerATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache); - } - - return super.nextToken(); - } - - @Override - public Token emit() { - switch (_type) { - case TOKENS: - handleAcceptPositionForKeyword("tokens"); - break; - - case LABEL: - handleAcceptPositionForIdentifier(); - break; - - default: - break; - } - - return super.emit(); - } - - private boolean handleAcceptPositionForIdentifier() { - String tokenText = getText(); - int identifierLength = 0; - while (identifierLength < tokenText.length() && isIdentifierChar(tokenText.charAt(identifierLength))) { - identifierLength++; - } - - if (getInputStream().index() > _tokenStartCharIndex + identifierLength) { - int offset = identifierLength - 1; - getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset); - return true; - } - - return false; - } - - private boolean handleAcceptPositionForKeyword(String keyword) { - if (getInputStream().index() > _tokenStartCharIndex + keyword.length()) { - int offset = keyword.length() - 1; - getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset); - return true; - } - - return false; - } - - @Override - public PositionAdjustingLexerATNSimulator getInterpreter() { - return (PositionAdjustingLexerATNSimulator)super.getInterpreter(); - } - - private static boolean isIdentifierChar(char c) { - return Character.isLetterOrDigit(c) || c == '_'; - } - - protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimulator { - - public PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn, - DFA[] decisionToDFA, - PredictionContextCache sharedContextCache) - { - super(recog, atn, decisionToDFA, sharedContextCache); - } - - protected void resetAcceptPosition(CharStream input, int index, int line, int charPositionInLine) { - input.seek(index); - this.line = line; - this.charPositionInLine = charPositionInLine; - consume(input); - } - - } -} - -ASSIGN : '=' ; -PLUS_ASSIGN : '+=' ; -LCURLY: '{'; - -// 'tokens' followed by '{' -TOKENS : 'tokens' IGNORED '{'; - -// IDENTIFIER followed by '+=' or '=' -LABEL - : IDENTIFIER IGNORED '+'? '=' - ; - -IDENTIFIER - : [a-zA-Z_] [a-zA-Z0-9_]* - ; - -fragment -IGNORED - : [ \t\r\n]* - ; - -NEWLINE - : [\r\n]+ -> skip - ; - -WS - : [ \t]+ -> skip - ; diff --git a/tool/test/org/antlr/v4/xtest/Psl.g4 b/tool/test/org/antlr/v4/xtest/Psl.g4 deleted file mode 100644 index acc64488d..000000000 --- a/tool/test/org/antlr/v4/xtest/Psl.g4 +++ /dev/null @@ -1,348 +0,0 @@ -grammar Psl; - -@parser::members -{ - public void printPosition(String name, Token tok) - { - System.out.printf("%s: pos %d, len %d%n", - name, tok.getCharPositionInLine(), tok.getText().length()); - } - - - /** - * Checks whether a set of digit groups and commas construct - * a valid command-number. - * - * @param digits - * The groups of digits, each group in a separate item. - * @param commas - * The commas found separating the digit groups. - * - * There should be one more digit group than commas. - * There should be no internal white space. 
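 *
 * For illustration (columns are the 0-based values reported by
 * Token.getCharPositionInLine(), as used in the check below):
 *
 *   "1,234"  -> digit groups "1" and "234" at columns 0 and 2, comma at
 *               column 1; 0 + len("1") == 1 and 1 + 1 == 2, so accepted.
 *   "1, 234" -> the comma is still at column 1, but the next group
 *               starts at column 3, so 1 + 1 != 3 and it is rejected.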
- * - * @returns true (valid), false (invalid) - */ - - public boolean isValidCommaNumber(List digits, List commas) - { - Token[] aDigits = new Token[0]; - Token[] aCommas = new Token[0]; - int j; - - aDigits = digits.toArray(aDigits); - aCommas = commas.toArray(aCommas); - if (aDigits.length != aCommas.length + 1) - { - return false; - } - for (j = 0; j < aCommas.length; ++j) - { - int p1, p2, p3; - p1 = aDigits[j].getCharPositionInLine() - + aDigits[j].getText().length(); - p2 = aCommas[j].getCharPositionInLine(); - p3 = aDigits[j + 1].getCharPositionInLine(); - if (p1 != p2 || (p2 + 1) != p3) - { - return false; - } - } - return true; - } - - - /** - * Checks whether a the pieces of a floating-point number - * construct a valid number. - * - * @param whole - * The whole part of the number. Can be null. - * @param period - * The decimal point. - * @param fraction - * The fraction part of the number. Can be null. - * - * At least one of the whole or fraction must be present. - * The decimal point is required. - * - * @returns true (valid), false (invalid) - */ - - public boolean isValidFloatingConstant( - Token whole, - Token period, - Token fraction - ) - { - boolean foundDigits = false; - int column; - - if (whole != null) - { - foundDigits = true; - column = whole.getCharPositionInLine() - + whole.getText().length(); - if (column != period.getCharPositionInLine()) - { - return false; - } - } - if (fraction != null) - { - foundDigits = true; - column = period.getCharPositionInLine() + 1; - if (column != fraction.getCharPositionInLine()) - { - return false; - } - } - return foundDigits; - } -} - -translation_unit - : numeric_range - EOF - ; - -pattern - : numeric_range - ; - -numeric_range - : EURO_NUMBER - PAREN_LEFT - numeric_endpoint - TILDE - numeric_endpoint - PAREN_RIGHT - | NUMBER - PAREN_LEFT - numeric_endpoint - TILDE - numeric_endpoint - PAREN_RIGHT - ; - -numeric_endpoint - : ( PLUS | MINUS )? integer_constant - | ( PLUS | MINUS )? floating_constant - | ( PLUS | MINUS )? comma_number - ; - - /* Floating-point numbers and comma numbers are valid only - * as numeric endpoints in number() or euro_number(). Otherwise, - * the pieces should be parsed as separate lexical tokens, such as - * - * integer_constant '.' integer_constant - * - * Because of parser lookahead and the subtle interactions between - * the parser and the lexer, changing lexical modes from the parser - * is not safe. The code below checks the constraints for floating - * numbers, forbidding internal white space. - */ - -floating_constant - : comma_number PERIOD fraction=DIGIT_SEQUENCE? - { - isValidFloatingConstant($comma_number.stop, $PERIOD, $fraction) - }? - - /*| whole=DIGIT_SEQUENCE PERIOD fraction=DIGIT_SEQUENCE? - { - isValidFloatingConstant($whole, $PERIOD, $fraction) - }?/* */ - - | PERIOD fraction=DIGIT_SEQUENCE - { - isValidFloatingConstant(null, $PERIOD, $fraction) - }? - ; - -comma_number - : digits+=DIGIT_SEQUENCE ( commas+=COMMA digits+=DIGIT_SEQUENCE )+ - { - isValidCommaNumber($digits, $commas) - }? - ; - -term_expression - : term - | RETURN - ( - PAREN_LEFT - ( integer_constant | ALL ) - PAREN_RIGHT - )? 
- term - ; - -term - : pattern - | PAREN_LEFT term_expression PAREN_RIGHT - ; - -integer_constant - : DIGIT_SEQUENCE - | INTEGER_CONSTANT - | BINARY_CONSTANT - | DECIMAL_CONSTANT - | HEXADECIMAL_CONSTANT - | OCTAL_CONSTANT - ; - -// LEXER - -/* Letter fragments - */ - -fragment A: [Aa] ; -fragment B: [BB] ; -fragment C: [Cc] ; -fragment D: [Dd] ; -fragment E: [Ee] ; -fragment F: [Ff] ; -fragment G: [Gg] ; -fragment H: [Hh] ; -fragment I: [Ii] ; -fragment J: [Jj] ; -fragment K: [Kk] ; -fragment L: [Ll] ; -fragment M: [Mm] ; -fragment N: [Nn] ; -fragment O: [Oo] ; -fragment P: [Pp] ; -fragment Q: [Qq] ; -fragment R: [Rr] ; -fragment S: [Ss] ; -fragment T: [Tt] ; -fragment U: [Uu] ; -fragment V: [Vv] ; -fragment W: [Ww] ; -fragment X: [Xx] ; -fragment Y: [Yy] ; -fragment Z: [Zz] ; - - -WHITESPACE_IN_LINE - : [ \t]+ - -> skip - ; - -NEWLINE - : '\r'? '\n' - -> skip - ; - -WHITESPACE_ALL - : [ \n\r\t]+ - -> skip - ; - - - /* A sequence of decimal digits is useful on its own, - * to avoid the base-prefixes (0b, 0x, ...) that an - * INTEGER_CONTANT would allow. - * Need to define before INTEGER_CONSTANT to make sure - * DIGIT_SEQUENCE is recognized before INTEGER_CONSTANT. - */ - -DIGIT_SEQUENCE - : [0-9]+ - ; - -INTEGER_CONSTANT - : BINARY_CONSTANT - | DECIMAL_CONSTANT - | HEXADECIMAL_CONSTANT - | OCTAL_CONSTANT - ; - -BINARY_CONSTANT - : '0' [Bb] [0-1]+ - ; - -DECIMAL_CONSTANT - : ( '0' [Dd] )? [0-9]+ - ; - -HEXADECIMAL_CONSTANT - : '0' [HhXx] [0-9a-fA-F]+ - ; - -OCTAL_CONSTANT - : '0' [Oo] [0-7]+ - ; - -/* keywords - */ - -ALL - : A L L - ; - -EURO_NUMBER - : E U R O '_' N U M B E R - ; - - -NUMBER - : N U M B E R - ; - -RETURN - : R E T U R N - ; - -IDENTIFIER - : [A-Za-z][A-Za-z0-9_]* - ; - - -/* The single-character tokens. - */ - -COMMA - : ',' - ; - -MINUS - : '-' - ; - -PAREN_LEFT - : '(' - ; - -PAREN_RIGHT - : ')' - ; - -PERIOD - : '.' - ; - -PLUS - : '+' - ; - -TILDE - : '~' - ; - - /* This rule must be last (or nearly last) to avoid - * matching individual characters for other rules. - */ - -ANY_CHAR_BUT_NEWLINE - : ~[\n\r] - ; diff --git a/tool/test/org/antlr/v4/xtest/TestASTStructure.gunit b/tool/test/org/antlr/v4/xtest/TestASTStructure.gunit deleted file mode 100644 index 9641b13d7..000000000 --- a/tool/test/org/antlr/v4/xtest/TestASTStructure.gunit +++ /dev/null @@ -1,155 +0,0 @@ -/** Test ANTLRParser's AST construction. Translate to junit tests with: - * - * $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit - - NO LONGER using gunit!!! 
- - */ -gunit TestASTStructure; - -@header {package org.antlr.v4.test;} -options { - adaptor = org.antlr.v4.parse.GrammarASTAdaptor; - parser = org.antlr.v4.parse.ANTLRParser; - lexer = org.antlr.v4.parse.ANTLRLexer; -} - -grammarSpec: - "parser grammar P; a : A;" - -> (PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - tokens { A; B='33'; } - @header {foo} - a : A; - >> - -> - (PARSER_GRAMMAR P - (tokens { A (= B '33')) - (@ header {foo}) - (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - @header {foo} - tokens { A; B='33'; } - a : A; - >> - -> - (PARSER_GRAMMAR P - (@ header {foo}) - (tokens { A (= B '33')) - (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - import A=B, C; - a : A; - >> - -> - (PARSER_GRAMMAR P - (import (= A B) C) - (RULES (RULE a (BLOCK (ALT A))))) - -delegateGrammars: - "import A;" -> (import A) - -rule: - "a : A;" -> - (RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c)))))) - "A : B+;" -> (RULE A (BLOCK (ALT (+ (BLOCK (ALT B)))))) - - << - public a[int i] returns [int y] - options {backtrack=true;} - @init {blort} - : ID ; - >> - -> - (RULE a - (RULEMODIFIERS public) - int i - (returns int y) - (OPTIONS (= backtrack true)) - (@ init {blort}) - (BLOCK (ALT ID))) - - << - a[int i] returns [int y] - @init {blort} - options {backtrack=true;} - : ID; - >> - -> - (RULE a int i - (returns int y) - (@ init {blort}) - (OPTIONS (= backtrack true)) - (BLOCK (ALT ID))) - - << - a : ID ; - catch[A b] {foo} - finally {bar} - >> - -> - (RULE a (BLOCK (ALT ID)) - (catch A b {foo}) (finally {bar})) - - << - a : ID ; - catch[A a] {foo} - catch[B b] {fu} - finally {bar} - >> - -> - (RULE a (BLOCK (ALT ID)) - (catch A a {foo}) (catch B b {fu}) (finally {bar})) - - << - a[int i] - locals [int a, float b] - : A - ; - >> - -> (RULE a int i (locals int a, float b) (BLOCK (ALT A))) - - << - a[int i] throws a.b.c - : A - ; - >> - -> (RULE a int i (throws a.b.c) (BLOCK (ALT A))) - -ebnf: - "(A|B)" -> (BLOCK (ALT A) (ALT B)) - "(A|B)?" -> (? (BLOCK (ALT A) (ALT B))) - "(A|B)*" -> (* (BLOCK (ALT A) (ALT B))) - "(A|B)+" -> (+ (BLOCK (ALT A) (ALT B))) - -element: - "~A" -> (~ (SET A)) - "b+" -> (+ (BLOCK (ALT b))) - "(b)+" -> (+ (BLOCK (ALT b))) - "b?" -> (? (BLOCK (ALT b))) - "(b)?" -> (? (BLOCK (ALT b))) - "(b)*" -> (* (BLOCK (ALT b))) - "b*" -> (* (BLOCK (ALT b))) - "'while'*" -> (* (BLOCK (ALT 'while'))) - "'a'+" -> (+ (BLOCK (ALT 'a'))) - "a[3]" -> (a 3) - "'a'..'z'+" -> (+ (BLOCK (ALT (.. 'a' 'z')))) - "x=ID" -> (= x ID) - "x=ID?" -> (? (BLOCK (ALT (= x ID)))) - "x=ID*" -> (* (BLOCK (ALT (= x ID)))) - "x=b" -> (= x b) - "x=(A|B)" -> (= x (BLOCK (ALT A) (ALT B))) - "x=~(A|B)" -> (= x (~ (SET A B))) - "x+=~(A|B)" -> (+= x (~ (SET A B))) - "x+=~(A|B)+"-> (+ (BLOCK (ALT (+= x (~ (SET A B)))))) - "x=b+" -> (+ (BLOCK (ALT (= x b)))) - "x+=ID*" -> (* (BLOCK (ALT (+= x ID)))) - "x+='int'*" -> (* (BLOCK (ALT (+= x 'int')))) - "x+=b+" -> (+ (BLOCK (ALT (+= x b)))) - "({blort} 'x')*" -> (* (BLOCK (ALT {blort} 'x'))) diff --git a/tool/test/org/antlr/v4/xtest/TestASTStructure.java b/tool/test/org/antlr/v4/xtest/TestASTStructure.java deleted file mode 100644 index 6f0c15f62..000000000 --- a/tool/test/org/antlr/v4/xtest/TestASTStructure.java +++ /dev/null @@ -1,406 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.runtime.ANTLRStringStream; -import org.antlr.runtime.CharStream; -import org.antlr.runtime.CommonTokenStream; -import org.antlr.runtime.Parser; -import org.antlr.runtime.RuleReturnScope; -import org.antlr.runtime.TokenSource; -import org.antlr.runtime.TokenStream; -import org.antlr.runtime.tree.Tree; -import org.antlr.runtime.tree.TreeAdaptor; -import org.junit.Test; - -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; - -import static org.junit.Assert.assertEquals; - -// NO LONGER using gunit!!! 
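The test class below drives the ANTLR tool's own (ANTLR 3 generated) lexer and parser entirely through reflection, which makes the individual tests a little opaque. Unfolded, the call made by test_grammarSpec1 further down amounts to roughly the following sketch (class names are taken from the lexerClassName/parserClassName/adaptorClassName fields of the class, and the code would sit in a method that declares throws Exception, as the tests do):

    ANTLRStringStream in = new ANTLRStringStream("parser grammar P; a : A;");
    org.antlr.v4.parse.ANTLRLexer lexer = new org.antlr.v4.parse.ANTLRLexer(in);
    in.setLine(15);                              // report positions against the gunit script line, as execParser does
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    org.antlr.v4.parse.ANTLRParser parser = new org.antlr.v4.parse.ANTLRParser(tokens);
    parser.setTreeAdaptor(new org.antlr.v4.parse.GrammarASTAdaptor());
    RuleReturnScope r = parser.grammarSpec();    // the rule named in execParser's first argument
    String actual = ((Tree) r.getTree()).toStringTree();
    // expected: "(PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))"

The reflective indirection presumably exists so the same harness can be retargeted at a different lexer, parser, or tree adaptor simply by changing those three string fields.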
- -public class TestASTStructure { - String lexerClassName = "org.antlr.v4.parse.ANTLRLexer"; - String parserClassName = "org.antlr.v4.parse.ANTLRParser"; - String adaptorClassName = "org.antlr.v4.parse.GrammarASTAdaptor"; - - public Object execParser( - String ruleName, - String input, - int scriptLine) - throws Exception - { - ANTLRStringStream is = new ANTLRStringStream(input); - Class lexerClass = Class.forName(lexerClassName).asSubclass(TokenSource.class); - Constructor lexConstructor = lexerClass.getConstructor(CharStream.class); - TokenSource lexer = lexConstructor.newInstance(is); - is.setLine(scriptLine); - - CommonTokenStream tokens = new CommonTokenStream(lexer); - - Class parserClass = Class.forName(parserClassName).asSubclass(Parser.class); - Constructor parConstructor = parserClass.getConstructor(TokenStream.class); - Parser parser = parConstructor.newInstance(tokens); - - // set up customized tree adaptor if necessary - if ( adaptorClassName!=null ) { - Method m = parserClass.getMethod("setTreeAdaptor", TreeAdaptor.class); - Class adaptorClass = Class.forName(adaptorClassName).asSubclass(TreeAdaptor.class); - m.invoke(parser, adaptorClass.newInstance()); - } - - Method ruleMethod = parserClass.getMethod(ruleName); - - // INVOKE RULE - return ruleMethod.invoke(parser); - } - - @Test public void test_grammarSpec1() throws Exception { - // gunit test on line 15 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } - - @Test public void test_grammarSpec2() throws Exception { - // gunit test on line 18 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n tokens { A, B }\n @header {foo}\n a : A;\n ", 18); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (tokens { A B) (@ header {foo}) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } - - @Test public void test_grammarSpec3() throws Exception { - // gunit test on line 30 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n @header {foo}\n tokens { A,B }\n a : A;\n ", 30); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (@ header {foo}) (tokens { A B) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } - - @Test public void test_grammarSpec4() throws Exception { - // gunit test on line 42 - RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n import A=B, C;\n a : A;\n ", 42); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(PARSER_GRAMMAR P (import (= A B) C) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); - } @Test public void test_delegateGrammars1() throws Exception { - // gunit test on line 53 - RuleReturnScope rstruct = (RuleReturnScope)execParser("delegateGrammars", "import A;", 53); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(import A)"; - assertEquals("testing rule delegateGrammars", expecting, actual); - } @Test public void test_rule1() throws Exception { - // gunit test on line 56 - RuleReturnScope rstruct = 
(RuleReturnScope)execParser("rule", "a : A;", 56); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c))))))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule2() throws Exception { - // gunit test on line 58 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "A : B+;", 58); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE A (BLOCK (ALT (+ (BLOCK (ALT B))))))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule3() throws Exception { - // gunit test on line 60 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n : ID ;\n ", 60); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (BLOCK (ALT ID)))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule4() throws Exception { - // gunit test on line 75 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n options {backtrack=true;}\n : ID;\n ", 75); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (OPTIONS (= backtrack true)) (BLOCK (ALT ID)))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule5() throws Exception { - // gunit test on line 88 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A b] {foo}\n finally {bar}\n ", 88); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A b {foo}) (finally {bar}))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule6() throws Exception { - // gunit test on line 97 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A a] {foo}\n catch[B b] {fu}\n finally {bar}\n ", 97); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A a {foo}) (catch B b {fu}) (finally {bar}))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule7() throws Exception { - // gunit test on line 107 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n\ta[int i]\n\tlocals [int a, float b]\n\t\t:\tA\n\t\t;\n\t", 107); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a int i (locals int a, float b) (BLOCK (ALT A)))"; - assertEquals("testing rule rule", expecting, actual); - } - - @Test public void test_rule8() throws Exception { - // gunit test on line 115 - RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n\ta[int i] throws a.b.c\n\t\t:\tA\n\t\t;\n\t", 115); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(RULE a int i (throws a.b.c) (BLOCK (ALT A)))"; - assertEquals("testing rule rule", expecting, actual); - } @Test public void test_ebnf1() throws Exception { - // gunit test on line 123 - RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)", 123); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(BLOCK (ALT A) (ALT B))"; - assertEquals("testing rule ebnf", expecting, actual); - } - - @Test public void test_ebnf2() throws Exception { - // gunit test on line 
124 - RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)?", 124); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule ebnf", expecting, actual); - } - - @Test public void test_ebnf3() throws Exception { - // gunit test on line 125 - RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)*", 125); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule ebnf", expecting, actual); - } - - @Test public void test_ebnf4() throws Exception { - // gunit test on line 126 - RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)+", 126); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule ebnf", expecting, actual); - } @Test public void test_element1() throws Exception { - // gunit test on line 129 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "~A", 129); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(~ (SET A))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element2() throws Exception { - // gunit test on line 130 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b+", 130); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element3() throws Exception { - // gunit test on line 131 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)+", 131); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element4() throws Exception { - // gunit test on line 132 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b?", 132); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element5() throws Exception { - // gunit test on line 133 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)?", 133); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? 
(BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element6() throws Exception { - // gunit test on line 134 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)*", 134); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element7() throws Exception { - // gunit test on line 135 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b*", 135); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element8() throws Exception { - // gunit test on line 136 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'while'*", 136); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT 'while')))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element9() throws Exception { - // gunit test on line 137 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'+", 137); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT 'a')))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element10() throws Exception { - // gunit test on line 138 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "a[3]", 138); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(a 3)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element11() throws Exception { - // gunit test on line 139 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'..'z'+", 139); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (.. 'a' 'z'))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element12() throws Exception { - // gunit test on line 140 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID", 140); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x ID)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element13() throws Exception { - // gunit test on line 141 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID?", 141); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(? 
(BLOCK (ALT (= x ID))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element14() throws Exception { - // gunit test on line 142 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID*", 142); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (= x ID))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element15() throws Exception { - // gunit test on line 143 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b", 143); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x b)"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element16() throws Exception { - // gunit test on line 144 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=(A|B)", 144); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element17() throws Exception { - // gunit test on line 145 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=~(A|B)", 145); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(= x (~ (SET A B)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element18() throws Exception { - // gunit test on line 146 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)", 146); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+= x (~ (SET A B)))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element19() throws Exception { - // gunit test on line 147 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)+", 147); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (+= x (~ (SET A B))))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element20() throws Exception { - // gunit test on line 148 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b+", 148); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (= x b))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element21() throws Exception { - // gunit test on line 149 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=ID*", 149); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (+= x ID))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element22() throws Exception { - // gunit test on line 150 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+='int'*", 150); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT (+= x 'int'))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element23() throws Exception { - // gunit test on line 151 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=b+", 151); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(+ (BLOCK (ALT (+= x b))))"; - assertEquals("testing rule element", expecting, actual); - } - - @Test public void test_element24() 
throws Exception { - // gunit test on line 152 - RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "({blort} 'x')*", 152); - Object actual = ((Tree)rstruct.getTree()).toStringTree(); - Object expecting = "(* (BLOCK (ALT {blort} 'x')))"; - assertEquals("testing rule element", expecting, actual); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestATNConstruction.java b/tool/test/org/antlr/v4/xtest/TestATNConstruction.java deleted file mode 100644 index 68a8b98e2..000000000 --- a/tool/test/org/antlr/v4/xtest/TestATNConstruction.java +++ /dev/null @@ -1,981 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
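
Each of the generated gunit tests above repeats the same four-step pattern: run the parser on a rule with execParser, pull the tree out of the RuleReturnScope, render it with toStringTree, and compare against the expected LISP-style string. A small helper capturing that pattern is sketched below; execParser and the tree types come from the ANTLR 3 runtime and the generated test's base class, which are assumed here rather than shown in this hunk.

    // Sketch only: assumes org.antlr.runtime.RuleReturnScope and
    // org.antlr.runtime.tree.Tree (ANTLR 3 runtime), plus the execParser(...)
    // helper provided by the generated test's base class.
    static void assertRuleTree(String ruleName, RuleReturnScope rstruct, String expecting) {
        Object actual = ((Tree) rstruct.getTree()).toStringTree();
        assertEquals("testing rule " + ruleName, expecting, actual);
    }

    // e.g. test_element1 would then reduce to:
    // RuleReturnScope rstruct = (RuleReturnScope) execParser("element", "~A", 129);
    // assertRuleTree("element", rstruct, "(~ (SET A))");
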
- */ -package org.antlr.v4.xtest; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.parse.ANTLRParser; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.tool.ErrorType; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.ast.GrammarAST; -import org.antlr.v4.tool.ast.GrammarRootAST; -import org.antlr.v4.tool.ast.RuleAST; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class TestATNConstruction extends BaseTest { - @Test - public void testA() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A;"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-A->s3\n" + - "s3->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s4\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A B ;"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-A->s3\n" + - "s3-B->s4\n" + - "s4->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAorB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A | B {;} ;"); - String expecting = - "RuleStart_a_0->BlockStart_5\n" + - "BlockStart_5->s2\n" + - "BlockStart_5->s3\n" + - "s2-A->BlockEnd_6\n" + - "s3-B->s4\n" + - "BlockEnd_6->RuleStop_a_1\n" + - "s4-action_0:-1->BlockEnd_6\n" + - "RuleStop_a_1-EOF->s7\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testSetAorB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A | B ;"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-{A, B}->s3\n" + - "s3->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s4\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testLexerIsntSetMultiCharString() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar P;\n"+ - "A : ('0x' | '0X') ;"); - String expecting = - "s0->RuleStart_A_1\n" + - "RuleStart_A_1->BlockStart_7\n" + - "BlockStart_7->s3\n" + - "BlockStart_7->s5\n" + - "s3-'0'->s4\n" + - "s5-'0'->s6\n" + - "s4-'x'->BlockEnd_8\n" + - "s6-'X'->BlockEnd_8\n" + - "BlockEnd_8->RuleStop_A_2\n"; - checkTokensRule(g, null, expecting); - } - @Test public void testRange() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar P;\n"+ - "A : 'a'..'c' ;" - ); - String expecting = - "s0->RuleStart_A_1\n" + - "RuleStart_A_1->s3\n" + - "s3-'a'..'c'->s4\n" + - "s4->RuleStop_A_2\n"; - checkTokensRule(g, null, expecting); - } - @Test public void testRangeOrRange() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar P;\n"+ - "A : ('a'..'c' 'h' | 'q' 'j'..'l') ;" - ); - String expecting = - "s0->RuleStart_A_1\n" + - "RuleStart_A_1->BlockStart_7\n" + - "BlockStart_7->s3\n" + - "BlockStart_7->s5\n" + - "s3-'a'..'c'->s4\n" + - "s5-'q'->s6\n" + - "s4-'h'->BlockEnd_8\n" + - "s6-'j'..'l'->BlockEnd_8\n" + - "BlockEnd_8->RuleStop_A_2\n"; - checkTokensRule(g, null, expecting); - } - @Test public void testStringLiteralInParser() throws Exception { - Grammar g = new Grammar( - "grammar P;\n"+ - "a : A|'b' ;" - ); - String expecting = - 
"RuleStart_a_0->s2\n" + - "s2-{'b', A}->s3\n" + - "s3->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s4\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testABorCD() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A B | C D;"); - String expecting = - "RuleStart_a_0->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "s2-A->s3\n" + - "s4-C->s5\n" + - "s3-B->BlockEnd_7\n" + - "s5-D->BlockEnd_7\n" + - "BlockEnd_7->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s8\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testbA() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : b A ;\n"+ - "b : B ;"); - String expecting = - "RuleStart_a_0->s4\n" + - "s4-b->RuleStart_b_2\n" + - "s5-A->s6\n" + - "s6->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", expecting); - expecting = - "RuleStart_b_2->s7\n" + - "s7-B->s8\n" + - "s8->RuleStop_b_3\n" + - "RuleStop_b_3->s5\n"; - checkRuleATN(g, "b", expecting); - } - @Test public void testFollow() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : b A ;\n"+ - "b : B ;\n"+ - "c : b C;"); - String expecting = - "RuleStart_b_2->s9\n" + - "s9-B->s10\n" + - "s10->RuleStop_b_3\n" + - "RuleStop_b_3->s7\n" + - "RuleStop_b_3->s12\n"; - checkRuleATN(g, "b", expecting); - } - @Test public void testAorEpsilon() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A | ;"); - String expecting = - "RuleStart_a_0->BlockStart_4\n" + - "BlockStart_4->s2\n" + - "BlockStart_4->s3\n" + - "s2-A->BlockEnd_5\n" + - "s3->BlockEnd_5\n" + - "BlockEnd_5->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s6\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAOptional() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A?;"); - String expecting = - "RuleStart_a_0->BlockStart_3\n" + - "BlockStart_3->s2\n" + - "BlockStart_3->BlockEnd_4\n" + - "s2-A->BlockEnd_4\n" + - "BlockEnd_4->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAorBoptional() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A{;}|B)?;"); - String expecting = - "RuleStart_a_0->BlockStart_5\n" + - "BlockStart_5->s2\n" + - "BlockStart_5->s4\n" + - "BlockStart_5->BlockEnd_6\n" + - "s2-A->s3\n" + - "s4-B->BlockEnd_6\n" + - "BlockEnd_6->RuleStop_a_1\n" + - "s3-action_0:-1->BlockEnd_6\n" + - "RuleStop_a_1-EOF->s7\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testSetAorBoptional() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A|B)?;"); - String expecting = - "RuleStart_a_0->BlockStart_3\n" + - "BlockStart_3->s2\n" + - "BlockStart_3->BlockEnd_4\n" + - "s2-{A, B}->BlockEnd_4\n" + - "BlockEnd_4->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAorBthenC() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A | B) C;"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-{A, B}->s3\n" + - "s3-C->s4\n" + - "s4->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAplus() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A+;"); - String expecting = - "RuleStart_a_0->PlusBlockStart_3\n" + - "PlusBlockStart_3->s2\n" + - "s2-A->BlockEnd_4\n" + - "BlockEnd_4->PlusLoopBack_5\n" + - "PlusLoopBack_5->PlusBlockStart_3\n" + - "PlusLoopBack_5->s6\n" + - 
"s6->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s7\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAplusSingleAltHasPlusASTPointingAtLoopBackState() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "s : a B ;\n" + // (RULE a (BLOCK (ALT (+ (BLOCK (ALT A)))))) - "a : A+;"); - String expecting = - "RuleStart_a_2->PlusBlockStart_8\n" + - "PlusBlockStart_8->s7\n" + - "s7-A->BlockEnd_9\n" + - "BlockEnd_9->PlusLoopBack_10\n" + - "PlusLoopBack_10->PlusBlockStart_8\n" + - "PlusLoopBack_10->s11\n" + - "s11->RuleStop_a_3\n" + - "RuleStop_a_3->s5\n"; - checkRuleATN(g, "a", expecting); - // Get all AST -> ATNState relationships. Make sure loopback is covered when no loop entry decision - List ruleNodes = g.ast.getNodesWithType(ANTLRParser.RULE); - RuleAST a = (RuleAST)ruleNodes.get(1); - List nodesInRule = a.getNodesWithType(null); - Map covered = new LinkedHashMap(); - for (GrammarAST node : nodesInRule) { - if ( node.atnState != null ) { - covered.put(node, node.atnState); - } - } - assertEquals("{RULE=2, BLOCK=8, +=10, BLOCK=8, A=7}", covered.toString()); - } - @Test public void testAorBplus() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A|B{;})+;"); - String expecting = - "RuleStart_a_0->PlusBlockStart_5\n" + - "PlusBlockStart_5->s2\n" + - "PlusBlockStart_5->s3\n" + - "s2-A->BlockEnd_6\n" + - "s3-B->s4\n" + - "BlockEnd_6->PlusLoopBack_7\n" + - "s4-action_0:-1->BlockEnd_6\n" + - "PlusLoopBack_7->PlusBlockStart_5\n" + - "PlusLoopBack_7->s8\n" + - "s8->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAorBorEmptyPlus() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A | B | )+ ;"); - String expecting = - "RuleStart_a_0->PlusBlockStart_5\n" + - "PlusBlockStart_5->s2\n" + - "PlusBlockStart_5->s3\n" + - "PlusBlockStart_5->s4\n" + - "s2-A->BlockEnd_6\n" + - "s3-B->BlockEnd_6\n" + - "s4->BlockEnd_6\n" + - "BlockEnd_6->PlusLoopBack_7\n" + - "PlusLoopBack_7->PlusBlockStart_5\n" + - "PlusLoopBack_7->s8\n" + - "s8->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAStar() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : A*;"); - String expecting = - "RuleStart_a_0->StarLoopEntry_5\n" + - "StarLoopEntry_5->StarBlockStart_3\n" + - "StarLoopEntry_5->s6\n" + - "StarBlockStart_3->s2\n" + - "s6->RuleStop_a_1\n" + - "s2-A->BlockEnd_4\n" + - "RuleStop_a_1-EOF->s8\n" + - "BlockEnd_4->StarLoopBack_7\n" + - "StarLoopBack_7->StarLoopEntry_5\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testNestedAstar() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (COMMA ID*)*;"); - String expecting = - "RuleStart_a_0->StarLoopEntry_11\n" + - "StarLoopEntry_11->StarBlockStart_9\n" + - "StarLoopEntry_11->s12\n" + - "StarBlockStart_9->s2\n" + - "s12->RuleStop_a_1\n" + - "s2-COMMA->StarLoopEntry_6\n" + - "RuleStop_a_1-EOF->s14\n" + - "StarLoopEntry_6->StarBlockStart_4\n" + - "StarLoopEntry_6->s7\n" + - "StarBlockStart_4->s3\n" + - "s7->BlockEnd_10\n" + - "s3-ID->BlockEnd_5\n" + - "BlockEnd_10->StarLoopBack_13\n" + - "BlockEnd_5->StarLoopBack_8\n" + - "StarLoopBack_13->StarLoopEntry_11\n" + - "StarLoopBack_8->StarLoopEntry_6\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testAorBstar() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : (A | B{;})* ;"); - String expecting = - 
"RuleStart_a_0->StarLoopEntry_7\n" + - "StarLoopEntry_7->StarBlockStart_5\n" + - "StarLoopEntry_7->s8\n" + - "StarBlockStart_5->s2\n" + - "StarBlockStart_5->s3\n" + - "s8->RuleStop_a_1\n" + - "s2-A->BlockEnd_6\n" + - "s3-B->s4\n" + - "RuleStop_a_1-EOF->s10\n" + - "BlockEnd_6->StarLoopBack_9\n" + - "s4-action_0:-1->BlockEnd_6\n" + - "StarLoopBack_9->StarLoopEntry_7\n"; - checkRuleATN(g, "a", expecting); - } - @Test public void testPredicatedAorB() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : {p1}? A | {p2}? B ;"); - String expecting = - "RuleStart_a_0->BlockStart_6\n" + - "BlockStart_6->s2\n" + - "BlockStart_6->s4\n" + - "s2-pred_0:0->s3\n" + - "s4-pred_0:1->s5\n" + - "s3-A->BlockEnd_7\n" + - "s5-B->BlockEnd_7\n" + - "BlockEnd_7->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s8\n"; - checkRuleATN(g, "a", expecting); - } - - @Test public void testParserRuleRefInLexerRule() throws Exception { - boolean threwException = false; - ErrorQueue errorQueue = new ErrorQueue(); - try { - String gstr = - "grammar U;\n"+ - "a : A;\n"+ - "A : a;\n"; - - Tool tool = new Tool(); - tool.removeListeners(); - tool.addListener(errorQueue); - assertEquals(0, errorQueue.size()); - GrammarRootAST grammarRootAST = tool.parseGrammarFromString(gstr); - assertEquals(0, errorQueue.size()); - Grammar g = tool.createGrammar(grammarRootAST); - assertEquals(0, errorQueue.size()); - g.fileName = ""; - tool.process(g, false); - } - catch (Exception e) { - threwException = true; - e.printStackTrace(); - } - System.out.println(errorQueue); - assertEquals(1, errorQueue.errors.size()); - assertEquals(ErrorType.PARSER_RULE_REF_IN_LEXER_RULE, errorQueue.errors.get(0).getErrorType()); - assertEquals("[a, A]", Arrays.toString(errorQueue.errors.get(0).getArgs())); - assertTrue(!threwException); - } - -/* - @Test public void testMultiplePredicates() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" + - "b : {p4}? B ;"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - } - @Test public void testSets() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "a : ( A | B )+ ;\n" + - "b : ( A | B{;} )+ ;\n" + - "c : (A|B) (A|B) ;\n" + - "d : ( A | B )* ;\n" + - "e : ( A | B )? 
;"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - expecting = - "\n"; - checkRule(g, "b", expecting); - expecting = - "\n"; - checkRule(g, "c", expecting); - expecting = - "\n"; - checkRule(g, "d", expecting); - expecting = - "\n"; - checkRule(g, "e", expecting); - } - @Test public void testNotSet() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "tokens { A; B; C; }\n"+ - "a : ~A ;\n"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - } - @Test public void testNotSingletonBlockSet() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "tokens { A; B; C; }\n"+ - "a : ~(A) ;\n"); - String expecting = - "\n"; - checkRule(g, "a", expecting); - } - @Test public void testNotCharSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~'3' ;\n"); - String expecting = - "RuleStart_A_1->s5\n" + - "s5-{'\\u0000'..'2', '4'..'\\uFFFE'}->s6\n" + - "s6->RuleStop_A_2\n"; - checkRule(g, "A", expecting); - } - @Test public void testNotBlockSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~('3'|'b') ;\n"); - String expecting = - "\n"; - checkRule(g, "A", expecting); - } - @Test public void testNotSetLoop() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~('3')* ;\n"); - String expecting = - "\n"; - checkRule(g, "A", expecting); - } - @Test public void testNotBlockSetLoop() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : ~('3'|'b')* ;\n"); - String expecting = - "\n"; - checkRule(g, "A", expecting); - } - @Test public void testLabeledNotSet() throws Exception { - Grammar g = new Grammar( - "parser grammar P;\n"+ - "tokens { A; B; C; }\n"+ - "a : t=~A ;\n"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-B..C->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - @Test public void testLabeledNotCharSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : t=~'3' ;\n"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" + - ".s3->:s4\n" + - ":s4-->.s5\n"; - checkRule(g, "A", expecting); - } - @Test public void testLabeledNotBlockSet() throws Exception { - Grammar g = new Grammar( - "lexer grammar P;\n"+ - "A : t=~('3'|'b') ;\n"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" + - ".s3->:s4\n" + - ":s4-->.s5\n"; - checkRule(g, "A", expecting); - } - @Test public void testEscapedCharLiteral() throws Exception { - Grammar g = new Grammar( - "grammar P;\n"+ - "a : '\\n';"); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-'\\n'->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - @Test public void testEscapedStringLiteral() throws Exception { - Grammar g = new Grammar( - "grammar P;\n"+ - "a : 'a\\nb\\u0030c\\'';"); - String expecting = - "RuleStart_a_0->s2\n" + - "s2-'a\\nb\\u0030c\\''->s3\n" + - "s3->RuleStop_a_1\n" + - "RuleStop_a_1-EOF->s4\n"; - checkRule(g, "a", expecting); - } - // AUTO BACKTRACKING STUFF - @Test public void testAutoBacktracking_RuleBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : 'a'{;}|'b';" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s1->.s9\n" + - ".s10-'b'->.s11\n" + - ".s11->.s6\n" + - ".s2-{synpred1_t}?->.s3\n" + - ".s3-'a'->.s4\n" + - ".s4-{}->.s5\n" + - ".s5->.s6\n" + - 
".s6->:s7\n" + - ".s9->.s10\n" + - ":s7-EOF->.s8\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_RuleSetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : 'a'|'b';" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-'a'..'b'->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_SimpleBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'{;}|'b') ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s10->.s11\n" + - ".s11-'b'->.s12\n" + - ".s12->.s7\n" + - ".s2->.s10\n" + - ".s2->.s3\n" + - ".s3-{synpred1_t}?->.s4\n" + - ".s4-'a'->.s5\n" + - ".s5-{}->.s6\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_SetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b') ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2-'a'..'b'->.s3\n" + - ".s3->:s4\n" + - ":s4-EOF->.s5\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_StarBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'{;}|'b')* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s12->.s13\n" + - ".s13-{synpred2_t}?->.s14\n" + - ".s14-'b'->.s15\n" + - ".s15->.s8\n" + - ".s16->.s9\n" + - ".s2->.s16\n" + - ".s2->.s3\n" + - ".s3->.s12\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6-{}->.s7\n" + - ".s7->.s8\n" + - ".s8->.s3\n" + - ".s8->.s9\n" + - ".s9->:s10\n" + - ":s10-EOF->.s11\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b')* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2->.s3\n" + - ".s2->.s9\n" + - ".s3->.s4\n" + - ".s4-'a'..'b'->.s5\n" + - ".s5->.s3\n" + - ".s5->.s6\n" + - ".s6->:s7\n" + - ".s9->.s6\n" + - ":s7-EOF->.s8\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_StarSetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b'{;})* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s11->.s12\n" + - ".s12-{synpred2_t}?->.s13\n" + - ".s13-'b'->.s14\n" + - ".s14-{}->.s15\n" + - ".s15->.s7\n" + - ".s16->.s8\n" + - ".s2->.s16\n" + - ".s2->.s3\n" + - ".s3->.s11\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s7\n" + - ".s7->.s3\n" + - ".s7->.s8\n" + - ".s8->:s9\n" + - ":s9-EOF->.s10\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_StarBlock1Alt() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')* ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s10->.s7\n" + - ".s2->.s10\n" + - ".s2->.s3\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s3\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_PlusBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : 
('a'{;}|'b')+ ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s12->.s13\n" + - ".s13-{synpred2_t}?->.s14\n" + - ".s14-'b'->.s15\n" + - ".s15->.s8\n" + - ".s2->.s3\n" + - ".s3->.s12\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6-{}->.s7\n" + - ".s7->.s8\n" + - ".s8->.s3\n" + - ".s8->.s9\n" + - ".s9->:s10\n" + - ":s10-EOF->.s11\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_PlusSetBlock() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'|'b'{;})+ ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s11->.s12\n" + - ".s12-{synpred2_t}?->.s13\n" + - ".s13-'b'->.s14\n" + - ".s14-{}->.s15\n" + - ".s15->.s7\n" + - ".s2->.s3\n" + - ".s3->.s11\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s7\n" + - ".s7->.s3\n" + - ".s7->.s8\n" + - ".s8->:s9\n" + - ":s9-EOF->.s10\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_PlusBlock1Alt() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')+ ;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2->.s3\n" + - ".s3->.s4\n" + - ".s4-{synpred1_t}?->.s5\n" + - ".s5-'a'->.s6\n" + - ".s6->.s3\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_OptionalBlock2Alts() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a'{;}|'b')?;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s10->.s11\n" + - ".s10->.s14\n" + - ".s11-{synpred2_t}?->.s12\n" + - ".s12-'b'->.s13\n" + - ".s13->.s7\n" + - ".s14->.s7\n" + - ".s2->.s10\n" + - ".s2->.s3\n" + - ".s3-{synpred1_t}?->.s4\n" + - ".s4-'a'->.s5\n" + - ".s5-{}->.s6\n" + - ".s6->.s7\n" + - ".s7->:s8\n" + - ":s8-EOF->.s9\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_OptionalBlock1Alt() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')?;" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s2->.s3\n" + - ".s2->.s9\n" + - ".s3-{synpred1_t}?->.s4\n" + - ".s4-'a'->.s5\n" + - ".s5->.s6\n" + - ".s6->:s7\n" + - ".s9->.s6\n" + - ":s7-EOF->.s8\n"; - checkRule(g, "a", expecting); - } - @Test public void testAutoBacktracking_ExistingPred() throws Exception { - Grammar g = new Grammar( - "grammar t;\n" + - "options {backtrack=true;}\n"+ - "a : ('a')=> 'a' | 'b';" - ); - String expecting = - ".s0->.s1\n" + - ".s1->.s2\n" + - ".s1->.s8\n" + - ".s10->.s5\n" + - ".s2-{synpred1_t}?->.s3\n" + - ".s3-'a'->.s4\n" + - ".s4->.s5\n" + - ".s5->:s6\n" + - ".s8->.s9\n" + - ".s9-'b'->.s10\n" + - ":s6-EOF->.s7\n"; - checkRule(g, "a", expecting); - } -*/ - @Test public void testDefaultMode() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "X : 'x' ;\n" + - "mode FOO;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - String expecting = - "s0->RuleStart_A_2\n" + - "s0->RuleStart_X_4\n" + - "RuleStart_A_2->s10\n" + - "RuleStart_X_4->s12\n" + - "s10-'a'->s11\n" + - "s12-'x'->s13\n" + - "s11->RuleStop_A_3\n" + - "s13->RuleStop_X_5\n"; - checkTokensRule(g, "DEFAULT_MODE", expecting); - } - @Test public void testMode() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "X : 'x' ;\n" + - "mode FOO;\n" + - "B : 'b' ;\n" + - 
"C : 'c' ;\n"); - String expecting = - "s1->RuleStart_B_6\n" + - "s1->RuleStart_C_8\n" + - "RuleStart_B_6->s14\n" + - "RuleStart_C_8->s16\n" + - "s14-'b'->s15\n" + - "s16-'c'->s17\n" + - "s15->RuleStop_B_7\n" + - "s17->RuleStop_C_9\n"; - checkTokensRule(g, "FOO", expecting); - } - void checkTokensRule(LexerGrammar g, String modeName, String expecting) { -// if ( g.ast!=null && !g.ast.hasErrors ) { -// System.out.println(g.ast.toStringTree()); -// Tool antlr = new Tool(); -// SemanticPipeline sem = new SemanticPipeline(g); -// sem.process(); -// if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) -// for (Grammar imp : g.getImportedGrammars()) { -// antlr.processNonCombinedGrammar(imp); -// } -// } -// } - if ( modeName==null ) modeName = "DEFAULT_MODE"; - if ( g.modes.get(modeName)==null ) { - System.err.println("no such mode "+modeName); - return; - } - ParserATNFactory f = new LexerATNFactory(g); - ATN nfa = f.createATN(); - ATNState startState = nfa.modeNameToStartState.get(modeName); - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - //System.out.print(result); - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestATNDeserialization.java b/tool/test/org/antlr/v4/xtest/TestATNDeserialization.java deleted file mode 100644 index ab252a36b..000000000 --- a/tool/test/org/antlr/v4/xtest/TestATNDeserialization.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import static org.junit.Assert.*; - -import java.util.Arrays; - -public class TestATNDeserialization extends BaseTest { - @Test public void testSimpleNoBlock() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B ;"); - checkDeserializationIsStable(g); - } - - @Test public void testEOF() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : EOF ;"); - checkDeserializationIsStable(g); - } - - @Test public void testEOFInSet() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (EOF|A) ;"); - checkDeserializationIsStable(g); - } - - @Test public void testNot() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A, B, C}\n" + - "a : ~A ;"); - checkDeserializationIsStable(g); - } - - @Test public void testWildcard() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A, B, C}\n" + - "a : . ;"); - checkDeserializationIsStable(g); - } - - @Test public void testPEGAchillesHeel() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B ;"); - checkDeserializationIsStable(g); - } - - @Test public void test3Alts() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B | A B C ;"); - checkDeserializationIsStable(g); - } - - @Test public void testSimpleLoop() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A+ B ;"); - checkDeserializationIsStable(g); - } - - @Test public void testRuleRef() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : e ;\n" + - "e : E ;\n"); - checkDeserializationIsStable(g); - } - - @Test public void testLexerTwoRules() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "B : 'b' ;\n"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerEOF() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' EOF ;\n"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerEOFInSet() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' (EOF|'\\n') ;\n"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerRange() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : '0'..'9' ;\n"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerLoops() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : '0'..'9'+ ;\n"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerNotSet() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b')\n ;"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerNotSetWithRange() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b'|'e'|'p'..'t')\n ;"); - checkDeserializationIsStable(lg); - } - - @Test public void testLexerNotSetWithRange2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b') ~('e'|'p'..'t')\n ;"); - checkDeserializationIsStable(lg); - } - - @Test public 
void test2ModesInLexer() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a'\n ;\n" + - "mode M;\n" + - "B : 'b';\n" + - "mode M2;\n" + - "C : 'c';\n"); - checkDeserializationIsStable(lg); - } - - protected void checkDeserializationIsStable(Grammar g) { - ATN atn = createATN(g, false); - char[] data = Utils.toCharArray(ATNSerializer.getSerialized(atn)); - String atnData = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - ATN atn2 = new ATNDeserializer().deserialize(data); - String atn2Data = ATNSerializer.getDecoded(atn2, Arrays.asList(g.getTokenNames())); - - assertEquals(atnData, atn2Data); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestATNInterpreter.java b/tool/test/org/antlr/v4/xtest/TestATNInterpreter.java deleted file mode 100644 index 60b708986..000000000 --- a/tool/test/org/antlr/v4/xtest/TestATNInterpreter.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
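
checkDeserializationIsStable above is the entire test: serialize the grammar's ATN, deserialize it again, and require that the decoded dumps of the original and the round-tripped ATN are identical. The same check applies to any grammar; a further case in exactly the style of the tests above, combining a set, a loop and an extra mode, would look like this sketch:

    @Test public void testLexerSetLoopAndExtraMode() throws Exception {
        // Sketch in the style of the tests above; any valid grammar can be checked.
        LexerGrammar lg = new LexerGrammar(
            "lexer grammar L;\n" +
            "ID : ('a'..'z'|'_')+ ;\n" +
            "WS : (' '|'\\n')+ ;\n" +
            "mode STR;\n" +
            "CHUNK : ~'\"'+ ;\n");
        checkDeserializationIsStable(lg);
    }
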
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.BlockStartState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - - // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH - // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH - // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH - -public class TestATNInterpreter extends BaseTest { - @Test public void testSimpleNoBlock() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B ;"); - checkMatchedAlt(lg, g, "ab", 1); - } - - @Test public void testSet() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A,B,C}\n" + - "a : ~A ;"); - checkMatchedAlt(lg, g, "b", 1); - } - - @Test public void testPEGAchillesHeel() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B ;"); - checkMatchedAlt(lg, g, "a", 1); - checkMatchedAlt(lg, g, "ab", 2); - checkMatchedAlt(lg, g, "abc", 2); - } - - @Test public void testMustTrackPreviousGoodAlt() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B ;"); - - checkMatchedAlt(lg, g, "a", 1); - checkMatchedAlt(lg, g, "ab", 2); - - checkMatchedAlt(lg, g, "ac", 1); - checkMatchedAlt(lg, g, "abc", 2); - } - - @Test(expected = NoViableAltException.class) - public void testMustTrackPreviousGoodAltWithEOF() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (A | A B) EOF;"); - - checkMatchedAlt(lg, g, "a", 1); - checkMatchedAlt(lg, g, "ab", 2); - - try { - checkMatchedAlt(lg, g, "ac", 1); - } - catch (NoViableAltException re) { - assertEquals(1, re.getOffendingToken().getTokenIndex()); - assertEquals(3, re.getOffendingToken().getType()); - throw re; - } - } - - @Test public void testMustTrackPreviousGoodAlt2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B | A B C ;"); - - checkMatchedAlt(lg, g, "a", 1); - checkMatchedAlt(lg, g, "ab", 2); - checkMatchedAlt(lg, g, "abc", 3); - - checkMatchedAlt(lg, g, "ad", 1); - checkMatchedAlt(lg, g, "abd", 2); - checkMatchedAlt(lg, g, "abcd", 3); - } - - @Test(expected = NoViableAltException.class) - public void testMustTrackPreviousGoodAlt2WithEOF() throws Exception { - LexerGrammar lg = new LexerGrammar( 
- "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (A | A B | A B C) EOF;"); - - checkMatchedAlt(lg, g, "a", 1); - checkMatchedAlt(lg, g, "ab", 2); - checkMatchedAlt(lg, g, "abc", 3); - - try { - checkMatchedAlt(lg, g, "abd", 1); - } - catch (NoViableAltException re) { - assertEquals(2, re.getOffendingToken().getTokenIndex()); - assertEquals(4, re.getOffendingToken().getType()); - throw re; - } - } - - @Test public void testMustTrackPreviousGoodAlt3() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B | A | A B C ;"); - - checkMatchedAlt(lg, g, "a", 2); - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "abc", 3); - - checkMatchedAlt(lg, g, "ad", 2); - checkMatchedAlt(lg, g, "abd", 1); - checkMatchedAlt(lg, g, "abcd", 3); - } - - @Test(expected = NoViableAltException.class) - public void testMustTrackPreviousGoodAlt3WithEOF() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (A B | A | A B C) EOF;"); - - checkMatchedAlt(lg, g, "a", 2); - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "abc", 3); - - try { - checkMatchedAlt(lg, g, "abd", 1); - } - catch (NoViableAltException re) { - assertEquals(2, re.getOffendingToken().getTokenIndex()); - assertEquals(4, re.getOffendingToken().getType()); - throw re; - } - } - - @Test public void testAmbigAltChooseFirst() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B | A B ;"); // first alt - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "abc", 1); - } - - @Test public void testAmbigAltChooseFirstWithFollowingToken() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (A B | A B) C ;"); // first alt - checkMatchedAlt(lg, g, "abc", 1); - checkMatchedAlt(lg, g, "abcd", 1); - } - - @Test public void testAmbigAltChooseFirstWithFollowingToken2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (A B | A B | C) D ;"); - checkMatchedAlt(lg, g, "abd", 1); - checkMatchedAlt(lg, g, "abdc", 1); - checkMatchedAlt(lg, g, "cd", 3); - } - - @Test public void testAmbigAltChooseFirst2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B | A B | A B C ;"); - - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "abc", 3); - - checkMatchedAlt(lg, g, "abd", 1); - checkMatchedAlt(lg, g, "abcd", 3); - } - - @Test(expected = NoViableAltException.class) - public void testAmbigAltChooseFirst2WithEOF() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new 
Grammar( - "parser grammar T;\n"+ - "a : (A B | A B | A B C) EOF;"); - - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "abc", 3); - - try { - checkMatchedAlt(lg, g, "abd", 1); - } - catch (NoViableAltException re) { - assertEquals(2, re.getOffendingToken().getTokenIndex()); - assertEquals(4, re.getOffendingToken().getType()); - throw re; - } - } - - @Test public void testSimpleLoop() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "D : 'd' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A+ B ;"); - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "aab", 1); - checkMatchedAlt(lg, g, "aaaaaab", 1); - checkMatchedAlt(lg, g, "aabd", 1); - } - - @Test public void testCommonLeftPrefix() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B | A C ;"); - checkMatchedAlt(lg, g, "ab", 1); - checkMatchedAlt(lg, g, "ac", 2); - } - - @Test public void testArbitraryLeftPrefix() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A+ B | A+ C ;"); - checkMatchedAlt(lg, g, "aac", 2); - } - - @Test public void testRecursiveLeftPrefix() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "LP : '(' ;\n" + - "RP : ')' ;\n" + - "INT : '0'..'9'+ ;\n" - ); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A,B,C,LP,RP,INT}\n" + - "a : e B | e C ;\n" + - "e : LP e RP\n" + - " | INT\n" + - " ;"); - checkMatchedAlt(lg, g, "34b", 1); - checkMatchedAlt(lg, g, "34c", 2); - checkMatchedAlt(lg, g, "(34)b", 1); - checkMatchedAlt(lg, g, "(34)c", 2); - checkMatchedAlt(lg, g, "((34))b", 1); - checkMatchedAlt(lg, g, "((34))c", 2); - } - - public void checkMatchedAlt(LexerGrammar lg, final Grammar g, - String inputString, - int expected) - { - ATN lexatn = createATN(lg, true); - LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); - IntegerList types = getTokenTypesViaATN(inputString, lexInterp); - System.out.println(types); - - g.importVocab(lg); - - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - IntTokenStream input = new IntTokenStream(types); - System.out.println("input="+input.types); - ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input); - ATNState startState = atn.ruleToStartState[g.getRule("a").index]; - if ( startState.transition(0).target instanceof BlockStartState ) { - startState = startState.transition(0).target; - } - - DOTGenerator dot = new DOTGenerator(g); - System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule("a").index])); - Rule r = g.getRule("e"); - if ( r!=null ) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); - - int result = interp.matchATN(input, startState); - assertEquals(expected, result); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java b/tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java deleted file mode 100644 index 6f174f1f1..000000000 --- a/tool/test/org/antlr/v4/xtest/TestATNLexerInterpreter.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr 
- * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.*; - -/** - * Lexer rules are little quirky when it comes to wildcards. Problem - * stems from the fact that we want the longest match to win among - * several rules and even within a rule. However, that conflicts - * with the notion of non-greedy, which by definition tries to match - * the fewest possible. During ATN construction, non-greedy loops - * have their entry and exit branches reversed so that the ATN - * simulator will see the exit branch 1st, giving it a priority. The - * 1st path to the stop state kills any other paths for that rule - * that begin with the wildcard. In general, this does everything we - * want, but occasionally there are some quirks as you'll see from - * the tests below. 
- */ -public class TestATNLexerInterpreter extends BaseTest { - @Test public void testLexerTwoRules() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "B : 'b' ;\n"); - String expecting = "A, B, A, B, EOF"; - checkLexerMatches(lg, "abab", expecting); - } - - @Test public void testShortLongRule() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'xy'\n" + - " | 'xyz'\n" + // this alt is preferred since there are no non-greedy configs - " ;\n" + - "Z : 'z'\n" + - " ;\n"); - checkLexerMatches(lg, "xy", "A, EOF"); - checkLexerMatches(lg, "xyz", "A, EOF"); - } - - @Test public void testShortLongRule2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'xyz'\n" + // make sure nongreedy mech cut off doesn't kill this alt - " | 'xy'\n" + - " ;\n"); - checkLexerMatches(lg, "xy", "A, EOF"); - checkLexerMatches(lg, "xyz", "A, EOF"); - } - - @Test public void testWildOnEndFirstAlt() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'xy' .\n" + // should pursue '.' since xyz hits stop first, before 2nd alt - " | 'xy'\n" + - " ;\n" + - "Z : 'z'\n" + - " ;\n"); - checkLexerMatches(lg, "xy", "A, EOF"); - checkLexerMatches(lg, "xyz", "A, EOF"); - } - - @Test public void testWildOnEndLastAlt() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'xy'\n" + - " | 'xy' .\n" + // this alt is preferred since there are no non-greedy configs - " ;\n" + - "Z : 'z'\n" + - " ;\n"); - checkLexerMatches(lg, "xy", "A, EOF"); - checkLexerMatches(lg, "xyz", "A, EOF"); - } - - @Test public void testWildcardNonQuirkWhenSplitBetweenTwoRules() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'xy' ;\n" + - "B : 'xy' . 
'z' ;\n"); - checkLexerMatches(lg, "xy", "A, EOF"); - checkLexerMatches(lg, "xyqz", "B, EOF"); - } - - @Test public void testLexerLoops() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : '0'..'9'+ ;\n" + - "ID : 'a'..'z'+ ;\n"); - String expecting = "ID, INT, ID, INT, EOF"; - checkLexerMatches(lg, "a34bde3", expecting); - } - - @Test public void testLexerNotSet() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b')\n ;"); - String expecting = "ID, EOF"; - checkLexerMatches(lg, "c", expecting); - } - - @Test public void testLexerKeywordIDAmbiguity() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "KEND : 'end' ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n')+ ;"); - String expecting = "ID, EOF"; - //checkLexerMatches(lg, "e", expecting); - expecting = "KEND, EOF"; - checkLexerMatches(lg, "end", expecting); - expecting = "ID, EOF"; - checkLexerMatches(lg, "ending", expecting); - expecting = "ID, WS, KEND, WS, ID, EOF"; - checkLexerMatches(lg, "a end bcd", expecting); - } - - @Test public void testLexerRuleRef() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : DIGIT+ ;\n" + - "fragment DIGIT : '0'..'9' ;\n" + - "WS : (' '|'\\n')+ ;"); - String expecting = "INT, WS, INT, EOF"; - checkLexerMatches(lg, "32 99", expecting); - } - - @Test public void testRecursiveLexerRuleRef() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | ~'*')+ '*/' ;\n" + - "WS : (' '|'\\n')+ ;"); - String expecting = "CMT, WS, CMT, EOF"; - checkLexerMatches(lg, "/* ick */\n/* /*nested*/ */", expecting); - } - - @Test public void testRecursiveLexerRuleRefWithWildcard() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)*? '*/' ;\n" + - "WS : (' '|'\\n')+ ;"); - - String expecting = "CMT, WS, CMT, WS, EOF"; - checkLexerMatches(lg, - "/* ick */\n" + - "/* /* */\n" + - "/* /*nested*/ */\n", - expecting); - } - - @Test public void testLexerWildcardGreedyLoopByDefault() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' .* '\\n' ;\n"); - String expecting = "CMT, EOF"; - checkLexerMatches(lg, "//x\n//y\n", expecting); - } - - @Test public void testLexerWildcardLoopExplicitNonGreedy() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' .*? '\\n' ;\n"); - String expecting = "CMT, CMT, EOF"; - checkLexerMatches(lg, "//x\n//y\n", expecting); - } - - @Test public void testLexerEscapeInString() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "STR : '[' ('~' ']' | .)* ']' ;\n"); - checkLexerMatches(lg, "[a~]b]", "STR, EOF"); - checkLexerMatches(lg, "[a]", "STR, EOF"); - } - - @Test public void testLexerWildcardGreedyPlusLoopByDefault() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' .+ '\\n' ;\n"); - String expecting = "CMT, EOF"; - checkLexerMatches(lg, "//x\n//y\n", expecting); - } - - @Test public void testLexerWildcardExplicitNonGreedyPlusLoop() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' .+? '\\n' ;\n"); - String expecting = "CMT, CMT, EOF"; - checkLexerMatches(lg, "//x\n//y\n", expecting); - } - - // does not fail since ('*/')? 
can't match and have rule succeed - @Test public void testLexerGreedyOptionalShouldWorkAsWeExpect() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '/*' ('*/')? '*/' ;\n"); - String expecting = "CMT, EOF"; - checkLexerMatches(lg, "/**/", expecting); - } - - @Test public void testGreedyBetweenRules() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : '' ;\n" + - "B : '<' .+ '>' ;\n"); - String expecting = "B, EOF"; - checkLexerMatches(lg, "", expecting); - } - - @Test public void testNonGreedyBetweenRules() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : '' ;\n" + - "B : '<' .+? '>' ;\n"); - String expecting = "A, B, EOF"; - checkLexerMatches(lg, "", expecting); - } - - @Test public void testEOFAtEndOfLineComment() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' ~('\\n')* ;\n"); - String expecting = "CMT, EOF"; - checkLexerMatches(lg, "//x", expecting); - } - - @Test public void testEOFAtEndOfLineComment2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' ~('\\n'|'\\r')* ;\n"); - String expecting = "CMT, EOF"; - checkLexerMatches(lg, "//x", expecting); - } - - /** only positive sets like (EOF|'\n') can match EOF and not in wildcard or ~foo sets - * EOF matches but does not advance cursor. - */ - @Test public void testEOFInSetAtEndOfLineComment() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "CMT : '//' .* (EOF|'\\n') ;\n"); - String expecting = "CMT, EOF"; - checkLexerMatches(lg, "//", expecting); - } - - @Test public void testEOFSuffixInSecondRule() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n"+ // shorter than 'a' EOF, despite EOF being 0 width - "B : 'a' EOF ;\n"); - String expecting = "B, EOF"; - checkLexerMatches(lg, "a", expecting); - } - - @Test public void testEOFSuffixInFirstRule() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' EOF ;\n"+ - "B : 'a';\n"); - String expecting = "A, EOF"; - checkLexerMatches(lg, "a", expecting); - } - - @Test public void testEOFByItself() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "DONE : EOF ;\n"+ - "A : 'a';\n"); - String expecting = "A, DONE, EOF"; - checkLexerMatches(lg, "a", expecting); - } - - protected void checkLexerMatches(LexerGrammar lg, String inputString, String expecting) { - ATN atn = createATN(lg, true); - CharStream input = new ANTLRInputStream(inputString); - ATNState startState = atn.modeNameToStartState.get("DEFAULT_MODE"); - DOTGenerator dot = new DOTGenerator(lg); - System.out.println(dot.getDOT(startState, true)); - - List tokenTypes = getTokenTypes(lg, atn, input); - - String result = Utils.join(tokenTypes.iterator(), ", "); - System.out.println(tokenTypes); - assertEquals(expecting, result); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java b/tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java deleted file mode 100644 index 9ecc513e2..000000000 --- a/tool/test/org/antlr/v4/xtest/TestATNParserPrediction.java +++ /dev/null @@ -1,531 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. 
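
checkLexerMatches above leans on getTokenTypes from BaseTest, which is not part of this hunk. Conceptually it drives a LexerATNSimulator over the character stream and records one token name per match; a simplified sketch under that assumption (it sidesteps rules that match EOF themselves, such as DONE above, which the real helper has to handle) is:

    // Simplified sketch of what getTokenTypes is assumed to do, reusing lg and the
    // atn built via createATN(lg, true) as in checkLexerMatches above.
    CharStream input = new ANTLRInputStream("abab");
    LexerATNSimulator interp = new LexerATNSimulator(atn,
            new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) }, null);
    List<String> tokenNames = new ArrayList<String>();
    while ( input.LA(1) != IntStream.EOF ) {
        int ttype = interp.match(input, Lexer.DEFAULT_MODE);
        tokenNames.add(lg.getTokenDisplayName(ttype)); // assumed name lookup; BaseTest may differ
    }
    tokenNames.add("EOF");
    assertEquals("A, B, A, B, EOF", Utils.join(tokenNames.iterator(), ", "));
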
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - - // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH - // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH - // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH - -public class TestATNParserPrediction extends BaseTest { - @Test public void testAorB() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A{;} | B ;"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "a", 1); - checkPredictedAlt(lg, g, decision, "b", 2); - - // After matching these inputs for decision, what is DFA after each prediction? 
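// How to read the paired arrays below (the same convention recurs in the other DFA-construction tests in this file):
// after predicting on inputs[i], the decision's DFA is expected to print exactly as dfa[i], i.e. the cumulative DFA built so far.
// An edge such as "s0-'a'->:s1=>1" reads: from DFA state s0, on token 'a', reach accept state s1, which predicts alternative 1.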
- String[] inputs = { - "a", - "b", - "a" - }; - String[] dfa = { - "s0-'a'->:s1=>1\n", - - "s0-'a'->:s1=>1\n" + - "s0-'b'->:s2=>2\n", - - "s0-'a'->:s1=>1\n" + // don't change after it works - "s0-'b'->:s2=>2\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testEmptyInput() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | ;"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "a", 1); - checkPredictedAlt(lg, g, decision, "", 2); - - // After matching these inputs for decision, what is DFA after each prediction? - String[] inputs = { - "a", - "", - }; - String[] dfa = { - "s0-'a'->:s1=>1\n", - - "s0-EOF->:s2=>2\n" + - "s0-'a'->:s1=>1\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testPEGAchillesHeel() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B ;"); - checkPredictedAlt(lg, g, 0, "a", 1); - checkPredictedAlt(lg, g, 0, "ab", 2); - checkPredictedAlt(lg, g, 0, "abc", 2); - - String[] inputs = { - "a", - "ab", - "abc" - }; - String[] dfa = { - "s0-'a'->s1\n" + - "s1-EOF->:s2=>1\n", - - "s0-'a'->s1\n" + - "s1-EOF->:s2=>1\n" + - "s1-'b'->:s3=>2\n", - - "s0-'a'->s1\n" + - "s1-EOF->:s2=>1\n" + - "s1-'b'->:s3=>2\n" - }; - checkDFAConstruction(lg, g, 0, inputs, dfa); - } - - @Test public void testRuleRefxory() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : x | y ;\n" + - "x : A ;\n" + - "y : B ;\n"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "a", 1); - checkPredictedAlt(lg, g, decision, "b", 2); - - // After matching these inputs for decision, what is DFA after each prediction? - String[] inputs = { - "a", - "b", - "a" - }; - String[] dfa = { - "s0-'a'->:s1=>1\n", - - "s0-'a'->:s1=>1\n" + - "s0-'b'->:s2=>2\n", - - "s0-'a'->:s1=>1\n" + // don't change after it works - "s0-'b'->:s2=>2\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testOptionalRuleChasesGlobalFollow() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A,B,C}\n" + - "a : x B ;\n" + - "b : x C ;\n" + - "x : A | ;\n"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "a", 1); - checkPredictedAlt(lg, g, decision, "b", 2); - checkPredictedAlt(lg, g, decision, "c", 2); - - // After matching these inputs for decision, what is DFA after each prediction? 
- String[] inputs = { - "a", - "b", - "c", - "c", - }; - String[] dfa = { - "s0-'a'->:s1=>1\n", - - "s0-'a'->:s1=>1\n" + - "s0-'b'->:s2=>2\n", - - "s0-'a'->:s1=>1\n" + - "s0-'b'->:s2=>2\n" + - "s0-'c'->:s3=>2\n", - - "s0-'a'->:s1=>1\n" + - "s0-'b'->:s2=>2\n" + - "s0-'c'->:s3=>2\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testLL1Ambig() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A | A B ;"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "a", 1); - checkPredictedAlt(lg, g, decision, "ab", 3); - - // After matching these inputs for decision, what is DFA after each prediction? - String[] inputs = { - "a", - "ab", - "ab" - }; - String[] dfa = { - "s0-'a'->s1\n" + - "s1-EOF->:s2^=>1\n", - - "s0-'a'->s1\n" + - "s1-EOF->:s2^=>1\n" + - "s1-'b'->:s3=>3\n", - - "s0-'a'->s1\n" + - "s1-EOF->:s2^=>1\n" + - "s1-'b'->:s3=>3\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testLL2Ambig() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A B | A B | A B C ;"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "ab", 1); - checkPredictedAlt(lg, g, decision, "abc", 3); - - // After matching these inputs for decision, what is DFA after each prediction? - String[] inputs = { - "ab", - "abc", - "ab" - }; - String[] dfa = { - "s0-'a'->s1\n" + - "s1-'b'->s2\n" + - "s2-EOF->:s3^=>1\n", - - "s0-'a'->s1\n" + - "s1-'b'->s2\n" + - "s2-EOF->:s3^=>1\n" + - "s2-'c'->:s4=>3\n", - - "s0-'a'->s1\n" + - "s1-'b'->s2\n" + - "s2-EOF->:s3^=>1\n" + - "s2-'c'->:s4=>3\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testRecursiveLeftPrefix() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "LP : '(' ;\n" + - "RP : ')' ;\n" + - "INT : '0'..'9'+ ;\n" - ); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A,B,C,LP,RP,INT}\n" + - "a : e B | e C ;\n" + - "e : LP e RP\n" + - " | INT\n" + - " ;"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "34b", 1); - checkPredictedAlt(lg, g, decision, "34c", 2); - checkPredictedAlt(lg, g, decision, "((34))b", 1); - checkPredictedAlt(lg, g, decision, "((34))c", 2); - - // After matching these inputs for decision, what is DFA after each prediction? 
- String[] inputs = { - "34b", - "34c", - "((34))b", - "((34))c" - }; - String[] dfa = { - "s0-INT->s1\n" + - "s1-'b'->:s2=>1\n", - - "s0-INT->s1\n" + - "s1-'b'->:s2=>1\n" + - "s1-'c'->:s3=>2\n", - - "s0-'('->s4\n" + - "s0-INT->s1\n" + - "s1-'b'->:s2=>1\n" + - "s1-'c'->:s3=>2\n" + - "s4-'('->s5\n" + - "s5-INT->s6\n" + - "s6-')'->s7\n" + - "s7-')'->s1\n", - - "s0-'('->s4\n" + - "s0-INT->s1\n" + - "s1-'b'->:s2=>1\n" + - "s1-'c'->:s3=>2\n" + - "s4-'('->s5\n" + - "s5-INT->s6\n" + - "s6-')'->s7\n" + - "s7-')'->s1\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testRecursiveLeftPrefixWithAorABIssue() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "LP : '(' ;\n" + - "RP : ')' ;\n" + - "INT : '0'..'9'+ ;\n" - ); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A,B,C,LP,RP,INT}\n" + - "a : e A | e A B ;\n" + - "e : LP e RP\n" + - " | INT\n" + - " ;"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "34a", 1); - checkPredictedAlt(lg, g, decision, "34ab", 2); // PEG would miss this one! - checkPredictedAlt(lg, g, decision, "((34))a", 1); - checkPredictedAlt(lg, g, decision, "((34))ab", 2); - - // After matching these inputs for decision, what is DFA after each prediction? - String[] inputs = { - "34a", - "34ab", - "((34))a", - "((34))ab", - }; - String[] dfa = { - "s0-INT->s1\n" + - "s1-'a'->s2\n" + - "s2-EOF->:s3=>1\n", - - "s0-INT->s1\n" + - "s1-'a'->s2\n" + - "s2-EOF->:s3=>1\n" + - "s2-'b'->:s4=>2\n", - - "s0-'('->s5\n" + - "s0-INT->s1\n" + - "s1-'a'->s2\n" + - "s2-EOF->:s3=>1\n" + - "s2-'b'->:s4=>2\n" + - "s5-'('->s6\n" + - "s6-INT->s7\n" + - "s7-')'->s8\n" + - "s8-')'->s1\n", - - "s0-'('->s5\n" + - "s0-INT->s1\n" + - "s1-'a'->s2\n" + - "s2-EOF->:s3=>1\n" + - "s2-'b'->:s4=>2\n" + - "s5-'('->s6\n" + - "s6-INT->s7\n" + - "s7-')'->s8\n" + - "s8-')'->s1\n", - }; - checkDFAConstruction(lg, g, decision, inputs, dfa); - } - - @Test public void testContinuePrediction() throws Exception { - // Sam found prev def of ambiguity was too restrictive. - // E.g., (13, 1, []), (13, 2, []), (12, 2, []) should not - // be declared ambig since (12, 2, []) can take us to - // unambig state maybe. keep going. - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "ID : 'a'..'z' ;\n" + // one char - "SEMI : ';' ;\n"+ - "INT : '0'..'9'+ ;\n" - ); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {ID,SEMI,INT}\n" + - "a : (ID | ID ID?) SEMI ;"); - int decision = 1; - checkPredictedAlt(lg, g, decision, "a;", 1); - checkPredictedAlt(lg, g, decision, "ab;", 2); - } - - @Test public void testContinuePrediction2() throws Exception { - // ID is ambig for first two alts, but ID SEMI lets us move forward with alt 3 - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "ID : 'a'..'z' ;\n" + // one char - "SEMI : ';' ;\n"+ - "INT : '0'..'9'+ ;\n" - ); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {ID,SEMI,INT}\n" + - "a : ID | ID | ID SEMI ;\n"); - int decision = 0; - checkPredictedAlt(lg, g, decision, "a", 1); - checkPredictedAlt(lg, g, decision, "a;", 3); - } - - /** first check that the ATN predicts right alt. - * Then check adaptive prediction. 
- */ - public void checkPredictedAlt(LexerGrammar lg, Grammar g, int decision, - String inputString, int expectedAlt) - { - Tool.internalOption_ShowATNConfigsInDFA = true; - ATN lexatn = createATN(lg, true); - LexerATNSimulator lexInterp = - new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) },new PredictionContextCache()); - IntegerList types = getTokenTypesViaATN(inputString, lexInterp); - System.out.println(types); - - semanticProcess(lg); - g.importVocab(lg); - semanticProcess(g); - - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - - Rule r = g.getRule("a"); - if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); - r = g.getRule("b"); - if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); - r = g.getRule("e"); - if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); - r = g.getRule("ifstat"); - if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); - r = g.getRule("block"); - if ( r!=null) System.out.println(dot.getDOT(atn.ruleToStartState[r.index])); - - // Check ATN prediction -// ParserATNSimulator interp = new ParserATNSimulator(atn); - TokenStream input = new IntTokenStream(types); - ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input); - int alt = interp.adaptivePredict(input, decision, ParserRuleContext.EMPTY); - - assertEquals(expectedAlt, alt); - - // Check adaptive prediction - input.seek(0); - alt = interp.adaptivePredict(input, decision, null); - assertEquals(expectedAlt, alt); - // run 2x; first time creates DFA in atn - input.seek(0); - alt = interp.adaptivePredict(input, decision, null); - assertEquals(expectedAlt, alt); - } - - public void checkDFAConstruction(LexerGrammar lg, Grammar g, int decision, - String[] inputString, String[] dfaString) - { -// Tool.internalOption_ShowATNConfigsInDFA = true; - ATN lexatn = createATN(lg, true); - LexerATNSimulator lexInterp = - new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.getDecisionState(Lexer.DEFAULT_MODE)) }, new PredictionContextCache()); - - semanticProcess(lg); - g.importVocab(lg); - semanticProcess(g); - - ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, null); - for (int i=0; i2 EPSILON 0,0,0\n" + - "2->3 ATOM 1,0,0\n" + - "3->4 ATOM 2,0,0\n" + - "4->1 EPSILON 0,0,0\n"; - ATN atn = createATN(g, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testEOF() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A EOF ;"); - String expecting = - "max type 1\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "5:BASIC 0\n" + - "rule 0:0\n" + - "0->2 EPSILON 0,0,0\n" + - "2->3 ATOM 1,0,0\n" + - "3->4 ATOM 0,0,1\n" + - "4->1 EPSILON 0,0,0\n"; - ATN atn = createATN(g, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testEOFInSet() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : (A|EOF) ;"); - String expecting = - "max type 1\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:0\n" + - "0:EOF, A..A\n" + - "0->2 EPSILON 0,0,0\n" + - "2->3 SET 0,0,0\n" + - "3->1 EPSILON 0,0,0\n"; - ATN atn = createATN(g, 
true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testNot() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A, B, C}\n" + - "a : ~A ;"); - String expecting = - "max type 3\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:0\n" + - "0:A..A\n" + - "0->2 EPSILON 0,0,0\n" + - "2->3 NOT_SET 0,0,0\n" + - "3->1 EPSILON 0,0,0\n"; - ATN atn = createATN(g, true); - DOTGenerator gen = new DOTGenerator(g); - System.out.println(gen.getDOT(atn.ruleToStartState[0])); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testWildcard() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "tokens {A, B, C}\n" + - "a : . ;"); - String expecting = - "max type 3\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:0\n" + - "0->2 EPSILON 0,0,0\n" + - "2->3 WILDCARD 0,0,0\n" + - "3->1 EPSILON 0,0,0\n"; - ATN atn = createATN(g, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testPEGAchillesHeel() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B ;"); - String expecting = - "max type 2\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "5:BLOCK_START 0 6\n" + - "6:BLOCK_END 0\n" + - "7:BASIC 0\n" + - "rule 0:0\n" + - "0->5 EPSILON 0,0,0\n" + - "2->6 ATOM 1,0,0\n" + - "3->4 ATOM 1,0,0\n" + - "4->6 ATOM 2,0,0\n" + - "5->2 EPSILON 0,0,0\n" + - "5->3 EPSILON 0,0,0\n" + - "6->1 EPSILON 0,0,0\n" + - "0:5\n"; - ATN atn = createATN(g, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void test3Alts() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A | A B | A B C ;"); - String expecting = - "max type 3\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "5:BASIC 0\n" + - "6:BASIC 0\n" + - "7:BASIC 0\n" + - "8:BLOCK_START 0 9\n" + - "9:BLOCK_END 0\n" + - "10:BASIC 0\n" + - "rule 0:0\n" + - "0->8 EPSILON 0,0,0\n" + - "2->9 ATOM 1,0,0\n" + - "3->4 ATOM 1,0,0\n" + - "4->9 ATOM 2,0,0\n" + - "5->6 ATOM 1,0,0\n" + - "6->7 ATOM 2,0,0\n" + - "7->9 ATOM 3,0,0\n" + - "8->2 EPSILON 0,0,0\n" + - "8->3 EPSILON 0,0,0\n" + - "8->5 EPSILON 0,0,0\n" + - "9->1 EPSILON 0,0,0\n" + - "0:8\n"; - ATN atn = createATN(g, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testSimpleLoop() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : A+ B ;"); - String expecting = - "max type 2\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:BASIC 0\n" + - "3:PLUS_BLOCK_START 0 4\n" + - "4:BLOCK_END 0\n" + - "5:PLUS_LOOP_BACK 0\n" + - "6:LOOP_END 0 5\n" + - "7:BASIC 0\n" + - "8:BASIC 0\n" + - "9:BASIC 0\n" + - "rule 0:0\n" + - "0->3 EPSILON 0,0,0\n" + - "2->4 ATOM 1,0,0\n" + - "3->2 EPSILON 0,0,0\n" + - "4->5 EPSILON 0,0,0\n" + - "5->3 EPSILON 0,0,0\n" + - "5->6 EPSILON 0,0,0\n" + - "6->7 EPSILON 0,0,0\n" + - "7->8 ATOM 2,0,0\n" + - "8->1 EPSILON 0,0,0\n" + - "0:5\n"; - ATN atn = createATN(g, 
true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testRuleRef() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : e ;\n" + - "e : E ;\n"); - String expecting = - "max type 1\n" + - "0:RULE_START 0\n" + - "1:RULE_STOP 0\n" + - "2:RULE_START 1\n" + - "3:RULE_STOP 1\n" + - "4:BASIC 0\n" + - "5:BASIC 0\n" + - "6:BASIC 1\n" + - "7:BASIC 1\n" + - "8:BASIC 1\n" + - "rule 0:0\n" + - "rule 1:2\n" + - "0->4 EPSILON 0,0,0\n" + - "2->6 EPSILON 0,0,0\n" + - "4->5 RULE 2,1,0\n" + - "5->1 EPSILON 0,0,0\n" + - "6->7 ATOM 1,0,0\n" + - "7->3 EPSILON 0,0,0\n"; - ATN atn = createATN(g, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(g.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerTwoRules() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' ;\n" + - "B : 'b' ;\n"); - String expecting = - "max type 2\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:RULE_START 1\n" + - "4:RULE_STOP 1\n" + - "5:BASIC 0\n" + - "6:BASIC 0\n" + - "7:BASIC 1\n" + - "8:BASIC 1\n" + - "rule 0:1 1\n" + - "rule 1:3 2\n" + - "mode 0:0\n" + - "0->1 EPSILON 0,0,0\n" + - "0->3 EPSILON 0,0,0\n" + - "1->5 EPSILON 0,0,0\n" + - "3->7 EPSILON 0,0,0\n" + - "5->6 ATOM 97,0,0\n" + - "6->2 EPSILON 0,0,0\n" + - "7->8 ATOM 98,0,0\n" + - "8->4 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerRange() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : '0'..'9' ;\n"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->4 RANGE 48,57,0\n" + - "4->2 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerEOF() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : 'a' EOF ;\n"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "5:BASIC 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->4 ATOM 97,0,0\n" + - "4->5 ATOM 0,0,1\n" + - "5->2 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerEOFInSet() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : 'a' (EOF|'\\n') ;\n"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "5:BLOCK_START 0 6\n" + - "6:BLOCK_END 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0:EOF, '\\n'..'\\n'\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->5 ATOM 97,0,0\n" + - "4->6 SET 0,0,0\n" + - "5->4 EPSILON 0,0,0\n" + - "6->2 EPSILON 0,0,0\n" + - "0:0\n" + - "1:5\n"; - ATN atn = createATN(lg, true); - String result = 
ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerLoops() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "INT : '0'..'9'+ ;\n"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:PLUS_BLOCK_START 0 5\n" + - "5:BLOCK_END 0\n" + - "6:PLUS_LOOP_BACK 0\n" + - "7:LOOP_END 0 6\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0->1 EPSILON 0,0,0\n" + - "1->4 EPSILON 0,0,0\n" + - "3->5 RANGE 48,57,0\n" + - "4->3 EPSILON 0,0,0\n" + - "5->6 EPSILON 0,0,0\n" + - "6->4 EPSILON 0,0,0\n" + - "6->7 EPSILON 0,0,0\n" + - "7->2 EPSILON 0,0,0\n" + - "0:0\n" + - "1:6\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerAction() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a' {a} ;\n" + - "B : 'b' ;\n" + - "C : 'c' {c} ;\n"); - String expecting = - "max type 3\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:RULE_START 1\n" + - "4:RULE_STOP 1\n" + - "5:RULE_START 2\n" + - "6:RULE_STOP 2\n" + - "7:BASIC 0\n" + - "8:BASIC 0\n" + - "9:BASIC 0\n" + - "10:BASIC 1\n" + - "11:BASIC 1\n" + - "12:BASIC 2\n" + - "13:BASIC 2\n" + - "14:BASIC 2\n" + - "rule 0:1 1\n" + - "rule 1:3 2\n" + - "rule 2:5 3\n" + - "mode 0:0\n" + - "0->1 EPSILON 0,0,0\n" + - "0->3 EPSILON 0,0,0\n" + - "0->5 EPSILON 0,0,0\n" + - "1->7 EPSILON 0,0,0\n" + - "3->10 EPSILON 0,0,0\n" + - "5->12 EPSILON 0,0,0\n" + - "7->8 ATOM 97,0,0\n" + - "8->9 ACTION 0,0,0\n" + - "9->2 EPSILON 0,0,0\n" + - "10->11 ATOM 98,0,0\n" + - "11->4 EPSILON 0,0,0\n" + - "12->13 ATOM 99,0,0\n" + - "13->14 ACTION 2,1,0\n" + - "14->6 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerNotSet() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b')\n ;"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0:'a'..'b'\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->4 NOT_SET 0,0,0\n" + - "4->2 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerSetWithRange() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ('a'|'b'|'e'|'p'..'t')\n ;"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0:'a'..'b', 'e'..'e', 'p'..'t'\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->4 SET 0,0,0\n" + - "4->2 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerNotSetWithRange() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b'|'e'|'p'..'t')\n ;"); - String expecting = - "max type 1\n" + 
- "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0:'a'..'b', 'e'..'e', 'p'..'t'\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->4 NOT_SET 0,0,0\n" + - "4->2 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerWildcardWithMode() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : 'a'..'z'+ ;\n"+ - "mode CMT;" + - "COMMENT : '*/' {skip(); popMode();} ;\n" + - "JUNK : . {more();} ;\n"); - String expecting = - "max type 3\n" + - "0:TOKEN_START -1\n" + - "1:TOKEN_START -1\n" + - "2:RULE_START 0\n" + - "3:RULE_STOP 0\n" + - "4:RULE_START 1\n" + - "5:RULE_STOP 1\n" + - "6:RULE_START 2\n" + - "7:RULE_STOP 2\n" + - "8:BASIC 0\n" + - "9:PLUS_BLOCK_START 0 10\n" + - "10:BLOCK_END 0\n" + - "11:PLUS_LOOP_BACK 0\n" + - "12:LOOP_END 0 11\n" + - "13:BASIC 1\n" + - "14:BASIC 1\n" + - "15:BASIC 1\n" + - "16:BASIC 1\n" + - "17:BASIC 1\n" + - "18:BASIC 2\n" + - "19:BASIC 2\n" + - "20:BASIC 2\n" + - "rule 0:2 1\n" + - "rule 1:4 2\n" + - "rule 2:6 3\n" + - "mode 0:0\n" + - "mode 1:1\n" + - "0->2 EPSILON 0,0,0\n" + - "1->4 EPSILON 0,0,0\n" + - "1->6 EPSILON 0,0,0\n" + - "2->9 EPSILON 0,0,0\n" + - "4->13 EPSILON 0,0,0\n" + - "6->18 EPSILON 0,0,0\n" + - "8->10 RANGE 97,122,0\n" + - "9->8 EPSILON 0,0,0\n" + - "10->11 EPSILON 0,0,0\n" + - "11->9 EPSILON 0,0,0\n" + - "11->12 EPSILON 0,0,0\n" + - "12->3 EPSILON 0,0,0\n" + - "13->14 ATOM 42,0,0\n" + - "14->15 ATOM 47,0,0\n" + - "15->16 EPSILON 0,0,0\n" + - "16->17 ACTION 1,0,0\n" + - "17->5 EPSILON 0,0,0\n" + - "18->19 WILDCARD 0,0,0\n" + - "19->20 ACTION 2,1,0\n" + - "20->7 EPSILON 0,0,0\n" + - "0:0\n" + - "1:1\n" + - "2:11\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testLexerNotSetWithRange2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "ID : ~('a'|'b') ~('e'|'p'..'t')\n ;"); - String expecting = - "max type 1\n" + - "0:TOKEN_START -1\n" + - "1:RULE_START 0\n" + - "2:RULE_STOP 0\n" + - "3:BASIC 0\n" + - "4:BASIC 0\n" + - "5:BASIC 0\n" + - "rule 0:1 1\n" + - "mode 0:0\n" + - "0:'a'..'b'\n" + - "1:'e'..'e', 'p'..'t'\n" + - "0->1 EPSILON 0,0,0\n" + - "1->3 EPSILON 0,0,0\n" + - "3->4 NOT_SET 0,0,0\n" + - "4->5 NOT_SET 1,0,0\n" + - "5->2 EPSILON 0,0,0\n" + - "0:0\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void testModeInLexer() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a'\n ;\n" + - "B : 'b';\n" + - "mode A;\n" + - "C : 'c';\n"+ - "D : 'd';\n"); - String expecting = - "max type 4\n" + - "0:TOKEN_START -1\n" + - "1:TOKEN_START -1\n" + - "2:RULE_START 0\n" + - "3:RULE_STOP 0\n" + - "4:RULE_START 1\n" + - "5:RULE_STOP 1\n" + - "6:RULE_START 2\n" + - "7:RULE_STOP 2\n" + - "8:RULE_START 3\n" + - "9:RULE_STOP 3\n" + - "10:BASIC 0\n" + - "11:BASIC 0\n" + - "12:BASIC 1\n" + - "13:BASIC 1\n" + - "14:BASIC 2\n" + - "15:BASIC 2\n" + - "16:BASIC 3\n" + - "17:BASIC 3\n" + - "rule 0:2 1\n" + - "rule 1:4 2\n" + - "rule 2:6 3\n" + - "rule 3:8 4\n" + - "mode 0:0\n" + - "mode 1:1\n" + - "0->2 EPSILON 0,0,0\n" + - "0->4 EPSILON 
0,0,0\n" + - "1->6 EPSILON 0,0,0\n" + - "1->8 EPSILON 0,0,0\n" + - "2->10 EPSILON 0,0,0\n" + - "4->12 EPSILON 0,0,0\n" + - "6->14 EPSILON 0,0,0\n" + - "8->16 EPSILON 0,0,0\n" + - "10->11 ATOM 97,0,0\n" + - "11->3 EPSILON 0,0,0\n" + - "12->13 ATOM 98,0,0\n" + - "13->5 EPSILON 0,0,0\n" + - "14->15 ATOM 99,0,0\n" + - "15->7 EPSILON 0,0,0\n" + - "16->17 ATOM 100,0,0\n" + - "17->9 EPSILON 0,0,0\n" + - "0:0\n" + - "1:1\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - - @Test public void test2ModesInLexer() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n"+ - "A : 'a'\n ;\n" + - "mode M;\n" + - "B : 'b';\n" + - "mode M2;\n" + - "C : 'c';\n"); - String expecting = - "max type 3\n" + - "0:TOKEN_START -1\n" + - "1:TOKEN_START -1\n" + - "2:TOKEN_START -1\n" + - "3:RULE_START 0\n" + - "4:RULE_STOP 0\n" + - "5:RULE_START 1\n" + - "6:RULE_STOP 1\n" + - "7:RULE_START 2\n" + - "8:RULE_STOP 2\n" + - "9:BASIC 0\n" + - "10:BASIC 0\n" + - "11:BASIC 1\n" + - "12:BASIC 1\n" + - "13:BASIC 2\n" + - "14:BASIC 2\n" + - "rule 0:3 1\n" + - "rule 1:5 2\n" + - "rule 2:7 3\n" + - "mode 0:0\n" + - "mode 1:1\n" + - "mode 2:2\n" + - "0->3 EPSILON 0,0,0\n" + - "1->5 EPSILON 0,0,0\n" + - "2->7 EPSILON 0,0,0\n" + - "3->9 EPSILON 0,0,0\n" + - "5->11 EPSILON 0,0,0\n" + - "7->13 EPSILON 0,0,0\n" + - "9->10 ATOM 97,0,0\n" + - "10->4 EPSILON 0,0,0\n" + - "11->12 ATOM 98,0,0\n" + - "12->6 EPSILON 0,0,0\n" + - "13->14 ATOM 99,0,0\n" + - "14->8 EPSILON 0,0,0\n" + - "0:0\n" + - "1:1\n" + - "2:2\n"; - ATN atn = createATN(lg, true); - String result = ATNSerializer.getDecoded(atn, Arrays.asList(lg.getTokenNames())); - assertEquals(expecting, result); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestActionSplitter.java b/tool/test/org/antlr/v4/xtest/TestActionSplitter.java deleted file mode 100644 index 7112d2ae6..000000000 --- a/tool/test/org/antlr/v4/xtest/TestActionSplitter.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.runtime.ANTLRStringStream; -import org.antlr.runtime.Token; -import org.antlr.v4.parse.ActionSplitter; -import org.antlr.v4.semantics.BlankActionSplitterListener; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.*; - -public class TestActionSplitter extends BaseTest { - static String[] exprs = { - "foo", "['foo'<" + ActionSplitter.TEXT + ">]", - "$x", "['$x'<" + ActionSplitter.ATTR + ">]", - "\\$x", "['\\$x'<" + ActionSplitter.TEXT + ">]", - "$x.y", "['$x.y'<" + ActionSplitter.QUALIFIED_ATTR + ">]", - "$ID.text", "['$ID.text'<" + ActionSplitter.QUALIFIED_ATTR + ">]", - "$ID", "['$ID'<" + ActionSplitter.ATTR + ">]", - "$ID.getText()", "['$ID'<" + ActionSplitter.ATTR + ">, '.getText()'<" + ActionSplitter.TEXT + ">]", - "$ID.text = \"test\";", "['$ID.text'<" + ActionSplitter.QUALIFIED_ATTR + ">, ' = \"test\";'<" + ActionSplitter.TEXT + ">]", - "$a.line == $b.line", "['$a.line'<" + ActionSplitter.QUALIFIED_ATTR + ">, ' == '<" + ActionSplitter.TEXT + ">, '$b.line'<" + ActionSplitter.QUALIFIED_ATTR + ">]", - "$r.tree", "['$r.tree'<" + ActionSplitter.QUALIFIED_ATTR + ">]", - "foo $a::n bar", "['foo '<" + ActionSplitter.TEXT + ">, '$a::n'<" + ActionSplitter.NONLOCAL_ATTR + ">, ' bar'<" + ActionSplitter.TEXT + ">]", - "$rule::x;", "['$rule::x'<" + ActionSplitter.NONLOCAL_ATTR + ">, ';'<" + ActionSplitter.TEXT + ">]", - "$field::x = $field.st;", "['$field::x = $field.st;'<" + ActionSplitter.SET_NONLOCAL_ATTR + ">]", - "$foo.get(\"ick\");", "['$foo'<" + ActionSplitter.ATTR + ">, '.get(\"ick\");'<" + ActionSplitter.TEXT + ">]", - }; - - @Test public void testExprs() { - for (int i = 0; i < exprs.length; i+=2) { - String input = exprs[i]; - String expect = exprs[i+1]; - List chunks = getActionChunks(input); - assertEquals("input: "+input, expect, chunks.toString()); - } - } - - public static List getActionChunks(String a) { - List chunks = new ArrayList(); - ActionSplitter splitter = new ActionSplitter(new ANTLRStringStream(a), - new BlankActionSplitterListener()); - Token t = splitter.nextToken(); - while ( t.getType()!=Token.EOF ) { - chunks.add("'"+t.getText()+"'<"+t.getType()+">"); - t = splitter.nextToken(); - } - return chunks; - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestActionTranslation.java b/tool/test/org/antlr/v4/xtest/TestActionTranslation.java deleted file mode 100644 index ade9e70db..000000000 --- a/tool/test/org/antlr/v4/xtest/TestActionTranslation.java +++ /dev/null @@ -1,424 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -/** */ -public class TestActionTranslation extends BaseTest { - String attributeTemplate = - "attributeTemplate(members,init,inline,finally,inline2) ::= <<\n" + - "parser grammar A;\n"+ - "@members {#members##end-members#}\n" + - "a[int x, int x1] returns [int y]\n" + - "@init {#init##end-init#}\n" + - " : id=ID ids+=ID lab=b[34] c d {\n" + - " #inline##end-inline#\n" + - " }\n" + - " c\n" + - " ;\n" + - " finally {#finally##end-finally#}\n" + - "b[int d] returns [int e]\n" + - " : {#inline2##end-inline2#}\n" + - " ;\n" + - "c returns [int x, int y] : ;\n" + - "d : ;\n" + - ">>"; - - @Test public void testEscapedLessThanInAction() throws Exception { - String action = "i<3; ''"; - String expected = "i<3; ''"; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testEscaped$InAction() throws Exception { - String action = "int \\$n; \"\\$in string\\$\""; - String expected = "int $n; \"$in string$\""; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - /** - * Regression test for "in antlr v4 lexer, $ translation issue in action". 
- * https://github.com/antlr/antlr4/issues/176 - */ - @Test public void testUnescaped$InAction() throws Exception { - String action = "\\$string$"; - String expected = "$string$"; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testEscapedSlash() throws Exception { - String action = "x = '\\n';"; // x = '\n'; -> x = '\n'; - String expected = "x = '\\n';"; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testComplicatedArgParsing() throws Exception { - String action = "x, (*a).foo(21,33), 3.2+1, '\\n', "+ - "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; - String expected = "x, (*a).foo(21,33), 3.2+1, '\\n', "+ - "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; - testActions(attributeTemplate, "members", action, expected); - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - testActions(attributeTemplate, "inline2", action, expected); - } - - @Test public void testComplicatedArgParsingWithTranslation() throws Exception { - String action = "x, $ID.text+\"3242\", (*$ID).foo(21,33), 3.2+1, '\\n', "+ - "\"a,oo\\nick\", {bl, \"fdkj\"eck}"; - String expected = - "x, (((AContext)_localctx).ID!=null?((AContext)_localctx).ID.getText():null)+\"3242\", " + - "(*((AContext)_localctx).ID).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testArguments() throws Exception { - String action = "$x; $ctx.x"; - String expected = "_localctx.x; _localctx.x"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValue() throws Exception { - String action = "$y; $ctx.y"; - String expected = "_localctx.y; _localctx.y"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValueWithNumber() throws Exception { - String action = "$ctx.x1"; - String expected = "_localctx.x1"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValuesCurrentRule() throws Exception { - String action = "$y; $ctx.y;"; - String expected = "_localctx.y; _localctx.y;"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnValues() throws Exception { - String action = "$lab.e; $b.e; $y.e = \"\";"; - String expected = "((AContext)_localctx).lab.e; ((AContext)_localctx).b.e; _localctx.y.e = \"\";"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testReturnWithMultipleRuleRefs() throws Exception { - String action = "$c.x; $c.y;"; - String expected = "((AContext)_localctx).c.x; ((AContext)_localctx).c.y;"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testTokenRefs() throws Exception { - String action = "$id; $ID; $id.text; $id.getText(); $id.line;"; - String expected = "((AContext)_localctx).id; 
((AContext)_localctx).ID; (((AContext)_localctx).id!=null?((AContext)_localctx).id.getText():null); ((AContext)_localctx).id.getText(); (((AContext)_localctx).id!=null?((AContext)_localctx).id.getLine():0);"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testRuleRefs() throws Exception { - String action = "$lab.start; $c.text;"; - String expected = "(((AContext)_localctx).lab!=null?(((AContext)_localctx).lab.start):null); (((AContext)_localctx).c!=null?_input.getText(((AContext)_localctx).c.start,((AContext)_localctx).c.stop):null);"; - testActions(attributeTemplate, "inline", action, expected); - } - - @Test public void testRefToTextAttributeForCurrentRule() throws Exception { - String action = "$ctx.text; $text"; - - // this is the expected translation for all cases - String expected = - "_localctx.text; _input.getText(_localctx.start, _input.LT(-1))"; - - testActions(attributeTemplate, "init", action, expected); - testActions(attributeTemplate, "inline", action, expected); - testActions(attributeTemplate, "finally", action, expected); - } - - @Test public void testDynamicRuleScopeRefInSubrule() throws Exception { - String action = "$a::n;"; - } - @Test public void testRuleScopeFromAnotherRule() throws Exception { - String action = "$a::n;"; // must be qualified - } - @Test public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception { - String action = "$a.i;"; - } - @Test public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception { - String action = "$a.i;"; - } - @Test public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception { - String action = "$a.i = 1;"; - } - @Test public void testIsolatedRefToCurrentRule() throws Exception { - String action = "$a;"; - } - @Test public void testIsolatedRefToRule() throws Exception { - String action = "$x;"; - } - @Test public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception { - String action = "$a.x;"; - } - @Test public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception { - String action = "$a.x;"; // must be qualified - } - @Test public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception { - String action = "$a.st;"; // can be qualified - } - @Test public void testRuleRefWhenRuleHasScope() throws Exception { - String action = "$b.start;"; - } - @Test public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception { - String action = "$b::n;"; - } - @Test public void testRefToTemplateAttributeForCurrentRule() throws Exception { - String action = "$st=null;"; - } - - @Test public void testRefToStartAttributeForCurrentRule() throws Exception { - String action = "$start;"; - } - - @Test public void testTokenLabelFromMultipleAlts() throws Exception { - String action = "$ID.text;"; // must be qualified - } - @Test public void testRuleLabelFromMultipleAlts() throws Exception { - String action = "$b.text;"; // must be qualified - } - @Test public void testUnqualifiedRuleScopeAttribute() throws Exception { - String action = "$n;"; // must be qualified - } - @Test public void testRuleAndTokenLabelTypeMismatch() throws Exception { - } - @Test public void testListAndTokenLabelTypeMismatch() throws Exception { - } - @Test public void testListAndRuleLabelTypeMismatch() throws Exception { - } - @Test public void testArgReturnValueMismatch() throws Exception { - } - @Test public void testSimplePlusEqualLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public 
void testPlusEqualStringLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualSetLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualWildcardLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testImplicitTokenLabel() throws Exception { - String action = "$ID; $ID.text; $ID.getText()"; - } - - @Test public void testImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testRuleLabelWithoutOutputOption() throws Exception { - } - @Test public void testMissingArgs() throws Exception { - } - @Test public void testArgsWhenNoneDefined() throws Exception { - } - @Test public void testReturnInitValue() throws Exception { - } - @Test public void testMultipleReturnInitValue() throws Exception { - } - @Test public void testCStyleReturnInitValue() throws Exception { - } - @Test public void testArgsWithInitValues() throws Exception { - } - @Test public void testArgsOnToken() throws Exception { - } - @Test public void testArgsOnTokenInLexer() throws Exception { - } - @Test public void testLabelOnRuleRefInLexer() throws Exception { - String action = "$i.text"; - } - - @Test public void testRefToRuleRefInLexer() throws Exception { - String action = "$ID.text"; - } - - @Test public void testRefToRuleRefInLexerNoAttribute() throws Exception { - String action = "$ID"; - } - - @Test public void testCharLabelInLexer() throws Exception { - } - @Test public void testCharListLabelInLexer() throws Exception { - } - @Test public void testWildcardCharLabelInLexer() throws Exception { - } - @Test public void testWildcardCharListLabelInLexer() throws Exception { - } - @Test public void testMissingArgsInLexer() throws Exception { - } - @Test public void testLexerRulePropertyRefs() throws Exception { - String action = "$text $type $line $pos $channel $index $start $stop"; - } - - @Test public void testLexerLabelRefs() throws Exception { - String action = "$a $b.text $c $d.text"; - } - - @Test public void testSettingLexerRulePropertyRefs() throws Exception { - String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index"; - } - - @Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception { - } - @Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception { - } - @Test public void testTokenLabelTreeProperty() throws Exception { - String action = "$id.tree;"; - } - - @Test public void testTokenRefTreeProperty() throws Exception { - String action = "$ID.tree;"; - } - - @Test public void testAmbiguousTokenRef() throws Exception { - String action = "$ID;"; - } - - @Test public void testAmbiguousTokenRefWithProp() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testRuleRefWithDynamicScope() throws Exception { - String action = "$field::x = $field.st;"; - } - - @Test public void testAssignToOwnRulenameAttr() throws Exception { - String action = 
"$rule.tree = null;"; - } - - @Test public void testAssignToOwnParamAttr() throws Exception { - String action = "$rule.i = 42; $i = 23;"; - } - - @Test public void testIllegalAssignToOwnRulenameAttr() throws Exception { - String action = "$rule.stop = 0;"; - } - - @Test public void testIllegalAssignToLocalAttr() throws Exception { - String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;"; - } - - @Test public void testIllegalAssignRuleRefAttr() throws Exception { - String action = "$other.tree = null;"; - } - - @Test public void testIllegalAssignTokenRefAttr() throws Exception { - String action = "$ID.text = \"test\";"; - } - - @Test public void testAssignToTreeNodeAttribute() throws Exception { - String action = "$tree.scope = localScope;"; - } - - @Test public void testDoNotTranslateAttributeCompare() throws Exception { - String action = "$a.line == $b.line"; - } - - @Test public void testDoNotTranslateScopeAttributeCompare() throws Exception { - String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }"; - } - - @Test public void testTreeRuleStopAttributeIsInvalid() throws Exception { - String action = "$r.x; $r.start; $r.stop"; - } - - @Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception { - String action = "$text"; - } - - @Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception { - String action = "int x = $b::n;"; - } - - @Test public void testBracketArgParsing() throws Exception { - } - - @Test public void testStringArgParsing() throws Exception { - String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19"; - } - @Test public void testComplicatedSingleArgParsing() throws Exception { - String action = "(*a).foo(21,33,\",\")"; - } - @Test public void testArgWithLT() throws Exception { - String action = "34<50"; - } - @Test public void testGenericsAsArgumentDefinition() throws Exception { - String action = "$foo.get(\"ick\");"; - } - @Test public void testGenericsAsArgumentDefinition2() throws Exception { - String action = "$foo.get(\"ick\"); x=3;"; - } - @Test public void testGenericsAsReturnValue() throws Exception { - } - - // TODO: nonlocal $rule::x -} diff --git a/tool/test/org/antlr/v4/xtest/TestAttributeChecks.java b/tool/test/org/antlr/v4/xtest/TestAttributeChecks.java deleted file mode 100644 index 457eb9be7..000000000 --- a/tool/test/org/antlr/v4/xtest/TestAttributeChecks.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.runtime.RecognitionException; -import org.antlr.v4.tool.ErrorType; -import org.junit.Test; -import org.stringtemplate.v4.ST; - -/** */ -public class TestAttributeChecks extends BaseTest { - String attributeTemplate = - "parser grammar A;\n"+ - "@members {}\n" + - "tokens{ID}\n" + - "a[int x] returns [int y]\n" + - "@init {}\n" + - " : id=ID ids+=ID lab=b[34] labs+=b[34] {\n" + - " \n" + - " }\n" + - " c\n" + - " ;\n" + - " finally {}\n" + - "b[int d] returns [int e]\n" + - " : {}\n" + - " ;\n" + - "c : ;\n"; - - String[] membersChecks = { - "$a", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:2:11: unknown attribute reference a in $a\n", - "$a.y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:2:11: unknown attribute reference a in $a.y\n", - }; - - String[] initChecks = { - "$text", "", - "$start", "", - "$x = $y", "", - "$y = $x", "", - "$lab.e", "", - "$ids", "", - "$labs", "", - - "$c", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:5:8: unknown attribute reference c in $c\n", - "$a.q", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:5:10: unknown attribute q for rule a in $a.q\n", - }; - - String[] inlineChecks = { - "$text", "", - "$start", "", - "$x = $y", "", - "$y = $x", "", - "$y.b = 3;", "", - "$ctx.x = $ctx.y", "", - "$lab.e", "", - "$lab.text", "", - "$b.e", "", - "$c.text", "", - "$ID", "", - "$ID.text", "", - "$id", "", - "$id.text", "", - "$ids", "", - "$labs", "", - }; - - String[] bad_inlineChecks = { - "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference lab in $lab\n", - "$q", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q\n", - "$q.y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q.y\n", - "$q = 3", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q\n", - "$q = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q = 3;\n", - "$q.y = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q.y\n", - "$q = $blort;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference q in $q = $blort;\n" + - "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:9: unknown attribute reference blort in $blort\n", - "$a.ick", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n", - "$a.ick = 3;", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:6: unknown attribute ick for rule a in $a.ick\n", - "$b.d", "error(" + ErrorType.INVALID_RULE_PARAMETER_REF.code + "): A.g4:7:6: parameter d of rule b is not accessible in this scope: $b.d\n", // cant see rule refs arg - "$d.text", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): 
A.g4:7:4: unknown attribute reference d in $d.text\n", // valid rule, but no ref - "$lab.d", "error(" + ErrorType.INVALID_RULE_PARAMETER_REF.code + "): A.g4:7:8: parameter d of rule b is not accessible in this scope: $lab.d\n", - "$ids = null;", "error(" + ErrorType.ASSIGNMENT_TO_LIST_LABEL.code + "): A.g4:7:4: cannot assign a value to list label ids\n", - "$labs = null;","error(" + ErrorType.ASSIGNMENT_TO_LIST_LABEL.code + "): A.g4:7:4: cannot assign a value to list label labs\n", - }; - - String[] finallyChecks = { - "$text", "", - "$start", "", - "$x = $y", "", - "$y = $x", "", - "$lab.e", "", - "$lab.text", "", - "$id", "", - "$id.text", "", - "$ids", "", - "$labs", "", - - "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:10:14: missing attribute access on rule reference lab in $lab\n", - "$q", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q\n", - "$q.y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q.y\n", - "$q = 3", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q\n", - "$q = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q = 3;\n", - "$q.y = 3;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q.y\n", - "$q = $blort;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference q in $q = $blort;\n" + - "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:19: unknown attribute reference blort in $blort\n", - "$a.ick", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n", - "$a.ick = 3;", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:16: unknown attribute ick for rule a in $a.ick\n", - "$b.e", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference b in $b.e\n", // cant see rule refs outside alts - "$b.d", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference b in $b.d\n", - "$c.text", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference c in $c.text\n", - "$lab.d", "error(" + ErrorType.INVALID_RULE_PARAMETER_REF.code + "): A.g4:10:18: parameter d of rule b is not accessible in this scope: $lab.d\n", - }; - - String[] dynMembersChecks = { - "$S", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:2:11: unknown attribute reference S in $S\n", - "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::i\n", - "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::i\n" + - "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:17: reference to undefined rule S in non-local ref $S::i\n", - - "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:2:14: unknown attribute f for rule b in $b::f\n", - "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::j\n", - "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::j = 3;\n", - "$S::j = $S::k;", "error(" + 
ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:2:11: reference to undefined rule S in non-local ref $S::j = $S::k;\n", - }; - - String[] dynInitChecks = { - "$a", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:5:8: missing attribute access on rule reference a in $a\n", - "$b", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:5:8: unknown attribute reference b in $b\n", - "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:5:8: missing attribute access on rule reference lab in $lab\n", - "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:5:11: unknown attribute f for rule b in $b::f\n", - "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::i\n", - "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::i\n" + - "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:14: reference to undefined rule S in non-local ref $S::i\n", - "$a::z", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:5:11: unknown attribute z for rule a in $a::z\n", - "$S", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:5:8: unknown attribute reference S in $S\n", - - "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::j\n", - "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::j = 3;\n", - "$S::j = $S::k;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:5:8: reference to undefined rule S in non-local ref $S::j = $S::k;\n", - }; - - String[] dynInlineChecks = { - "$a", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference a in $a\n", - "$b", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference b in $b\n", - "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:7:4: missing attribute access on rule reference lab in $lab\n", - "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:7: unknown attribute f for rule b in $b::f\n", - "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::i\n", - "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::i\n" + - "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:10: reference to undefined rule S in non-local ref $S::i\n", - "$a::z", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:7:7: unknown attribute z for rule a in $a::z\n", - - "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::j\n", - "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::j = 3;\n", - "$S::j = $S::k;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:4: reference to undefined rule S in non-local ref $S::j = $S::k;\n", - "$Q[-1]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[-i]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[i]::y", 
"error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[0]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[-1]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[-i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$Q[0]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference Q in $Q\n", - "$S[-1]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[-i]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[i]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[0]::y", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[-1]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[-i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[i]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[0]::y = 23;", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n", - "$S[$S::y]::i", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:7:4: unknown attribute reference S in $S\n" + - "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:7:7: reference to undefined rule S in non-local ref $S::y\n" - }; - - String[] dynFinallyChecks = { - "$a", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:10:14: missing attribute access on rule reference a in $a\n", - "$b", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference b in $b\n", - "$lab", "error(" + ErrorType.ISOLATED_RULE_REF.code + "): A.g4:10:14: missing attribute access on rule reference lab in $lab\n", - "$b::f", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:17: unknown attribute f for rule b in $b::f\n", - "$S", "error(" + ErrorType.UNKNOWN_SIMPLE_ATTRIBUTE.code + "): A.g4:10:14: unknown attribute reference S in $S\n", - "$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::i\n", - "$S::i=$S::i", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::i\n" + - "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:20: reference to undefined rule S in non-local ref $S::i\n", - "$a::z", "error(" + ErrorType.UNKNOWN_RULE_ATTRIBUTE.code + "): A.g4:10:17: unknown attribute z for rule a in $a::z\n", - - "$S::j", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::j\n", - "$S::j = 3;", "error(" + ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::j = 3;\n", - "$S::j = $S::k;", "error(" + 
ErrorType.UNDEFINED_RULE_IN_NONLOCAL_REF.code + "): A.g4:10:14: reference to undefined rule S in non-local ref $S::j = $S::k;\n", - }; - - @Test public void testMembersActions() throws RecognitionException { - testActions("members", membersChecks, attributeTemplate); - } - - @Test public void testDynamicMembersActions() throws RecognitionException { - testActions("members", dynMembersChecks, attributeTemplate); - } - - @Test public void testInitActions() throws RecognitionException { - testActions("init", initChecks, attributeTemplate); - } - - @Test public void testDynamicInitActions() throws RecognitionException { - testActions("init", dynInitChecks, attributeTemplate); - } - - @Test public void testInlineActions() throws RecognitionException { - testActions("inline", inlineChecks, attributeTemplate); - } - - @Test public void testDynamicInlineActions() throws RecognitionException { - testActions("inline", dynInlineChecks, attributeTemplate); - } - - @Test public void testBadInlineActions() throws RecognitionException { - testActions("inline", bad_inlineChecks, attributeTemplate); - } - - @Test public void testFinallyActions() throws RecognitionException { - testActions("finally", finallyChecks, attributeTemplate); - } - - @Test public void testDynamicFinallyActions() throws RecognitionException { - testActions("finally", dynFinallyChecks, attributeTemplate); - } - - @Test public void testTokenRef() throws RecognitionException { - String grammar = - "parser grammar S;\n" + - "tokens{ID}\n" + - "a : x=ID {Token t = $x; t = $ID;} ;\n"; - String expected = - ""; - testErrors(new String[] {grammar, expected}, false); - } - - public void testActions(String location, String[] pairs, String template) { - for (int i = 0; i < pairs.length; i+=2) { - String action = pairs[i]; - String expected = pairs[i+1]; - ST st = new ST(template); - st.add(location, action); - String grammar = st.render(); - testErrors(new String[] {grammar, expected}, false); - } - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java b/tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java deleted file mode 100644 index 79f953624..000000000 --- a/tool/test/org/antlr/v4/xtest/TestBasicSemanticErrors.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.tool.ErrorType; -import org.junit.Test; -import org.stringtemplate.v4.ST; - -public class TestBasicSemanticErrors extends BaseTest { - static String[] U = { - // INPUT - "parser grammar U;\n" + - "options { foo=bar; k=3;}\n" + - "tokens {\n" + - " ID,\n" + - " f,\n" + - " S\n" + - "}\n" + - "tokens { A }\n" + - "options { x=y; }\n" + - "\n" + - "a\n" + - "options { blech=bar; greedy=true; }\n" + - " : ID\n" + - " ;\n" + - "b : ( options { ick=bar; greedy=true; } : ID )+ ;\n" + - "c : ID ID ;", - // YIELDS - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:2:10: unsupported option foo\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:2:19: unsupported option k\n" + - "error(" + ErrorType.TOKEN_NAMES_MUST_START_UPPER.code + "): U.g4:5:8: token names must start with an uppercase letter: f\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:9:10: unsupported option x\n" + - "error(" + ErrorType.REPEATED_PREQUEL.code + "): U.g4:9:0: repeated grammar prequel spec (options, tokens, or import); please merge\n" + - "error(" + ErrorType.REPEATED_PREQUEL.code + "): U.g4:8:0: repeated grammar prequel spec (options, tokens, or import); please merge\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:12:10: unsupported option blech\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:12:21: unsupported option greedy\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:15:16: unsupported option ick\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:15:25: unsupported option greedy\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): U.g4:16:16: unsupported option x\n", - }; - - @Test public void testU() { super.testErrors(U, false); } - - /** - * Regression test for #25 "Don't allow labels on not token set subrules". 
- * https://github.com/antlr/antlr4/issues/25 - */ - @Test - public void testIllegalNonSetLabel() throws Exception { - String grammar = - "grammar T;\n" + - "ss : op=('=' | '+=' | expr) EOF;\n" + - "expr : '=' '=';\n" + - ""; - - String expected = - "error(" + ErrorType.LABEL_BLOCK_NOT_A_SET.code + "): T.g4:2:5: label op assigned to a block which is not a set\n"; - - testErrors(new String[] { grammar, expected }, false); - } - - @Test - public void testArgumentRetvalLocalConflicts() throws Exception { - String grammarTemplate = - "grammar T;\n" + - "ss[] returns []\n" + - "locals []\n" + - " : EOF;\n" + - "expr : '=';\n"; - - String expected = - "error(" + ErrorType.ARG_CONFLICTS_WITH_RULE.code + "): T.g4:2:7: parameter expr conflicts with rule with same name\n" + - "error(" + ErrorType.RETVAL_CONFLICTS_WITH_RULE.code + "): T.g4:2:26: return value expr conflicts with rule with same name\n" + - "error(" + ErrorType.LOCAL_CONFLICTS_WITH_RULE.code + "): T.g4:3:12: local expr conflicts with rule with same name\n" + - "error(" + ErrorType.RETVAL_CONFLICTS_WITH_ARG.code + "): T.g4:2:26: return value expr conflicts with parameter with same name\n" + - "error(" + ErrorType.LOCAL_CONFLICTS_WITH_ARG.code + "): T.g4:3:12: local expr conflicts with parameter with same name\n" + - "error(" + ErrorType.LOCAL_CONFLICTS_WITH_RETVAL.code + "): T.g4:3:12: local expr conflicts with return value with same name\n" + - "error(" + ErrorType.LABEL_CONFLICTS_WITH_RULE.code + "): T.g4:4:4: label expr conflicts with rule with same name\n" + - "error(" + ErrorType.LABEL_CONFLICTS_WITH_ARG.code + "): T.g4:4:4: label expr conflicts with parameter with same name\n" + - "error(" + ErrorType.LABEL_CONFLICTS_WITH_RETVAL.code + "): T.g4:4:4: label expr conflicts with return value with same name\n" + - "error(" + ErrorType.LABEL_CONFLICTS_WITH_LOCAL.code + "): T.g4:4:4: label expr conflicts with local with same name\n"; - ST grammarST = new ST(grammarTemplate); - grammarST.add("args", "int expr"); - grammarST.add("retvals", "int expr"); - grammarST.add("locals", "int expr"); - grammarST.add("body", "expr=expr"); - testErrors(new String[] { grammarST.render(), expected }, false); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java b/tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java deleted file mode 100644 index 940d85e6b..000000000 --- a/tool/test/org/antlr/v4/xtest/TestBufferedTokenStream.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.BufferedTokenStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.LexerInterpreter; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -public class TestBufferedTokenStream extends BaseTest { - - protected TokenStream createTokenStream(TokenSource src) { - return new BufferedTokenStream(src); - } - - @Test public void testFirstToken() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TokenStream tokens = createTokenStream(lexEngine); - - String result = tokens.LT(1).getText(); - String expecting = "x"; - assertEquals(expecting, result); - } - - @Test public void test2ndToken() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TokenStream tokens = createTokenStream(lexEngine); - - String result = tokens.LT(2).getText(); - String expecting = " "; - assertEquals(expecting, result); - } - - @Test public void testCompleteBuffer() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TokenStream tokens = createTokenStream(lexEngine); - - int i = 1; - Token t = tokens.LT(i); - while ( t.getType()!=Token.EOF ) { - i++; - t = tokens.LT(i); - } - tokens.LT(i++); // push it past end - tokens.LT(i++); - - String result = tokens.getText(); - String expecting = "x = 3 * 0 + 2 * 0;"; - assertEquals(expecting, result); - } - - @Test public void testCompleteBufferAfterConsuming() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - CharStream input = 
new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TokenStream tokens = createTokenStream(lexEngine); - - Token t = tokens.LT(1); - while ( t.getType()!=Token.EOF ) { - tokens.consume(); - t = tokens.LT(1); - } - - String result = tokens.getText(); - String expecting = "x = 3 * 0 + 2 * 0;"; - assertEquals(expecting, result); - } - - @Test public void testLookback() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;"); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TokenStream tokens = createTokenStream(lexEngine); - - tokens.consume(); // get x into buffer - Token t = tokens.LT(-1); - assertEquals("x", t.getText()); - - tokens.consume(); - tokens.consume(); // consume '=' - t = tokens.LT(-3); - assertEquals("x", t.getText()); - t = tokens.LT(-2); - assertEquals(" ", t.getText()); - t = tokens.LT(-1); - assertEquals("=", t.getText()); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestCodeGeneration.java b/tool/test/org/antlr/v4/xtest/TestCodeGeneration.java deleted file mode 100644 index ba9b7e541..000000000 --- a/tool/test/org/antlr/v4/xtest/TestCodeGeneration.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2014 Terence Parr - * Copyright (c) 2014 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -package org.antlr.v4.xtest; - -import org.antlr.runtime.RecognitionException; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; -import org.stringtemplate.v4.AutoIndentWriter; -import org.stringtemplate.v4.InstanceScope; -import org.stringtemplate.v4.Interpreter; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STWriter; -import org.stringtemplate.v4.misc.ErrorManager; -import org.stringtemplate.v4.misc.ErrorType; - -import java.io.IOException; -import java.io.StringWriter; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertFalse; - -public class TestCodeGeneration extends BaseTest { - @Test public void testArgDecl() throws Exception { // should use template not string - ErrorQueue equeue = new ErrorQueue(); - String g = - "grammar T;\n" + - "a[int xyz] : 'a' ;\n"; - List evals = getEvalInfoForString(g, "int xyz"); - System.out.println(evals); - for (int i = 0; i < evals.size(); i++) { - String eval = evals.get(i); - assertFalse("eval should not be POJO: "+eval, eval.startsWith(" evals = new ArrayList(); - ErrorManager myErrMgrCopy; - int tab = 0; - public DebugInterpreter(STGroup group, ErrorManager errMgr, boolean debug) { - super(group, errMgr, debug); - myErrMgrCopy = errMgr; - } - - @Override - protected int writeObject(STWriter out, InstanceScope scope, Object o, String[] options) { - if ( o instanceof ST ) { - String name = ((ST)o).getName(); - name = name.substring(1); - if ( !name.startsWith("_sub") ) { - try { - out.write(""); - evals.add(""); - int r = super.writeObject(out, scope, o, options); - out.write(""); - evals.add(""); - return r; - } catch (IOException ioe) { - myErrMgrCopy.IOError(scope.st, ErrorType.WRITE_IO_ERROR, ioe); - } - } - } - return super.writeObject(out, scope, o, options); - } - - @Override - protected int writePOJO(STWriter out, InstanceScope scope, Object o, String[] options) throws IOException { - Class type = o.getClass(); - String name = type.getSimpleName(); - out.write(""+o.toString()+""); - evals.add("" + o.toString() + ""); - return super.writePOJO(out, scope, o, options); - } - - public void indent(STWriter out) throws IOException { - for (int i=1; i<=tab; i++) { - out.write("\t"); - } - } - } - - public List getEvalInfoForString(String grammarString, String pattern) throws RecognitionException { - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammarString); - List evals = new ArrayList(); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if (g.isLexer()) factory = new LexerATNFactory((LexerGrammar) g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - -// STViz viz = outputFileST.inspect(); -// try { -// viz.waitForClose(); -// } -// catch (Exception e) { -// e.printStackTrace(); -// } - - boolean debug = false; - DebugInterpreter interp = - new DebugInterpreter(outputFileST.groupThatCreatedThisInstance, - outputFileST.impl.nativeGroup.errMgr, - debug); - InstanceScope scope = new InstanceScope(null, outputFileST); - StringWriter sw = new StringWriter(); - AutoIndentWriter 
out = new AutoIndentWriter(sw); - interp.exec(out, scope); - - for (String e : interp.evals) { - if (e.contains(pattern)) { - evals.add(e); - } - } - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString()); - } - return evals; - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java b/tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java deleted file mode 100644 index 68c7df1ca..000000000 --- a/tool/test/org/antlr/v4/xtest/TestCommonTokenStream.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenFactory; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestCommonTokenStream extends TestBufferedTokenStream { - - @Override - protected TokenStream createTokenStream(TokenSource src) { - return new CommonTokenStream(src); - } - - @Test public void testOffChannel() throws Exception { - TokenSource lexer = // simulate input " x =34 ;\n" - new TokenSource() { - int i = 0; - WritableToken[] tokens = { - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, - new CommonToken(1,"x"), - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, - new CommonToken(1,"="), - new CommonToken(1,"34"), - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, - new CommonToken(1,";"), - new CommonToken(1,"\n") {{channel = Lexer.HIDDEN;}}, - new CommonToken(Token.EOF,"") - }; - @Override - public Token nextToken() { - return tokens[i++]; - } - @Override - public String getSourceName() { return "test"; } - @Override - public int getCharPositionInLine() { - return 0; - } - @Override - public int getLine() { - return 0; - } - @Override - public CharStream getInputStream() { - return null; - } - - @Override - public void setTokenFactory(TokenFactory factory) { - } - - @Override - public TokenFactory getTokenFactory() { - return null; - } - }; - - CommonTokenStream tokens = new CommonTokenStream(lexer); - - assertEquals("x", tokens.LT(1).getText()); // must skip first off channel token - tokens.consume(); - assertEquals("=", tokens.LT(1).getText()); - assertEquals("x", tokens.LT(-1).getText()); - - tokens.consume(); - assertEquals("34", tokens.LT(1).getText()); - assertEquals("=", tokens.LT(-1).getText()); - - tokens.consume(); - assertEquals(";", tokens.LT(1).getText()); - assertEquals("34", tokens.LT(-1).getText()); - - tokens.consume(); - assertEquals(Token.EOF, tokens.LA(1)); - assertEquals(";", tokens.LT(-1).getText()); - - assertEquals("34", tokens.LT(-2).getText()); - assertEquals("=", tokens.LT(-3).getText()); - assertEquals("x", tokens.LT(-4).getText()); - } - - @Test public void testFetchOffChannel() throws Exception { - TokenSource lexer = // simulate input " x =34 ; \n" - // token indexes 01234 56789 - new TokenSource() { - int i = 0; - WritableToken[] tokens = { - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 0 - new CommonToken(1,"x"), // 1 - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 2 - new CommonToken(1,"="), // 3 - new CommonToken(1,"34"), // 4 - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 5 - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}}, // 6 - new CommonToken(1,";"), // 7 - new CommonToken(1," ") {{channel = Lexer.HIDDEN;}},// 8 - new CommonToken(1,"\n") {{channel = Lexer.HIDDEN;}},// 9 - new CommonToken(Token.EOF,"") // 10 - }; - @Override - public Token nextToken() { - return tokens[i++]; - } - @Override - public String getSourceName() { return "test"; } - @Override - public int getCharPositionInLine() { - return 0; - } - @Override - public int getLine() { - return 0; - } - @Override - public CharStream getInputStream() { - return null; - } - 
- @Override - public void setTokenFactory(TokenFactory factory) { - } - - @Override - public TokenFactory getTokenFactory() { - return null; - } - }; - - CommonTokenStream tokens = new CommonTokenStream(lexer); - tokens.fill(); - assertEquals(null, tokens.getHiddenTokensToLeft(0)); - assertEquals(null, tokens.getHiddenTokensToRight(0)); - - assertEquals("[[@0,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToLeft(1).toString()); - assertEquals("[[@2,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToRight(1).toString()); - - assertEquals(null, tokens.getHiddenTokensToLeft(2)); - assertEquals(null, tokens.getHiddenTokensToRight(2)); - - assertEquals("[[@2,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToLeft(3).toString()); - assertEquals(null, tokens.getHiddenTokensToRight(3)); - - assertEquals(null, tokens.getHiddenTokensToLeft(4)); - assertEquals("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToRight(4).toString()); - - assertEquals(null, tokens.getHiddenTokensToLeft(5)); - assertEquals("[[@6,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToRight(5).toString()); - - assertEquals("[[@5,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToLeft(6).toString()); - assertEquals(null, tokens.getHiddenTokensToRight(6)); - - assertEquals("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToLeft(7).toString()); - assertEquals("[[@8,0:0=' ',<1>,channel=1,0:-1], [@9,0:0='\\n',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToRight(7).toString()); - - assertEquals(null, tokens.getHiddenTokensToLeft(8)); - assertEquals("[[@9,0:0='\\n',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToRight(8).toString()); - - assertEquals("[[@8,0:0=' ',<1>,channel=1,0:-1]]", - tokens.getHiddenTokensToLeft(9).toString()); - assertEquals(null, tokens.getHiddenTokensToRight(9)); - } - - @Test - public void testSingleEOF() throws Exception { - TokenSource lexer = new TokenSource() { - - @Override - public Token nextToken() { - return new CommonToken(Token.EOF); - } - - @Override - public int getLine() { - return 0; - } - - @Override - public int getCharPositionInLine() { - return 0; - } - - @Override - public CharStream getInputStream() { - return null; - } - - @Override - public String getSourceName() { - return IntStream.UNKNOWN_SOURCE_NAME; - } - - @Override - public TokenFactory getTokenFactory() { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public void setTokenFactory(TokenFactory factory) { - throw new UnsupportedOperationException("Not supported yet."); - } - }; - - CommonTokenStream tokens = new CommonTokenStream(lexer); - tokens.fill(); - - assertEquals(Token.EOF, tokens.LA(1)); - assertEquals(0, tokens.index()); - assertEquals(1, tokens.size()); - } - - @Test(expected = IllegalStateException.class) - public void testCannotConsumeEOF() throws Exception { - TokenSource lexer = new TokenSource() { - - @Override - public Token nextToken() { - return new CommonToken(Token.EOF); - } - - @Override - public int getLine() { - return 0; - } - - @Override - public int getCharPositionInLine() { - return 0; - } - - @Override - public CharStream getInputStream() { - return null; - } - - @Override - public String getSourceName() { - return IntStream.UNKNOWN_SOURCE_NAME; - } - - @Override - public TokenFactory getTokenFactory() { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public void setTokenFactory(TokenFactory factory) { - 
throw new UnsupportedOperationException("Not supported yet."); - } - }; - - CommonTokenStream tokens = new CommonTokenStream(lexer); - tokens.fill(); - - assertEquals(Token.EOF, tokens.LA(1)); - assertEquals(0, tokens.index()); - assertEquals(1, tokens.size()); - tokens.consume(); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java b/tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java deleted file mode 100644 index 82ba4c9f4..000000000 --- a/tool/test/org/antlr/v4/xtest/TestCompositeGrammars.java +++ /dev/null @@ -1,820 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.tool.ErrorType; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class TestCompositeGrammars extends BaseTest { - protected boolean debug = false; - - @Test public void testImportFileLocationInSubdir() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - String subdir = tmpdir + "/sub"; - mkdir(subdir); - writeFile(subdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - ErrorQueue equeue = antlr("M.g4", false, "-lib", subdir); - assertEquals(equeue.size(), 0); - } - - @Test public void testImportFileNotSearchedForInOutputDir() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - String outdir = tmpdir + "/out"; - mkdir(outdir); - writeFile(outdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - ErrorQueue equeue = antlr("M.g4", false, "-o", outdir); - assertEquals(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, equeue.errors.get(0).getErrorType()); - } - - @Test public void testOutputDirShouldNotEffectImports() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - String subdir = tmpdir + "/sub"; - mkdir(subdir); - writeFile(subdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - String outdir = tmpdir + "/out"; - mkdir(outdir); - ErrorQueue equeue = antlr("M.g4", false, "-o", outdir, "-lib", subdir); - assertEquals(0, equeue.size()); - } - - @Test public void testTokensFileInOutputDirAndImportFileInSubdir() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - String subdir = tmpdir + "/sub"; - mkdir(subdir); - writeFile(subdir, "S.g4", slave); - String parser = - "parser grammar MParser;\n" + - "import S;\n" + - "options {tokenVocab=MLexer;}\n" + - "s : a ;\n"; - writeFile(tmpdir, "MParser.g4", parser); - String lexer = - "lexer grammar MLexer;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "MLexer.g4", lexer); - String outdir = tmpdir + "/out"; - mkdir(outdir); - ErrorQueue equeue = antlr("MLexer.g4", false, "-o", outdir); - assertEquals(0, equeue.size()); - equeue = antlr("MParser.g4", false, "-o", outdir, "-lib", subdir); - assertEquals(0, equeue.size()); - } - - @Test public void testDelegatorInvokesDelegateRule() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - 
assertEquals("S.a\n", found); - } - - @Test public void testBringInLiteralsFromDelegate() throws Exception { - String slave = - "parser grammar S;\n" + - "a : '=' 'a' {System.out.println(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "=a", debug); - assertEquals("S.a\n", found); - } - - @Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception { - // must generate something like: - // public int a(int x) throws RecognitionException { return gS.a(x); } - // in M. - String slave = - "parser grammar S;\n" + - "a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : label=a[3] {System.out.println($label.y);} ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.a1000\n", found); - } - - @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { - // must generate something like: - // public int a(int x) throws RecognitionException { return gS.a(x); } - // in M. - String slave = - "parser grammar S;\n" + - "a : B {System.out.print(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a {System.out.println($a.text);} ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.ab\n", found); - } - - @Test public void testDelegatorAccessesDelegateMembers() throws Exception { - String slave = - "parser grammar S;\n" + - "@parser::members {\n" + - " public void foo() {System.out.println(\"foo\");}\n" + - "}\n" + - "a : B ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + // uses no rules from the import - "import S;\n" + - "s : 'b' {foo();} ;\n" + // gS is import pointer - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("foo\n", found); - } - - @Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception { - String slave = - "parser grammar S;\n" + - "a : b {System.out.println(\"S.a\");} ;\n" + - "b : B ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String slave2 = - "parser grammar T;\n" + - "a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a - writeFile(tmpdir, "T.g4", slave2); - String master = - "grammar M;\n" + - "import S,T;\n" + - "s : a ;\n" + - "B : 'b' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("S.a\n", found); - } - - @Test public void testDelegatesSeeSameTokenType() throws Exception { - String slave = - "parser grammar S;\n" + // A, B, C token type order - "tokens { A, B, C }\n" + - "x : A {System.out.println(\"S.x\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String slave2 = - "parser grammar T;\n" + - "tokens { C, B, A }\n" + // reverse order - "y : A {System.out.println(\"T.y\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave2); - // The lexer will 
create rules to match letters a, b, c. - // The associated token types A, B, C must have the same value - // and all import'd parsers. Since ANTLR regenerates all imports - // for use with the delegator M, it can generate the same token type - // mapping in each parser: - // public static final int C=6; - // public static final int EOF=-1; - // public static final int B=5; - // public static final int WS=7; - // public static final int A=4; - - String master = - "grammar M;\n" + - "import S,T;\n" + - "s : x y ;\n" + // matches AA, which should be "aa" - "B : 'b' ;\n" + // another order: B, A, C - "A : 'a' ;\n" + - "C : 'c' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "aa", debug); - assertEquals("S.x\n" + - "T.y\n", found); - } - - @Test public void testDelegatesSeeSameTokenType2() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar S;\n" + // A, B, C token type order - "tokens { A, B, C }\n" + - "x : A {System.out.println(\"S.x\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String slave2 = - "parser grammar T;\n" + - "tokens { C, B, A }\n" + // reverse order - "y : A {System.out.println(\"T.y\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave2); - - String master = - "grammar M;\n" + - "import S,T;\n" + - "s : x y ;\n" + // matches AA, which should be "aa" - "B : 'b' ;\n" + // another order: B, A, C - "A : 'a' ;\n" + - "C : 'c' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - String expectedTokenIDToTypeMap = "{EOF=-1, B=1, A=2, C=3, WS=4}"; - String expectedStringLiteralToTypeMap = "{'a'=2, 'b'=1, 'c'=3}"; - String expectedTypeToTokenList = "[B, A, C, WS]"; - - assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString()); - assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "aa", debug); - assertEquals("S.x\n" + - "T.y\n", found); - } - - @Test public void testCombinedImportsCombined() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "grammar S;\n" + // A, B, C token type order - "tokens { A, B, C }\n" + - "x : 'x' INT {System.out.println(\"S.x\");} ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - String master = - "grammar M;\n" + - "import S;\n" + - "s : x INT ;\n"; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "x 34 9", debug); - assertEquals("S.x\n", found); - } - - @Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar S;\n" + - "options {tokenVocab=whatever;}\n" + - "tokens { A }\n" + - "x : A {System.out.println(\"S.x\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - String master = - "grammar M;\n" + - "import S;\n" + - "s : x ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - 
- Object expectedArg = "S"; - ErrorType expectedMsgID = ErrorType.OPTIONS_IN_DELEGATE; - GrammarSemanticsMessage expectedMessage = - new GrammarSemanticsMessage(expectedMsgID, g.fileName, null, expectedArg); - checkGrammarSemanticsWarning(equeue, expectedMessage); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size()); - } - - @Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar S;\n" + - "options {toke\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - String master = - "grammar M;\n" + - "import S;\n" + - "s : x ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - assertEquals(ErrorType.SYNTAX_ERROR, equeue.errors.get(0).getErrorType()); - } - - @Test public void testDelegatorRuleOverridesDelegate() throws Exception { - String slave = - "parser grammar S;\n" + - "a : b {System.out.println(\"S.a\");} ;\n" + - "b : B ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "b : 'b'|'c' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "a", "c", debug); - assertEquals("S.a\n", found); - } - - @Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception { - String slave = - "parser grammar JavaDecl;\n" + - "type : 'int' ;\n" + - "decl : type ID ';'\n" + - " | type ID init ';' {System.out.println(\"JavaDecl: \"+$text);}\n" + - " ;\n" + - "init : '=' INT ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "JavaDecl.g4", slave); - String master = - "grammar Java;\n" + - "import JavaDecl;\n" + - "prog : decl ;\n" + - "type : 'int' | 'float' ;\n" + - "\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - // for float to work in decl, type must be overridden - String found = execParser("Java.g4", master, "JavaParser", "JavaLexer", - "prog", "float x = 3;", debug); - assertEquals("JavaDecl: floatx=3;\n", found); - } - - @Test public void testDelegatorRuleOverridesDelegates() throws Exception { - String slave = - "parser grammar S;\n" + - "a : b {System.out.println(\"S.a\");} ;\n" + - "b : 'b' ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - String slave2 = - "parser grammar T;\n" + - "tokens { A }\n" + - "b : 'b' {System.out.println(\"T.b\");} ;\n"; - writeFile(tmpdir, "T.g4", slave2); - - String master = - "grammar M;\n" + - "import S, T;\n" + - "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "a", "c", debug); - assertEquals("M.b\n" + - "S.a\n", found); - } - // LEXER INHERITANCE - - @Test public void testLexerDelegatorInvokesDelegateRule() throws Exception { - String slave = - "lexer grammar S;\n" + - "A : 'a' {System.out.println(\"S.A\");} ;\n" + - "C : 'c' ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "lexer grammar M;\n" + - "import S;\n" + - "B : 'b' ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String expecting = - "S.A\n" + - "[@0,0:0='a',<3>,1:0]\n" + - "[@1,1:1='b',<1>,1:1]\n" + - "[@2,2:2='c',<4>,1:2]\n" + - "[@3,3:2='',<-1>,1:3]\n"; - String found = execLexer("M.g4", master, "M", "abc", debug); - assertEquals(expecting, found); - } - - @Test public void 
testLexerDelegatorRuleOverridesDelegate() throws Exception { - String slave = - "lexer grammar S;\n" + - "A : 'a' {System.out.println(\"S.A\");} ;\n" + - "B : 'b' {System.out.println(\"S.B\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "lexer grammar M;\n" + - "import S;\n" + - "A : 'a' B {System.out.println(\"M.A\");} ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execLexer("M.g4", master, "M", "ab", debug); - assertEquals("M.A\n" + - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:1='',<-1>,1:2]\n", found); - } - - @Test public void testKeywordVSIDOrder() throws Exception { - // rules in lexer are imported at END so rules in master override - // *and* get priority over imported rules. So importing ID doesn't - // mess up keywords in master grammar - ErrorQueue equeue = new ErrorQueue(); - String slave = - "lexer grammar S;\n" + - "ID : 'a'..'z'+ ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "a : A {System.out.println(\"M.a: \"+$A);} ;\n" + - "A : 'abc' {System.out.println(\"M.A\");} ;\n" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "a", "abc", debug); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size()); - - assertEquals("M.A\n" + - "M.a: [@0,0:2='abc',<1>,1:0]\n", found); - } - - // Make sure that M can import S that imports T. - @Test public void test3LevelImport() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar T;\n" + - "a : T ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave); - String slave2 = - "parser grammar S;\n" + - "import T;\n" + - "a : S ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave2); - - String master = - "grammar M;\n" + - "import S;\n" + - "a : M ;\n" ; - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - String expectedTokenIDToTypeMap = "{EOF=-1, M=1}"; // S and T aren't imported; overridden - String expectedStringLiteralToTypeMap = "{}"; - String expectedTypeToTokenList = "[M]"; - - assertEquals(expectedTokenIDToTypeMap, - g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); - assertEquals(expectedTypeToTokenList, - realElements(g.typeToTokenList).toString()); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - boolean ok = - rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null); - boolean expecting = true; // should be ok - assertEquals(expecting, ok); - } - - @Test public void testBigTreeOfImports() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar T;\n" + - "tokens{T}\n" + - "x : T ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave); - slave = - "parser grammar S;\n" + - "import T;\n" + - "tokens{S}\n" + - "y : S ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - - slave = - "parser grammar C;\n" + - "tokens{C}\n" + - "i : C ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "C.g4", slave); - slave = - "parser grammar B;\n" + - "tokens{B}\n" + - "j : B ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "B.g4", slave); - slave = - "parser grammar A;\n" + - "import B,C;\n" + - "tokens{A}\n" + - "k : A ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "A.g4", slave); - - String master = - "grammar M;\n" + - "import S,A;\n" + - "tokens{M}\n" + - "a : M ;\n" ; - 
writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - assertEquals("[]", equeue.errors.toString()); - assertEquals("[]", equeue.warnings.toString()); - String expectedTokenIDToTypeMap = "{EOF=-1, M=1, S=2, T=3, A=4, B=5, C=6}"; - String expectedStringLiteralToTypeMap = "{}"; - String expectedTypeToTokenList = "[M, S, T, A, B, C]"; - - assertEquals(expectedTokenIDToTypeMap, - g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); - assertEquals(expectedTypeToTokenList, - realElements(g.typeToTokenList).toString()); - - boolean ok = - rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null); - boolean expecting = true; // should be ok - assertEquals(expecting, ok); - } - - @Test public void testRulesVisibleThroughMultilevelImport() throws Exception { - ErrorQueue equeue = new ErrorQueue(); - String slave = - "parser grammar T;\n" + - "x : T ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "T.g4", slave); - String slave2 = - "parser grammar S;\n" + // A, B, C token type order - "import T;\n" + - "a : S ;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave2); - - String master = - "grammar M;\n" + - "import S;\n" + - "a : M x ;\n" ; // x MUST BE VISIBLE TO M - writeFile(tmpdir, "M.g4", master); - Grammar g = new Grammar(tmpdir+"/M.g4", master, equeue); - - String expectedTokenIDToTypeMap = "{EOF=-1, M=1, T=2}"; - String expectedStringLiteralToTypeMap = "{}"; - String expectedTypeToTokenList = "[M, T]"; - - assertEquals(expectedTokenIDToTypeMap, - g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); - assertEquals(expectedTypeToTokenList, - realElements(g.typeToTokenList).toString()); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - } - - @Test public void testNestedComposite() throws Exception { - // Wasn't compiling. 
http://www.antlr.org/jira/browse/ANTLR-438 - ErrorQueue equeue = new ErrorQueue(); - String gstr = - "lexer grammar L;\n" + - "T1: '1';\n" + - "T2: '2';\n" + - "T3: '3';\n" + - "T4: '4';\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "L.g4", gstr); - gstr = - "parser grammar G1;\n" + - "s: a | b;\n" + - "a: T1;\n" + - "b: T2;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "G1.g4", gstr); - - gstr = - "parser grammar G2;\n" + - "import G1;\n" + - "a: T3;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "G2.g4", gstr); - String G3str = - "grammar G3;\n" + - "import G2;\n" + - "b: T4;\n" ; - mkdir(tmpdir); - writeFile(tmpdir, "G3.g4", G3str); - - Grammar g = new Grammar(tmpdir+"/G3.g4", G3str, equeue); - - String expectedTokenIDToTypeMap = "{EOF=-1, T4=1, T3=2}"; - String expectedStringLiteralToTypeMap = "{}"; - String expectedTypeToTokenList = "[T4, T3]"; - - assertEquals(expectedTokenIDToTypeMap, - g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); - assertEquals(expectedTypeToTokenList, - realElements(g.typeToTokenList).toString()); - - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - boolean ok = - rawGenerateAndBuildRecognizer("G3.g4", G3str, "G3Parser", null); - boolean expecting = true; // should be ok - assertEquals(expecting, ok); - } - - @Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception { - String slave = - "parser grammar S;\n" + - "a : B {System.out.print(\"S.a\");} ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "@header{package mypackage;}\n" + - "s : a ;\n" + - "B : 'b' ;" + // defines B from inherited token space - "WS : (' '|'\\n') -> skip ;\n" ; - ErrorQueue equeue = antlr("M.g4", master, false); - int expecting = 0; // should be ok - assertEquals(expecting, equeue.errors.size()); - } - - @Test public void testImportedRuleWithAction() throws Exception { - // wasn't terminating. @after was injected into M as if it were @members - String slave = - "parser grammar S;\n" + - "a @after {int x;} : B ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("", found); - } - - @Test public void testImportedGrammarWithEmptyOptions() throws Exception { - String slave = - "parser grammar S;\n" + - "options {}\n" + - "a : B ;\n"; - mkdir(tmpdir); - writeFile(tmpdir, "S.g4", slave); - String master = - "grammar M;\n" + - "import S;\n" + - "s : a ;\n" + - "B : 'b' ;" + - "WS : (' '|'\\n') -> skip ;\n" ; - String found = execParser("M.g4", master, "MParser", "MLexer", - "s", "b", debug); - assertEquals("", found); - } - - /** - * This is a regression test for antlr/antlr4#248 "Including grammar with - * only fragments breaks generated lexer". 
- * https://github.com/antlr/antlr4/issues/248 - */ - @Test public void testImportLexerWithOnlyFragmentRules() { - String slave = - "lexer grammar Unicode;\n" + - "\n" + - "fragment\n" + - "UNICODE_CLASS_Zs : '\\u0020' | '\\u00A0' | '\\u1680' | '\\u180E'\n" + - " | '\\u2000'..'\\u200A'\n" + - " | '\\u202F' | '\\u205F' | '\\u3000'\n" + - " ;\n"; - String master = - "grammar Test;\n" + - "import Unicode;\n" + - "\n" + - "program : 'test' 'test' ;\n" + - "\n" + - "WS : (UNICODE_CLASS_Zs)+ -> skip;\n"; - - mkdir(tmpdir); - writeFile(tmpdir, "Unicode.g4", slave); - String found = execParser("Test.g4", master, "TestParser", "TestLexer", "program", "test test", debug); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#670 "exception when importing - * grammar". - * https://github.com/antlr/antlr4/issues/670 - */ - @Test - public void testImportLargeGrammar() throws Exception { - String slave = load("Java.g4", "UTF-8"); - String master = - "grammar NewJava;\n" + - "import Java;\n"; - - System.out.println("dir "+tmpdir); - mkdir(tmpdir); - writeFile(tmpdir, "Java.g4", slave); - String found = execParser("NewJava.g4", master, "NewJavaParser", "NewJavaLexer", "compilationUnit", "package Foo;", debug); - assertEquals("", found); - assertNull(stderrDuringParse); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestFastQueue.java b/tool/test/org/antlr/v4/xtest/TestFastQueue.java deleted file mode 100644 index 390d60428..000000000 --- a/tool/test/org/antlr/v4/xtest/TestFastQueue.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
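The TestFastQueue removal that begins here covered the FIFO contract of org.antlr.runtime.misc.FastQueue, the ANTLR v3 runtime queue these tool tests pull in. For reference, a short sketch of the behaviour the tests below assert, assuming that same class:

    FastQueue<String> q = new FastQueue<String>();
    q.add("a");
    q.add("b");
    q.add("c");
    String head = q.remove();   // "a": elements come out in insertion order
    // q.toString() renders the remaining elements space separated, e.g. "b c"
    // remove() on an empty queue throws NoSuchElementException with the
    // message checked in the error-path tests ("queue index 0 > last index -1")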
- */ -package org.antlr.v4.xtest; - -import org.antlr.runtime.misc.FastQueue; -import org.junit.Test; - -import java.util.NoSuchElementException; - -import static org.junit.Assert.assertEquals; - -public class TestFastQueue { - @Test public void testQueueNoRemove() throws Exception { - FastQueue q = new FastQueue(); - q.add("a"); - q.add("b"); - q.add("c"); - q.add("d"); - q.add("e"); - String expecting = "a b c d e"; - String found = q.toString(); - assertEquals(expecting, found); - } - - @Test public void testQueueThenRemoveAll() throws Exception { - FastQueue q = new FastQueue(); - q.add("a"); - q.add("b"); - q.add("c"); - q.add("d"); - q.add("e"); - StringBuilder buf = new StringBuilder(); - while ( q.size()>0 ) { - String o = q.remove(); - buf.append(o); - if ( q.size()>0 ) buf.append(" "); - } - assertEquals("queue should be empty", 0, q.size()); - String expecting = "a b c d e"; - String found = buf.toString(); - assertEquals(expecting, found); - } - - @Test public void testQueueThenRemoveOneByOne() throws Exception { - StringBuilder buf = new StringBuilder(); - FastQueue q = new FastQueue(); - q.add("a"); - buf.append(q.remove()); - q.add("b"); - buf.append(q.remove()); - q.add("c"); - buf.append(q.remove()); - q.add("d"); - buf.append(q.remove()); - q.add("e"); - buf.append(q.remove()); - assertEquals("queue should be empty", 0, q.size()); - String expecting = "abcde"; - String found = buf.toString(); - assertEquals(expecting, found); - } - - // E r r o r s - - @Test public void testGetFromEmptyQueue() throws Exception { - FastQueue q = new FastQueue(); - String msg = null; - try { q.remove(); } - catch (NoSuchElementException nsee) { - msg = nsee.getMessage(); - } - String expecting = "queue index 0 > last index -1"; - String found = msg; - assertEquals(expecting, found); - } - - @Test public void testGetFromEmptyQueueAfterSomeAdds() throws Exception { - FastQueue q = new FastQueue(); - q.add("a"); - q.add("b"); - q.remove(); - q.remove(); - String msg = null; - try { q.remove(); } - catch (NoSuchElementException nsee) { - msg = nsee.getMessage(); - } - String expecting = "queue index 0 > last index -1"; - String found = msg; - assertEquals(expecting, found); - } - - @Test public void testGetFromEmptyQueueAfterClear() throws Exception { - FastQueue q = new FastQueue(); - q.add("a"); - q.add("b"); - q.clear(); - String msg = null; - try { q.remove(); } - catch (NoSuchElementException nsee) { - msg = nsee.getMessage(); - } - String expecting = "queue index 0 > last index -1"; - String found = msg; - assertEquals(expecting, found); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestFullContextParsing.java b/tool/test/org/antlr/v4/xtest/TestFullContextParsing.java deleted file mode 100644 index c8a2b0e78..000000000 --- a/tool/test/org/antlr/v4/xtest/TestFullContextParsing.java +++ /dev/null @@ -1,356 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/* - cover these cases: - dead end - single alt - single alt + preds - conflict - conflict + preds - - */ -public class TestFullContextParsing extends BaseTest { - @Test public void testAmbigYieldsCtxSensitiveDFA() { - String grammar = - "grammar T;\n"+ - "s" + - "@after {dumpDFA();}\n" + - " : ID | ID {;} ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "abc", true); - String expecting = - "Decision 0:\n" + - "s0-ID->:s1^=>1\n"; // ctx sensitive - assertEquals(expecting, result); - assertEquals("line 1:0 reportAttemptingFullContext d=0 (s), input='abc'\n", - this.stderrDuringParse); - } - - public String testCtxSensitiveDFA(String input) { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : '$' a | '@' b ;\n" + - "a : e ID ;\n" + - "b : e INT ID ;\n" + - "e : INT | ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - return execParser("T.g4", grammar, "TParser", "TLexer", "s", input, true); - } - - @Test - public void testCtxSensitiveDFA1() { - String result = testCtxSensitiveDFA("$ 34 abc"); - String expecting = - "Decision 1:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + - "line 1:2 reportContextSensitivity d=1 (e), input='34'\n", - this.stderrDuringParse); - } - - @Test - public void testCtxSensitiveDFA2() { - String result = testCtxSensitiveDFA("@ 34 abc"); - String expecting = - "Decision 1:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:5 reportAttemptingFullContext d=1 (e), input='34abc'\n" + - "line 1:5 reportContextSensitivity d=1 (e), input='34abc'\n", - this.stderrDuringParse); - } - - @Test public void testCtxSensitiveDFATwoDiffInput() { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : ('$' a | '@' b)+ ;\n" + - "a : e ID ;\n" + - "b : e INT ID ;\n" + - "e : INT | ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "$ 34 abc @ 34 abc", true); - String expecting = - "Decision 2:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:5 reportAttemptingFullContext d=2 (e), input='34abc'\n" + - "line 1:2 reportContextSensitivity d=2 (e), input='34'\n" + - "line 1:14 
reportAttemptingFullContext d=2 (e), input='34abc'\n" + - "line 1:14 reportContextSensitivity d=2 (e), input='34abc'\n", - this.stderrDuringParse); - } - - @Test - public void testSLLSeesEOFInLLGrammar() { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : a ;\n" + - "a : e ID ;\n" + - "b : e INT ID ;\n" + - "e : INT | ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "34 abc", true); - String expecting = - "Decision 0:\n" + - "s0-INT->s1\n" + - "s1-ID->:s2^=>1\n"; // Must point at accept state - assertEquals(expecting, result); - assertEquals("line 1:3 reportAttemptingFullContext d=0 (e), input='34abc'\n" + - "line 1:0 reportContextSensitivity d=0 (e), input='34'\n", - this.stderrDuringParse); - } - - @Test public void testFullContextIF_THEN_ELSEParse() { - String grammar = - "grammar T;\n"+ - "s" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - "@after {dumpDFA();}\n" + - " : '{' stat* '}'" + - " ;\n" + - "stat: 'if' ID 'then' stat ('else' ID)?\n" + - " | 'return'\n" + - " ;" + - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String input = "{ if x then return }"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - String expecting = - "Decision 1:\n" + - "s0-'}'->:s1=>2\n"; - assertEquals(expecting, result); - assertEquals(null, this.stderrDuringParse); - - input = "{ if x then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - // Technically, this input sequence is not ambiguous because else - // uniquely predicts going into the optional subrule. else cannot - // be matched by exiting stat since that would only match '}' or - // the start of a stat. But, we are using the theory that - // SLL(1)=LL(1) and so we are avoiding full context parsing - // by declaring all else clause parsing to be ambiguous. - assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n", - this.stderrDuringParse); - - input = - "{ if x then if y then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'}'->:s2=>2\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", - this.stderrDuringParse); - - // should not be ambiguous because the second 'else bar' clearly - // indicates that the first else should match to the innermost if. 
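The stderr lines asserted throughout these tests (reportAttemptingFullContext, reportContextSensitivity, reportAmbiguity) are produced by a DiagnosticErrorListener attached by the test harness, while the grammars' @init actions switch the ATN simulator to exact ambiguity detection. Outside this harness, the equivalent setup on a generated parser is roughly the following sketch (parser stands for any generated parser instance; the start rule s is illustrative):

    // assumes: import org.antlr.v4.runtime.DiagnosticErrorListener;
    //          import org.antlr.v4.runtime.atn.PredictionMode;
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    parser.addErrorListener(new DiagnosticErrorListener());  // emits the report* messages
    parser.s();                                              // invoke the start rule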
- // LL_EXACT_AMBIG_DETECTION makes us keep going to resolve - - input = - "{ if x then if y then return else foo else bar }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportContextSensitivity d=1 (stat), input='elsefooelse'\n" + - "line 1:38 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:38 reportContextSensitivity d=1 (stat), input='else'\n", - this.stderrDuringParse); - - input = - "{ if x then return else foo\n" + - "if x then if y then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'}'->:s2=>2\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + - "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", - this.stderrDuringParse); - - input = - "{ if x then return else foo\n" + - "if x then if y then return else foo }"; - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - input, true); - expecting = - "Decision 1:\n" + - "s0-'}'->:s2=>2\n" + - "s0-'else'->:s1^=>1\n"; - assertEquals(expecting, result); - assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" + - "line 2:27 reportAttemptingFullContext d=1 (stat), input='else'\n" + - "line 2:36 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n", - this.stderrDuringParse); - } - - /** - * Tests predictions for the following case involving closures. - * http://www.antlr.org/wiki/display/~admin/2011/12/29/Flaw+in+ANTLR+v3+LL(*)+analysis+algorithm - */ - @Test - public void testLoopsSimulateTailRecursion() throws Exception { - String grammar = - "grammar T;\n" + - "prog\n" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " : expr_or_assign*;\n" + - "expr_or_assign\n" + - " : expr '++' {System.out.println(\"fail.\");}\n" + - " | expr {System.out.println(\"pass: \"+$expr.text);}\n" + - " ;\n" + - "expr: expr_primary ('<-' ID)? 
;\n" + - "expr_primary\n" + - " : '(' ID ')'\n" + - " | ID '(' ID ')'\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" + - ""; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a(i)<-x", true); - assertEquals("pass: a(i)<-x\n", found); - - String expecting = - "line 1:3 reportAttemptingFullContext d=3 (expr_primary), input='a(i)'\n" + - "line 1:7 reportAmbiguity d=3 (expr_primary): ambigAlts={2, 3}, input='a(i)<-x'\n"; - assertEquals(expecting, this.stderrDuringParse); - } - - @Test - public void testAmbiguityNoLoop() throws Exception { - // simpler version of testLoopsSimulateTailRecursion, no loops - String grammar = - "grammar T;\n" + - "prog\n" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " : expr expr {System.out.println(\"alt 1\");}\n" + - " | expr\n" + - " ;\n" + - "expr: '@'\n" + - " | ID '@'\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", "a@", true); - assertEquals("alt 1\n", found); - - String expecting = - "line 1:2 reportAttemptingFullContext d=0 (prog), input='a@'\n" + - "line 1:2 reportAmbiguity d=0 (prog): ambigAlts={1, 2}, input='a@'\n" + - "line 1:2 reportAttemptingFullContext d=1 (expr), input='a@'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='a@'\n"; - assertEquals(expecting, this.stderrDuringParse); - } - - @Test - public void testExprAmbiguity() throws Exception { - // translated left-recursive expr rule to test ambig detection - String grammar = - "grammar T;\n" + - "s\n" + - "@init {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " : expr[0] {System.out.println($expr.ctx.toStringTree(this));} ;\n" + - "\n" + - "expr[int _p]\n" + - " : ID\n" + - " ( {5 >= $_p}? '*' expr[6]\n" + - " | {4 >= $_p}? '+' expr[5]\n" + - " )*\n" + - " ;\n" + - "\n" + - "ID : [a-zA-Z]+ ; // match identifiers\n" + - "WS : [ \\t\\r\\n]+ -> skip ; // toss out whitespace\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a+b", true); - assertEquals("(expr a + (expr b))\n", found); - - String expecting = - "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n"; - assertEquals(expecting, this.stderrDuringParse); - - found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a+b*c", true); - assertEquals("(expr a + (expr b * (expr c)))\n", found); - - expecting = - "line 1:1 reportAttemptingFullContext d=1 (expr), input='+'\n" + - "line 1:2 reportContextSensitivity d=1 (expr), input='+b'\n" + - "line 1:3 reportAttemptingFullContext d=1 (expr), input='*'\n" + - "line 1:5 reportAmbiguity d=1 (expr): ambigAlts={1, 2}, input='*c'\n"; - assertEquals(expecting, this.stderrDuringParse); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestGraphNodes.java b/tool/test/org/antlr/v4/xtest/TestGraphNodes.java deleted file mode 100644 index 7f7f8f3ba..000000000 --- a/tool/test/org/antlr/v4/xtest/TestGraphNodes.java +++ /dev/null @@ -1,906 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.atn.ArrayPredictionContext; -import org.antlr.v4.runtime.atn.PredictionContext; -import org.antlr.v4.runtime.atn.PredictionContextCache; -import org.antlr.v4.runtime.atn.SingletonPredictionContext; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; - -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.IdentityHashMap; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -public class TestGraphNodes { - PredictionContextCache contextCache; - - @Before - public void setUp() { - PredictionContext.globalNodeCount = 1; - contextCache = new PredictionContextCache(); - } - - public boolean rootIsWildcard() { return true; } - public boolean fullCtx() { return false; } - - @Test public void test_$_$() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, - PredictionContext.EMPTY, - rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"*\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_$_$_fullctx() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, - PredictionContext.EMPTY, - fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"$\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Test public void test_x_$() { - PredictionContext r = PredictionContext.merge(x(), PredictionContext.EMPTY, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"*\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_x_$_fullctx() { - PredictionContext r = PredictionContext.merge(x(), PredictionContext.EMPTY, fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|$\"];\n" + - " s1[label=\"$\"];\n" + - " s0:p0->s1[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Test public void test_$_x() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, x(), 
rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"*\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_$_x_fullctx() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, x(), fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|$\"];\n" + - " s1[label=\"$\"];\n" + - " s0:p0->s1[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Test public void test_a_a() { - PredictionContext r = PredictionContext.merge(a(), a(), rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_a$_ax() { - PredictionContext a1 = a(); - PredictionContext x = x(); - PredictionContext a2 = createSingleton(x, 1); - PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_a$_ax_fullctx() { - PredictionContext a1 = a(); - PredictionContext x = x(); - PredictionContext a2 = createSingleton(x, 1); - PredictionContext r = PredictionContext.merge(a1, a2, fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[shape=record, label=\"|$\"];\n" + - " s2[label=\"$\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1:p0->s2[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Test public void test_ax$_a$() { - PredictionContext x = x(); - PredictionContext a1 = createSingleton(x, 1); - PredictionContext a2 = a(); - PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_aa$_a$_$_fullCtx() { - PredictionContext empty = PredictionContext.EMPTY; - PredictionContext child1 = createSingleton(empty, 8); - PredictionContext right = PredictionContext.merge(empty, child1, false, null); - PredictionContext left = createSingleton(right, 8); - PredictionContext merged = PredictionContext.merge(left, right, false, null); - String actual = toDOTString(merged, false); - System.out.println(actual); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|$\"];\n" + - " s1[shape=record, label=\"|$\"];\n" + - " s2[label=\"$\"];\n" + - " s0:p0->s1[label=\"8\"];\n" + - " s1:p0->s2[label=\"8\"];\n" + - "}\n"; - assertEquals(expecting, actual); - } - - @Test public void test_ax$_a$_fullctx() { - PredictionContext x = x(); - PredictionContext a1 = createSingleton(x, 1); - PredictionContext a2 = a(); - PredictionContext r = PredictionContext.merge(a1, a2, 
fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[shape=record, label=\"|$\"];\n" + - " s2[label=\"$\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1:p0->s2[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Test public void test_a_b() { - PredictionContext r = PredictionContext.merge(a(), b(), rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_ax_ax_same() { - PredictionContext x = x(); - PredictionContext a1 = createSingleton(x, 1); - PredictionContext a2 = createSingleton(x, 1); - PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"1\"];\n" + - " s2[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1->s2[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_ax_ax() { - PredictionContext x1 = x(); - PredictionContext x2 = x(); - PredictionContext a1 = createSingleton(x1, 1); - PredictionContext a2 = createSingleton(x2, 1); - PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"1\"];\n" + - " s2[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1->s2[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_abx_abx() { - PredictionContext x1 = x(); - PredictionContext x2 = x(); - PredictionContext b1 = createSingleton(x1, 2); - PredictionContext b2 = createSingleton(x2, 2); - PredictionContext a1 = createSingleton(b1, 1); - PredictionContext a2 = createSingleton(b2, 1); - PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"1\"];\n" + - " s2[label=\"2\"];\n" + - " s3[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1->s2[label=\"2\"];\n" + - " s2->s3[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_abx_acx() { - PredictionContext x1 = x(); - PredictionContext x2 = x(); - PredictionContext b = createSingleton(x1, 2); - PredictionContext c = createSingleton(x2, 3); - PredictionContext a1 = createSingleton(b, 1); - PredictionContext a2 = createSingleton(c, 1); - PredictionContext r = PredictionContext.merge(a1, a2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[shape=record, label=\"|\"];\n" + - " s2[label=\"2\"];\n" + - " s3[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1:p0->s2[label=\"2\"];\n" + - " s1:p1->s2[label=\"3\"];\n" + - " s2->s3[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test 
public void test_ax_bx_same() { - PredictionContext x = x(); - PredictionContext a = createSingleton(x, 1); - PredictionContext b = createSingleton(x, 2); - PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s1[label=\"1\"];\n" + - " s2[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - " s1->s2[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_ax_bx() { - PredictionContext x1 = x(); - PredictionContext x2 = x(); - PredictionContext a = createSingleton(x1, 1); - PredictionContext b = createSingleton(x2, 2); - PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s1[label=\"1\"];\n" + - " s2[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - " s1->s2[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_ax_by() { - PredictionContext a = createSingleton(x(), 1); - PredictionContext b = createSingleton(y(), 2); - PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s2[label=\"2\"];\n" + - " s3[label=\"*\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s2->s3[label=\"10\"];\n" + - " s1->s3[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_a$_bx() { - PredictionContext x2 = x(); - PredictionContext a = a(); - PredictionContext b = createSingleton(x2, 2); - PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s2[label=\"2\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s2->s1[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_a$_bx_fullctx() { - PredictionContext x2 = x(); - PredictionContext a = a(); - PredictionContext b = createSingleton(x2, 2); - PredictionContext r = PredictionContext.merge(a, b, fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s2[label=\"2\"];\n" + - " s1[label=\"$\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s2->s1[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Ignore("Known inefficiency but deferring resolving the issue for now") - @Test public void test_aex_bfx() { - // TJP: this is inefficient as it leaves the top x nodes unmerged. 
- PredictionContext x1 = x(); - PredictionContext x2 = x(); - PredictionContext e = createSingleton(x1, 5); - PredictionContext f = createSingleton(x2, 6); - PredictionContext a = createSingleton(e, 1); - PredictionContext b = createSingleton(f, 2); - PredictionContext r = PredictionContext.merge(a, b, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s2[label=\"2\"];\n" + - " s3[label=\"3\"];\n" + - " s4[label=\"*\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s2->s3[label=\"6\"];\n" + - " s3->s4[label=\"9\"];\n" + - " s1->s3[label=\"5\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - // Array merges - - @Test public void test_A$_A$_fullctx() { - ArrayPredictionContext A1 = array(PredictionContext.EMPTY); - ArrayPredictionContext A2 = array(PredictionContext.EMPTY); - PredictionContext r = PredictionContext.merge(A1, A2, fullCtx(), null); - System.out.println(toDOTString(r, fullCtx())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"$\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, fullCtx())); - } - - @Test public void test_Aab_Ac() { // a,b + c - SingletonPredictionContext a = a(); - SingletonPredictionContext b = b(); - SingletonPredictionContext c = c(); - ArrayPredictionContext A1 = array(a, b); - ArrayPredictionContext A2 = array(c); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - " s0:p2->s1[label=\"3\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aa_Aa() { - SingletonPredictionContext a1 = a(); - SingletonPredictionContext a2 = a(); - ArrayPredictionContext A1 = array(a1); - ArrayPredictionContext A2 = array(a2); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aa_Abc() { // a + b,c - SingletonPredictionContext a = a(); - SingletonPredictionContext b = b(); - SingletonPredictionContext c = c(); - ArrayPredictionContext A1 = array(a); - ArrayPredictionContext A2 = array(b, c); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - " s0:p2->s1[label=\"3\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aac_Ab() { // a,c + b - SingletonPredictionContext a = a(); - SingletonPredictionContext b = b(); - SingletonPredictionContext c = c(); - ArrayPredictionContext A1 = array(a, c); - ArrayPredictionContext A2 = array(b); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - 
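All of these graph-merge tests drive the same entry point, PredictionContext.merge(a, b, rootIsWildcard, mergeCache), and then check the result through its DOT rendering. Stripped of the harness, the basic singleton merge looks like this sketch (return states 1 and 2 are arbitrary):

    // assumes org.antlr.v4.runtime.atn.PredictionContext and SingletonPredictionContext
    SingletonPredictionContext a = SingletonPredictionContext.create(PredictionContext.EMPTY, 1);
    SingletonPredictionContext b = SingletonPredictionContext.create(PredictionContext.EMPTY, 2);
    PredictionContext merged = PredictionContext.merge(a, b, true /* rootIsWildcard */, null);
    // merged is one node with two outgoing return states (1 and 2) sharing the * parent,
    // which is exactly the shape the test_a_b DOT string above describes.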
System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - " s0:p2->s1[label=\"3\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aab_Aa() { // a,b + a - ArrayPredictionContext A1 = array(a(), b()); - ArrayPredictionContext A2 = array(a()); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aab_Ab() { // a,b + b - ArrayPredictionContext A1 = array(a(), b()); - ArrayPredictionContext A2 = array(b()); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s1[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aax_Aby() { // ax + by but in arrays - SingletonPredictionContext a = createSingleton(x(), 1); - SingletonPredictionContext b = createSingleton(y(), 2); - ArrayPredictionContext A1 = array(a); - ArrayPredictionContext A2 = array(b); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|\"];\n" + - " s2[label=\"2\"];\n" + - " s3[label=\"*\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s2->s3[label=\"10\"];\n" + - " s1->s3[label=\"9\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aax_Aay() { // ax + ay -> merged singleton a, array parent - SingletonPredictionContext a1 = createSingleton(x(), 1); - SingletonPredictionContext a2 = createSingleton(y(), 1); - ArrayPredictionContext A1 = array(a1); - ArrayPredictionContext A2 = array(a2); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[label=\"0\"];\n" + - " s1[shape=record, label=\"|\"];\n" + - " s2[label=\"*\"];\n" + - " s0->s1[label=\"1\"];\n" + - " s1:p0->s2[label=\"9\"];\n" + - " s1:p1->s2[label=\"10\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aaxc_Aayd() { // ax,c + ay,d -> merged a, array parent - SingletonPredictionContext a1 = createSingleton(x(), 1); - SingletonPredictionContext a2 = createSingleton(y(), 1); - ArrayPredictionContext A1 = array(a1, c()); - ArrayPredictionContext A2 = array(a2, d()); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s2[label=\"*\"];\n" + - " s1[shape=record, 
label=\"|\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"3\"];\n" + - " s0:p2->s2[label=\"4\"];\n" + - " s1:p0->s2[label=\"9\"];\n" + - " s1:p1->s2[label=\"10\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aaubv_Acwdx() { // au,bv + cw,dx -> [a,b,c,d]->[u,v,w,x] - SingletonPredictionContext a = createSingleton(u(), 1); - SingletonPredictionContext b = createSingleton(v(), 2); - SingletonPredictionContext c = createSingleton(w(), 3); - SingletonPredictionContext d = createSingleton(x(), 4); - ArrayPredictionContext A1 = array(a, b); - ArrayPredictionContext A2 = array(c, d); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|||\"];\n" + - " s4[label=\"4\"];\n" + - " s5[label=\"*\"];\n" + - " s3[label=\"3\"];\n" + - " s2[label=\"2\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s0:p2->s3[label=\"3\"];\n" + - " s0:p3->s4[label=\"4\"];\n" + - " s4->s5[label=\"9\"];\n" + - " s3->s5[label=\"8\"];\n" + - " s2->s5[label=\"7\"];\n" + - " s1->s5[label=\"6\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aaubv_Abvdx() { // au,bv + bv,dx -> [a,b,d]->[u,v,x] - SingletonPredictionContext a = createSingleton(u(), 1); - SingletonPredictionContext b1 = createSingleton(v(), 2); - SingletonPredictionContext b2 = createSingleton(v(), 2); - SingletonPredictionContext d = createSingleton(x(), 4); - ArrayPredictionContext A1 = array(a, b1); - ArrayPredictionContext A2 = array(b2, d); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s3[label=\"3\"];\n" + - " s4[label=\"*\"];\n" + - " s2[label=\"2\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s0:p2->s3[label=\"4\"];\n" + - " s3->s4[label=\"9\"];\n" + - " s2->s4[label=\"7\"];\n" + - " s1->s4[label=\"6\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aaubv_Abwdx() { // au,bv + bw,dx -> [a,b,d]->[u,[v,w],x] - SingletonPredictionContext a = createSingleton(u(), 1); - SingletonPredictionContext b1 = createSingleton(v(), 2); - SingletonPredictionContext b2 = createSingleton(w(), 2); - SingletonPredictionContext d = createSingleton(x(), 4); - ArrayPredictionContext A1 = array(a, b1); - ArrayPredictionContext A2 = array(b2, d); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s3[label=\"3\"];\n" + - " s4[label=\"*\"];\n" + - " s2[shape=record, label=\"|\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s0:p2->s3[label=\"4\"];\n" + - " s3->s4[label=\"9\"];\n" + - " s2:p0->s4[label=\"7\"];\n" + - " s2:p1->s4[label=\"8\"];\n" + - " s1->s4[label=\"6\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aaubv_Abvdu() { // au,bv + bv,du -> [a,b,d]->[u,v,u]; u,v shared - SingletonPredictionContext a = 
createSingleton(u(), 1); - SingletonPredictionContext b1 = createSingleton(v(), 2); - SingletonPredictionContext b2 = createSingleton(v(), 2); - SingletonPredictionContext d = createSingleton(u(), 4); - ArrayPredictionContext A1 = array(a, b1); - ArrayPredictionContext A2 = array(b2, d); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"||\"];\n" + - " s2[label=\"2\"];\n" + - " s3[label=\"*\"];\n" + - " s1[label=\"1\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s2[label=\"2\"];\n" + - " s0:p2->s1[label=\"4\"];\n" + - " s2->s3[label=\"7\"];\n" + - " s1->s3[label=\"6\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - @Test public void test_Aaubu_Acudu() { // au,bu + cu,du -> [a,b,c,d]->[u,u,u,u] - SingletonPredictionContext a = createSingleton(u(), 1); - SingletonPredictionContext b = createSingleton(u(), 2); - SingletonPredictionContext c = createSingleton(u(), 3); - SingletonPredictionContext d = createSingleton(u(), 4); - ArrayPredictionContext A1 = array(a, b); - ArrayPredictionContext A2 = array(c, d); - PredictionContext r = PredictionContext.merge(A1, A2, rootIsWildcard(), null); - System.out.println(toDOTString(r, rootIsWildcard())); - String expecting = - "digraph G {\n" + - "rankdir=LR;\n" + - " s0[shape=record, label=\"|||\"];\n" + - " s1[label=\"1\"];\n" + - " s2[label=\"*\"];\n" + - " s0:p0->s1[label=\"1\"];\n" + - " s0:p1->s1[label=\"2\"];\n" + - " s0:p2->s1[label=\"3\"];\n" + - " s0:p3->s1[label=\"4\"];\n" + - " s1->s2[label=\"6\"];\n" + - "}\n"; - assertEquals(expecting, toDOTString(r, rootIsWildcard())); - } - - - // ------------ SUPPORT ------------------------- - - protected SingletonPredictionContext a() { - return createSingleton(PredictionContext.EMPTY, 1); - } - - private SingletonPredictionContext b() { - return createSingleton(PredictionContext.EMPTY, 2); - } - - private SingletonPredictionContext c() { - return createSingleton(PredictionContext.EMPTY, 3); - } - - private SingletonPredictionContext d() { - return createSingleton(PredictionContext.EMPTY, 4); - } - - private SingletonPredictionContext u() { - return createSingleton(PredictionContext.EMPTY, 6); - } - - private SingletonPredictionContext v() { - return createSingleton(PredictionContext.EMPTY, 7); - } - - private SingletonPredictionContext w() { - return createSingleton(PredictionContext.EMPTY, 8); - } - - private SingletonPredictionContext x() { - return createSingleton(PredictionContext.EMPTY, 9); - } - - private SingletonPredictionContext y() { - return createSingleton(PredictionContext.EMPTY, 10); - } - - public SingletonPredictionContext createSingleton(PredictionContext parent, int payload) { - SingletonPredictionContext a = SingletonPredictionContext.create(parent, payload); - return a; - } - - public ArrayPredictionContext array(SingletonPredictionContext... 
nodes) { - PredictionContext[] parents = new PredictionContext[nodes.length]; - int[] invokingStates = new int[nodes.length]; - for (int i=0; i visited = new IdentityHashMap(); - Map contextIds = new IdentityHashMap(); - Deque workList = new ArrayDeque(); - visited.put(context, context); - contextIds.put(context, contextIds.size()); - workList.add(context); - while (!workList.isEmpty()) { - PredictionContext current = workList.pop(); - nodes.append(" s").append(contextIds.get(current)).append('['); - - if (current.size() > 1) { - nodes.append("shape=record, "); - } - - nodes.append("label=\""); - - if (current.isEmpty()) { - nodes.append(rootIsWildcard ? '*' : '$'); - } else if (current.size() > 1) { - for (int i = 0; i < current.size(); i++) { - if (i > 0) { - nodes.append('|'); - } - - nodes.append("'); - if (current.getReturnState(i) == PredictionContext.EMPTY_RETURN_STATE) { - nodes.append(rootIsWildcard ? '*' : '$'); - } - } - } else { - nodes.append(contextIds.get(current)); - } - - nodes.append("\"];\n"); - - if (current.isEmpty()) { - continue; - } - - for (int i = 0; i < current.size(); i++) { - if (current.getReturnState(i) == PredictionContext.EMPTY_RETURN_STATE) { - continue; - } - - if (visited.put(current.getParent(i), current.getParent(i)) == null) { - contextIds.put(current.getParent(i), contextIds.size()); - workList.push(current.getParent(i)); - } - - edges.append(" s").append(contextIds.get(current)); - if (current.size() > 1) { - edges.append(":p").append(i); - } - - edges.append("->"); - edges.append('s').append(contextIds.get(current.getParent(i))); - edges.append("[label=\"").append(current.getReturnState(i)).append("\"]"); - edges.append(";\n"); - } - } - - StringBuilder builder = new StringBuilder(); - builder.append("digraph G {\n"); - builder.append("rankdir=LR;\n"); - builder.append(nodes); - builder.append(edges); - builder.append("}\n"); - return builder.toString(); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestIntervalSet.java b/tool/test/org/antlr/v4/xtest/TestIntervalSet.java deleted file mode 100644 index d22803770..000000000 --- a/tool/test/org/antlr/v4/xtest/TestIntervalSet.java +++ /dev/null @@ -1,453 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.misc.IntervalSet; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class TestIntervalSet extends BaseTest { - - /** Public default constructor used by TestRig */ - public TestIntervalSet() { - } - - @Test public void testSingleElement() throws Exception { - IntervalSet s = IntervalSet.of(99); - String expecting = "99"; - assertEquals(s.toString(), expecting); - } - - @Test public void testMin() throws Exception { - assertEquals(0, IntervalSet.COMPLETE_CHAR_SET.getMinElement()); - assertEquals(Token.EPSILON, IntervalSet.COMPLETE_CHAR_SET.or(IntervalSet.of(Token.EPSILON)).getMinElement()); - assertEquals(Token.EOF, IntervalSet.COMPLETE_CHAR_SET.or(IntervalSet.of(Token.EOF)).getMinElement()); - } - - @Test public void testIsolatedElements() throws Exception { - IntervalSet s = new IntervalSet(); - s.add(1); - s.add('z'); - s.add('\uFFF0'); - String expecting = "{1, 122, 65520}"; - assertEquals(s.toString(), expecting); - } - - @Test public void testMixedRangesAndElements() throws Exception { - IntervalSet s = new IntervalSet(); - s.add(1); - s.add('a','z'); - s.add('0','9'); - String expecting = "{1, 48..57, 97..122}"; - assertEquals(s.toString(), expecting); - } - - @Test public void testSimpleAnd() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(13,15); - String expecting = "{13..15}"; - String result = (s.and(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testRangeAndIsolatedElement() throws Exception { - IntervalSet s = IntervalSet.of('a','z'); - IntervalSet s2 = IntervalSet.of('d'); - String expecting = "100"; - String result = (s.and(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testEmptyIntersection() throws Exception { - IntervalSet s = IntervalSet.of('a','z'); - IntervalSet s2 = IntervalSet.of('0','9'); - String expecting = "{}"; - String result = (s.and(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testEmptyIntersectionSingleElements() throws Exception { - IntervalSet s = IntervalSet.of('a'); - IntervalSet s2 = IntervalSet.of('d'); - String expecting = "{}"; - String result = (s.and(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testNotSingleElement() throws Exception { - IntervalSet vocabulary = IntervalSet.of(1,1000); - vocabulary.add(2000,3000); - IntervalSet s = IntervalSet.of(50,50); - String expecting = "{1..49, 51..1000, 2000..3000}"; - String result = (s.complement(vocabulary)).toString(); - assertEquals(expecting, result); - } - - @Test public void testNotSet() throws Exception { - IntervalSet vocabulary = IntervalSet.of(1,1000); - IntervalSet s = IntervalSet.of(50,60); - s.add(5); - s.add(250,300); - String expecting = "{1..4, 6..49, 
61..249, 301..1000}"; - String result = (s.complement(vocabulary)).toString(); - assertEquals(expecting, result); - } - - @Test public void testNotEqualSet() throws Exception { - IntervalSet vocabulary = IntervalSet.of(1,1000); - IntervalSet s = IntervalSet.of(1,1000); - String expecting = "{}"; - String result = (s.complement(vocabulary)).toString(); - assertEquals(expecting, result); - } - - @Test public void testNotSetEdgeElement() throws Exception { - IntervalSet vocabulary = IntervalSet.of(1,2); - IntervalSet s = IntervalSet.of(1); - String expecting = "2"; - String result = (s.complement(vocabulary)).toString(); - assertEquals(expecting, result); - } - - @Test public void testNotSetFragmentedVocabulary() throws Exception { - IntervalSet vocabulary = IntervalSet.of(1,255); - vocabulary.add(1000,2000); - vocabulary.add(9999); - IntervalSet s = IntervalSet.of(50, 60); - s.add(3); - s.add(250,300); - s.add(10000); // this is outside range of vocab and should be ignored - String expecting = "{1..2, 4..49, 61..249, 1000..2000, 9999}"; - String result = (s.complement(vocabulary)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSubtractOfCompletelyContainedRange() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(12,15); - String expecting = "{10..11, 16..20}"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSubtractFromSetWithEOF() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - s.add(Token.EOF); - IntervalSet s2 = IntervalSet.of(12,15); - String expecting = "{, 10..11, 16..20}"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSubtractOfOverlappingRangeFromLeft() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(5,11); - String expecting = "{12..20}"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - - IntervalSet s3 = IntervalSet.of(5,10); - expecting = "{11..20}"; - result = (s.subtract(s3)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSubtractOfOverlappingRangeFromRight() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(15,25); - String expecting = "{10..14}"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - - IntervalSet s3 = IntervalSet.of(20,25); - expecting = "{10..19}"; - result = (s.subtract(s3)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSubtractOfCompletelyCoveredRange() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(1,25); - String expecting = "{}"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSubtractOfRangeSpanningMultipleRanges() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - s.add(30,40); - s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60 - IntervalSet s2 = IntervalSet.of(5,55); // covers one and touches 2nd range - String expecting = "{56..60}"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - - IntervalSet s3 = IntervalSet.of(15,55); // touches both - expecting = "{10..14, 56..60}"; - result = (s.subtract(s3)).toString(); - assertEquals(expecting, result); - } - - /** The following was broken: - {0..113, 115..65534}-{0..115, 117..65534}=116..65534 - */ - @Test public 
void testSubtractOfWackyRange() throws Exception { - IntervalSet s = IntervalSet.of(0,113); - s.add(115,200); - IntervalSet s2 = IntervalSet.of(0,115); - s2.add(117,200); - String expecting = "116"; - String result = (s.subtract(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testSimpleEquals() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(10,20); - assertEquals(s, s2); - - IntervalSet s3 = IntervalSet.of(15,55); - assertFalse(s.equals(s3)); - } - - @Test public void testEquals() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - s.add(2); - s.add(499,501); - IntervalSet s2 = IntervalSet.of(10,20); - s2.add(2); - s2.add(499,501); - assertEquals(s, s2); - - IntervalSet s3 = IntervalSet.of(10,20); - s3.add(2); - assertFalse(s.equals(s3)); - } - - @Test public void testSingleElementMinusDisjointSet() throws Exception { - IntervalSet s = IntervalSet.of(15,15); - IntervalSet s2 = IntervalSet.of(1,5); - s2.add(10,20); - String expecting = "{}"; // 15 - {1..5, 10..20} = {} - String result = s.subtract(s2).toString(); - assertEquals(expecting, result); - } - - @Test public void testMembership() throws Exception { - IntervalSet s = IntervalSet.of(15,15); - s.add(50,60); - assertTrue(!s.contains(0)); - assertTrue(!s.contains(20)); - assertTrue(!s.contains(100)); - assertTrue(s.contains(15)); - assertTrue(s.contains(55)); - assertTrue(s.contains(50)); - assertTrue(s.contains(60)); - } - - // {2,15,18} & 10..20 - @Test public void testIntersectionWithTwoContainedElements() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(2,2); - s2.add(15); - s2.add(18); - String expecting = "{15, 18}"; - String result = (s.and(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testIntersectionWithTwoContainedElementsReversed() throws Exception { - IntervalSet s = IntervalSet.of(10,20); - IntervalSet s2 = IntervalSet.of(2,2); - s2.add(15); - s2.add(18); - String expecting = "{15, 18}"; - String result = (s2.and(s)).toString(); - assertEquals(expecting, result); - } - - @Test public void testComplement() throws Exception { - IntervalSet s = IntervalSet.of(100,100); - s.add(101,101); - IntervalSet s2 = IntervalSet.of(100,102); - String expecting = "102"; - String result = (s.complement(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testComplement2() throws Exception { - IntervalSet s = IntervalSet.of(100,101); - IntervalSet s2 = IntervalSet.of(100,102); - String expecting = "102"; - String result = (s.complement(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testComplement3() throws Exception { - IntervalSet s = IntervalSet.of(1,96); - s.add(99, Lexer.MAX_CHAR_VALUE); - String expecting = "{97..98}"; - String result = (s.complement(1, Lexer.MAX_CHAR_VALUE)).toString(); - assertEquals(expecting, result); - } - - @Test public void testMergeOfRangesAndSingleValues() throws Exception { - // {0..41, 42, 43..65534} - IntervalSet s = IntervalSet.of(0,41); - s.add(42); - s.add(43,65534); - String expecting = "{0..65534}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void testMergeOfRangesAndSingleValuesReverse() throws Exception { - IntervalSet s = IntervalSet.of(43,65534); - s.add(42); - s.add(0,41); - String expecting = "{0..65534}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void 
testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception { - // 42, 10, {0..9, 11..41, 43..65534} - IntervalSet s = IntervalSet.of(42); - s.add(10); - s.add(0,9); - s.add(43,65534); - s.add(11,41); - String expecting = "{0..65534}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - /** - * This case is responsible for antlr/antlr4#153. - * https://github.com/antlr/antlr4/issues/153 - */ - @Test public void testMergeWhereAdditionMergesThreeExistingIntervals() throws Exception { - IntervalSet s = new IntervalSet(); - s.add(0); - s.add(3); - s.add(5); - s.add(0, 7); - String expecting = "{0..7}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void testMergeWithDoubleOverlap() throws Exception { - IntervalSet s = IntervalSet.of(1,10); - s.add(20,30); - s.add(5,25); // overlaps two! - String expecting = "{1..30}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void testSize() throws Exception { - IntervalSet s = IntervalSet.of(20,30); - s.add(50,55); - s.add(5,19); - String expecting = "32"; - String result = String.valueOf(s.size()); - assertEquals(expecting, result); - } - - @Test public void testToList() throws Exception { - IntervalSet s = IntervalSet.of(20,25); - s.add(50,55); - s.add(5,5); - String expecting = "[5, 20, 21, 22, 23, 24, 25, 50, 51, 52, 53, 54, 55]"; - String result = String.valueOf(s.toList()); - assertEquals(expecting, result); - } - - /** The following was broken: - {'\u0000'..'s', 'u'..'\uFFFE'} & {'\u0000'..'q', 's'..'\uFFFE'}= - {'\u0000'..'q', 's'}!!!! broken... - 'q' is 113 ascii - 'u' is 117 - */ - @Test public void testNotRIntersectionNotT() throws Exception { - IntervalSet s = IntervalSet.of(0,'s'); - s.add('u',200); - IntervalSet s2 = IntervalSet.of(0,'q'); - s2.add('s',200); - String expecting = "{0..113, 115, 117..200}"; - String result = (s.and(s2)).toString(); - assertEquals(expecting, result); - } - - @Test public void testRmSingleElement() throws Exception { - IntervalSet s = IntervalSet.of(1,10); - s.add(-3,-3); - s.remove(-3); - String expecting = "{1..10}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void testRmLeftSide() throws Exception { - IntervalSet s = IntervalSet.of(1,10); - s.add(-3,-3); - s.remove(1); - String expecting = "{-3, 2..10}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void testRmRightSide() throws Exception { - IntervalSet s = IntervalSet.of(1,10); - s.add(-3,-3); - s.remove(10); - String expecting = "{-3, 1..9}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - @Test public void testRmMiddleRange() throws Exception { - IntervalSet s = IntervalSet.of(1,10); - s.add(-3,-3); - s.remove(5); - String expecting = "{-3, 1..4, 6..10}"; - String result = s.toString(); - assertEquals(expecting, result); - } - - -} diff --git a/tool/test/org/antlr/v4/xtest/TestLeftRecursion.java b/tool/test/org/antlr/v4/xtest/TestLeftRecursion.java deleted file mode 100644 index 67e3ccd27..000000000 --- a/tool/test/org/antlr/v4/xtest/TestLeftRecursion.java +++ /dev/null @@ -1,732 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.tool.ErrorType; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -/** */ -public class TestLeftRecursion extends BaseTest { - protected boolean debug = false; - - @Test public void testSimple() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a ID\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x", debug); - String expecting = "(s (a x))\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y", debug); - expecting = "(s (a (a x) y))\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y z", debug); - expecting = "(s (a (a (a x) y) z))\n"; - assertEquals(expecting, found); - } - - /** - * This is a regression test for "Support direct calls to left-recursive - * rules". - * https://github.com/antlr/antlr4/issues/161 - */ - @Test public void testDirectCallToLeftRecursiveRule() throws Exception { - String grammar = - "grammar T;\n" + - "a @after {System.out.println($ctx.toStringTree(this));} : a ID\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - String expecting = "(a x)\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x y", debug); - expecting = "(a (a x) y)\n"; - assertEquals(expecting, found); - - found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x y z", debug); - expecting = "(a (a (a x) y) z)\n"; - assertEquals(expecting, found); - } - - @Test public void testSemPred() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a {true}? 
ID\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y z", debug); - String expecting = "(s (a (a (a x) y) z))\n"; - assertEquals(expecting, found); - } - - @Test - public void testSemPredFailOption() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a ID {false}?\n" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "s", "x y z", debug); - String expecting = "(s (a (a x) y z))\n"; - assertEquals(expecting, found); - assertEquals("line 1:4 rule a custom message\n", stderrDuringParse); - } - - @Test public void testTernaryExpr() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow or 'a' won't match - "e : e '*' e" + - " | e '+' e" + - " | e '?' e ':' e" + - " | e '=' e" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (e a) )", - "a+b", "(s (e (e a) + (e b)) )", - "a*b", "(s (e (e a) * (e b)) )", - "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", - "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", - "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", - "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#542 "First alternative cannot - * be right-associative". - * https://github.com/antlr/antlr4/issues/542 - */ - @Test public void testTernaryExprExplicitAssociativity() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow or 'a' won't match - "e : e '*' e" + - " | e '+' e" + - " | e '?' e ':' e" + - " | e '=' e" + - " | ID" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (e a) )", - "a+b", "(s (e (e a) + (e b)) )", - "a*b", "(s (e (e a) * (e b)) )", - "a?b:c", "(s (e (e a) ? (e b) : (e c)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a?b+c:d", "(s (e (e a) ? (e (e b) + (e c)) : (e d)) )", - "a?b=c:d", "(s (e (e a) ? (e (e b) = (e c)) : (e d)) )", - "a? b?c:d : e", "(s (e (e a) ? (e (e b) ? (e c) : (e d)) : (e e)) )", - "a?b: c?d:e", "(s (e (e a) ? (e b) : (e (e c) ? (e d) : (e e))) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testExpressions() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow - "e : e '.' ID\n" + - " | e '.' 'this'\n" + - " | '-' e\n" + - " | e '*' e\n" + - " | e ('+'|'-') e\n" + - " | INT\n" + - " | ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (e a) )", - "1", "(s (e 1) )", - "a-1", "(s (e (e a) - (e 1)) )", - "a.b", "(s (e (e a) . b) )", - "a.this", "(s (e (e a) . 
this) )", - "-a", "(s (e - (e a)) )", - "-a+b", "(s (e (e - (e a)) + (e b)) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testJavaExpressions() throws Exception { - // Generates about 7k in bytecodes for generated e_ rule; - // Well within the 64k method limit. e_primary compiles - // to about 2k in bytecodes. - // this is simplified from real java - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e EOF ;\n" + // must indicate EOF can follow - "expressionList\n" + - " : e (',' e)*\n" + - " ;\n" + - "e : '(' e ')'\n" + - " | 'this' \n" + - " | 'super'\n" + - " | INT\n" + - " | ID\n" + - " | type '.' 'class'\n" + - " | e '.' ID\n" + - " | e '.' 'this'\n" + - " | e '.' 'super' '(' expressionList? ')'\n" + - " | e '.' 'new' ID '(' expressionList? ')'\n" + - " | 'new' type ( '(' expressionList? ')' | ('[' e ']')+)\n" + - " | e '[' e ']'\n" + - " | '(' type ')' e\n" + - " | e ('++' | '--')\n" + - " | e '(' expressionList? ')'\n" + - " | ('+'|'-'|'++'|'--') e\n" + - " | ('~'|'!') e\n" + - " | e ('*'|'/'|'%') e\n" + - " | e ('+'|'-') e\n" + - " | e ('<<' | '>>>' | '>>') e\n" + - " | e ('<=' | '>=' | '>' | '<') e\n" + - " | e 'instanceof' e\n" + - " | e ('==' | '!=') e\n" + - " | e '&' e\n" + - " | e '^' e\n" + - " | e '|' e\n" + - " | e '&&' e\n" + - " | e '||' e\n" + - " | e '?' e ':' e\n" + - " |" + - " e ('='\n" + - " |'+='\n" + - " |'-='\n" + - " |'*='\n" + - " |'/='\n" + - " |'&='\n" + - " |'|='\n" + - " |'^='\n" + - " |'>>='\n" + - " |'>>>='\n" + - " |'<<='\n" + - " |'%=') e\n" + - " ;\n" + - "type: ID \n" + - " | ID '[' ']'\n" + - " | 'int'\n" + - " | 'int' '[' ']' \n" + - " ;\n" + - "ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a|b&c", "(s (e (e a) | (e (e b) & (e c))) )", - "(a|b)&c", "(s (e (e ( (e (e a) | (e b)) )) & (e c)) )", - "a > b", "(s (e (e a) > (e b)) )", - "a >> b", "(s (e (e a) >> (e b)) )", - "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )", - "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )", - "(T)x", "(s (e ( (type T) ) (e x)) )", - "new A().b", "(s (e (e new (type A) ( )) . b) )", - "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )", - "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )", - "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . 
g) ( (expressionList (e x) , (e 1)) )) )", - "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testDeclarations() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : declarator EOF ;\n" + // must indicate EOF can follow - "declarator\n" + - " : declarator '[' e ']'\n" + - " | declarator '[' ']'\n" + - " | declarator '(' ')'\n" + - " | '*' declarator\n" + // binds less tight than suffixes - " | '(' declarator ')'\n" + - " | ID\n" + - " ;\n" + - "e : INT ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "(s (declarator a) )", - "*a", "(s (declarator * (declarator a)) )", - "**a", "(s (declarator * (declarator * (declarator a))) )", - "a[3]", "(s (declarator (declarator a) [ (e 3) ]) )", - "b[]", "(s (declarator (declarator b) [ ]) )", - "(a)", "(s (declarator ( (declarator a) )) )", - "a[]()", "(s (declarator (declarator (declarator a) [ ]) ( )) )", - "a[][]", "(s (declarator (declarator (declarator a) [ ]) [ ]) )", - "*a[]", "(s (declarator * (declarator (declarator a) [ ])) )", - "(*a)[]", "(s (declarator (declarator ( (declarator * (declarator a)) )) [ ]) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testReturnValueAndActions() throws Exception { - String grammar = - "grammar T;\n" + - "s : e {System.out.println($e.v);} ;\n" + - "e returns [int v, List ignored]\n" + - " : a=e '*' b=e {$v = $a.v * $b.v;}\n" + - " | a=e '+' b=e {$v = $a.v + $b.v;}\n" + - " | INT {$v = $INT.int;}\n" + - " | '(' x=e ')' {$v = $x.v;}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "4", - "1+2", "3", - "1+2*3", "7", - "(1+2)*3", "9", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#677 "labels not working in - * grammar file". - * https://github.com/antlr/antlr4/issues/677 - * - *

- * This test treats {@code ,} and {@code >>} as part of a single compound operator (similar to a ternary operator).
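- * <p>
- * Hedged aside for readers of this patch (not part of the original test): with the
- * {@code b+=expr} list label in the {@code #Send} alternative below, the parser
- * ANTLR would generate from this grammar exposes the collected operands on the
- * alternative's context, so a listener over the tree for {@code a,c>>x} could read
- * them roughly as follows. {@code TParser}, {@code TBaseListener} and
- * {@code SendContext} here are the names such generated code would use, assumed
- * for illustration only.</p>
- *
- * <pre>{@code
- * // sketch only; TParser/TBaseListener stand for the classes ANTLR would
- * // generate from the grammar string in this test
- * class SendPrinter extends TBaseListener {
- *     public void exitSend(TParser.SendContext ctx) {
- *         // ctx.b holds every expr matched by b+=expr; ctx.c is the single target expr
- *         System.out.println(ctx.b.size() + " operand(s) sent to " + ctx.c.getText());
- *     }
- * }
- * }</pre>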

- */ - @Test public void testReturnValueAndActionsList1() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + - "expr:\n" + - " a=expr '*' a=expr #Factor\n" + - " | b+=expr (',' b+=expr)* '>>' c=expr #Send\n" + - " | ID #JustId //semantic check on modifiers\n" + - ";\n" + - "\n" + - "ID : ('a'..'z'|'A'..'Z'|'_')\n" + - " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + - ";\n" + - "\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String[] tests = { - "a*b", "(s (expr (expr a) * (expr b)) )", - "a,c>>x", "(s (expr (expr a) , (expr c) >> (expr x)) )", - "x", "(s (expr x) )", - "a*b,c,x*y>>r", "(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) )", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#677 "labels not working in - * grammar file". - * https://github.com/antlr/antlr4/issues/677 - * - *

- * This test treats the {@code ,} and {@code >>} operators separately.
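- * <p>
- * Hedged aside (not part of the original test): because {@code ','} and
- * {@code '>>'} get separate labeled alternatives here, the generated parser would
- * emit distinct {@code CommaContext} and {@code SendContext} classes, and for
- * {@code a,c>>x} the comma expression nests inside the send's left operand rather
- * than being collected into one flat list. The class names below are the ones
- * such generated code would use, assumed for illustration only.</p>
- *
- * <pre>{@code
- * // sketch only; TParser/TBaseListener stand for the classes ANTLR would
- * // generate from the grammar string in this test
- * class SendPrinter extends TBaseListener {
- *     public void exitSend(TParser.SendContext ctx) {
- *         // b+=expr appears once in #Send, so ctx.b holds exactly one (possibly comma) operand
- *         System.out.println(ctx.b.get(0).getText() + " >> " + ctx.c.getText());
- *     }
- * }
- * }</pre>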

- */ - @Test public void testReturnValueAndActionsList2() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" + - "expr:\n" + - " a=expr '*' a=expr #Factor\n" + - " | b+=expr ',' b+=expr #Comma\n" + - " | b+=expr '>>' c=expr #Send\n" + - " | ID #JustId //semantic check on modifiers\n" + - ";\n" + - "\n" + - "ID : ('a'..'z'|'A'..'Z'|'_')\n" + - " ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" + - ";\n" + - "\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String[] tests = { - "a*b", "(s (expr (expr a) * (expr b)) )", - "a,c>>x", "(s (expr (expr (expr a) , (expr c)) >> (expr x)) )", - "x", "(s (expr x) )", - "a*b,c,x*y>>r", "(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) )", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testLabelsOnOpSubrule() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + - "e : a=e op=('*'|'/') b=e {}\n" + - " | INT {}\n" + - " | '(' x=e ')' {}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testReturnValueAndActionsAndLabels() throws Exception { - String grammar = - "grammar T;\n" + - "s : q=e {System.out.println($e.v);} ;\n" + - "\n" + - "e returns [int v]\n" + - " : a=e op='*' b=e {$v = $a.v * $b.v;} # mult\n" + - " | a=e '+' b=e {$v = $a.v + $b.v;} # add\n" + - " | INT {$v = $INT.int;} # anInt\n" + - " | '(' x=e ')' {$v = $x.v;} # parens\n" + - " | x=e '++' {$v = $x.v+1;} # inc\n" + - " | e '--' # dec\n" + - " | ID {$v = 3;} # anID\n" + - " ; \n" + - "\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "4", - "1+2", "3", - "1+2*3", "7", - "i++*3", "12", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#433 "Not all context accessor - * methods are generated when an alternative rule label is used for multiple - * alternatives". 
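- * <p>
- * Hedged aside (not part of the original test; see the issue link below): when
- * several alternatives share one label such as {@code # binary}, ANTLR folds them
- * into a single generated context, so accessors like {@code e(int)} must cover
- * every alternative carrying that label. A rough usage sketch, assuming the
- * {@code TParser}/{@code TBaseListener} names such generated code would use:</p>
- *
- * <pre>{@code
- * // sketch only; TParser/TBaseListener stand for the classes ANTLR would
- * // generate from the grammar string in this test
- * class BinaryPrinter extends TBaseListener {
- *     public void exitBinary(TParser.BinaryContext ctx) {
- *         // e(0)/e(1) are the shared accessors this regression test relies on
- *         System.out.println(ctx.e(0).getText() + " and " + ctx.e(1).getText());
- *     }
- * }
- * }</pre>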
- * https://github.com/antlr/antlr4/issues/433 - */ - @Test public void testMultipleAlternativesWithCommonLabel() throws Exception { - String grammar = - "grammar T;\n" + - "s : e {System.out.println($e.v);} ;\n" + - "\n" + - "e returns [int v]\n" + - " : e '*' e {$v = ((BinaryContext)$ctx).e(0).v * ((BinaryContext)$ctx).e(1).v;} # binary\n" + - " | e '+' e {$v = ((BinaryContext)$ctx).e(0).v + ((BinaryContext)$ctx).e(1).v;} # binary\n" + - " | INT {$v = $INT.int;} # anInt\n" + - " | '(' e ')' {$v = $e.v;} # parens\n" + - " | left=e INC {assert(((UnaryContext)$ctx).INC() != null); $v = $left.v + 1;} # unary\n" + - " | left=e DEC {assert(((UnaryContext)$ctx).DEC() != null); $v = $left.v - 1;} # unary\n" + - " | ID {$v = 3;} # anID\n" + - " ; \n" + - "\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "INC : '++' ;\n" + - "DEC : '--' ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "4", - "1+2", "3", - "1+2*3", "7", - "i++*3", "12", - }; - runTests(grammar, tests, "s"); - } - - @Test public void testPrefixOpWithActionAndLabel() throws Exception { - String grammar = - "grammar T;\n" + - "s : e {System.out.println($e.result);} ;\n" + - "\n" + - "e returns [String result]\n" + - " : ID '=' e1=e { $result = \"(\" + $ID.getText() + \"=\" + $e1.result + \")\"; }\n" + - " | ID { $result = $ID.getText(); }\n" + - " | e1=e '+' e2=e { $result = \"(\" + $e1.result + \"+\" + $e2.result + \")\"; }\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "a", "a", - "a+b", "(a+b)", - "a=b+c", "((a=b)+c)", - }; - runTests(grammar, tests, "s"); - } - - @Test - public void testAmbigLR() throws Exception { - String grammar = - "grammar Expr;\n" + - "prog: stat ;\n" + - "stat: expr NEWLINE # printExpr\n" + - " | ID '=' expr NEWLINE # assign\n" + - " | NEWLINE # blank\n" + - " ;\n" + - "expr: expr ('*'|'/') expr # MulDiv\n" + - " | expr ('+'|'-') expr # AddSub\n" + - " | INT # int\n" + - " | ID # id\n" + - " | '(' expr ')' # parens\n" + - " ;\n" + - "\n" + - "MUL : '*' ; // assigns token name to '*' used above in grammar\n" + - "DIV : '/' ;\n" + - "ADD : '+' ;\n" + - "SUB : '-' ;\n" + - "ID : [a-zA-Z]+ ; // match identifiers\n" + - "INT : [0-9]+ ; // match integers\n" + - "NEWLINE:'\\r'? 
'\\n' ; // return newlines to parser (is end-statement signal)\n" + - "WS : [ \\t]+ -> skip ; // toss out whitespace\n"; - String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "1\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a = 5\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "b = 6\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "a+b*2\n", true); - assertNull(stderrDuringParse); - - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "(1+2)*3\n", true); - assertNull(stderrDuringParse); - } - - @Test public void testCheckForNonLeftRecursiveRule() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String expected = - "error(" + ErrorType.NO_NON_LR_ALTS.code + "): T.g4:3:0: left recursive rule a must contain an alternative which is not left recursive\n"; - testErrors(new String[] { grammar, expected }, false); - } - - @Test public void testCheckForLeftRecursiveEmptyFollow() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : a ;\n" + - "a : a ID?\n" + - " | ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String expected = - "error(" + ErrorType.EPSILON_LR_FOLLOW.code + "): T.g4:3:0: left recursive rule a contains a left recursive alternative which can be followed by the empty string\n"; - testErrors(new String[] { grammar, expected }, false); - } - - /** - * This is a regression test for #239 "recoursive parser using implicit - * tokens ignore white space lexer rule". - * https://github.com/antlr/antlr4/issues/239 - */ - @Test public void testWhitespaceInfluence() { - String grammar = - "grammar Expr;\n" + - "prog : expression EOF;\n" + - "expression\n" + - " : ID '(' expression (',' expression)* ')' # doFunction\n" + - " | '(' expression ')' # doParenthesis\n" + - " | '!' expression # doNot\n" + - " | '-' expression # doNegate\n" + - " | '+' expression # doPositiv\n" + - " | expression '^' expression # doPower\n" + - " | expression '*' expression # doMultipy\n" + - " | expression '/' expression # doDivide\n" + - " | expression '%' expression # doModulo\n" + - " | expression '-' expression # doMinus\n" + - " | expression '+' expression # doPlus\n" + - " | expression '=' expression # doEqual\n" + - " | expression '!=' expression # doNotEqual\n" + - " | expression '>' expression # doGreather\n" + - " | expression '>=' expression # doGreatherEqual\n" + - " | expression '<' expression # doLesser\n" + - " | expression '<=' expression # doLesserEqual\n" + - " | expression K_IN '(' expression (',' expression)* ')' # doIn\n" + - " | expression ( '&' | K_AND) expression # doAnd\n" + - " | expression ( '|' | K_OR) expression # doOr\n" + - " | '[' expression (',' expression)* ']' # newArray\n" + - " | K_TRUE # newTrueBoolean\n" + - " | K_FALSE # newFalseBoolean\n" + - " | NUMBER # newNumber\n" + - " | DATE # newDateTime\n" + - " | ID # newIdentifier\n" + - " | SQ_STRING # newString\n" + - " | K_NULL # newNull\n" + - " ;\n" + - "\n" + - "// Fragments\n" + - "fragment DIGIT : '0' .. '9'; \n" + - "fragment UPPER : 'A' .. 'Z';\n" + - "fragment LOWER : 'a' .. 
'z';\n" + - "fragment LETTER : LOWER | UPPER;\n" + - "fragment WORD : LETTER | '_' | '$' | '#' | '.';\n" + - "fragment ALPHANUM : WORD | DIGIT; \n" + - "\n" + - "// Tokens\n" + - "ID : LETTER ALPHANUM*;\n" + - "NUMBER : DIGIT+ ('.' DIGIT+)? (('e'|'E')('+'|'-')? DIGIT+)?;\n" + - "DATE : '\\'' DIGIT DIGIT DIGIT DIGIT '-' DIGIT DIGIT '-' DIGIT DIGIT (' ' DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT ('.' DIGIT+)?)? '\\'';\n" + - "SQ_STRING : '\\'' ('\\'\\'' | ~'\\'')* '\\'';\n" + - "DQ_STRING : '\"' ('\\\\\"' | ~'\"')* '\"';\n" + - "WS : [ \\t\\n\\r]+ -> skip ;\n" + - "COMMENTS : ('/*' .*? '*/' | '//' ~'\\n'* '\\n' ) -> skip;\n"; - - String expected = - ""; - String result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "Test(1,3)", false); - assertEquals(expected, result); - assertNull(stderrDuringParse); - - expected = - ""; - result = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "prog", "Test(1, 3)", false); - assertEquals(expected, result); - assertNull(stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#509 "Incorrect rule chosen in - * unambiguous grammar". - * https://github.com/antlr/antlr4/issues/509 - */ - @Test public void testPrecedenceFilterConsidersContext() throws Exception { - String grammar = - "grammar T;\n" + - "prog\n" + - "@after {System.out.println($ctx.toStringTree(this));}\n" + - ": statement* EOF {};\n" + - "statement: letterA | statement letterA 'b' ;\n" + - "letterA: 'a';\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", - "aa", false); - assertEquals("(prog (statement (letterA a)) (statement (letterA a)) )\n", found); - } - - /** - * This is a regression test for antlr/antlr4#625 "Duplicate action breaks - * operator precedence" - * https://github.com/antlr/antlr4/issues/625 - */ - @Test public void testMultipleActions() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + - "e : a=e op=('*'|'/') b=e {}{}\n" + - " | INT {}{}\n" + - " | '(' x=e ')' {}{}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", - }; - runTests(grammar, tests, "s"); - } - - /** - * This is a regression test for antlr/antlr4#625 "Duplicate action breaks - * operator precedence" - * https://github.com/antlr/antlr4/issues/625 - */ - @Test public void testMultipleActionsPredicatesOptions() throws Exception { - String grammar = - "grammar T;\n" + - "s @after {System.out.println($ctx.toStringTree(this));} : e ;\n" + - "e : a=e op=('*'|'/') b=e {}{true}?\n" + - " | a=e op=('+'|'-') b=e {}{true}?\n" + - " | INT {}{}\n" + - " | '(' x=e ')' {}{}\n" + - " ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - String[] tests = { - "4", "(s (e 4))", - "1*2/3", "(s (e (e (e 1) * (e 2)) / (e 3)))", - "(1/2)*3", "(s (e (e ( (e (e 1) / (e 2)) )) * (e 3)))", - }; - runTests(grammar, tests, "s"); - } - - public void runTests(String grammar, String[] tests, String startRule) { - boolean success = rawGenerateAndBuildRecognizer("T.g4", grammar, "TParser", "TLexer"); - assertTrue(success); - writeRecognizerAndCompile("TParser", - "TLexer", - startRule, - debug, - false); - - for (int i=0; i "+found); - assertEquals(expecting, found); - } - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestLexerActions.java b/tool/test/org/antlr/v4/xtest/TestLexerActions.java deleted 
file mode 100644 index 0059f404f..000000000 --- a/tool/test/org/antlr/v4/xtest/TestLexerActions.java +++ /dev/null @@ -1,283 +0,0 @@ -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -public class TestLexerActions extends BaseTest { - // ----- ACTIONS -------------------------------------------------------- - - @Test public void testActionExecutedInDFA() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,3:4='34',<1>,1:3]\n" + - "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - @Test public void testActionEvalsAtCorrectIndex() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-9] {System.out.println(\"2nd char: \"+(char)_input.LA(1));} [0-9]+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "123 45"); - String expecting = - "2nd char: 2\n" + - "2nd char: 5\n" + - "[@0,0:2='123',<1>,1:0]\n" + - "[@1,4:5='45',<1>,1:4]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); - } - - /** - * This is a regressing test for antlr/antlr4#469 "Not all internal lexer - * rule actions are executed". - * https://github.com/antlr/antlr4/issues/469 - */ - @Test public void testEvalMultipleActions() throws Exception { - String grammar = - "lexer grammar L;\n" + - "\n" + - "@lexer::members\n" + - "{\n" + - "class Marker\n" + - "{\n" + - " Marker (Lexer lexer) { this.lexer = lexer; }\n" + - "\n" + - " public String getText ()\n" + - " {\n" + - " return lexer._input.getText (new Interval (start_index, stop_index));\n" + - " }\n" + - "\n" + - " public void start () { start_index = lexer._input.index (); System.out.println (\"Start:\" + start_index);}\n" + - " public void stop () { stop_index = lexer._input.index (); System.out.println (\"Stop:\" + stop_index);}\n" + - "\n" + - " private int start_index = 0;\n" + - " private int stop_index = 0;\n" + - " private Lexer lexer;\n" + - "}\n" + - "\n" + - "Marker m_name = new Marker (this);\n" + - "}\n" + - "\n" + - "HELLO: 'hello' WS { m_name.start (); } NAME { m_name.stop (); } '\\n' { System.out.println (\"Hello: \" + m_name.getText ()); };\n" + - "NAME: ('a'..'z' | 'A'..'Z')+ ('\\n')?;\n" + - "\n" + - "fragment WS: [ \\r\\t\\n]+ ;\n"; - String found = execLexer("L.g4", grammar, "L", "hello Steve\n"); - String expecting = - "Start:6\n" + - "Stop:11\n" + - "Hello: Steve\n" + - "\n" + - "[@0,0:11='hello Steve\\n',<1>,1:0]\n" + - "[@1,12:11='',<-1>,2:12]\n"; - assertEquals(expecting, found); - } - - @Test public void test2ActionsIn1Rule() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-9] {System.out.println(\"x\");} [0-9]+ {System.out.println(\"y\");} ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "123 45"); - String expecting = - "x\n" + - "y\n" + - "x\n" + - "y\n" + - "[@0,0:2='123',<1>,1:0]\n" + - "[@1,4:5='45',<1>,1:4]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); - } - - @Test public void testAltActionsIn1Rule() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ( [0-9]+ {System.out.print(\"int\");}\n" + - " | [a-z]+ {System.out.print(\"id\");}\n" + - " )\n" + - " {System.out.println(\" last\");}\n" + - " ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", 
"123 ab"); - String expecting = - "int last\n" + - "id last\n" + - "[@0,0:2='123',<1>,1:0]\n" + - "[@1,4:5='ab',<1>,1:4]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); - } - - @Test public void testActionPlusCommand() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} -> skip ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - // ----- COMMANDS -------------------------------------------------------- - - @Test public void testSkipCommand() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,3:4='34',<1>,1:3]\n" + - "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - @Test public void testMoreCommand() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : '#' -> more ;"; - String found = execLexer("L.g4", grammar, "L", "34#10"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,2:4='#10',<1>,1:2]\n" + - "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - @Test public void testTypeCommand() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "HASH : '#' -> type(HASH) ;"; - String found = execLexer("L.g4", grammar, "L", "34#"); - String expecting = - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,2:2='#',<2>,1:2]\n" + - "[@2,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCombinedCommand() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "HASH : '#' -> type(100), skip, more ;"; - String found = execLexer("L.g4", grammar, "L", "34#11"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,2:4='#11',<1>,1:2]\n" + - "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - @Test public void testLexerMode() throws Exception { - String grammar = - "lexer grammar L;\n" + - "STRING_START : '\"' -> pushMode(STRING_MODE), more;\n" + - "WS : (' '|'\\n') -> skip ;\n"+ - "mode STRING_MODE;\n"+ - "STRING : '\"' -> popMode;\n"+ - "ANY : . -> more;\n"; - String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); - String expecting = - "[@0,0:4='\"abc\"',<2>,1:0]\n" + - "[@1,6:9='\"ab\"',<2>,1:6]\n" + - "[@2,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); - } - - @Test public void testLexerPushPopModeAction() throws Exception { - String grammar = - "lexer grammar L;\n" + - "STRING_START : '\"' -> pushMode(STRING_MODE), more ;\n" + - "WS : (' '|'\\n') -> skip ;\n"+ - "mode STRING_MODE;\n"+ - "STRING : '\"' -> popMode ;\n"+ // token type 2 - "ANY : . 
-> more ;\n"; - String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); - String expecting = - "[@0,0:4='\"abc\"',<2>,1:0]\n" + - "[@1,6:9='\"ab\"',<2>,1:6]\n" + - "[@2,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); - } - - @Test public void testLexerModeAction() throws Exception { - String grammar = - "lexer grammar L;\n" + - "STRING_START : '\"' -> mode(STRING_MODE), more ;\n" + - "WS : (' '|'\\n') -> skip ;\n"+ - "mode STRING_MODE;\n"+ - "STRING : '\"' -> mode(DEFAULT_MODE) ;\n"+ // ttype 2 since '"' ambiguity - "ANY : . -> more ;\n"; - String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); - String expecting = - "[@0,0:4='\"abc\"',<2>,1:0]\n" + - "[@1,6:9='\"ab\"',<2>,1:6]\n" + - "[@2,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); - } - - // ----- PREDICATES -------------------------------------------------------- - - /** - * This is a regression test for antlr/antlr4#398 "Lexer: literal matches - * while negated char set fail to match" - * https://github.com/antlr/antlr4/issues/398 - */ - @Test - public void testFailingPredicateEvalIsNotCached() { - String grammar = - "lexer grammar TestLexer;\n" + - "\n" + - "fragment WS: [ \\t]+;\n" + - "fragment EOL: '\\r'? '\\n';\n" + - "\n" + - "LINE: WS? ~[\\r\\n]* EOL { !getText().trim().startsWith(\"Item:\") }?;\n" + - "ITEM: WS? 'Item:' -> pushMode(ITEM_HEADING_MODE);\n" + - "\n" + - "mode ITEM_HEADING_MODE;\n" + - "\n" + - "NAME: ~[\\r\\n]+;\n" + - "SECTION_HEADING_END: EOL -> popMode;\n"; - String input = - "A line here.\n" + - "Item: name of item\n" + - "Another line.\n" + - "More line.\n"; - String found = execLexer("TestLexer.g4", grammar, "TestLexer", input); - String expecting = - "[@0,0:12='A line here.\\n',<1>,1:0]\n" + - "[@1,13:17='Item:',<2>,2:0]\n" + - "[@2,18:30=' name of item',<3>,2:5]\n" + - "[@3,31:31='\\n',<4>,2:18]\n" + - "[@4,32:45='Another line.\\n',<1>,3:0]\n" + - "[@5,46:56='More line.\\n',<1>,4:0]\n" + - "[@6,57:56='',<-1>,5:11]\n"; - assertEquals(expecting, found); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestLexerErrors.java b/tool/test/org/antlr/v4/xtest/TestLexerErrors.java deleted file mode 100644 index 1ec038518..000000000 --- a/tool/test/org/antlr/v4/xtest/TestLexerErrors.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestLexerErrors extends BaseTest { - // TEST DETECTION - @Test public void testInvalidCharAtStart() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "x"); - String expectingTokens = - "[@0,1:0='',<-1>,1:1]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:0 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test - public void testStringsEmbeddedInActions() { - String grammar = - "lexer grammar Actions;\n" - + "ACTION2 : '[' (STRING | ~'\"')*? ']';\n" - + "STRING : '\"' ('\\\"' | .)*? '\"';\n" - + "WS : [ \\t\\r\\n]+ -> skip;\n"; - String tokens = execLexer("Actions.g4", grammar, "Actions", "[\"foo\"]"); - String expectingTokens = - "[@0,0:6='[\"foo\"]',<1>,1:0]\n" + - "[@1,7:6='',<-1>,1:7]\n"; - assertEquals(expectingTokens, tokens); - assertNull(stderrDuringParse); - - tokens = execLexer("Actions.g4", grammar, "Actions", "[\"foo]"); - expectingTokens = - "[@0,6:5='',<-1>,1:6]\n"; - assertEquals(expectingTokens, tokens); - assertEquals("line 1:0 token recognition error at: '[\"foo]'\n", stderrDuringParse); - } - - @Test public void testEnforcedGreedyNestedBrances() { - String grammar = - "lexer grammar R;\n" - + "ACTION : '{' (ACTION | ~[{}])* '}';\n" - + "WS : [ \\r\\n\\t]+ -> skip;\n"; - String tokens = execLexer("R.g4", grammar, "R", "{ { } }"); - String expectingTokens = - "[@0,0:6='{ { } }',<1>,1:0]\n" + - "[@1,7:6='',<-1>,1:7]\n"; - assertEquals(expectingTokens, tokens); - assertEquals(null, stderrDuringParse); - - tokens = execLexer("R.g4", grammar, "R", "{ { }"); - expectingTokens = - "[@0,5:4='',<-1>,1:5]\n"; - assertEquals(expectingTokens, tokens); - assertEquals("line 1:0 token recognition error at: '{ { }'\n", stderrDuringParse); - } - - @Test public void testInvalidCharAtStartAfterDFACache() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "abx"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:2 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testInvalidCharInToken() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "ax"); - String expectingTokens = - "[@0,2:1='',<-1>,1:2]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:0 token recognition error at: 'ax'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testInvalidCharInTokenAfterDFACache() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' 'b' ;\n"; - 
String tokens = execLexer("L.g4", grammar, "L", "abax"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,4:3='',<-1>,1:4]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:2 token recognition error at: 'ax'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testDFAToATNThatFailsBackToDFA() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'ab' ;\n"+ - "B : 'abc' ;\n"; - // The first ab caches the DFA then abx goes through the DFA but - // into the ATN for the x, which fails. Must go back into DFA - // and return to previous dfa accept state - String tokens = execLexer("L.g4", grammar, "L", "ababx"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:3='ab',<1>,1:2]\n" + - "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:4 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testDFAToATNThatMatchesThenFailsInATN() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'ab' ;\n"+ - "B : 'abc' ;\n"+ - "C : 'abcd' ;\n"; - // The first ab caches the DFA then abx goes through the DFA but - // into the ATN for the c. It marks that hasn't except state - // and then keeps going in the ATN. It fails on the x, but - // uses the previous accepted in the ATN not DFA - String tokens = execLexer("L.g4", grammar, "L", "ababcx"); - String expectingTokens = - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:4='abc',<2>,1:2]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:5 token recognition error at: 'x'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - @Test public void testErrorInMiddle() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'abc' ;\n"; - String tokens = execLexer("L.g4", grammar, "L", "abx"); - String expectingTokens = - "[@0,3:2='',<-1>,1:3]\n"; - assertEquals(expectingTokens, tokens); - String expectingError = "line 1:0 token recognition error at: 'abx'\n"; - String error = stderrDuringParse; - assertEquals(expectingError, error); - } - - // TEST RECOVERY - - /** - * This is a regression test for #45 "NullPointerException in LexerATNSimulator.execDFA". - * https://github.com/antlr/antlr4/issues/46 - */ - @Test - public void testLexerExecDFA() throws Exception { - String grammar = - "grammar T;\n" + - "start : ID ':' expr;\n" + - "expr : primary expr? {} | expr '->' ID;\n" + - "primary : ID;\n" + - "ID : [a-z]+;\n" + - "\n"; - String result = execLexer("T.g4", grammar, "TLexer", "x : x", false); - String expecting = - "[@0,0:0='x',<3>,1:0]\n" + - "[@1,2:2=':',<1>,1:2]\n" + - "[@2,4:4='x',<3>,1:4]\n" + - "[@3,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, result); - assertEquals("line 1:1 token recognition error at: ' '\n" + - "line 1:3 token recognition error at: ' '\n", - this.stderrDuringParse); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestLexerExec.java b/tool/test/org/antlr/v4/xtest/TestLexerExec.java deleted file mode 100644 index b0841d11a..000000000 --- a/tool/test/org/antlr/v4/xtest/TestLexerExec.java +++ /dev/null @@ -1,690 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.misc.Nullable; -import org.junit.Test; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class TestLexerExec extends BaseTest { - @Test public void testQuoteTranslation() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "QUOTE : '\"' ;\n"; // make sure this compiles - String found = execLexer("L.g4", grammar, "L", "\""); - String expecting = - "[@0,0:0='\"',<1>,1:0]\n" + - "[@1,1:0='',<-1>,1:1]\n"; - assertEquals(expecting, found); - } - - @Test public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : '-' I ;\n" + - "I : '0'..'9'+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 -21 3"); - String expecting = - "[@0,0:1='34',<2>,1:0]\n" + - "[@1,3:5='-21',<1>,1:3]\n" + - "[@2,7:7='3',<2>,1:7]\n" + - "[@3,8:7='',<-1>,1:8]\n"; // EOF has no length so range is 8:7 not 8:8 - assertEquals(expecting, found); - } - - @Test public void testSlashes() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "Backslash : '\\\\';\n" + - "Slash : '/';\n" + - "Vee : '\\\\/';\n" + - "Wedge : '/\\\\';\n"+ - "WS : [ \\t] -> skip;"; - String found = execLexer("L.g4", grammar, "L", "\\ / \\/ /\\"); - String expecting = - "[@0,0:0='\\',<1>,1:0]\n" + - "[@1,2:2='/',<2>,1:2]\n" + - "[@2,4:5='\\/',<3>,1:4]\n" + - "[@3,7:8='/\\',<4>,1:7]\n" + - "[@4,9:8='',<-1>,1:9]\n"; - assertEquals(expecting, found); - } - - /** - * This is a regression test for antlr/antlr4#224: "Parentheses without - * quantifier in lexer rules have unclear effect". 
- * https://github.com/antlr/antlr4/issues/224 - */ - @Test public void testParentheses() { - String grammar = - "lexer grammar Demo;\n" + - "\n" + - "START_BLOCK: '-.-.-';\n" + - "\n" + - "ID : (LETTER SEPARATOR) (LETTER SEPARATOR)+;\n" + - "fragment LETTER: L_A|L_K;\n" + - "fragment L_A: '.-';\n" + - "fragment L_K: '-.-';\n" + - "\n" + - "SEPARATOR: '!';\n"; - String found = execLexer("Demo.g4", grammar, "Demo", "-.-.-!"); - String expecting = - "[@0,0:4='-.-.-',<1>,1:0]\n" + - "[@1,5:5='!',<3>,1:5]\n" + - "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); - } - - @Test - public void testNonGreedyTermination() throws Exception { - String grammar = - "lexer grammar L;\n" - + "STRING : '\"' ('\"\"' | .)*? '\"';"; - - String found = execLexer("L.g4", grammar, "L", "\"hi\"\"mom\""); - assertEquals( - "[@0,0:3='\"hi\"',<1>,1:0]\n" + - "[@1,4:8='\"mom\"',<1>,1:4]\n" + - "[@2,9:8='',<-1>,1:9]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyTermination2() throws Exception { - String grammar = - "lexer grammar L;\n" - + "STRING : '\"' ('\"\"' | .)+? '\"';"; - - String found = execLexer("L.g4", grammar, "L", "\"\"\"mom\""); - assertEquals( - "[@0,0:6='\"\"\"mom\"',<1>,1:0]\n" + - "[@1,7:6='',<-1>,1:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testGreedyOptional() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT?;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + - "[@1,14:13='',<-1>,3:14]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyOptional() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT??;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:6='//blah\\n',<1>,1:0]\n" + - "[@1,7:13='//blah\\n',<1>,2:0]\n" + - "[@2,14:13='',<-1>,3:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testGreedyClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT*;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + - "[@1,14:13='',<-1>,3:14]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : '//' .*? '\\n' CMT*?;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:6='//blah\\n',<1>,1:0]\n" + - "[@1,7:13='//blah\\n',<1>,2:0]\n" + - "[@2,14:13='',<-1>,3:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testGreedyPositiveClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : ('//' .*? '\\n')+;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" + - "[@1,14:13='',<-1>,3:14]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testNonGreedyPositiveClosure() throws Exception { - String grammar = - "lexer grammar L;\n" - + "CMT : ('//' .*? 
'\\n')+?;\n" - + "WS : (' '|'\\t')+;"; - - String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n"); - assertEquals( - "[@0,0:6='//blah\\n',<1>,1:0]\n" + - "[@1,7:13='//blah\\n',<1>,2:0]\n" + - "[@2,14:13='',<-1>,3:7]\n", found); - assertNull(stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardStar1() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)*? '*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,9:9='\\n',<2>,1:9]\n" + - "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,35:35='\\n',<2>,3:16]\n" + - "[@4,36:35='',<-1>,4:17]\n"; - - // stuff on end of comment matches another rule - String found = execLexer("L.g4", grammar, "L", - "/* ick */\n" + - "/* /* */\n" + - "/* /*nested*/ */\n"); - assertEquals(expecting, found); - assertNull(stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardStar2() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)*? '*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - // stuff on end of comment doesn't match another rule - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,10:10='\\n',<2>,1:10]\n" + - "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,38:38='\\n',<2>,3:17]\n" + - "[@4,39:38='',<-1>,4:18]\n"; - String found = execLexer("L.g4", grammar, "L", - "/* ick */x\n" + - "/* /* */x\n" + - "/* /*nested*/ */x\n"); - assertEquals(expecting, found); - assertEquals( - "line 1:9 token recognition error at: 'x'\n" + - "line 3:16 token recognition error at: 'x'\n", stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardPlus1() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)+? '*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,9:9='\\n',<2>,1:9]\n" + - "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,35:35='\\n',<2>,3:16]\n" + - "[@4,36:35='',<-1>,4:17]\n"; - - // stuff on end of comment matches another rule - String found = execLexer("L.g4", grammar, "L", - "/* ick */\n" + - "/* /* */\n" + - "/* /*nested*/ */\n"); - assertEquals(expecting, found); - assertNull(stderrDuringParse); - } - - @Test - public void testRecursiveLexerRuleRefWithWildcardPlus2() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "CMT : '/*' (CMT | .)+? 
'*/' ;\n" + - "WS : (' '|'\\n')+ ;\n" - /*+ "ANY : .;"*/; - - // stuff on end of comment doesn't match another rule - String expecting = - "[@0,0:8='/* ick */',<1>,1:0]\n" + - "[@1,10:10='\\n',<2>,1:10]\n" + - "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" + - "[@3,38:38='\\n',<2>,3:17]\n" + - "[@4,39:38='',<-1>,4:18]\n"; - String found = execLexer("L.g4", grammar, "L", - "/* ick */x\n" + - "/* /* */x\n" + - "/* /*nested*/ */x\n"); - assertEquals(expecting, found); - assertEquals( - "line 1:9 token recognition error at: 'x'\n" + - "line 3:16 token recognition error at: 'x'\n", stderrDuringParse); - } - - @Test public void testActionPlacement() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ({System.out.println(\"stuff fail: \" + getText());} 'a' | {System.out.println(\"stuff0: \" + getText());} 'a' {System.out.println(\"stuff1: \" + getText());} 'b' {System.out.println(\"stuff2: \" + getText());}) {System.out.println(getText());} ;\n"+ - "WS : (' '|'\\n') -> skip ;\n" + - "J : .;\n"; - String found = execLexer("L.g4", grammar, "L", "ab"); - String expecting = - "stuff0: \n" + - "stuff1: a\n" + - "stuff2: ab\n" + - "ab\n" + - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:1='',<-1>,1:2]\n"; - assertEquals(expecting, found); - } - - @Test public void testGreedyConfigs() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ('a' | 'ab') {System.out.println(getText());} ;\n"+ - "WS : (' '|'\\n') -> skip ;\n" + - "J : .;\n"; - String found = execLexer("L.g4", grammar, "L", "ab"); - String expecting = - "ab\n" + - "[@0,0:1='ab',<1>,1:0]\n" + - "[@1,2:1='',<-1>,1:2]\n"; - assertEquals(expecting, found); - } - - @Test public void testNonGreedyConfigs() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : .*? ('a' | 'ab') {System.out.println(getText());} ;\n"+ - "WS : (' '|'\\n') -> skip ;\n" + - "J : . {System.out.println(getText());};\n"; - String found = execLexer("L.g4", grammar, "L", "ab"); - String expecting = - "a\n" + - "b\n" + - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,1:1='b',<3>,1:1]\n" + - "[@2,2:1='',<-1>,1:2]\n"; - assertEquals(expecting, found); - } - - @Test public void testKeywordID() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "KEND : 'end' ;\n" + // has priority - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n')+ ;"; - String found = execLexer("L.g4", grammar, "L", "end eend ending a"); - String expecting = - "[@0,0:2='end',<1>,1:0]\n" + - "[@1,3:3=' ',<3>,1:3]\n" + - "[@2,4:7='eend',<2>,1:4]\n" + - "[@3,8:8=' ',<3>,1:8]\n" + - "[@4,9:14='ending',<2>,1:9]\n" + - "[@5,15:15=' ',<3>,1:15]\n" + - "[@6,16:16='a',<2>,1:16]\n" + - "[@7,17:16='',<-1>,1:17]\n"; - assertEquals(expecting, found); - } - - @Test public void testHexVsID() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "HexLiteral : '0' ('x'|'X') HexDigit+ ;\n"+ - "DecimalLiteral : ('0' | '1'..'9' '0'..'9'*) ;\n" + - "FloatingPointLiteral : ('0x' | '0X') HexDigit* ('.' HexDigit*)? ;\n" + - "DOT : '.' 
;\n" + - "ID : 'a'..'z'+ ;\n" + - "fragment HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;\n" + - "WS : (' '|'\\n')+ ;"; - String found = execLexer("L.g4", grammar, "L", "x 0 1 a.b a.l"); - String expecting = - "[@0,0:0='x',<5>,1:0]\n" + - "[@1,1:1=' ',<6>,1:1]\n" + - "[@2,2:2='0',<2>,1:2]\n" + - "[@3,3:3=' ',<6>,1:3]\n" + - "[@4,4:4='1',<2>,1:4]\n" + - "[@5,5:5=' ',<6>,1:5]\n" + - "[@6,6:6='a',<5>,1:6]\n" + - "[@7,7:7='.',<4>,1:7]\n" + - "[@8,8:8='b',<5>,1:8]\n" + - "[@9,9:9=' ',<6>,1:9]\n" + - "[@10,10:10='a',<5>,1:10]\n" + - "[@11,11:11='.',<4>,1:11]\n" + - "[@12,12:12='l',<5>,1:12]\n" + - "[@13,13:12='',<-1>,1:13]\n"; - assertEquals(expecting, found); - } - - // must get DONE EOF - @Test public void testEOFByItself() throws Exception { - String grammar = - "lexer grammar L;\n" + - "DONE : EOF ;\n" + - "A : 'a';\n"; - String found = execLexer("L.g4", grammar, "L", ""); - String expecting = - "[@0,0:-1='',<1>,1:0]\n" + - "[@1,0:-1='',<-1>,1:0]\n"; - assertEquals(expecting, found); - } - - @Test public void testEOFSuffixInFirstRule() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : 'a' EOF ;\n"+ - "B : 'a';\n"+ - "C : 'c';\n"; - String found = execLexer("L.g4", grammar, "L", ""); - String expecting = - "[@0,0:-1='',<-1>,1:0]\n"; - assertEquals(expecting, found); - - found = execLexer("L.g4", grammar, "L", "a"); - expecting = - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,1:0='',<-1>,1:1]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSet() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D] -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetPlus() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetNot() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : ~[ab \\n] ~[ \\ncd]* {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "xaf"); - String expecting = - "I\n" + - "[@0,0:2='xaf',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetInSet() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : (~[ab \\n]|'a') {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "a x"); - String expecting = - "I\n" + - "I\n" + - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,2:2='x',<1>,1:2]\n" + - "[@2,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetRange() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-9]+ {System.out.println(\"I\");} ;\n"+ - "ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n"+ - "WS : [ \\n\\u0009\\r]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n "); - String expecting = - "I\n" + - "I\n" + - "ID\n" + - "ID\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,4:5='34',<1>,1:4]\n" + - 
"[@2,7:8='a2',<2>,1:7]\n" + - "[@3,10:12='abc',<2>,1:10]\n" + - "[@4,18:17='',<-1>,2:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithMissingEndRange() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-]+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "00\r\n"); - String expecting = - "I\n" + - "[@0,0:1='00',<1>,1:0]\n" + - "[@1,4:3='',<-1>,2:0]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithMissingEscapeChar() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "I : [0-9]+ {System.out.println(\"I\");} ;\n"+ - "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 "); - String expecting = - "I\n" + - "[@0,0:1='34',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithEscapedChar() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "DASHBRACK : [\\-\\]]+ {System.out.println(\"DASHBRACK\");} ;\n"+ - "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "- ] "); - String expecting = - "DASHBRACK\n" + - "DASHBRACK\n" + - "[@0,0:0='-',<1>,1:0]\n" + - "[@1,2:2=']',<1>,1:2]\n" + - "[@2,4:3='',<-1>,1:4]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithReversedRange() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : [z-a9]+ {System.out.println(\"A\");} ;\n"+ - "WS : [ \\u]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "9"); - String expecting = - "A\n" + - "[@0,0:0='9',<1>,1:0]\n" + - "[@1,1:0='',<-1>,1:1]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithQuote() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : [\"a-z]+ {System.out.println(\"A\");} ;\n"+ - "WS : [ \\n\\t]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "b\"a"); - String expecting = - "A\n" + - "[@0,0:2='b\"a',<1>,1:0]\n" + - "[@1,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); - } - - @Test public void testCharSetWithQuote2() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "A : [\"\\\\ab]+ {System.out.println(\"A\");} ;\n"+ - "WS : [ \\n\\t]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "b\"\\a"); - String expecting = - "A\n" + - "[@0,0:3='b\"\\a',<1>,1:0]\n" + - "[@1,4:3='',<-1>,1:4]\n"; - assertEquals(expecting, found); - } - - @Test - public void testPositionAdjustingLexer() throws Exception { - String grammar = load("PositionAdjustingLexer.g4", null); - String input = - "tokens\n" + - "tokens {\n" + - "notLabel\n" + - "label1 =\n" + - "label2 +=\n" + - "notLabel\n"; - String found = execLexer("PositionAdjustingLexer.g4", grammar, "PositionAdjustingLexer", input); - - final int TOKENS = 4; - final int LABEL = 5; - final int IDENTIFIER = 6; - String expecting = - "[@0,0:5='tokens',<" + IDENTIFIER + ">,1:0]\n" + - "[@1,7:12='tokens',<" + TOKENS + ">,2:0]\n" + - "[@2,14:14='{',<3>,2:7]\n" + - "[@3,16:23='notLabel',<" + IDENTIFIER + ">,3:0]\n" + - "[@4,25:30='label1',<" + LABEL + ">,4:0]\n" + - "[@5,32:32='=',<1>,4:7]\n" + - "[@6,34:39='label2',<" + LABEL + ">,5:0]\n" + - "[@7,41:42='+=',<2>,5:7]\n" + - "[@8,44:51='notLabel',<" + IDENTIFIER + ">,6:0]\n" + - "[@9,53:52='',<-1>,7:0]\n"; - - assertEquals(expecting, found); - } - - /** - * This is a regression test for antlr/antlr4#76 "Serialized ATN strings - * should be split when longer than 2^16 bytes (class file limitation)" - * 
https://github.com/antlr/antlr4/issues/76 - */ - @Test - public void testLargeLexer() throws Exception { - StringBuilder grammar = new StringBuilder(); - grammar.append("lexer grammar L;\n"); - grammar.append("WS : [ \\t\\r\\n]+ -> skip;\n"); - for (int i = 0; i < 4000; i++) { - grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n"); - } - - String input = "KW400"; - String found = execLexer("L.g4", grammar.toString(), "L", input); - String expecting = - "[@0,0:4='KW400',<402>,1:0]\n" + - "[@1,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } - - /** - * This is a regression test for antlr/antlr4#687 "Empty zero-length tokens - * cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match - * zero-length tokens" - * https://github.com/antlr/antlr4/issues/687 - * https://github.com/antlr/antlr4/issues/688 - */ - @Test public void testZeroLengthToken() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "\n" + - "BeginString\n" + - " : '\\'' -> more, pushMode(StringMode)\n" + - " ;\n" + - "\n" + - "mode StringMode;\n" + - "\n" + - " StringMode_X : 'x' -> more;\n" + - " StringMode_Done : -> more, mode(EndStringMode);\n" + - "\n" + - "mode EndStringMode; \n" + - "\n" + - " EndString : '\\'' -> popMode;\n"; - String found = execLexer("L.g4", grammar, "L", "'xxx'"); - String expecting = - "[@0,0:4=''xxx'',<1>,1:0]\n" + - "[@1,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestListeners.java b/tool/test/org/antlr/v4/xtest/TestListeners.java deleted file mode 100644 index 32f272ab5..000000000 --- a/tool/test/org/antlr/v4/xtest/TestListeners.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestListeners extends BaseTest { - @Test public void testBasic() throws Exception { - String grammar = - "grammar T;\n" + - "@header {import org.antlr.v4.runtime.tree.*;}\n"+ - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void visitTerminal(TerminalNode node) {\n" + - " System.out.println(node.getSymbol().getText());\n" + - " }\n" + - " }}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=a ;\n" + - "a : INT INT" + - " | ID" + - " ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); - String expecting = "(a 1 2)\n" + - "1\n" + - "2\n"; - assertEquals(expecting, result); - } - - @Test public void testTokenGetters() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void exitA(TParser.AContext ctx) {\n" + - " if (ctx.getChildCount()==2) System.out.printf(\"%s %s %s\",ctx.INT(0).getSymbol().getText(),ctx.INT(1).getSymbol().getText(),ctx.INT());\n" + - " else System.out.println(ctx.ID().getSymbol());\n" + - " }\n" + - " }}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=a ;\n" + - "a : INT INT" + - " | ID" + - " ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); - String expecting = - "(a 1 2)\n" + - "1 2 [1, 2]\n"; - assertEquals(expecting, result); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); - expecting = "(a abc)\n" + - "[@0,0:2='abc',<4>,1:0]\n"; - assertEquals(expecting, result); - } - - @Test public void testRuleGetters() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void exitA(TParser.AContext ctx) {\n" + - " if (ctx.getChildCount()==2) {\n" + - " System.out.printf(\"%s %s %s\",ctx.b(0).start.getText(),\n" + - " ctx.b(1).start.getText(),ctx.b().get(0).start.getText());\n" + - " }\n" + - " else System.out.println(ctx.b(0).start.getText());\n" + - " }\n" + - " }}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=a ;\n" + - "a : b b" + // forces list - " | b" + // a list still - " ;\n" + - "b : ID | INT ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1 2", false); - String expecting = "(a (b 1) (b 2))\n" + - "1 2 1\n"; - assertEquals(expecting, result); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "abc", false); - expecting = "(a (b abc))\n" + - "abc\n"; - assertEquals(expecting, result); - } - - @Test public void 
testLR() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - "public static class LeafListener extends TBaseListener {\n" + - " public void exitE(TParser.EContext ctx) {\n" + - " if (ctx.getChildCount()==3) {\n" + - " System.out.printf(\"%s %s %s\\n\",ctx.e(0).start.getText(),\n" + - " ctx.e(1).start.getText()," + - " ctx.e().get(0).start.getText());\n" + - " }\n" + - " else System.out.println(ctx.INT().getSymbol().getText());\n" + - " }\n" + - " }" + - "}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=e ;\n" + - "e : e op='*' e\n" + - " | e op='+' e\n" + - " | INT\n" + - " ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1+2*3", false); - String expecting = - "(e (e 1) + (e (e 2) * (e 3)))\n" + - "1\n" + - "2\n" + - "3\n" + - "2 3 2\n" + - "1 2 1\n"; - assertEquals(expecting, result); - } - - @Test public void testLRWithLabels() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {\n" + - " public static class LeafListener extends TBaseListener {\n" + - " public void exitCall(TParser.CallContext ctx) {\n" + - " System.out.printf(\"%s %s\",ctx.e().start.getText(),\n" + - " ctx.eList());\n" + - " }\n" + - " public void exitInt(TParser.IntContext ctx) {\n" + - " System.out.println(ctx.INT().getSymbol().getText());\n" + - " }\n" + - " }\n" + - "}\n" + - "s\n" + - "@after {" + - " System.out.println($r.ctx.toStringTree(this));" + - " ParseTreeWalker walker = new ParseTreeWalker();\n" + - " walker.walk(new LeafListener(), $r.ctx);" + - "}\n" + - " : r=e ;\n" + - "e : e '(' eList ')' # Call\n" + - " | INT # Int\n" + - " ; \n" + - "eList : e (',' e)* ;\n" + - "MULT: '*' ;\n" + - "ADD : '+' ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\t\\n]+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "1(2,3)", false); - String expecting = - "(e (e 1) ( (eList (e 2) , (e 3)) ))\n" + - "1\n" + - "2\n" + - "3\n" + - "1 [13 6]\n"; - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestParseErrors.java b/tool/test/org/antlr/v4/xtest/TestParseErrors.java deleted file mode 100644 index 1af881bfc..000000000 --- a/tool/test/org/antlr/v4/xtest/TestParseErrors.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** test runtime parse errors */ -public class TestParseErrors extends BaseTest { - @Test public void testTokenMismatch() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aa", false); - String expecting = "line 1:1 mismatched input 'a' expecting 'b'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletion() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); - String expecting = "line 1:1 extraneous input 'a' expecting 'b'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionExpectingSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'c') ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aab", false); - String expecting = "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenInsertion() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b' 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); - String expecting = "line 1:1 missing 'b' at 'c'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testConjuringUpToken() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); - String expecting = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; - assertEquals(expecting, result); - } - - @Test public void testSingleSetInsertion() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'c') 'd' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); - String expecting = "line 1:1 missing {'b', 'c'} at 'd'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testConjuringUpTokenFromSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); - String expecting = "conjured=[@-1,-1:-1='',<2>,1:1]\n"; - assertEquals(expecting, result); - } - - @Test public void testLL2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'" + - " | 'a' 'c'" + - ";\n" + - "q : 'e' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ae", false); - String expecting = "line 1:1 no viable alternative at input 'ae'\n"; - 
String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testLL3() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c'" + - " | 'a' 'b' 'd'" + - " ;\n" + - "q : 'e' ;\n"; - System.out.println(grammar); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abe", false); - String expecting = "line 1:2 no viable alternative at input 'abe'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testLLStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a'+ 'b'" + - " | 'a'+ 'c'" + - ";\n" + - "q : 'e' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aaae", false); - String expecting = "line 1:3 no viable alternative at input 'aaae'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionBeforeLoop() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'*;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); - String expecting = "line 1:1 extraneous input 'a' expecting {, 'b'}\n" + - "line 1:3 token recognition error at: 'c'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionBeforeLoop() throws Exception { - // can only delete 1 before loop - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c';"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); - String expecting = - "line 1:1 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionDuringLoop() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); - String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionDuringLoop() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' 'b'* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); - String expecting = - "line 1:2 extraneous input 'a' expecting {'b', 'c'}\n" + - "line 1:6 extraneous input 'a' expecting {'b', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - // ------ - - @Test public void testSingleTokenDeletionBeforeLoop2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})*;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aabc", false); - String expecting = "line 1:1 extraneous input 'a' expecting {, 'b', 'z'}\n" + - "line 1:3 token recognition error at: 'c'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionBeforeLoop2() throws Exception { - // can only delete 1 before loop - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})* 'c';"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "aacabc", false); - String expecting = - "line 1:1 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testSingleTokenDeletionDuringLoop2() throws Exception { - String grammar = - "grammar T;\n" + - "a 
: 'a' ('b'|'z'{;})* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ababbc", false); - String expecting = "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testMultiTokenDeletionDuringLoop2() throws Exception { - String grammar = - "grammar T;\n" + - "a : 'a' ('b'|'z'{;})* 'c' ;"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "abaaababc", false); - String expecting = - "line 1:2 extraneous input 'a' expecting {'b', 'z', 'c'}\n" + - "line 1:6 extraneous input 'a' expecting {'b', 'z', 'c'}\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testLL1ErrorInfo() throws Exception { - String grammar = - "grammar T;\n" + - "start : animal (AND acClass)? service EOF;\n" + - "animal : (DOG | CAT );\n" + - "service : (HARDWARE | SOFTWARE) ;\n" + - "AND : 'and';\n" + - "DOG : 'dog';\n" + - "CAT : 'cat';\n" + - "HARDWARE: 'hardware';\n" + - "SOFTWARE: 'software';\n" + - "WS : ' ' -> skip ;" + - "acClass\n" + - "@init\n" + - "{ System.out.println(getExpectedTokens().toString(tokenNames)); }\n" + - " : ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "dog and software", false); - String expecting = "{'hardware', 'software'}\n"; - assertEquals(expecting, result); - } - - /** - * This is a regression test for #6 "NullPointerException in getMissingSymbol". - * https://github.com/antlr/antlr4/issues/6 - */ - @Test - public void testInvalidEmptyInput() throws Exception { - String grammar = - "grammar T;\n" + - "start : ID+;\n" + - "ID : [a-z]+;\n" + - "\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "", true); - String expecting = ""; - assertEquals(expecting, result); - assertEquals("line 1:0 missing ID at ''\n", this.stderrDuringParse); - } - - /** - * Regression test for "Getter for context is not a list when it should be". - * https://github.com/antlr/antlr4/issues/19 - */ - @Test - public void testContextListGetters() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members{\n" + - " void foo() {\n" + - " SContext s = null;\n" + - " List a = s.a();\n" + - " List b = s.b();\n" + - " }\n" + - "}\n" + - "s : (a | b)+;\n" + - "a : 'a' {System.out.print('a');};\n" + - "b : 'b' {System.out.print('b');};\n" + - ""; - String result = execParser("T.g", grammar, "TParser", "TLexer", "s", "abab", true); - String expecting = "abab\n"; - assertEquals(expecting, result); - assertNull(this.stderrDuringParse); - } - - /** - * This is a regression test for #26 "an exception upon simple rule with double recursion in an alternative". 
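For reference, the list getters exercised by testContextListGetters above come from ANTLR's generated context classes: whenever a rule reference can occur more than once inside a rule, the generated context exposes both a list getter and an indexed getter (the same pattern the listener tests rely on with ctx.b(0) and ctx.b()). A minimal sketch, assuming the SContext/AContext classes generated from that test's grammar and an already constructed parser (construction omitted):

    SContext s = parser.s();            // parse, e.g., "abab" with the generated parser
    List<AContext> allAs = s.a();       // one entry per `a` subtree, in match order
    AContext firstA = s.a(0);           // indexed getter for a single occurrence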
- * https://github.com/antlr/antlr4/issues/26 - */ - @Test - public void testDuplicatedLeftRecursiveCall() throws Exception { - String grammar = - "grammar T;\n" + - "start : expr EOF;\n" + - "expr : 'x'\n" + - " | expr expr\n" + - " ;\n" + - "\n"; - - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xx", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxx", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - - result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxxx", true); - assertEquals("", result); - assertNull(this.stderrDuringParse); - } - - /** - * This is a regression test for #45 "NullPointerException in ATNConfig.hashCode". - * https://github.com/antlr/antlr4/issues/45 - *
- * The original cause of this issue was an error in the tool's ATN state optimization, - * which is now detected early in {@link ATNSerializer} by ensuring that all - * serialized transitions point to states which were not removed. - */ - @Test - public void testInvalidATNStateRemoval() throws Exception { - String grammar = - "grammar T;\n" + - "start : ID ':' expr;\n" + - "expr : primary expr? {} | expr '->' ID;\n" + - "primary : ID;\n" + - "ID : [a-z]+;\n" + - "\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x:x", true); - String expecting = ""; - assertEquals(expecting, result); - assertNull(this.stderrDuringParse); - } - - @Test public void testNoViableAltAvoidance() throws Exception { - // "a." matches 'a' to rule e but then realizes '.' won't match. - // previously would cause noviablealt. now prediction pretends to - // have "a' predict 2nd alt of e. Will get syntax error later so - // let it get farther. - String grammar = - "grammar T;\n" + - "s : e '!' ;\n" + - "e : 'a' 'b'\n" + - " | 'a'\n" + - " ;\n" + - "DOT : '.' ;\n" + - "WS : [ \\t\\r\\n]+ -> skip;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", "a.", false); - String expecting = - "line 1:1 mismatched input '.' expecting '!'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java b/tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java deleted file mode 100644 index c5bc753cf..000000000 --- a/tool/test/org/antlr/v4/xtest/TestParseTreeMatcher.java +++ /dev/null @@ -1,464 +0,0 @@ -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.InputMismatchException; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.runtime.tree.pattern.ParseTreeMatch; -import org.antlr.v4.runtime.tree.pattern.ParseTreePattern; -import org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher; -import org.junit.Test; - -import java.lang.reflect.Constructor; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -public class TestParseTreeMatcher extends BaseTest { - @Test public void testChunking() throws Exception { - ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); - assertEquals("[ID, ' = ', expr, ' ;']", m.split(" = ;").toString()); - assertEquals("[' ', ID, ' = ', expr]", m.split(" = ").toString()); - assertEquals("[ID, ' = ', expr]", m.split(" = ").toString()); - assertEquals("[expr]", m.split("").toString()); - assertEquals("[' foo']", m.split("\\ foo").toString()); - assertEquals("['foo bar ', tag]", m.split("foo \\ bar ").toString()); - } - - @Test public void testDelimiters() throws Exception { - ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); - m.setDelimiters("<<", ">>", "$"); - String result = m.split("<> = <> ;$<< ick $>>").toString(); - assertEquals("[ID, ' = ', expr, ' ;<< ick >>']", result); - } - - @Test public void testInvertedTags() throws Exception { - ParseTreePatternMatcher m= new ParseTreePatternMatcher(null, null); - String result 
= null; - try { - m.split(">expr<"); - } - catch (IllegalArgumentException iae) { - result = iae.getMessage(); - } - String expected = "tag delimiters out of order in pattern: >expr<"; - assertEquals(expected, result); - } - - @Test public void testUnclosedTag() throws Exception { - ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); - String result = null; - try { - m.split(" >"); - } - catch (IllegalArgumentException iae) { - result = iae.getMessage(); - } - String expected = "missing start tag in pattern: >"; - assertEquals(expected, result); - } - - @Test public void testTokenizingPattern() throws Exception { - String grammar = - "grammar X1;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X1.g4", grammar, "X1Parser", "X1Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X1"); - - List tokens = m.tokenize(" = ;"); - String results = tokens.toString(); - String expected = "[ID:3, [@-1,1:1='=',<1>,1:1], expr:7, [@-1,1:1=';',<2>,1:1]]"; - assertEquals(expected, results); - } - - @Test - public void testCompilingPattern() throws Exception { - String grammar = - "grammar X2;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); - - ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); - String results = t.getPatternTree().toStringTree(m.getParser()); - String expected = "(s = (expr ) ;)"; - assertEquals(expected, results); - } - - @Test - public void testCompilingPatternConsumesAllTokens() throws Exception { - String grammar = - "grammar X2;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); - - boolean failed = false; - try { - m.compile(" = ; extra", m.getParser().getRuleIndex("s")); - } - catch (ParseTreePatternMatcher.StartRuleDoesNotConsumeFullPattern e) { - failed = true; - } - assertTrue(failed); - } - - @Test - public void testPatternMatchesStartRule() throws Exception { - String grammar = - "grammar X2;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); - - boolean failed = false; - try { - m.compile(" ;", m.getParser().getRuleIndex("s")); - } - catch (InputMismatchException e) { - failed = true; - } - assertTrue(failed); - } - - @Test - public void testPatternMatchesStartRule2() throws Exception { - String grammar = - "grammar X2;\n" + - "s : ID '=' expr ';' | expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); - - boolean failed = false; - try { - m.compile(" ;", 
m.getParser().getRuleIndex("s")); - } - catch (NoViableAltException e) { - failed = true; - } - assertTrue(failed); - } - - @Test - public void testHiddenTokensNotSeenByTreePatternParser() throws Exception { - String grammar = - "grammar X2;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> channel(HIDDEN) ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); - - ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); - String results = t.getPatternTree().toStringTree(m.getParser()); - String expected = "(s = (expr ) ;)"; - assertEquals(expected, results); - } - - @Test - public void testCompilingMultipleTokens() throws Exception { - String grammar = - "grammar X2;\n" + - "s : ID '=' ID ';' ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); - - ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); - String results = t.getPatternTree().toStringTree(m.getParser()); - String expected = "(s = ;)"; - assertEquals(expected, results); - } - - @Test public void testIDNodeMatches() throws Exception { - String grammar = - "grammar X3;\n" + - "s : ID ';' ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x ;"; - String pattern = ";"; - checkPatternMatch(grammar, "s", input, pattern, "X3"); - } - - @Test public void testIDNodeWithLabelMatches() throws Exception { - String grammar = - "grammar X8;\n" + - "s : ID ';' ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x ;"; - String pattern = ";"; - ParseTreeMatch m = checkPatternMatch(grammar, "s", input, pattern, "X8"); - assertEquals("{ID=[x], id=[x]}", m.getLabels().toString()); - assertNotNull(m.get("id")); - assertNotNull(m.get("ID")); - assertEquals("x", m.get("id").getText()); - assertEquals("x", m.get("ID").getText()); - assertEquals("[x]", m.getAll("id").toString()); - assertEquals("[x]", m.getAll("ID").toString()); - - assertNull(m.get("undefined")); - assertEquals("[]", m.getAll("undefined").toString()); - } - - @Test public void testLabelGetsLastIDNode() throws Exception { - String grammar = - "grammar X9;\n" + - "s : ID ID ';' ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x y;"; - String pattern = " ;"; - ParseTreeMatch m = checkPatternMatch(grammar, "s", input, pattern, "X9"); - assertEquals("{ID=[x, y], id=[x, y]}", m.getLabels().toString()); - assertNotNull(m.get("id")); - assertNotNull(m.get("ID")); - assertEquals("y", m.get("id").getText()); - assertEquals("y", m.get("ID").getText()); - assertEquals("[x, y]", m.getAll("id").toString()); - assertEquals("[x, y]", m.getAll("ID").toString()); - - assertNull(m.get("undefined")); - assertEquals("[]", m.getAll("undefined").toString()); - } - - @Test public void testIDNodeWithMultipleLabelMatches() throws Exception { - String grammar = - "grammar X7;\n" + - "s : ID ID ID ';' ;\n" + - "ID : [a-z]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x y z;"; - String pattern = " ;"; - ParseTreeMatch m = checkPatternMatch(grammar, "s", input, pattern, "X7"); - assertEquals("{ID=[x, y, z], a=[x, z], b=[y]}", m.getLabels().toString()); - assertNotNull(m.get("a")); 
// get first - assertNotNull(m.get("b")); - assertNotNull(m.get("ID")); - assertEquals("z", m.get("a").getText()); - assertEquals("y", m.get("b").getText()); - assertEquals("z", m.get("ID").getText()); // get last - assertEquals("[x, z]", m.getAll("a").toString()); - assertEquals("[y]", m.getAll("b").toString()); - assertEquals("[x, y, z]", m.getAll("ID").toString()); // ordered - - assertEquals("xyz;", m.getTree().getText()); // whitespace stripped by lexer - - assertNull(m.get("undefined")); - assertEquals("[]", m.getAll("undefined").toString()); - } - - @Test public void testTokenAndRuleMatch() throws Exception { - String grammar = - "grammar X4;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x = 99;"; - String pattern = " = ;"; - checkPatternMatch(grammar, "s", input, pattern, "X4"); - } - - @Test public void testTokenTextMatch() throws Exception { - String grammar = - "grammar X4;\n" + - "s : ID '=' expr ';' ;\n" + - "expr : ID | INT ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x = 0;"; - String pattern = " = 1;"; - boolean invertMatch = true; // 0!=1 - checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); - - input = "x = 0;"; - pattern = " = 0;"; - invertMatch = false; - checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); - - input = "x = 0;"; - pattern = "x = 0;"; - invertMatch = false; - checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); - - input = "x = 0;"; - pattern = "y = 0;"; - invertMatch = true; - checkPatternMatch(grammar, "s", input, pattern, "X4", invertMatch); - } - - @Test public void testAssign() throws Exception { - String grammar = - "grammar X5;\n" + - "s : expr ';'\n" + - //" | 'return' expr ';'\n" + - " ;\n" + - "expr: expr '.' ID\n" + - " | expr '*' expr\n" + - " | expr '=' expr\n" + - " | ID\n" + - " | INT\n" + - " ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "x = 99;"; - String pattern = " = ;"; - checkPatternMatch(grammar, "s", input, pattern, "X5"); - } - - @Test public void testLRecursiveExpr() throws Exception { - String grammar = - "grammar X6;\n" + - "s : expr ';'\n" + - " ;\n" + - "expr: expr '.' 
ID\n" + - " | expr '*' expr\n" + - " | expr '=' expr\n" + - " | ID\n" + - " | INT\n" + - " ;\n" + - "ID : [a-z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "WS : [ \\r\\n\\t]+ -> skip ;\n"; - - String input = "3*4*5"; - String pattern = " * * "; - checkPatternMatch(grammar, "expr", input, pattern, "X6"); - } - - public ParseTreeMatch checkPatternMatch(String grammar, String startRule, - String input, String pattern, - String grammarName) - throws Exception - { - return checkPatternMatch(grammar, startRule, input, pattern, grammarName, false); - } - - public ParseTreeMatch checkPatternMatch(String grammar, String startRule, - String input, String pattern, - String grammarName, boolean invertMatch) - throws Exception - { - String grammarFileName = grammarName+".g4"; - String parserName = grammarName+"Parser"; - String lexerName = grammarName+"Lexer"; - boolean ok = - rawGenerateAndBuildRecognizer(grammarFileName, grammar, parserName, lexerName, false); - assertTrue(ok); - - ParseTree result = execParser(startRule, input, parserName, lexerName); - - ParseTreePattern p = getPattern(grammarName, pattern, startRule); - ParseTreeMatch match = p.match(result); - boolean matched = match.succeeded(); - if ( invertMatch ) assertFalse(matched); - else assertTrue(matched); - return match; - } - - public ParseTreePattern getPattern(String grammarName, String pattern, String ruleName) - throws Exception - { - Class lexerClass = loadLexerClassFromTempDir(grammarName + "Lexer"); - Constructor ctor = lexerClass.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance((CharStream) null); - - Class parserClass = loadParserClassFromTempDir(grammarName + "Parser"); - Constructor pctor = parserClass.getConstructor(TokenStream.class); - Parser parser = pctor.newInstance(new CommonTokenStream(lexer)); - - return parser.compileParseTreePattern(pattern, parser.getRuleIndex(ruleName)); - } - - public ParseTreePatternMatcher getPatternMatcher(String grammarName) - throws Exception - { - Class lexerClass = loadLexerClassFromTempDir(grammarName + "Lexer"); - Constructor ctor = lexerClass.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance((CharStream) null); - - Class parserClass = loadParserClassFromTempDir(grammarName + "Parser"); - Constructor pctor = parserClass.getConstructor(TokenStream.class); - Parser parser = pctor.newInstance(new CommonTokenStream(lexer)); - - return new ParseTreePatternMatcher(lexer, parser); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestParseTrees.java b/tool/test/org/antlr/v4/xtest/TestParseTrees.java deleted file mode 100644 index e1ba23671..000000000 --- a/tool/test/org/antlr/v4/xtest/TestParseTrees.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestParseTrees extends BaseTest { - @Test public void testTokenAndRuleContextString() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : 'x' {System.out.println(getRuleInvocationStack());} ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "x", false); - String expecting = "[a, s]\n(a x)\n"; - assertEquals(expecting, result); - } - - @Test public void testToken2() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : 'x' 'y'\n" + - " ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xy", false); - String expecting = "(a x y)\n"; - assertEquals(expecting, result); - } - - @Test public void test2Alts() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : 'x' | 'y'\n" + - " ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "y", false); - String expecting = "(a y)\n"; - assertEquals(expecting, result); - } - - @Test public void test2AltLoop() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " :r=a ;\n" + - "a : ('x' | 'y')* 'z'\n" + - " ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xyyxyxz", false); - String expecting = "(a x y y x y x z)\n"; - assertEquals(expecting, result); - } - - @Test public void testRuleRef() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : b 'x'\n" + - " ;\n" + - "b : 'y' ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "yx", false); - String expecting = "(a (b y) x)\n"; - assertEquals(expecting, result); - } - - // ERRORS - - @Test public void testExtraToken() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : 'x' 'y'\n" + - " ;\n" + - "Z : 'z'; \n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzy", false); - String expecting = "(a x z y)\n"; // ERRORs not shown. 
z is colored red in tree view - assertEquals(expecting, result); - } - - @Test public void testNoViableAlt() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : 'x' | 'y'\n" + - " ;\n" + - "Z : 'z'; \n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "z", false); - String expecting = "(a z)\n"; - assertEquals(expecting, result); - } - - @Test public void testSync() throws Exception { - String grammar = - "grammar T;\n" + - "s\n" + - "@init {setBuildParseTree(true);}\n" + - "@after {System.out.println($r.ctx.toStringTree(this));}\n" + - " : r=a ;\n" + - "a : 'x' 'y'* '!'\n" + - " ;\n" + - "Z : 'z'; \n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", "xzyy!", false); - String expecting = "(a x z y y !)\n"; - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestParserExec.java b/tool/test/org/antlr/v4/xtest/TestParserExec.java deleted file mode 100644 index 34b4d4815..000000000 --- a/tool/test/org/antlr/v4/xtest/TestParserExec.java +++ /dev/null @@ -1,597 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Ignore; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -/** Test parser execution. - * - * For the non-greedy stuff, the rule is that .* or any other non-greedy loop - * (any + or * loop that has an alternative with '.' in it is automatically - * non-greedy) never sees past the end of the rule containing that loop. - * There is no automatic way to detect when the exit branch of a non-greedy - * loop has seen enough input to determine how much the loop should consume - * yet still allow matching the entire input. 
Of course, this is extremely - * inefficient, particularly for things like - * - * block : '{' (block|.)* '}' ; - * - * that need only see one symbol to know when it hits a '}'. So, I - * came up with a practical solution. During prediction, the ATN - * simulator never fall off the end of a rule to compute the global - * FOLLOW. Instead, we terminate the loop, choosing the exit branch. - * Otherwise, we predict to reenter the loop. For example, input - * "{ foo }" will allow the loop to match foo, but that's it. During - * prediction, the ATN simulator will see that '}' reaches the end of a - * rule that contains a non-greedy loop and stop prediction. It will choose - * the exit branch of the inner loop. So, the way in which you construct - * the rule containing a non-greedy loop dictates how far it will scan ahead. - * Include everything after the non-greedy loop that you know it must scan - * in order to properly make a prediction decision. these beasts are tricky, - * so be careful. don't liberally sprinkle them around your code. - * - * To simulate filter mode, use ( .* (pattern1|pattern2|...) )* - * - * Nongreedy loops match as much input as possible while still allowing - * the remaining input to match. - */ -public class TestParserExec extends BaseTest { - @Test public void testLabels() throws Exception { - String grammar = - "grammar T;\n" + - "a : b1=b b2+=b* b3+=';' ;\n" + - "b : id=ID val+=INT*;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "abc 34;", false); - assertEquals("", found); - assertEquals(null, stderrDuringParse); - } - - /** - * This is a regression test for #270 "Fix operator += applied to a set of - * tokens". - * https://github.com/antlr/antlr4/issues/270 - */ - @Test public void testListLabelOnSet() { - String grammar = - "grammar T;\n" + - "a : b b* ';' ;\n" + - "b : ID val+=(INT | FLOAT)*;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "FLOAT : [0-9]+ '.' 
[0-9]+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "abc 34;", false); - assertEquals("", found); - assertEquals(null, stderrDuringParse); - } - - @Test public void testBasic() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID INT {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "abc 34", false); - assertEquals("abc34\n", found); - } - - @Test public void testAorB() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID {System.out.println(\" alt 1\");}" + - " | INT {System.out.println(\"alt 2\");}" + - ";\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "34", false); - assertEquals("alt 2\n", found); - } - - @Test public void testAPlus() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID+ {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - // force complex decision - @Test public void testAorAPlus() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|ID)+ {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - private static final String ifIfElseGrammarFormat = - "grammar T;\n" + - "start : statement+ ;\n" + - "statement : 'x' | ifStatement;\n" + - "ifStatement : 'if' 'y' statement %s {System.out.println($text);};\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> channel(HIDDEN);\n"; - - @Test public void testIfIfElseGreedyBinding1() throws Exception { - final String input = "if y if y x else x"; - final String expectedInnerBound = "if y x else x\nif y if y x else x\n"; - - String grammar = String.format(ifIfElseGrammarFormat, "('else' statement)?"); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedInnerBound, found); - - } - - @Test public void testIfIfElseGreedyBinding2() throws Exception { - final String input = "if y if y x else x"; - final String expectedInnerBound = "if y x else x\nif y if y x else x\n"; - - String grammar = String.format(ifIfElseGrammarFormat, "('else' statement|)"); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedInnerBound, found); - } - - @Test public void testIfIfElseNonGreedyBinding() throws Exception { - final String input = "if y if y x else x"; - final String expectedOuterBound = "if y x\nif y if y x else x\n"; - - String grammar = String.format(ifIfElseGrammarFormat, "('else' statement)??"); - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedOuterBound, found); - - grammar = String.format(ifIfElseGrammarFormat, "(|'else' statement)"); - found = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, false); - assertEquals(expectedOuterBound, found); - } - - @Test public void testAStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : ID* {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') 
-> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - @Test public void testLL1OptionalBlock() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|{}INT)? {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a", false); - assertEquals("a\n", found); - } - - // force complex decision - @Test public void testAorAStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|ID)* {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a b c", false); - assertEquals("abc\n", found); - } - - @Test public void testAorBPlus() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|INT{;})+ {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a 34 c", false); - assertEquals("a34c\n", found); - } - - @Test public void testAorBStar() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|INT{;})* {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a 34 c", false); - assertEquals("a34c\n", found); - } - - - /** - * This test is meant to detect regressions of bug antlr/antlr4#41. 
- * https://github.com/antlr/antlr4/issues/41 - */ - @Test - public void testOptional1() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - @Test - public void testOptional2() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if x else x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - @Test - public void testOptional3() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - @Test - public void testOptional4() throws Exception { - String grammar = - "grammar T;\n" + - "stat : ifstat | 'x';\n" + - "ifstat : 'if' stat ('else' stat)?;\n" + - "WS : [ \\n\\t]+ -> skip ;" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "stat", "if if x else x", false); - assertEquals("", found); - assertNull(this.stderrDuringParse); - } - - /** - * This test is meant to test the expected solution to antlr/antlr4#42. - * https://github.com/antlr/antlr4/issues/42 - */ - @Test - public void testPredicatedIfIfElse() throws Exception { - String grammar = - "grammar T;\n" + - "s : stmt EOF ;\n" + - "stmt : ifStmt | ID;\n" + - "ifStmt : 'if' ID stmt ('else' stmt | {_input.LA(1) != ELSE}?);\n" + - "ELSE : 'else';\n" + - "ID : [a-zA-Z]+;\n" + - "WS : [ \\n\\t]+ -> skip;\n" - ; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "if x if x a else b", true); - String expecting = ""; - assertEquals(expecting, found); - assertNull(this.stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#118. 
- * https://github.com/antlr/antlr4/issues/118 - */ - @Ignore("Performance impact of passing this test may not be worthwhile") - @Test public void testStartRuleWithoutEOF() { - String grammar = - "grammar T;\n"+ - "s @after {dumpDFA();}\n" + - " : ID | ID INT ID ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "INT : '0'..'9'+ ;\n"+ - "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "abc 34", true); - String expecting = - "Decision 0:\n" + - "s0-ID->s1\n" + - "s1-INT->s2\n" + - "s2-EOF->:s3=>1\n"; // Must point at accept state - assertEquals(expecting, result); - assertNull(this.stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#195 "label 'label' type - * mismatch with previous definition: TOKEN_LABEL!=RULE_LABEL" - * https://github.com/antlr/antlr4/issues/195 - */ - @Test public void testLabelAliasingAcrossLabeledAlternatives() throws Exception { - String grammar = - "grammar T;\n" + - "start : a* EOF;\n" + - "a\n" + - " : label=subrule {System.out.println($label.text);} #One\n" + - " | label='y' {System.out.println($label.text);} #Two\n" + - " ;\n" + - "subrule : 'x';\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", - "xy", false); - assertEquals("x\ny\n", found); - } - - /** - * This is a regression test for antlr/antlr4#334 "BailErrorStrategy: bails - * out on proper input". - * https://github.com/antlr/antlr4/issues/334 - */ - @Test public void testPredictionIssue334() { - String grammar = - "grammar T;\n" + - "\n" + - "file @init{setErrorHandler(new BailErrorStrategy());} \n" + - "@after {System.out.println($ctx.toStringTree(this));}\n" + - " : item (SEMICOLON item)* SEMICOLON? EOF ;\n" + - "item : A B?;\n" + - "\n" + - "\n" + - "\n" + - "SEMICOLON: ';';\n" + - "\n" + - "A : 'a'|'A';\n" + - "B : 'b'|'B';\n" + - "\n" + - "WS : [ \\r\\t\\n]+ -> skip;\n"; - - String input = "a"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "file", input, false); - assertEquals("(file (item a) )\n", found); - assertNull(stderrDuringParse); - } - - /** - * This is a regressino test for antlr/antlr4#299 "Repeating subtree not - * accessible in visitor". - * https://github.com/antlr/antlr4/issues/299 - */ - @Test public void testListLabelForClosureContext() throws Exception { - String grammar = - "grammar T;\n" + - "ifStatement\n" + - "@after { List items = $ctx.elseIfStatement(); }\n" + - " : 'if' expression\n" + - " ( ( 'then'\n" + - " executableStatement*\n" + - " elseIfStatement* // <--- problem is here\n" + - " elseStatement?\n" + - " 'end' 'if'\n" + - " ) | executableStatement )\n" + - " ;\n" + - "\n" + - "elseIfStatement\n" + - " : 'else' 'if' expression 'then' executableStatement*\n" + - " ;\n" - + "expression : 'a' ;\n" - + "executableStatement : 'a' ;\n" - + "elseStatement : 'a' ;\n"; - String input = "a"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "expression", input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This test ensures that {@link ParserATNSimulator} produces a correct - * result when the grammar contains multiple explicit references to - * {@code EOF} inside of parser rules. 
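// Illustrative sketch, not part of the deleted test file above: testPredictionIssue334 installs
// BailErrorStrategy from a grammar @init action; the same pattern can be applied to any runtime
// Parser directly. The tiny grammar, rule name "s" and input below are made up for illustration.
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.ParserInterpreter;
import org.antlr.v4.runtime.misc.ParseCancellationException;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;

class BailErrorSketch {
    static void demo() throws Exception {
        LexerGrammar lg = new LexerGrammar("lexer grammar L;\nA : 'a';\nB : 'b';\nWS : [ \\t\\r\\n]+ -> skip;\n");
        Grammar g = new Grammar("parser grammar P;\ns : A+ EOF;\n", lg);
        CommonTokenStream tokens = new CommonTokenStream(lg.createLexerInterpreter(new ANTLRInputStream("a b a")));
        ParserInterpreter parser = g.createParserInterpreter(tokens);
        parser.setErrorHandler(new BailErrorStrategy()); // abandon the parse on the first syntax error
        try {
            parser.parse(g.rules.get("s").index);
        }
        catch (ParseCancellationException e) {
            // BailErrorStrategy wraps the first RecognitionException and cancels the parse here
        }
    }
}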
- */ - @Test - public void testMultipleEOFHandling() throws Exception { - String grammar = - "grammar T;\n" + - "prog : ('x' | 'x' 'y') EOF EOF;\n"; - String input = "x"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This test ensures that {@link ParserATNSimulator} does not produce a - * {@link StackOverflowError} when it encounters an {@code EOF} transition - * inside a closure. - */ - @Test - public void testEOFInClosure() throws Exception { - String grammar = - "grammar T;\n" + - "prog : stat EOF;\n" + - "stat : 'x' ('y' | EOF)*?;\n"; - String input = "x"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", "prog", input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#561 "Issue with parser - * generation in 4.2.2" - * https://github.com/antlr/antlr4/issues/561 - */ - @Test public void testReferenceToATN() throws Exception { - String grammar = - "grammar T;\n" + - "a : (ID|ATN)* ATN? {System.out.println($text);} ;\n" + - "ID : 'a'..'z'+ ;\n" + - "ATN : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "", false); - assertEquals("\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", "a", - "a 34 c", false); - assertEquals("a34c\n", found); - } - - /** - * This is a regression test for antlr/antlr4#588 "ClassCastException during - * semantic predicate handling". - * https://github.com/antlr/antlr4/issues/588 - */ - @Test public void testFailedPredicateExceptionState() throws Exception { - String grammar = load("Psl.g4", "UTF-8"); - String found = execParser("Psl.g4", grammar, "PslParser", "PslLexer", "floating_constant", " . 234", false); - assertEquals("", found); - assertEquals("line 1:6 rule floating_constant DEC:A floating-point constant cannot have internal white space\n", stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#563 "Inconsistent token - * handling in ANTLR4". - * https://github.com/antlr/antlr4/issues/563 - */ - @Test public void testAlternateQuotes() throws Exception { - String lexerGrammar = - "lexer grammar ModeTagsLexer;\n" + - "\n" + - "// Default mode rules (the SEA)\n" + - "OPEN : '«' -> mode(ISLAND) ; // switch to ISLAND mode\n" + - "TEXT : ~'«'+ ; // clump all text together\n" + - "\n" + - "mode ISLAND;\n" + - "CLOSE : '»' -> mode(DEFAULT_MODE) ; // back to SEA mode \n" + - "SLASH : '/' ;\n" + - "ID : [a-zA-Z]+ ; // match/send ID in tag to parser\n"; - String parserGrammar = - "parser grammar ModeTagsParser;\n" + - "\n" + - "options { tokenVocab=ModeTagsLexer; } // use tokens from ModeTagsLexer.g4\n" + - "\n" + - "file: (tag | TEXT)* ;\n" + - "\n" + - "tag : '«' ID '»'\n" + - " | '«' '/' ID '»'\n" + - " ;"; - - boolean success = rawGenerateAndBuildRecognizer("ModeTagsLexer.g4", - lexerGrammar, - null, - "ModeTagsLexer"); - assertTrue(success); - - String found = execParser("ModeTagsParser.g4", parserGrammar, "ModeTagsParser", "ModeTagsLexer", "file", "", false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#672 "Initialization failed in - * locals". 
- * https://github.com/antlr/antlr4/issues/672 - */ - @Test public void testAttributeValueInitialization() throws Exception { - String grammar = - "grammar Data; \n" + - "\n" + - "file : group+ EOF; \n" + - "\n" + - "group: INT sequence {System.out.println($sequence.values.size());} ; \n" + - "\n" + - "sequence returns [List values = new ArrayList()] \n" + - " locals[List localValues = new ArrayList()]\n" + - " : (INT {$localValues.add($INT.int);})* {$values.addAll($localValues);}\n" + - "; \n" + - "\n" + - "INT : [0-9]+ ; // match integers \n" + - "WS : [ \\t\\n\\r]+ -> skip ; // toss out all whitespace\n"; - - String input = "2 9 10 3 1 2 3"; - String found = execParser("Data.g4", grammar, "DataParser", "DataLexer", "file", input, false); - assertEquals("6\n", found); - assertNull(stderrDuringParse); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestParserInterpreter.java b/tool/test/org/antlr/v4/xtest/TestParserInterpreter.java deleted file mode 100644 index c58619f92..000000000 --- a/tool/test/org/antlr/v4/xtest/TestParserInterpreter.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2013 Terence Parr - * Copyright (c) 2013 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.LexerInterpreter; -import org.antlr.v4.runtime.ParserInterpreter; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - - -public class TestParserInterpreter extends BaseTest { - @Test public void testA() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : A ;", - lg); - - testInterp(lg, g, "s", "a", "(s a)"); - } - - @Test public void testAorB() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "s : A{;} | B ;", - lg); - testInterp(lg, g, "s", "a", "(s a)"); - testInterp(lg, g, "s", "b", "(s b)"); - } - - @Test public void testCall() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "s : t C ;\n" + - "t : A{;} | B ;\n", - lg); - - testInterp(lg, g, "s", "ac", "(s (t a) c)"); - testInterp(lg, g, "s", "bc", "(s (t b) c)"); - } - - @Test public void testCall2() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n"+ - "s : t C ;\n" + - "t : u ;\n" + - "u : A{;} | B ;\n", - lg); - - testInterp(lg, g, "s", "ac", "(s (t (u a)) c)"); - testInterp(lg, g, "s", "bc", "(s (t (u b)) c)"); - } - - @Test public void testOptionalA() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : A? B ;\n", - lg); - - testInterp(lg, g, "s", "b", "(s b)"); - testInterp(lg, g, "s", "ab", "(s a b)"); - } - - @Test public void testOptionalAorB() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : (A{;}|B)? 
C ;\n", - lg); - - testInterp(lg, g, "s", "c", "(s c)"); - testInterp(lg, g, "s", "ac", "(s a c)"); - testInterp(lg, g, "s", "bc", "(s b c)"); - } - - @Test public void testStarA() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : A* B ;\n", - lg); - - testInterp(lg, g, "s", "b", "(s b)"); - testInterp(lg, g, "s", "ab", "(s a b)"); - testInterp(lg, g, "s", "aaaaaab", "(s a a a a a a b)"); - } - - @Test public void testStarAorB() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : (A{;}|B)* C ;\n", - lg); - - testInterp(lg, g, "s", "c", "(s c)"); - testInterp(lg, g, "s", "ac", "(s a c)"); - testInterp(lg, g, "s", "bc", "(s b c)"); - testInterp(lg, g, "s", "abaaabc", "(s a b a a a b c)"); - testInterp(lg, g, "s", "babac", "(s b a b a c)"); - } - - @Test public void testLeftRecursion() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "PLUS : '+' ;\n" + - "MULT : '*' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : e ;\n" + - "e : e MULT e\n" + - " | e PLUS e\n" + - " | A\n" + - " ;\n", - lg); - - testInterp(lg, g, "s", "a", "(s (e a))"); - testInterp(lg, g, "s", "a+a", "(s (e (e a) + (e a)))"); - testInterp(lg, g, "s", "a*a", "(s (e (e a) * (e a)))"); - testInterp(lg, g, "s", "a+a+a", "(s (e (e (e a) + (e a)) + (e a)))"); - testInterp(lg, g, "s", "a*a+a", "(s (e (e (e a) * (e a)) + (e a)))"); - testInterp(lg, g, "s", "a+a*a", "(s (e (e a) + (e (e a) * (e a))))"); - } - - /** - * This is a regression test for antlr/antlr4#461. - * https://github.com/antlr/antlr4/issues/461 - */ - @Test public void testLeftRecursiveStartRule() throws Exception { - LexerGrammar lg = new LexerGrammar( - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n" + - "C : 'c' ;\n" + - "PLUS : '+' ;\n" + - "MULT : '*' ;\n"); - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : e ;\n" + - "e : e MULT e\n" + - " | e PLUS e\n" + - " | A\n" + - " ;\n", - lg); - - testInterp(lg, g, "e", "a", "(e a)"); - testInterp(lg, g, "e", "a+a", "(e (e a) + (e a))"); - testInterp(lg, g, "e", "a*a", "(e (e a) * (e a))"); - testInterp(lg, g, "e", "a+a+a", "(e (e (e a) + (e a)) + (e a))"); - testInterp(lg, g, "e", "a*a+a", "(e (e (e a) * (e a)) + (e a))"); - testInterp(lg, g, "e", "a+a*a", "(e (e a) + (e (e a) * (e a)))"); - } - - void testInterp(LexerGrammar lg, Grammar g, - String startRule, String input, - String parseTree) - { - LexerInterpreter lexEngine = lg.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream tokens = new CommonTokenStream(lexEngine); - ParserInterpreter parser = g.createParserInterpreter(tokens); - ParseTree t = parser.parse(g.rules.get(startRule).index); - System.out.println("parse tree: "+t.toStringTree(parser)); - assertEquals(parseTree, t.toStringTree(parser)); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestParserProfiler.java b/tool/test/org/antlr/v4/xtest/TestParserProfiler.java deleted file mode 100644 index 13cfd3043..000000000 --- a/tool/test/org/antlr/v4/xtest/TestParserProfiler.java +++ /dev/null @@ -1,280 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2014 Terence Parr - * Copyright (c) 2014 Sam Harwell - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.LexerInterpreter; -import org.antlr.v4.runtime.ParserInterpreter; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.atn.DecisionInfo; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.junit.Ignore; -import org.junit.Test; - -import java.util.Arrays; - -import static org.junit.Assert.assertEquals; - -public class TestParserProfiler extends BaseTest { - LexerGrammar lg; - - @Override - public void setUp() throws Exception { - super.setUp(); - lg = new LexerGrammar( - "lexer grammar L;\n" + - "WS : [ \\r\\t\\n]+ -> channel(HIDDEN) ;\n" + - "SEMI : ';' ;\n" + - "DOT : '.' 
;\n" + - "ID : [a-zA-Z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "PLUS : '+' ;\n" + - "MULT : '*' ;\n"); - } - - @Test public void testLL1() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : ';'{}\n" + - " | '.'\n" + - " ;\n", - lg); - - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", ";"); - assertEquals(1, info.length); - String expecting = - "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=1, " + - "SLL_ATNTransitions=1, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; - assertEquals(expecting, info[0].toString()); - } - - @Test public void testLL2() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : ID ';'{}\n" + - " | ID '.'\n" + - " ;\n", - lg); - - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "xyz;"); - assertEquals(1, info.length); - String expecting = - "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, " + - "SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; - assertEquals(expecting, info[0].toString()); - } - - @Test public void testRepeatedLL2() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : ID ';'{}\n" + - " | ID '.'\n" + - " ;\n", - lg); - - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "xyz;", "abc;"); - assertEquals(1, info.length); - String expecting = - "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=4, " + - "SLL_ATNTransitions=2, SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; - assertEquals(expecting, info[0].toString()); - } - - @Test public void test3xLL2() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : ID ';'{}\n" + - " | ID '.'\n" + - " ;\n", - lg); - - // The '.' vs ';' causes another ATN transition - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "xyz;", "abc;", "z."); - assertEquals(1, info.length); - String expecting = - "{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + - "SLL_ATNTransitions=3, SLL_DFATransitions=3, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}"; - assertEquals(expecting, info[0].toString()); - } - - @Test public void testOptional() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : ID ('.' ID)? ';'\n" + - " | ID INT \n" + - " ;\n", - lg); - - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "a.b;"); - assertEquals(2, info.length); - String expecting = - "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=1, " + - "SLL_ATNTransitions=1, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}, " + - "{decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, " + - "SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]"; - assertEquals(expecting, Arrays.toString(info)); - } - - @Test public void test2xOptional() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : ID ('.' ID)? 
';'\n" + - " | ID INT \n" + - " ;\n", - lg); - - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "a.b;", "a.b;"); - assertEquals(2, info.length); - String expecting = - "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=2, " + - "SLL_ATNTransitions=1, SLL_DFATransitions=1, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}, " + - "{decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=4, " + - "SLL_ATNTransitions=2, SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]"; - assertEquals(expecting, Arrays.toString(info)); - } - - @Test public void testContextSensitivity() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n"+ - "a : '.' e ID \n" + - " | ';' e INT ID ;\n" + - "e : INT | ;\n", - lg); - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "a", "; 1 x"); - assertEquals(2, info.length); - String expecting = - "{decision=1, contextSensitivities=1, errors=0, ambiguities=0, SLL_lookahead=3, SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=1, LL_lookahead=3, LL_ATNTransitions=2}"; - assertEquals(expecting, info[1].toString()); - } - - @Ignore - @Test public void testSimpleLanguage() throws Exception { - Grammar g = new Grammar(TestXPath.grammar); - String input = - "def f(x,y) { x = 3+4*1*1/5*1*1+1*1+1; y; ; }\n" + - "def g(x,a,b,c,d,e) { return 1+2*x; }\n"+ - "def h(x) { a=3; x=0+1; return a*x; }\n"; - DecisionInfo[] info = interpAndGetDecisionInfo(g.getImplicitLexer(), g, "prog", input); - String expecting = - "[{decision=0, contextSensitivities=1, errors=0, ambiguities=0, SLL_lookahead=3, " + - "SLL_ATNTransitions=2, SLL_DFATransitions=0, LL_Fallback=1, LL_ATNTransitions=1}]"; - - - assertEquals(expecting, Arrays.toString(info)); - assertEquals(1, info.length); - } - - @Ignore - @Test public void testDeepLookahead() throws Exception { - Grammar g = new Grammar( - "parser grammar T;\n" + - "s : e ';'\n" + - " | e '.' \n" + - " ;\n" + - "e : (ID|INT) ({true}? '+' e)*\n" + // d=1 entry, d=2 bypass - " ;\n", - lg); - - // pred forces to - // ambig and ('+' e)* tail recursion forces lookahead to fall out of e - // any non-precedence predicates are always evaluated as true by the interpreter - DecisionInfo[] info = interpAndGetDecisionInfo(lg, g, "s", "a+b+c;"); - // at "+b" it uses k=1 and enters loop then calls e for b... - // e matches and d=2 uses "+c;" for k=3 - assertEquals(2, info.length); - String expecting = - "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + - "SLL_ATNTransitions=6, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}, " + - "{decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=4, " + - "SLL_ATNTransitions=2, SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]"; - assertEquals(expecting, Arrays.toString(info)); - } - - @Test public void testProfilerGeneratedCode() throws Exception { - String grammar = - "grammar T;\n" + - "s : a+ ID EOF ;\n" + - "a : ID ';'{}\n" + - " | ID '.'\n" + - " ;\n"+ - "WS : [ \\r\\t\\n]+ -> channel(HIDDEN) ;\n" + - "SEMI : ';' ;\n" + - "DOT : '.' 
;\n" + - "ID : [a-zA-Z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "PLUS : '+' ;\n" + - "MULT : '*' ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "xyz;abc;z.q", false, true); - String expecting = - "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, SLL_ATNTransitions=4, " + - "SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}," + - " {decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + - "SLL_ATNTransitions=3, SLL_DFATransitions=3, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]\n"; - assertEquals(expecting, found); - assertEquals(null, stderrDuringParse); - } - - public DecisionInfo[] interpAndGetDecisionInfo( - LexerGrammar lg, Grammar g, - String startRule, String... input) - { - - LexerInterpreter lexEngine = lg.createLexerInterpreter(null); - ParserInterpreter parser = g.createParserInterpreter(null); - parser.setProfile(true); - for (String s : input) { - lexEngine.reset(); - parser.reset(); - lexEngine.setInputStream(new ANTLRInputStream(s)); - CommonTokenStream tokens = new CommonTokenStream(lexEngine); - parser.setInputStream(tokens); - Rule r = g.rules.get(startRule); - if ( r==null ) { - return parser.getParseInfo().getDecisionInfo(); - } - ParserRuleContext t = parser.parse(r.index); -// try { -// Utils.waitForClose(t.inspect(parser).get()); -// } -// catch (Exception e) { -// e.printStackTrace(); -// } -// -// System.out.println(t.toStringTree(parser)); - } - return parser.getParseInfo().getDecisionInfo(); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestPerformance.java b/tool/test/org/antlr/v4/xtest/TestPerformance.java deleted file mode 100644 index b06f925e5..000000000 --- a/tool/test/org/antlr/v4/xtest/TestPerformance.java +++ /dev/null @@ -1,2031 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRFileStream; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.BailErrorStrategy; -import org.antlr.v4.runtime.BaseErrorListener; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.DefaultErrorStrategy; -import org.antlr.v4.runtime.DiagnosticErrorListener; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.ParserInterpreter; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.RecognitionException; -import org.antlr.v4.runtime.Recognizer; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNConfig; -import org.antlr.v4.runtime.atn.ATNConfigSet; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.atn.ParserATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; -import org.antlr.v4.runtime.atn.PredictionMode; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.dfa.DFAState; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.runtime.misc.NotNull; -import org.antlr.v4.runtime.misc.Nullable; -import org.antlr.v4.runtime.misc.ParseCancellationException; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.runtime.tree.ErrorNode; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.runtime.tree.ParseTreeListener; -import org.antlr.v4.runtime.tree.ParseTreeWalker; -import org.antlr.v4.runtime.tree.TerminalNode; -import org.junit.Assert; -import org.junit.Test; - -import java.io.File; -import java.io.FilenameFilter; -import java.io.IOException; -import java.lang.ref.Reference; -import java.lang.ref.SoftReference; -import java.lang.ref.WeakReference; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URL; -import java.net.URLClassLoader; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicIntegerArray; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.zip.CRC32; -import java.util.zip.Checksum; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - -public class TestPerformance extends BaseTest { - /** - * Parse all java files under this package within the JDK_SOURCE_ROOT - * (environment variable or property defined on the Java command line). - */ - private static final String TOP_PACKAGE = "java.lang"; - /** - * {@code true} to load java files from sub-packages of - * {@link #TOP_PACKAGE}. - */ - private static final boolean RECURSIVE = true; - /** - * {@code true} to read all source files from disk into memory before - * starting the parse. 
The default value is {@code true} to help prevent - * drive speed from affecting the performance results. This value may be set - * to {@code false} to support parsing large input sets which would not - * otherwise fit into memory. - */ - private static final boolean PRELOAD_SOURCES = true; - /** - * The encoding to use when reading source files. - */ - private static final String ENCODING = "UTF-8"; - /** - * The maximum number of files to parse in a single iteration. - */ - private static final int MAX_FILES_PER_PARSE_ITERATION = Integer.MAX_VALUE; - - /** - * {@code true} to call {@link Collections#shuffle} on the list of input - * files before the first parse iteration. - */ - private static final boolean SHUFFLE_FILES_AT_START = false; - /** - * {@code true} to call {@link Collections#shuffle} before each parse - * iteration after the first. - */ - private static final boolean SHUFFLE_FILES_AFTER_ITERATIONS = false; - /** - * The instance of {@link Random} passed when calling - * {@link Collections#shuffle}. - */ - private static final Random RANDOM = new Random(); - - /** - * {@code true} to use the Java grammar with expressions in the v4 - * left-recursive syntax (Java-LR.g4). {@code false} to use the standard - * grammar (Java.g4). In either case, the grammar is renamed in the - * temporary directory to Java.g4 before compiling. - */ - private static final boolean USE_LR_GRAMMAR = true; - /** - * {@code true} to specify the {@code -Xforce-atn} option when generating - * the grammar, forcing all decisions in {@code JavaParser} to be handled by - * {@link ParserATNSimulator#adaptivePredict}. - */ - private static final boolean FORCE_ATN = false; - /** - * {@code true} to specify the {@code -atn} option when generating the - * grammar. This will cause ANTLR to export the ATN for each decision as a - * DOT (GraphViz) file. - */ - private static final boolean EXPORT_ATN_GRAPHS = true; - /** - * {@code true} to specify the {@code -XdbgST} option when generating the - * grammar. - */ - private static final boolean DEBUG_TEMPLATES = false; - /** - * {@code true} to specify the {@code -XdbgSTWait} option when generating the - * grammar. - */ - private static final boolean DEBUG_TEMPLATES_WAIT = DEBUG_TEMPLATES; - /** - * {@code true} to delete temporary (generated and compiled) files when the - * test completes. - */ - private static final boolean DELETE_TEMP_FILES = true; - /** - * {@code true} to use a {@link ParserInterpreter} for parsing instead of - * generated parser. - */ - private static final boolean USE_PARSER_INTERPRETER = false; - - /** - * {@code true} to call {@link System#gc} and then wait for 5 seconds at the - * end of the test to make it easier for a profiler to grab a heap dump at - * the end of the test run. - */ - private static final boolean PAUSE_FOR_HEAP_DUMP = false; - - /** - * Parse each file with {@code JavaParser.compilationUnit}. - */ - private static final boolean RUN_PARSER = true; - /** - * {@code true} to use {@link BailErrorStrategy}, {@code false} to use - * {@link DefaultErrorStrategy}. - */ - private static final boolean BAIL_ON_ERROR = false; - /** - * {@code true} to compute a checksum for verifying consistency across - * optimizations and multiple passes. - */ - private static final boolean COMPUTE_CHECKSUM = true; - /** - * This value is passed to {@link Parser#setBuildParseTree}. 
- */ - private static final boolean BUILD_PARSE_TREES = false; - /** - * Use - * {@link ParseTreeWalker#DEFAULT}{@code .}{@link ParseTreeWalker#walk walk} - * with the {@code JavaParserBaseListener} to show parse tree walking - * overhead. If {@link #BUILD_PARSE_TREES} is {@code false}, the listener - * will instead be called during the parsing process via - * {@link Parser#addParseListener}. - */ - private static final boolean BLANK_LISTENER = false; - - /** - * Shows the number of {@link DFAState} and {@link ATNConfig} instances in - * the DFA cache at the end of each pass. If {@link #REUSE_LEXER_DFA} and/or - * {@link #REUSE_PARSER_DFA} are false, the corresponding instance numbers - * will only apply to one file (the last file if {@link #NUMBER_OF_THREADS} - * is 0, otherwise the last file which was parsed on the first thread). - */ - private static final boolean SHOW_DFA_STATE_STATS = true; - /** - * If {@code true}, the DFA state statistics report includes a breakdown of - * the number of DFA states contained in each decision (with rule names). - */ - private static final boolean DETAILED_DFA_STATE_STATS = true; - - /** - * Specify the {@link PredictionMode} used by the - * {@link ParserATNSimulator}. If {@link #TWO_STAGE_PARSING} is - * {@code true}, this value only applies to the second stage, as the first - * stage will always use {@link PredictionMode#SLL}. - */ - private static final PredictionMode PREDICTION_MODE = PredictionMode.LL; - - private static final boolean TWO_STAGE_PARSING = true; - - private static final boolean SHOW_CONFIG_STATS = false; - - /** - * If {@code true}, detailed statistics for the number of DFA edges were - * taken while parsing each file, as well as the number of DFA edges which - * required on-the-fly computation. - */ - private static final boolean COMPUTE_TRANSITION_STATS = false; - private static final boolean SHOW_TRANSITION_STATS_PER_FILE = false; - /** - * If {@code true}, the transition statistics will be adjusted to a running - * total before reporting the final results. - */ - private static final boolean TRANSITION_RUNNING_AVERAGE = false; - /** - * If {@code true}, transition statistics will be weighted according to the - * total number of transitions taken during the parsing of each file. - */ - private static final boolean TRANSITION_WEIGHTED_AVERAGE = false; - - /** - * If {@code true}, after each pass a summary of the time required to parse - * each file will be printed. - */ - private static final boolean COMPUTE_TIMING_STATS = false; - /** - * If {@code true}, the timing statistics for {@link #COMPUTE_TIMING_STATS} - * will be cumulative (i.e. the time reported for the nth file will - * be the total time required to parse the first n files). - */ - private static final boolean TIMING_CUMULATIVE = false; - /** - * If {@code true}, the timing statistics will include the parser only. This - * flag allows for targeted measurements, and helps eliminate variance when - * {@link #PRELOAD_SOURCES} is {@code false}. - *

- * This flag has no impact when {@link #RUN_PARSER} is {@code false}. - */ - private static final boolean TIME_PARSE_ONLY = false; - - /** - * When {@code true}, messages will be printed to {@link System#err} when - * the first stage (SLL) parsing resulted in a syntax error. This option is - * ignored when {@link #TWO_STAGE_PARSING} is {@code false}. - */ - private static final boolean REPORT_SECOND_STAGE_RETRY = true; - private static final boolean REPORT_SYNTAX_ERRORS = true; - private static final boolean REPORT_AMBIGUITIES = false; - private static final boolean REPORT_FULL_CONTEXT = false; - private static final boolean REPORT_CONTEXT_SENSITIVITY = REPORT_FULL_CONTEXT; - - /** - * If {@code true}, a single {@code JavaLexer} will be used, and - * {@link Lexer#setInputStream} will be called to initialize it for each - * source file. Otherwise, a new instance will be created for each file. - */ - private static final boolean REUSE_LEXER = false; - /** - * If {@code true}, a single DFA will be used for lexing which is shared - * across all threads and files. Otherwise, each file will be lexed with its - * own DFA which is accomplished by creating one ATN instance per thread and - * clearing its DFA cache before lexing each file. - */ - private static final boolean REUSE_LEXER_DFA = true; - /** - * If {@code true}, a single {@code JavaParser} will be used, and - * {@link Parser#setInputStream} will be called to initialize it for each - * source file. Otherwise, a new instance will be created for each file. - */ - private static final boolean REUSE_PARSER = false; - /** - * If {@code true}, a single DFA will be used for parsing which is shared - * across all threads and files. Otherwise, each file will be parsed with - * its own DFA which is accomplished by creating one ATN instance per thread - * and clearing its DFA cache before parsing each file. - */ - private static final boolean REUSE_PARSER_DFA = true; - /** - * If {@code true}, the shared lexer and parser are reset after each pass. - * If {@code false}, all passes after the first will be fully "warmed up", - * which makes them faster and can compare them to the first warm-up pass, - * but it will not distinguish bytecode load/JIT time from warm-up time - * during the first pass. - */ - private static final boolean CLEAR_DFA = false; - /** - * Total number of passes to make over the source. - */ - private static final int PASSES = 4; - - /** - * This option controls the granularity of multi-threaded parse operations. - * If {@code true}, the parsing operation will be parallelized across files; - * otherwise the parsing will be parallelized across multiple iterations. - */ - private static final boolean FILE_GRANULARITY = true; - - /** - * Number of parser threads to use. 
- */ - private static final int NUMBER_OF_THREADS = 1; - - private static final Lexer[] sharedLexers = new Lexer[NUMBER_OF_THREADS]; - - private static final Parser[] sharedParsers = new Parser[NUMBER_OF_THREADS]; - - private static final ParseTreeListener[] sharedListeners = new ParseTreeListener[NUMBER_OF_THREADS]; - - private static final long[][] totalTransitionsPerFile; - private static final long[][] computedTransitionsPerFile; - static { - if (COMPUTE_TRANSITION_STATS) { - totalTransitionsPerFile = new long[PASSES][]; - computedTransitionsPerFile = new long[PASSES][]; - } else { - totalTransitionsPerFile = null; - computedTransitionsPerFile = null; - } - } - - private static final long[][][] decisionInvocationsPerFile; - private static final long[][][] fullContextFallbackPerFile; - private static final long[][][] nonSllPerFile; - private static final long[][][] totalTransitionsPerDecisionPerFile; - private static final long[][][] computedTransitionsPerDecisionPerFile; - private static final long[][][] fullContextTransitionsPerDecisionPerFile; - static { - if (COMPUTE_TRANSITION_STATS && DETAILED_DFA_STATE_STATS) { - decisionInvocationsPerFile = new long[PASSES][][]; - fullContextFallbackPerFile = new long[PASSES][][]; - nonSllPerFile = new long[PASSES][][]; - totalTransitionsPerDecisionPerFile = new long[PASSES][][]; - computedTransitionsPerDecisionPerFile = new long[PASSES][][]; - fullContextTransitionsPerDecisionPerFile = new long[PASSES][][]; - } else { - decisionInvocationsPerFile = null; - fullContextFallbackPerFile = null; - nonSllPerFile = null; - totalTransitionsPerDecisionPerFile = null; - computedTransitionsPerDecisionPerFile = null; - fullContextTransitionsPerDecisionPerFile = null; - } - } - - private static final long[][] timePerFile; - private static final int[][] tokensPerFile; - static { - if (COMPUTE_TIMING_STATS) { - timePerFile = new long[PASSES][]; - tokensPerFile = new int[PASSES][]; - } else { - timePerFile = null; - tokensPerFile = null; - } - } - - private final AtomicIntegerArray tokenCount = new AtomicIntegerArray(PASSES); - - @Test - //@org.junit.Ignore - public void compileJdk() throws IOException, InterruptedException, ExecutionException { - String jdkSourceRoot = getSourceRoot("JDK"); - assertTrue("The JDK_SOURCE_ROOT environment variable must be set for performance testing.", jdkSourceRoot != null && !jdkSourceRoot.isEmpty()); - - compileJavaParser(USE_LR_GRAMMAR); - final String lexerName = "JavaLexer"; - final String parserName = "JavaParser"; - final String listenerName = "JavaBaseListener"; - final String entryPoint = "compilationUnit"; - final ParserFactory factory = getParserFactory(lexerName, parserName, listenerName, entryPoint); - - if (!TOP_PACKAGE.isEmpty()) { - jdkSourceRoot = jdkSourceRoot + '/' + TOP_PACKAGE.replace('.', '/'); - } - - File directory = new File(jdkSourceRoot); - assertTrue(directory.isDirectory()); - - FilenameFilter filesFilter = FilenameFilters.extension(".java", false); - FilenameFilter directoriesFilter = FilenameFilters.ALL_FILES; - final List sources = loadSources(directory, filesFilter, directoriesFilter, RECURSIVE); - - for (int i = 0; i < PASSES; i++) { - if (COMPUTE_TRANSITION_STATS) { - totalTransitionsPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; - computedTransitionsPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; - - if (DETAILED_DFA_STATE_STATS) { - decisionInvocationsPerFile[i] = new long[Math.min(sources.size(), 
MAX_FILES_PER_PARSE_ITERATION)][]; - fullContextFallbackPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; - nonSllPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; - totalTransitionsPerDecisionPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; - computedTransitionsPerDecisionPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; - fullContextTransitionsPerDecisionPerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)][]; - } - } - - if (COMPUTE_TIMING_STATS) { - timePerFile[i] = new long[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; - tokensPerFile[i] = new int[Math.min(sources.size(), MAX_FILES_PER_PARSE_ITERATION)]; - } - } - - System.out.format("Located %d source files.%n", sources.size()); - System.out.print(getOptionsDescription(TOP_PACKAGE)); - - ExecutorService executorService = Executors.newFixedThreadPool(FILE_GRANULARITY ? 1 : NUMBER_OF_THREADS, new NumberedThreadFactory()); - - List> passResults = new ArrayList>(); - passResults.add(executorService.submit(new Runnable() { - @Override - public void run() { - try { - parse1(0, factory, sources, SHUFFLE_FILES_AT_START); - } catch (InterruptedException ex) { - Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); - } - } - })); - for (int i = 0; i < PASSES - 1; i++) { - final int currentPass = i + 1; - passResults.add(executorService.submit(new Runnable() { - @Override - public void run() { - if (CLEAR_DFA) { - int index = FILE_GRANULARITY ? 0 : ((NumberedThread)Thread.currentThread()).getThreadNumber(); - if (sharedLexers.length > 0 && sharedLexers[index] != null) { - ATN atn = sharedLexers[index].getATN(); - for (int j = 0; j < sharedLexers[index].getInterpreter().decisionToDFA.length; j++) { - sharedLexers[index].getInterpreter().decisionToDFA[j] = new DFA(atn.getDecisionState(j), j); - } - } - - if (sharedParsers.length > 0 && sharedParsers[index] != null) { - ATN atn = sharedParsers[index].getATN(); - for (int j = 0; j < sharedParsers[index].getInterpreter().decisionToDFA.length; j++) { - sharedParsers[index].getInterpreter().decisionToDFA[j] = new DFA(atn.getDecisionState(j), j); - } - } - - if (FILE_GRANULARITY) { - Arrays.fill(sharedLexers, null); - Arrays.fill(sharedParsers, null); - } - } - - try { - parse2(currentPass, factory, sources, SHUFFLE_FILES_AFTER_ITERATIONS); - } catch (InterruptedException ex) { - Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); - } - } - })); - } - - for (Future passResult : passResults) { - passResult.get(); - } - - executorService.shutdown(); - executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - - if (COMPUTE_TRANSITION_STATS && SHOW_TRANSITION_STATS_PER_FILE) { - computeTransitionStatistics(); - } - - if (COMPUTE_TIMING_STATS) { - computeTimingStatistics(); - } - - sources.clear(); - if (PAUSE_FOR_HEAP_DUMP) { - System.gc(); - System.out.println("Pausing before application exit."); - try { - Thread.sleep(4000); - } catch (InterruptedException ex) { - Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); - } - } - } - - /** - * Compute and print ATN/DFA transition statistics. 
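// Illustrative sketch, not part of the deleted test file above: the CLEAR_DFA branch above resets a
// shared recognizer between passes by replacing each per-decision DFA with an empty one rooted at
// its decision state. The same logic pulled out as a helper; the Parser parameter stands in for any
// generated or interpreted parser.
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.dfa.DFA;

class DfaResetSketch {
    static void clearParserDFA(Parser parser) {
        ATN atn = parser.getATN();
        DFA[] decisionToDFA = parser.getInterpreter().decisionToDFA;
        for (int d = 0; d < decisionToDFA.length; d++) {
            // start each decision over with an empty DFA so the next pass warms up from scratch
            decisionToDFA[d] = new DFA(atn.getDecisionState(d), d);
        }
    }
}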
- */ - private void computeTransitionStatistics() { - if (TRANSITION_RUNNING_AVERAGE) { - for (int i = 0; i < PASSES; i++) { - long[] data = computedTransitionsPerFile[i]; - for (int j = 0; j < data.length - 1; j++) { - data[j + 1] += data[j]; - } - - data = totalTransitionsPerFile[i]; - for (int j = 0; j < data.length - 1; j++) { - data[j + 1] += data[j]; - } - } - } - - long[] sumNum = new long[totalTransitionsPerFile[0].length]; - long[] sumDen = new long[totalTransitionsPerFile[0].length]; - double[] sumNormalized = new double[totalTransitionsPerFile[0].length]; - for (int i = 0; i < PASSES; i++) { - long[] num = computedTransitionsPerFile[i]; - long[] den = totalTransitionsPerFile[i]; - for (int j = 0; j < den.length; j++) { - sumNum[j] += num[j]; - sumDen[j] += den[j]; - if (den[j] > 0) { - sumNormalized[j] += (double)num[j] / (double)den[j]; - } - } - } - - double[] weightedAverage = new double[totalTransitionsPerFile[0].length]; - double[] average = new double[totalTransitionsPerFile[0].length]; - for (int i = 0; i < average.length; i++) { - if (sumDen[i] > 0) { - weightedAverage[i] = (double)sumNum[i] / (double)sumDen[i]; - } - else { - weightedAverage[i] = 0; - } - - average[i] = sumNormalized[i] / PASSES; - } - - double[] low95 = new double[totalTransitionsPerFile[0].length]; - double[] high95 = new double[totalTransitionsPerFile[0].length]; - double[] low67 = new double[totalTransitionsPerFile[0].length]; - double[] high67 = new double[totalTransitionsPerFile[0].length]; - double[] stddev = new double[totalTransitionsPerFile[0].length]; - for (int i = 0; i < stddev.length; i++) { - double[] points = new double[PASSES]; - for (int j = 0; j < PASSES; j++) { - long totalTransitions = totalTransitionsPerFile[j][i]; - if (totalTransitions > 0) { - points[j] = ((double)computedTransitionsPerFile[j][i] / (double)totalTransitionsPerFile[j][i]); - } - else { - points[j] = 0; - } - } - - Arrays.sort(points); - - final double averageValue = TRANSITION_WEIGHTED_AVERAGE ? weightedAverage[i] : average[i]; - double value = 0; - for (int j = 0; j < PASSES; j++) { - double diff = points[j] - averageValue; - value += diff * diff; - } - - int ignoreCount95 = (int)Math.round(PASSES * (1 - 0.95) / 2.0); - int ignoreCount67 = (int)Math.round(PASSES * (1 - 0.667) / 2.0); - low95[i] = points[ignoreCount95]; - high95[i] = points[points.length - 1 - ignoreCount95]; - low67[i] = points[ignoreCount67]; - high67[i] = points[points.length - 1 - ignoreCount67]; - stddev[i] = Math.sqrt(value / PASSES); - } - - System.out.format("File\tAverage\tStd. Dev.\t95%% Low\t95%% High\t66.7%% Low\t66.7%% High%n"); - for (int i = 0; i < stddev.length; i++) { - final double averageValue = TRANSITION_WEIGHTED_AVERAGE ? weightedAverage[i] : average[i]; - System.out.format("%d\t%e\t%e\t%e\t%e\t%e\t%e%n", i + 1, averageValue, stddev[i], averageValue - low95[i], high95[i] - averageValue, averageValue - low67[i], high67[i] - averageValue); - } - } - - /** - * Compute and print timing statistics. 
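// Illustrative sketch, not part of the deleted test file above: both statistics routines in this
// file follow the same per-file recipe, averaging the PASSES data points, taking the population
// standard deviation, then sorting the points and trimming the tails for the 95% band. A condensed,
// unweighted version of that calculation for a single file's points.
class StatsSketch {
    /** points holds one normalized measurement per pass; the array is sorted in place. */
    static double[] summarize(double[] points) {
        double avg = 0;
        for (double p : points) avg += p;
        avg /= points.length;

        double variance = 0;
        for (double p : points) variance += (p - avg) * (p - avg);
        double stddev = Math.sqrt(variance / points.length);

        java.util.Arrays.sort(points);
        int ignore95 = (int) Math.round(points.length * (1 - 0.95) / 2.0);
        double low95 = points[ignore95];
        double high95 = points[points.length - 1 - ignore95];
        return new double[] { avg, stddev, low95, high95 };
    }
}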
- */ - private void computeTimingStatistics() { - if (TIMING_CUMULATIVE) { - for (int i = 0; i < PASSES; i++) { - long[] data = timePerFile[i]; - for (int j = 0; j < data.length - 1; j++) { - data[j + 1] += data[j]; - } - - int[] data2 = tokensPerFile[i]; - for (int j = 0; j < data2.length - 1; j++) { - data2[j + 1] += data2[j]; - } - } - } - - final int fileCount = timePerFile[0].length; - double[] sum = new double[fileCount]; - for (int i = 0; i < PASSES; i++) { - long[] data = timePerFile[i]; - int[] tokenData = tokensPerFile[i]; - for (int j = 0; j < data.length; j++) { - sum[j] += (double)data[j] / (double)tokenData[j]; - } - } - - double[] average = new double[fileCount]; - for (int i = 0; i < average.length; i++) { - average[i] = sum[i] / PASSES; - } - - double[] low95 = new double[fileCount]; - double[] high95 = new double[fileCount]; - double[] low67 = new double[fileCount]; - double[] high67 = new double[fileCount]; - double[] stddev = new double[fileCount]; - for (int i = 0; i < stddev.length; i++) { - double[] points = new double[PASSES]; - for (int j = 0; j < PASSES; j++) { - points[j] = (double)timePerFile[j][i] / (double)tokensPerFile[j][i]; - } - - Arrays.sort(points); - - final double averageValue = average[i]; - double value = 0; - for (int j = 0; j < PASSES; j++) { - double diff = points[j] - averageValue; - value += diff * diff; - } - - int ignoreCount95 = (int)Math.round(PASSES * (1 - 0.95) / 2.0); - int ignoreCount67 = (int)Math.round(PASSES * (1 - 0.667) / 2.0); - low95[i] = points[ignoreCount95]; - high95[i] = points[points.length - 1 - ignoreCount95]; - low67[i] = points[ignoreCount67]; - high67[i] = points[points.length - 1 - ignoreCount67]; - stddev[i] = Math.sqrt(value / PASSES); - } - - System.out.format("File\tAverage\tStd. Dev.\t95%% Low\t95%% High\t66.7%% Low\t66.7%% High%n"); - for (int i = 0; i < stddev.length; i++) { - final double averageValue = average[i]; - System.out.format("%d\t%e\t%e\t%e\t%e\t%e\t%e%n", i + 1, averageValue, stddev[i], averageValue - low95[i], high95[i] - averageValue, averageValue - low67[i], high67[i] - averageValue); - } - } - - private String getSourceRoot(String prefix) { - String sourceRoot = System.getenv(prefix+"_SOURCE_ROOT"); - if (sourceRoot == null) { - sourceRoot = System.getProperty(prefix+"_SOURCE_ROOT"); - } - - return sourceRoot; - } - - @Override - protected void eraseTempDir() { - if (DELETE_TEMP_FILES) { - super.eraseTempDir(); - } - } - - public static String getOptionsDescription(String topPackage) { - StringBuilder builder = new StringBuilder(); - builder.append("Input="); - if (topPackage.isEmpty()) { - builder.append("*"); - } - else { - builder.append(topPackage).append(".*"); - } - - builder.append(", Grammar=").append(USE_LR_GRAMMAR ? "LR" : "Standard"); - builder.append(", ForceAtn=").append(FORCE_ATN); - - builder.append(newline); - - builder.append("Op=Lex").append(RUN_PARSER ? "+Parse" : " only"); - builder.append(", Strategy=").append(BAIL_ON_ERROR ? BailErrorStrategy.class.getSimpleName() : DefaultErrorStrategy.class.getSimpleName()); - builder.append(", BuildParseTree=").append(BUILD_PARSE_TREES); - builder.append(", WalkBlankListener=").append(BLANK_LISTENER); - - builder.append(newline); - - builder.append("Lexer=").append(REUSE_LEXER ? "setInputStream" : "newInstance"); - builder.append(", Parser=").append(REUSE_PARSER ? "setInputStream" : "newInstance"); - builder.append(", AfterPass=").append(CLEAR_DFA ? 
"newInstance" : "setInputStream"); - - builder.append(newline); - - return builder.toString(); - } - - /** - * This method is separate from {@link #parse2} so the first pass can be distinguished when analyzing - * profiler results. - */ - protected void parse1(int currentPass, ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { - if (FILE_GRANULARITY) { - System.gc(); - } - - parseSources(currentPass, factory, sources, shuffleSources); - } - - /** - * This method is separate from {@link #parse1} so the first pass can be distinguished when analyzing - * profiler results. - */ - protected void parse2(int currentPass, ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { - if (FILE_GRANULARITY) { - System.gc(); - } - - parseSources(currentPass, factory, sources, shuffleSources); - } - - protected List loadSources(File directory, FilenameFilter filesFilter, FilenameFilter directoriesFilter, boolean recursive) { - List result = new ArrayList(); - loadSources(directory, filesFilter, directoriesFilter, recursive, result); - return result; - } - - protected void loadSources(File directory, FilenameFilter filesFilter, FilenameFilter directoriesFilter, boolean recursive, Collection result) { - assert directory.isDirectory(); - - File[] sources = directory.listFiles(filesFilter); - for (File file : sources) { - if (!file.isFile()) { - continue; - } - - result.add(new InputDescriptor(file.getAbsolutePath())); - } - - if (recursive) { - File[] children = directory.listFiles(directoriesFilter); - for (File child : children) { - if (child.isDirectory()) { - loadSources(child, filesFilter, directoriesFilter, true, result); - } - } - } - } - - int configOutputSize = 0; - - @SuppressWarnings("unused") - protected void parseSources(final int currentPass, final ParserFactory factory, Collection sources, boolean shuffleSources) throws InterruptedException { - if (shuffleSources) { - List sourcesList = new ArrayList(sources); - synchronized (RANDOM) { - Collections.shuffle(sourcesList, RANDOM); - } - - sources = sourcesList; - } - - long startTime = System.nanoTime(); - tokenCount.set(currentPass, 0); - int inputSize = 0; - int inputCount = 0; - - Collection> results = new ArrayList>(); - ExecutorService executorService; - if (FILE_GRANULARITY) { - executorService = Executors.newFixedThreadPool(FILE_GRANULARITY ? NUMBER_OF_THREADS : 1, new NumberedThreadFactory()); - } else { - executorService = Executors.newSingleThreadExecutor(new FixedThreadNumberFactory(((NumberedThread)Thread.currentThread()).getThreadNumber())); - } - - for (InputDescriptor inputDescriptor : sources) { - if (inputCount >= MAX_FILES_PER_PARSE_ITERATION) { - break; - } - - final CharStream input = inputDescriptor.getInputStream(); - input.seek(0); - inputSize += input.size(); - inputCount++; - Future futureChecksum = executorService.submit(new Callable() { - @Override - public FileParseResult call() { - // this incurred a great deal of overhead and was causing significant variations in performance results. 
- //System.out.format("Parsing file %s\n", input.getSourceName()); - try { - return factory.parseFile(input, currentPass, ((NumberedThread)Thread.currentThread()).getThreadNumber()); - } catch (IllegalStateException ex) { - ex.printStackTrace(System.err); - } catch (Throwable t) { - t.printStackTrace(System.err); - } - - return null; - } - }); - - results.add(futureChecksum); - } - - Checksum checksum = new CRC32(); - int currentIndex = -1; - for (Future future : results) { - currentIndex++; - int fileChecksum = 0; - try { - FileParseResult fileResult = future.get(); - if (COMPUTE_TRANSITION_STATS) { - totalTransitionsPerFile[currentPass][currentIndex] = sum(fileResult.parserTotalTransitions); - computedTransitionsPerFile[currentPass][currentIndex] = sum(fileResult.parserComputedTransitions); - - if (DETAILED_DFA_STATE_STATS) { - decisionInvocationsPerFile[currentPass][currentIndex] = fileResult.decisionInvocations; - fullContextFallbackPerFile[currentPass][currentIndex] = fileResult.fullContextFallback; - nonSllPerFile[currentPass][currentIndex] = fileResult.nonSll; - totalTransitionsPerDecisionPerFile[currentPass][currentIndex] = fileResult.parserTotalTransitions; - computedTransitionsPerDecisionPerFile[currentPass][currentIndex] = fileResult.parserComputedTransitions; - fullContextTransitionsPerDecisionPerFile[currentPass][currentIndex] = fileResult.parserFullContextTransitions; - } - } - - if (COMPUTE_TIMING_STATS) { - timePerFile[currentPass][currentIndex] = fileResult.endTime - fileResult.startTime; - tokensPerFile[currentPass][currentIndex] = fileResult.tokenCount; - } - - fileChecksum = fileResult.checksum; - } catch (ExecutionException ex) { - Logger.getLogger(TestPerformance.class.getName()).log(Level.SEVERE, null, ex); - } - - if (COMPUTE_CHECKSUM) { - updateChecksum(checksum, fileChecksum); - } - } - - executorService.shutdown(); - executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - - System.out.format("%d. Total parse time for %d files (%d KB, %d tokens%s): %.0fms%n", - currentPass + 1, - inputCount, - inputSize / 1024, - tokenCount.get(currentPass), - COMPUTE_CHECKSUM ? String.format(", checksum 0x%8X", checksum.getValue()) : "", - (double)(System.nanoTime() - startTime) / 1000000.0); - - if (sharedLexers.length > 0) { - int index = FILE_GRANULARITY ? 
0 : ((NumberedThread)Thread.currentThread()).getThreadNumber(); - Lexer lexer = sharedLexers[index]; - final LexerATNSimulator lexerInterpreter = lexer.getInterpreter(); - final DFA[] modeToDFA = lexerInterpreter.decisionToDFA; - if (SHOW_DFA_STATE_STATS) { - int states = 0; - int configs = 0; - Set uniqueConfigs = new HashSet(); - - for (int i = 0; i < modeToDFA.length; i++) { - DFA dfa = modeToDFA[i]; - if (dfa == null) { - continue; - } - - states += dfa.states.size(); - for (DFAState state : dfa.states.values()) { - configs += state.configs.size(); - uniqueConfigs.addAll(state.configs); - } - } - - System.out.format("There are %d lexer DFAState instances, %d configs (%d unique).%n", states, configs, uniqueConfigs.size()); - - if (DETAILED_DFA_STATE_STATS) { - System.out.format("\tMode\tStates\tConfigs\tMode%n"); - for (int i = 0; i < modeToDFA.length; i++) { - DFA dfa = modeToDFA[i]; - if (dfa == null || dfa.states.isEmpty()) { - continue; - } - - int modeConfigs = 0; - for (DFAState state : dfa.states.values()) { - modeConfigs += state.configs.size(); - } - - String modeName = lexer.getModeNames()[i]; - System.out.format("\t%d\t%d\t%d\t%s%n", dfa.decision, dfa.states.size(), modeConfigs, modeName); - } - } - } - } - - if (RUN_PARSER && sharedParsers.length > 0) { - int index = FILE_GRANULARITY ? 0 : ((NumberedThread)Thread.currentThread()).getThreadNumber(); - Parser parser = sharedParsers[index]; - // make sure the individual DFAState objects actually have unique ATNConfig arrays - final ParserATNSimulator interpreter = parser.getInterpreter(); - final DFA[] decisionToDFA = interpreter.decisionToDFA; - - if (SHOW_DFA_STATE_STATS) { - int states = 0; - int configs = 0; - Set uniqueConfigs = new HashSet(); - - for (int i = 0; i < decisionToDFA.length; i++) { - DFA dfa = decisionToDFA[i]; - if (dfa == null) { - continue; - } - - states += dfa.states.size(); - for (DFAState state : dfa.states.values()) { - configs += state.configs.size(); - uniqueConfigs.addAll(state.configs); - } - } - - System.out.format("There are %d parser DFAState instances, %d configs (%d unique).%n", states, configs, uniqueConfigs.size()); - - if (DETAILED_DFA_STATE_STATS) { - if (COMPUTE_TRANSITION_STATS) { - System.out.format("\tDecision\tStates\tConfigs\tPredict (ALL)\tPredict (LL)\tNon-SLL\tTransitions\tTransitions (ATN)\tTransitions (LL)\tLA (SLL)\tLA (LL)\tRule%n"); - } - else { - System.out.format("\tDecision\tStates\tConfigs\tRule%n"); - } - - for (int i = 0; i < decisionToDFA.length; i++) { - DFA dfa = decisionToDFA[i]; - if (dfa == null || dfa.states.isEmpty()) { - continue; - } - - int decisionConfigs = 0; - for (DFAState state : dfa.states.values()) { - decisionConfigs += state.configs.size(); - } - - String ruleName = parser.getRuleNames()[parser.getATN().decisionToState.get(dfa.decision).ruleIndex]; - - long calls = 0; - long fullContextCalls = 0; - long nonSllCalls = 0; - long transitions = 0; - long computedTransitions = 0; - long fullContextTransitions = 0; - double lookahead = 0; - double fullContextLookahead = 0; - String formatString; - if (COMPUTE_TRANSITION_STATS) { - for (long[] data : decisionInvocationsPerFile[currentPass]) { - calls += data[i]; - } - - for (long[] data : fullContextFallbackPerFile[currentPass]) { - fullContextCalls += data[i]; - } - - for (long[] data : nonSllPerFile[currentPass]) { - nonSllCalls += data[i]; - } - - for (long[] data : totalTransitionsPerDecisionPerFile[currentPass]) { - transitions += data[i]; - } - - for (long[] data : 
computedTransitionsPerDecisionPerFile[currentPass]) { - computedTransitions += data[i]; - } - - for (long[] data : fullContextTransitionsPerDecisionPerFile[currentPass]) { - fullContextTransitions += data[i]; - } - - if (calls > 0) { - lookahead = (double)(transitions - fullContextTransitions) / (double)calls; - } - - if (fullContextCalls > 0) { - fullContextLookahead = (double)fullContextTransitions / (double)fullContextCalls; - } - - formatString = "\t%1$d\t%2$d\t%3$d\t%4$d\t%5$d\t%6$d\t%7$d\t%8$d\t%9$d\t%10$f\t%11$f\t%12$s%n"; - } - else { - calls = 0; - formatString = "\t%1$d\t%2$d\t%3$d\t%12$s%n"; - } - - System.out.format(formatString, dfa.decision, dfa.states.size(), decisionConfigs, calls, fullContextCalls, nonSllCalls, transitions, computedTransitions, fullContextTransitions, lookahead, fullContextLookahead, ruleName); - } - } - } - - int localDfaCount = 0; - int globalDfaCount = 0; - int localConfigCount = 0; - int globalConfigCount = 0; - int[] contextsInDFAState = new int[0]; - - for (int i = 0; i < decisionToDFA.length; i++) { - DFA dfa = decisionToDFA[i]; - if (dfa == null) { - continue; - } - - if (SHOW_CONFIG_STATS) { - for (DFAState state : dfa.states.keySet()) { - if (state.configs.size() >= contextsInDFAState.length) { - contextsInDFAState = Arrays.copyOf(contextsInDFAState, state.configs.size() + 1); - } - - if (state.isAcceptState) { - boolean hasGlobal = false; - for (ATNConfig config : state.configs) { - if (config.reachesIntoOuterContext > 0) { - globalConfigCount++; - hasGlobal = true; - } else { - localConfigCount++; - } - } - - if (hasGlobal) { - globalDfaCount++; - } else { - localDfaCount++; - } - } - - contextsInDFAState[state.configs.size()]++; - } - } - } - - if (SHOW_CONFIG_STATS && currentPass == 0) { - System.out.format(" DFA accept states: %d total, %d with only local context, %d with a global context%n", localDfaCount + globalDfaCount, localDfaCount, globalDfaCount); - System.out.format(" Config stats: %d total, %d local, %d global%n", localConfigCount + globalConfigCount, localConfigCount, globalConfigCount); - if (SHOW_DFA_STATE_STATS) { - for (int i = 0; i < contextsInDFAState.length; i++) { - if (contextsInDFAState[i] != 0) { - System.out.format(" %d configs = %d%n", i, contextsInDFAState[i]); - } - } - } - } - } - - if (COMPUTE_TIMING_STATS) { - System.out.format("File\tTokens\tTime%n"); - for (int i = 0; i< timePerFile[currentPass].length; i++) { - System.out.format("%d\t%d\t%d%n", i + 1, tokensPerFile[currentPass][i], timePerFile[currentPass][i]); - } - } - } - - private static long sum(long[] array) { - long result = 0; - for (int i = 0; i < array.length; i++) { - result += array[i]; - } - - return result; - } - - protected void compileJavaParser(boolean leftRecursive) throws IOException { - String grammarFileName = "Java.g4"; - String sourceName = leftRecursive ? 
"Java-LR.g4" : "Java.g4"; - String body = load(sourceName, null); - List extraOptions = new ArrayList(); - extraOptions.add("-Werror"); - if (FORCE_ATN) { - extraOptions.add("-Xforce-atn"); - } - if (EXPORT_ATN_GRAPHS) { - extraOptions.add("-atn"); - } - if (DEBUG_TEMPLATES) { - extraOptions.add("-XdbgST"); - if (DEBUG_TEMPLATES_WAIT) { - extraOptions.add("-XdbgSTWait"); - } - } - extraOptions.add("-visitor"); - String[] extraOptionsArray = extraOptions.toArray(new String[extraOptions.size()]); - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, "JavaParser", "JavaLexer", true, extraOptionsArray); - assertTrue(success); - } - - private static void updateChecksum(Checksum checksum, int value) { - checksum.update((value) & 0xFF); - checksum.update((value >>> 8) & 0xFF); - checksum.update((value >>> 16) & 0xFF); - checksum.update((value >>> 24) & 0xFF); - } - - private static void updateChecksum(Checksum checksum, Token token) { - if (token == null) { - checksum.update(0); - return; - } - - updateChecksum(checksum, token.getStartIndex()); - updateChecksum(checksum, token.getStopIndex()); - updateChecksum(checksum, token.getLine()); - updateChecksum(checksum, token.getCharPositionInLine()); - updateChecksum(checksum, token.getType()); - updateChecksum(checksum, token.getChannel()); - } - - protected ParserFactory getParserFactory(String lexerName, String parserName, String listenerName, final String entryPoint) { - try { - ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader()); - final Class lexerClass = loader.loadClass(lexerName).asSubclass(Lexer.class); - final Class parserClass = loader.loadClass(parserName).asSubclass(Parser.class); - final Class listenerClass = loader.loadClass(listenerName).asSubclass(ParseTreeListener.class); - - final Constructor lexerCtor = lexerClass.getConstructor(CharStream.class); - final Constructor parserCtor = parserClass.getConstructor(TokenStream.class); - - // construct initial instances of the lexer and parser to deserialize their ATNs - TokenSource tokenSource = lexerCtor.newInstance(new ANTLRInputStream("")); - parserCtor.newInstance(new CommonTokenStream(tokenSource)); - - return new ParserFactory() { - @Override - public FileParseResult parseFile(CharStream input, int currentPass, int thread) { - final Checksum checksum = new CRC32(); - - final long startTime = System.nanoTime(); - assert thread >= 0 && thread < NUMBER_OF_THREADS; - - try { - ParseTreeListener listener = sharedListeners[thread]; - if (listener == null) { - listener = listenerClass.newInstance(); - sharedListeners[thread] = listener; - } - - Lexer lexer = sharedLexers[thread]; - if (REUSE_LEXER && lexer != null) { - lexer.setInputStream(input); - } else { - Lexer previousLexer = lexer; - lexer = lexerCtor.newInstance(input); - DFA[] decisionToDFA = (FILE_GRANULARITY || previousLexer == null ? 
lexer : previousLexer).getInterpreter().decisionToDFA; - if (!REUSE_LEXER_DFA || (!FILE_GRANULARITY && previousLexer == null)) { - decisionToDFA = new DFA[decisionToDFA.length]; - } - - if (COMPUTE_TRANSITION_STATS) { - lexer.setInterpreter(new StatisticsLexerATNSimulator(lexer, lexer.getATN(), decisionToDFA, lexer.getInterpreter().getSharedContextCache())); - } else if (!REUSE_LEXER_DFA) { - lexer.setInterpreter(new LexerATNSimulator(lexer, lexer.getATN(), decisionToDFA, lexer.getInterpreter().getSharedContextCache())); - } - - sharedLexers[thread] = lexer; - } - - lexer.removeErrorListeners(); - lexer.addErrorListener(DescriptiveErrorListener.INSTANCE); - - if (lexer.getInterpreter().decisionToDFA[0] == null) { - ATN atn = lexer.getATN(); - for (int i = 0; i < lexer.getInterpreter().decisionToDFA.length; i++) { - lexer.getInterpreter().decisionToDFA[i] = new DFA(atn.getDecisionState(i), i); - } - } - - CommonTokenStream tokens = new CommonTokenStream(lexer); - tokens.fill(); - tokenCount.addAndGet(currentPass, tokens.size()); - - if (COMPUTE_CHECKSUM) { - for (Token token : tokens.getTokens()) { - updateChecksum(checksum, token); - } - } - - if (!RUN_PARSER) { - return new FileParseResult(input.getSourceName(), (int)checksum.getValue(), null, tokens.size(), startTime, lexer, null); - } - - final long parseStartTime = System.nanoTime(); - Parser parser = sharedParsers[thread]; - if (REUSE_PARSER && parser != null) { - parser.setInputStream(tokens); - } else { - Parser previousParser = parser; - - if (USE_PARSER_INTERPRETER) { - Parser referenceParser = parserCtor.newInstance(tokens); - parser = new ParserInterpreter(referenceParser.getGrammarFileName(), referenceParser.getVocabulary(), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens); - } - else { - parser = parserCtor.newInstance(tokens); - } - - DFA[] decisionToDFA = (FILE_GRANULARITY || previousParser == null ? parser : previousParser).getInterpreter().decisionToDFA; - if (!REUSE_PARSER_DFA || (!FILE_GRANULARITY && previousParser == null)) { - decisionToDFA = new DFA[decisionToDFA.length]; - } - - if (COMPUTE_TRANSITION_STATS) { - parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); - } else if (!REUSE_PARSER_DFA) { - parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); - } - - sharedParsers[thread] = parser; - } - - parser.removeParseListeners(); - parser.removeErrorListeners(); - if (!TWO_STAGE_PARSING) { - parser.addErrorListener(DescriptiveErrorListener.INSTANCE); - parser.addErrorListener(new SummarizingDiagnosticErrorListener()); - } - - if (parser.getInterpreter().decisionToDFA[0] == null) { - ATN atn = parser.getATN(); - for (int i = 0; i < parser.getInterpreter().decisionToDFA.length; i++) { - parser.getInterpreter().decisionToDFA[i] = new DFA(atn.getDecisionState(i), i); - } - } - - parser.getInterpreter().setPredictionMode(TWO_STAGE_PARSING ? 
PredictionMode.SLL : PREDICTION_MODE); - parser.setBuildParseTree(BUILD_PARSE_TREES); - if (!BUILD_PARSE_TREES && BLANK_LISTENER) { - parser.addParseListener(listener); - } - if (BAIL_ON_ERROR || TWO_STAGE_PARSING) { - parser.setErrorHandler(new BailErrorStrategy()); - } - - Method parseMethod = parserClass.getMethod(entryPoint); - Object parseResult; - - try { - if (COMPUTE_CHECKSUM && !BUILD_PARSE_TREES) { - parser.addParseListener(new ChecksumParseTreeListener(checksum)); - } - - if (USE_PARSER_INTERPRETER) { - ParserInterpreter parserInterpreter = (ParserInterpreter)parser; - parseResult = parserInterpreter.parse(Collections.lastIndexOfSubList(Arrays.asList(parser.getRuleNames()), Collections.singletonList(entryPoint))); - } - else { - parseResult = parseMethod.invoke(parser); - } - } catch (InvocationTargetException ex) { - if (!TWO_STAGE_PARSING) { - throw ex; - } - - String sourceName = tokens.getSourceName(); - sourceName = sourceName != null && !sourceName.isEmpty() ? sourceName+": " : ""; - if (REPORT_SECOND_STAGE_RETRY) { - System.err.println(sourceName+"Forced to retry with full context."); - } - - if (!(ex.getCause() instanceof ParseCancellationException)) { - throw ex; - } - - tokens.reset(); - if (REUSE_PARSER && parser != null) { - parser.setInputStream(tokens); - } else { - Parser previousParser = parser; - - if (USE_PARSER_INTERPRETER) { - Parser referenceParser = parserCtor.newInstance(tokens); - parser = new ParserInterpreter(referenceParser.getGrammarFileName(), referenceParser.getVocabulary(), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens); - } - else { - parser = parserCtor.newInstance(tokens); - } - - DFA[] decisionToDFA = previousParser.getInterpreter().decisionToDFA; - if (COMPUTE_TRANSITION_STATS) { - parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); - } else if (!REUSE_PARSER_DFA) { - parser.setInterpreter(new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache())); - } - - sharedParsers[thread] = parser; - } - - parser.removeParseListeners(); - parser.removeErrorListeners(); - parser.addErrorListener(DescriptiveErrorListener.INSTANCE); - parser.addErrorListener(new SummarizingDiagnosticErrorListener()); - parser.getInterpreter().setPredictionMode(PredictionMode.LL); - parser.setBuildParseTree(BUILD_PARSE_TREES); - if (COMPUTE_CHECKSUM && !BUILD_PARSE_TREES) { - parser.addParseListener(new ChecksumParseTreeListener(checksum)); - } - if (!BUILD_PARSE_TREES && BLANK_LISTENER) { - parser.addParseListener(listener); - } - if (BAIL_ON_ERROR) { - parser.setErrorHandler(new BailErrorStrategy()); - } - - parseResult = parseMethod.invoke(parser); - } - - assertThat(parseResult, instanceOf(ParseTree.class)); - if (COMPUTE_CHECKSUM && BUILD_PARSE_TREES) { - ParseTreeWalker.DEFAULT.walk(new ChecksumParseTreeListener(checksum), (ParseTree)parseResult); - } - if (BUILD_PARSE_TREES && BLANK_LISTENER) { - ParseTreeWalker.DEFAULT.walk(listener, (ParseTree)parseResult); - } - - return new FileParseResult(input.getSourceName(), (int)checksum.getValue(), (ParseTree)parseResult, tokens.size(), TIME_PARSE_ONLY ? 
parseStartTime : startTime, lexer, parser); - } catch (Exception e) { - if (!REPORT_SYNTAX_ERRORS && e instanceof ParseCancellationException) { - return new FileParseResult("unknown", (int)checksum.getValue(), null, 0, startTime, null, null); - } - - e.printStackTrace(System.out); - throw new IllegalStateException(e); - } - } - }; - } catch (Exception e) { - e.printStackTrace(System.out); - Assert.fail(e.getMessage()); - throw new IllegalStateException(e); - } - } - - protected interface ParserFactory { - FileParseResult parseFile(CharStream input, int currentPass, int thread); - } - - protected static class FileParseResult { - public final String sourceName; - public final int checksum; - public final ParseTree parseTree; - public final int tokenCount; - public final long startTime; - public final long endTime; - - public final int lexerDFASize; - public final long lexerTotalTransitions; - public final long lexerComputedTransitions; - - public final int parserDFASize; - public final long[] decisionInvocations; - public final long[] fullContextFallback; - public final long[] nonSll; - public final long[] parserTotalTransitions; - public final long[] parserComputedTransitions; - public final long[] parserFullContextTransitions; - - public FileParseResult(String sourceName, int checksum, @Nullable ParseTree parseTree, int tokenCount, long startTime, Lexer lexer, Parser parser) { - this.sourceName = sourceName; - this.checksum = checksum; - this.parseTree = parseTree; - this.tokenCount = tokenCount; - this.startTime = startTime; - this.endTime = System.nanoTime(); - - if (lexer != null) { - LexerATNSimulator interpreter = lexer.getInterpreter(); - if (interpreter instanceof StatisticsLexerATNSimulator) { - lexerTotalTransitions = ((StatisticsLexerATNSimulator)interpreter).totalTransitions; - lexerComputedTransitions = ((StatisticsLexerATNSimulator)interpreter).computedTransitions; - } else { - lexerTotalTransitions = 0; - lexerComputedTransitions = 0; - } - - int dfaSize = 0; - for (DFA dfa : interpreter.decisionToDFA) { - if (dfa != null) { - dfaSize += dfa.states.size(); - } - } - - lexerDFASize = dfaSize; - } else { - lexerDFASize = 0; - lexerTotalTransitions = 0; - lexerComputedTransitions = 0; - } - - if (parser != null) { - ParserATNSimulator interpreter = parser.getInterpreter(); - if (interpreter instanceof StatisticsParserATNSimulator) { - decisionInvocations = ((StatisticsParserATNSimulator)interpreter).decisionInvocations; - fullContextFallback = ((StatisticsParserATNSimulator)interpreter).fullContextFallback; - nonSll = ((StatisticsParserATNSimulator)interpreter).nonSll; - parserTotalTransitions = ((StatisticsParserATNSimulator)interpreter).totalTransitions; - parserComputedTransitions = ((StatisticsParserATNSimulator)interpreter).computedTransitions; - parserFullContextTransitions = ((StatisticsParserATNSimulator)interpreter).fullContextTransitions; - } else { - decisionInvocations = new long[0]; - fullContextFallback = new long[0]; - nonSll = new long[0]; - parserTotalTransitions = new long[0]; - parserComputedTransitions = new long[0]; - parserFullContextTransitions = new long[0]; - } - - int dfaSize = 0; - for (DFA dfa : interpreter.decisionToDFA) { - if (dfa != null) { - dfaSize += dfa.states.size(); - } - } - - parserDFASize = dfaSize; - } else { - parserDFASize = 0; - decisionInvocations = new long[0]; - fullContextFallback = new long[0]; - nonSll = new long[0]; - parserTotalTransitions = new long[0]; - parserComputedTransitions = new long[0]; - parserFullContextTransitions 
= new long[0]; - } - } - } - - private static class StatisticsLexerATNSimulator extends LexerATNSimulator { - - public long totalTransitions; - public long computedTransitions; - - public StatisticsLexerATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { - super(atn, decisionToDFA, sharedContextCache); - } - - public StatisticsLexerATNSimulator(Lexer recog, ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { - super(recog, atn, decisionToDFA, sharedContextCache); - } - - @Override - protected DFAState getExistingTargetState(DFAState s, int t) { - totalTransitions++; - return super.getExistingTargetState(s, t); - } - - @Override - protected DFAState computeTargetState(CharStream input, DFAState s, int t) { - computedTransitions++; - return super.computeTargetState(input, s, t); - } - } - - private static class StatisticsParserATNSimulator extends ParserATNSimulator { - - public final long[] decisionInvocations; - public final long[] fullContextFallback; - public final long[] nonSll; - public final long[] totalTransitions; - public final long[] computedTransitions; - public final long[] fullContextTransitions; - - private int decision; - - public StatisticsParserATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { - super(atn, decisionToDFA, sharedContextCache); - decisionInvocations = new long[atn.decisionToState.size()]; - fullContextFallback = new long[atn.decisionToState.size()]; - nonSll = new long[atn.decisionToState.size()]; - totalTransitions = new long[atn.decisionToState.size()]; - computedTransitions = new long[atn.decisionToState.size()]; - fullContextTransitions = new long[atn.decisionToState.size()]; - } - - public StatisticsParserATNSimulator(Parser parser, ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) { - super(parser, atn, decisionToDFA, sharedContextCache); - decisionInvocations = new long[atn.decisionToState.size()]; - fullContextFallback = new long[atn.decisionToState.size()]; - nonSll = new long[atn.decisionToState.size()]; - totalTransitions = new long[atn.decisionToState.size()]; - computedTransitions = new long[atn.decisionToState.size()]; - fullContextTransitions = new long[atn.decisionToState.size()]; - } - - @Override - public int adaptivePredict(TokenStream input, int decision, ParserRuleContext outerContext) { - try { - this.decision = decision; - decisionInvocations[decision]++; - return super.adaptivePredict(input, decision, outerContext); - } - finally { - this.decision = -1; - } - } - - @Override - protected int execATNWithFullContext(DFA dfa, DFAState D, ATNConfigSet s0, TokenStream input, int startIndex, ParserRuleContext outerContext) { - fullContextFallback[decision]++; - return super.execATNWithFullContext(dfa, D, s0, input, startIndex, outerContext); - } - - @Override - protected DFAState getExistingTargetState(DFAState previousD, int t) { - totalTransitions[decision]++; - return super.getExistingTargetState(previousD, t); - } - - @Override - protected DFAState computeTargetState(DFA dfa, DFAState previousD, int t) { - computedTransitions[decision]++; - return super.computeTargetState(dfa, previousD, t); - } - - @Override - protected ATNConfigSet computeReachSet(ATNConfigSet closure, int t, boolean fullCtx) { - if (fullCtx) { - totalTransitions[decision]++; - computedTransitions[decision]++; - fullContextTransitions[decision]++; - } - - return super.computeReachSet(closure, t, fullCtx); - } - } - - private static class 
DescriptiveErrorListener extends BaseErrorListener { - public static DescriptiveErrorListener INSTANCE = new DescriptiveErrorListener(); - - @Override - public void syntaxError(Recognizer recognizer, Object offendingSymbol, - int line, int charPositionInLine, - String msg, RecognitionException e) - { - if (!REPORT_SYNTAX_ERRORS) { - return; - } - - String sourceName = recognizer.getInputStream().getSourceName(); - if (!sourceName.isEmpty()) { - sourceName = String.format("%s:%d:%d: ", sourceName, line, charPositionInLine); - } - - System.err.println(sourceName+"line "+line+":"+charPositionInLine+" "+msg); - } - - } - - private static class SummarizingDiagnosticErrorListener extends DiagnosticErrorListener { - private BitSet _sllConflict; - private ATNConfigSet _sllConfigs; - - @Override - public void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, boolean exact, BitSet ambigAlts, ATNConfigSet configs) { - if (COMPUTE_TRANSITION_STATS && DETAILED_DFA_STATE_STATS) { - BitSet sllPredictions = getConflictingAlts(_sllConflict, _sllConfigs); - int sllPrediction = sllPredictions.nextSetBit(0); - BitSet llPredictions = getConflictingAlts(ambigAlts, configs); - int llPrediction = llPredictions.cardinality() == 0 ? ATN.INVALID_ALT_NUMBER : llPredictions.nextSetBit(0); - if (sllPrediction != llPrediction) { - ((StatisticsParserATNSimulator)recognizer.getInterpreter()).nonSll[dfa.decision]++; - } - } - - if (!REPORT_AMBIGUITIES) { - return; - } - - // show the rule name along with the decision - String format = "reportAmbiguity d=%d (%s): ambigAlts=%s, input='%s'"; - int decision = dfa.decision; - String rule = recognizer.getRuleNames()[dfa.atnStartState.ruleIndex]; - String input = recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)); - recognizer.notifyErrorListeners(String.format(format, decision, rule, ambigAlts, input)); - } - - @Override - public void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) { - _sllConflict = conflictingAlts; - _sllConfigs = configs; - if (!REPORT_FULL_CONTEXT) { - return; - } - - // show the rule name and viable configs along with the base info - String format = "reportAttemptingFullContext d=%d (%s), input='%s', viable=%s"; - int decision = dfa.decision; - String rule = recognizer.getRuleNames()[dfa.atnStartState.ruleIndex]; - String input = recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)); - BitSet representedAlts = getConflictingAlts(conflictingAlts, configs); - recognizer.notifyErrorListeners(String.format(format, decision, rule, input, representedAlts)); - } - - @Override - public void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex, int stopIndex, int prediction, ATNConfigSet configs) { - if (COMPUTE_TRANSITION_STATS && DETAILED_DFA_STATE_STATS) { - BitSet sllPredictions = getConflictingAlts(_sllConflict, _sllConfigs); - int sllPrediction = sllPredictions.nextSetBit(0); - if (sllPrediction != prediction) { - ((StatisticsParserATNSimulator)recognizer.getInterpreter()).nonSll[dfa.decision]++; - } - } - - if (!REPORT_CONTEXT_SENSITIVITY) { - return; - } - - // show the rule name and viable configs along with the base info - String format = "reportContextSensitivity d=%d (%s), input='%s', viable={%d}"; - int decision = dfa.decision; - String rule = recognizer.getRuleNames()[dfa.atnStartState.ruleIndex]; - String input = recognizer.getTokenStream().getText(Interval.of(startIndex, stopIndex)); - 
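SummarizingDiagnosticErrorListener above records when the SLL and LL predictions disagree; the fallback itself lives in parseFile(), which first predicts in SLL mode with a BailErrorStrategy and, when that pass throws ParseCancellationException, rewinds the token stream and retries in full LL mode. A compact sketch of that two-stage pattern, assuming the ANTLR 4 runtime is on the classpath; TwoStageParse and the Supplier-based start-rule hook are illustrative names, not part of the harness:

import java.util.function.Supplier;

import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.atn.PredictionMode;
import org.antlr.v4.runtime.misc.ParseCancellationException;
import org.antlr.v4.runtime.tree.ParseTree;

// Illustrative only: the two-stage SLL/LL strategy used by parseFile() above.
final class TwoStageParse {
    static ParseTree parse(Parser parser, CommonTokenStream tokens, Supplier<ParseTree> startRule) {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        parser.setErrorHandler(new BailErrorStrategy());
        try {
            return startRule.get();                       // fast path: SLL, bail on first error
        } catch (ParseCancellationException ex) {
            tokens.reset();                               // rewind and retry with full context
            parser.setInputStream(tokens);
            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            parser.setErrorHandler(new DefaultErrorStrategy());
            return startRule.get();
        }
    }
}

The supplier closes over the parser instance, so the same start-rule call works for both passes; resetting the input stream clears the parser state before the LL retry, as in the code above.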
recognizer.notifyErrorListeners(String.format(format, decision, rule, input, prediction)); - } - - } - - protected static final class FilenameFilters { - public static final FilenameFilter ALL_FILES = new FilenameFilter() { - - @Override - public boolean accept(File dir, String name) { - return true; - } - - }; - - public static FilenameFilter extension(String extension) { - return extension(extension, true); - } - - public static FilenameFilter extension(String extension, boolean caseSensitive) { - return new FileExtensionFilenameFilter(extension, caseSensitive); - } - - public static FilenameFilter name(String filename) { - return name(filename, true); - } - - public static FilenameFilter name(String filename, boolean caseSensitive) { - return new FileNameFilenameFilter(filename, caseSensitive); - } - - public static FilenameFilter all(FilenameFilter... filters) { - return new AllFilenameFilter(filters); - } - - public static FilenameFilter any(FilenameFilter... filters) { - return new AnyFilenameFilter(filters); - } - - public static FilenameFilter none(FilenameFilter... filters) { - return not(any(filters)); - } - - public static FilenameFilter not(FilenameFilter filter) { - return new NotFilenameFilter(filter); - } - - private FilenameFilters() { - } - - protected static class FileExtensionFilenameFilter implements FilenameFilter { - - private final String extension; - private final boolean caseSensitive; - - public FileExtensionFilenameFilter(String extension, boolean caseSensitive) { - if (!extension.startsWith(".")) { - extension = '.' + extension; - } - - this.extension = extension; - this.caseSensitive = caseSensitive; - } - - @Override - public boolean accept(File dir, String name) { - if (caseSensitive) { - return name.endsWith(extension); - } else { - return name.toLowerCase().endsWith(extension); - } - } - } - - protected static class FileNameFilenameFilter implements FilenameFilter { - - private final String filename; - private final boolean caseSensitive; - - public FileNameFilenameFilter(String filename, boolean caseSensitive) { - this.filename = filename; - this.caseSensitive = caseSensitive; - } - - @Override - public boolean accept(File dir, String name) { - if (caseSensitive) { - return name.equals(filename); - } else { - return name.toLowerCase().equals(filename); - } - } - } - - protected static class AllFilenameFilter implements FilenameFilter { - - private final FilenameFilter[] filters; - - public AllFilenameFilter(FilenameFilter[] filters) { - this.filters = filters; - } - - @Override - public boolean accept(File dir, String name) { - for (FilenameFilter filter : filters) { - if (!filter.accept(dir, name)) { - return false; - } - } - - return true; - } - } - - protected static class AnyFilenameFilter implements FilenameFilter { - - private final FilenameFilter[] filters; - - public AnyFilenameFilter(FilenameFilter[] filters) { - this.filters = filters; - } - - @Override - public boolean accept(File dir, String name) { - for (FilenameFilter filter : filters) { - if (filter.accept(dir, name)) { - return true; - } - } - - return false; - } - } - - protected static class NotFilenameFilter implements FilenameFilter { - - private final FilenameFilter filter; - - public NotFilenameFilter(FilenameFilter filter) { - this.filter = filter; - } - - @Override - public boolean accept(File dir, String name) { - return !filter.accept(dir, name); - } - } - } - - protected static class NumberedThread extends Thread { - private final int threadNumber; - - public 
NumberedThread(Runnable target, int threadNumber) { - super(target); - this.threadNumber = threadNumber; - } - - public final int getThreadNumber() { - return threadNumber; - } - - } - - protected static class NumberedThreadFactory implements ThreadFactory { - private final AtomicInteger nextThread = new AtomicInteger(); - - @Override - public Thread newThread(Runnable r) { - int threadNumber = nextThread.getAndIncrement(); - assert threadNumber < NUMBER_OF_THREADS; - return new NumberedThread(r, threadNumber); - } - - } - - protected static class FixedThreadNumberFactory implements ThreadFactory { - private final int threadNumber; - - public FixedThreadNumberFactory(int threadNumber) { - this.threadNumber = threadNumber; - } - - @Override - public Thread newThread(Runnable r) { - assert threadNumber < NUMBER_OF_THREADS; - return new NumberedThread(r, threadNumber); - } - } - - protected static class ChecksumParseTreeListener implements ParseTreeListener { - private static final int VISIT_TERMINAL = 1; - private static final int VISIT_ERROR_NODE = 2; - private static final int ENTER_RULE = 3; - private static final int EXIT_RULE = 4; - - private final Checksum checksum; - - public ChecksumParseTreeListener(Checksum checksum) { - this.checksum = checksum; - } - - @Override - public void visitTerminal(TerminalNode node) { - checksum.update(VISIT_TERMINAL); - updateChecksum(checksum, node.getSymbol()); - } - - @Override - public void visitErrorNode(ErrorNode node) { - checksum.update(VISIT_ERROR_NODE); - updateChecksum(checksum, node.getSymbol()); - } - - @Override - public void enterEveryRule(ParserRuleContext ctx) { - checksum.update(ENTER_RULE); - updateChecksum(checksum, ctx.getRuleIndex()); - updateChecksum(checksum, ctx.getStart()); - } - - @Override - public void exitEveryRule(ParserRuleContext ctx) { - checksum.update(EXIT_RULE); - updateChecksum(checksum, ctx.getRuleIndex()); - updateChecksum(checksum, ctx.getStop()); - } - - } - - protected static final class InputDescriptor { - private final String source; - private Reference inputStream; - - public InputDescriptor(@NotNull String source) { - this.source = source; - if (PRELOAD_SOURCES) { - getInputStream(); - } - } - - @NotNull - public synchronized CharStream getInputStream() { - CloneableANTLRFileStream stream = inputStream != null ? inputStream.get() : null; - if (stream == null) { - try { - stream = new CloneableANTLRFileStream(source, ENCODING); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - - if (PRELOAD_SOURCES) { - inputStream = new StrongReference(stream); - } else { - inputStream = new SoftReference(stream); - } - } - - return new JavaUnicodeInputStream(stream.createCopy()); - } - } - - protected static class CloneableANTLRFileStream extends ANTLRFileStream { - - public CloneableANTLRFileStream(String fileName, String encoding) throws IOException { - super(fileName, encoding); - } - - public ANTLRInputStream createCopy() { - ANTLRInputStream stream = new ANTLRInputStream(this.data, this.n); - stream.name = this.getSourceName(); - return stream; - } - } - - public static class StrongReference extends WeakReference { - public final T referent; - - public StrongReference(T referent) { - super(referent); - this.referent = referent; - } - - @Override - public T get() { - return referent; - } - } - - /** - * This is a regression test for antlr/antlr4#192 "Poor performance of - * expression parsing". 
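ChecksumParseTreeListener and the updateChecksum helpers above reduce a parse to a single CRC32 value by folding selected token fields into the checksum one byte at a time. A standalone sketch of that folding with a runnable entry point; the Crc32Folding class name and the sample field values are illustrative only:

import java.util.zip.CRC32;
import java.util.zip.Checksum;

// Illustrative only: folds 32-bit values into a CRC32 a byte at a time,
// mirroring the updateChecksum(Checksum, int) helper above.
final class Crc32Folding {
    static void update(Checksum checksum, int value) {
        checksum.update(value & 0xFF);
        checksum.update((value >>> 8) & 0xFF);
        checksum.update((value >>> 16) & 0xFF);
        checksum.update((value >>> 24) & 0xFF);
    }

    public static void main(String[] args) {
        Checksum checksum = new CRC32();
        // Fold a few sample "token fields" (start index, stop index, line, type).
        update(checksum, 0);
        update(checksum, 3);
        update(checksum, 1);
        update(checksum, 5);
        System.out.format("checksum 0x%8X%n", checksum.getValue());
    }
}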
- * https://github.com/antlr/antlr4/issues/192 - */ - @Test(timeout = 60000) - public void testExpressionGrammar() { - String grammar = - "grammar Expr;\n" + - "\n" + - "program: expr EOF;\n" + - "\n" + - "expr: ID\n" + - " | 'not' expr\n" + - " | expr 'and' expr\n" + - " | expr 'or' expr\n" + - " ;\n" + - "\n" + - "ID: [a-zA-Z_][a-zA-Z_0-9]*;\n" + - "WS: [ \\t\\n\\r\\f]+ -> skip;\n" + - "ERROR: .;\n"; - String input = - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - " X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and X6 and not X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and X7 and not X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and X8 and not X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and X9 and not X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and X10 and not X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and X11 and not X12 or\n" + - "not X1 and not X2 and not X3 and not X4 and not X5 and not X6 and not X7 and not X8 and not X9 and not X10 and not X11 and X12\n"; - - String found = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "program", - input, false); - Assert.assertEquals("", found); - Assert.assertEquals(null, stderrDuringParse); - - List inputs = new ArrayList(); - for (int i = 0; i < 10; i++) { - inputs.add(input); - } - - input = Utils.join(inputs.iterator(), " or\n"); - found = execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "program", - input, false); - Assert.assertEquals("", found); - Assert.assertEquals(null, stderrDuringParse); - } - - @Test(timeout = 20000) - public void testExponentialInclude() { - String grammarFormat = - "parser grammar Level_%d_%d;\n" + - "\n" + - "%s import Level_%d_1, Level_%d_2;\n" + - "\n" + - "rule_%d_%d : EOF;\n"; - - System.out.println("dir "+tmpdir); - mkdir(tmpdir); - - long startTime = System.nanoTime(); - - int levels = 20; - for (int level = 0; level < levels; level++) { - String leafPrefix = level == levels - 1 ? 
"//" : ""; - String grammar1 = String.format(grammarFormat, level, 1, leafPrefix, level + 1, level + 1, level, 1); - writeFile(tmpdir, "Level_" + level + "_1.g4", grammar1); - if (level > 0) { - String grammar2 = String.format(grammarFormat, level, 2, leafPrefix, level + 1, level + 1, level, 1); - writeFile(tmpdir, "Level_" + level + "_2.g4", grammar2); - } - } - - ErrorQueue equeue = antlr("Level_0_1.g4", false); - Assert.assertTrue(equeue.errors.isEmpty()); - - long endTime = System.nanoTime(); - System.out.format("%s milliseconds.%n", (endTime - startTime) / 1000000.0); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestScopeParsing.java b/tool/test/org/antlr/v4/xtest/TestScopeParsing.java deleted file mode 100644 index 26d80fc0c..000000000 --- a/tool/test/org/antlr/v4/xtest/TestScopeParsing.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.parse.ScopeParser; -import org.antlr.v4.tool.ErrorManager; -import org.antlr.v4.tool.Grammar; -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestScopeParsing extends BaseTest { - String[] argPairs = { - "", "{}", - " ", "{}", - "int i", "{i=int i}", - "int[] i, int j[]", "{i=int[] i, j=int [] j}", - "Map[] i, int j[]", "{i=Map[] i, j=int [] j}", - "Map>[] i", "{i=Map>[] i}", - "int i = 34+a[3], int j[] = new int[34]", - "{i=int i= 34+a[3], j=int [] j= new int[34]}", - "char *foo32[3] = {1,2,3}", "{3=char *foo32[] 3= {1,2,3}}", - "String[] headers", "{headers=String[] headers}", - - // python/ruby style - "i", "{i=null i}", - "i,j", "{i=null i, j=null j}", - "i,j, k", "{i=null i, j=null j, k=null k}", - }; - - @Test public void testArgs() throws Exception { - for (int i = 0; i < argPairs.length; i+=2) { - String input = argPairs[i]; - String expected = argPairs[i+1]; - Grammar dummy = new Grammar("grammar T; a:'a';"); - String actual = ScopeParser.parseTypedArgList(null, input, dummy).attributes.toString(); - assertEquals(expected, actual); - } - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java b/tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java deleted file mode 100644 index fafc1aa68..000000000 --- a/tool/test/org/antlr/v4/xtest/TestSemPredEvalLexer.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.*; - -public class TestSemPredEvalLexer extends BaseTest { - - @Test public void testDisableRule() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "E1 : 'enum' {false}? ;\n" + - "E2 : 'enum' {true}? 
;\n" + // winner not E1 or ID - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc", true); - String expecting = - "[@0,0:3='enum',<2>,1:0]\n" + - "[@1,5:7='abc',<3>,1:5]\n" + - "[@2,8:7='',<-1>,1:8]\n" + - "s0-' '->:s5=>4\n" + - "s0-'a'->:s6=>3\n" + - "s0-'e'->:s1=>3\n" + - ":s1=>3-'n'->:s2=>3\n" + - ":s2=>3-'u'->:s3=>3\n" + - ":s6=>3-'b'->:s6=>3\n" + - ":s6=>3-'c'->:s6=>3\n"; - assertEquals(expecting, found); - } - - @Test public void testIDvsEnum() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ENUM : 'enum' {false}? ;\n" + - "ID : 'a'..'z'+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); - String expecting = - "[@0,0:3='enum',<2>,1:0]\n" + - "[@1,5:7='abc',<2>,1:5]\n" + - "[@2,9:12='enum',<2>,1:9]\n" + - "[@3,13:12='',<-1>,1:13]\n" + - "s0-' '->:s5=>3\n" + - "s0-'a'->:s4=>2\n" + - "s0-'e'->:s1=>2\n" + - ":s1=>2-'n'->:s2=>2\n" + - ":s2=>2-'u'->:s3=>2\n" + - ":s4=>2-'b'->:s4=>2\n" + - ":s4=>2-'c'->:s4=>2\n"; // no 'm'-> transition...conflicts with pred - assertEquals(expecting, found); - } - - @Test public void testIDnotEnum() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ENUM : [a-z]+ {false}? ;\n" + - "ID : [a-z]+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); - String expecting = - "[@0,0:3='enum',<2>,1:0]\n" + - "[@1,5:7='abc',<2>,1:5]\n" + - "[@2,9:12='enum',<2>,1:9]\n" + - "[@3,13:12='',<-1>,1:13]\n" + - "s0-' '->:s2=>3\n"; // no edges in DFA for enum/id. all paths lead to pred. - assertEquals(expecting, found); - } - - @Test public void testEnumNotID() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ENUM : [a-z]+ {getText().equals(\"enum\")}? ;\n" + - "ID : [a-z]+ ;\n"+ - "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "enum abc enum", true); - String expecting = - "[@0,0:3='enum',<1>,1:0]\n" + - "[@1,5:7='abc',<2>,1:5]\n" + - "[@2,9:12='enum',<1>,1:9]\n" + - "[@3,13:12='',<-1>,1:13]\n" + - "s0-' '->:s3=>3\n"; // no edges in DFA for enum/id. all paths lead to pred. - assertEquals(expecting, found); - } - - @Test public void testIndent() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "ID : [a-z]+ ;\n"+ - "INDENT : [ \\t]+ {_tokenStartCharPositionInLine==0}? \n" + - " {System.out.println(\"INDENT\");} ;"+ - "NL : '\\n' ;"+ - "WS : [ \\t]+ ;"; - String found = execLexer("L.g4", grammar, "L", "abc\n def \n", true); - String expecting = - "INDENT\n" + // action output - "[@0,0:2='abc',<1>,1:0]\n" + // ID - "[@1,3:3='\\n',<3>,1:3]\n" + // NL - "[@2,4:5=' ',<2>,2:0]\n" + // INDENT - "[@3,6:8='def',<1>,2:2]\n" + // ID - "[@4,9:10=' ',<4>,2:5]\n" + // WS - "[@5,11:11='\\n',<3>,2:7]\n" + - "[@6,12:11='',<-1>,3:8]\n" + - "s0-'\n" + - "'->:s2=>3\n" + - "s0-'a'->:s1=>1\n" + - "s0-'d'->:s1=>1\n" + - ":s1=>1-'b'->:s1=>1\n" + - ":s1=>1-'c'->:s1=>1\n" + - ":s1=>1-'e'->:s1=>1\n" + - ":s1=>1-'f'->:s1=>1\n"; - assertEquals(expecting, found); - } - - @Test public void testLexerInputPositionSensitivePredicates() throws Exception { - String grammar = - "lexer grammar L;\n"+ - "WORD1 : ID1+ {System.out.println(getText());} ;\n"+ - "WORD2 : ID2+ {System.out.println(getText());} ;\n"+ - "fragment ID1 : {getCharPositionInLine()<2}? [a-zA-Z];\n"+ - "fragment ID2 : {getCharPositionInLine()>=2}? 
[a-zA-Z];\n"+ - "WS : (' '|'\\n') -> skip;\n"; - String found = execLexer("L.g4", grammar, "L", "a cde\nabcde\n"); - String expecting = - "a\n" + - "cde\n" + - "ab\n" + - "cde\n" + - "[@0,0:0='a',<1>,1:0]\n" + - "[@1,2:4='cde',<2>,1:2]\n" + - "[@2,6:7='ab',<1>,2:0]\n" + - "[@3,8:10='cde',<2>,2:2]\n" + - "[@4,12:11='',<-1>,3:0]\n"; - assertEquals(expecting, found); - } - - @Test public void testPredicatedKeywords() { - String grammar = - "lexer grammar A;" + - "ENUM : [a-z]+ {getText().equals(\"enum\")}? {System.out.println(\"enum!\");} ;\n" + - "ID : [a-z]+ {System.out.println(\"ID \"+getText());} ;\n" + - "WS : [ \\n] -> skip ;"; - String found = execLexer("A.g4", grammar, "A", "enum enu a"); - String expecting = - "enum!\n" + - "ID enu\n" + - "ID a\n" + - "[@0,0:3='enum',<1>,1:0]\n" + - "[@1,5:7='enu',<2>,1:5]\n" + - "[@2,9:9='a',<2>,1:9]\n" + - "[@3,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java deleted file mode 100644 index 444c5c52d..000000000 --- a/tool/test/org/antlr/v4/xtest/TestSemPredEvalParser.java +++ /dev/null @@ -1,626 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class TestSemPredEvalParser extends BaseTest { - // TEST VALIDATING PREDS - - @Test public void testSimpleValidate() throws Exception { - String grammar = - "grammar T;\n" + - "s : a ;\n" + - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? 
INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - /*String found = */execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x", false); - - String expecting = "line 1:0 no viable alternative at input 'x'\n"; - assertEquals(expecting, stderrDuringParse); - } - - @Test public void testSimpleValidate2() throws Exception { - String grammar = - "grammar T;\n" + - "s : a a a;\n" + - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "3 4 x", false); - String expecting = - "alt 2\n" + - "alt 2\n"; - assertEquals(expecting, found); - - expecting = "line 1:4 no viable alternative at input 'x'\n"; - assertEquals(expecting, stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#196 - * "element+ in expression grammar doesn't parse properly" - * https://github.com/antlr/antlr4/issues/196 - */ - @Test public void testAtomWithClosureInTranslatedLRRule() throws Exception { - String grammar = - "grammar T;\n" + - "start : e[0] EOF;\n" + - "e[int _p]\n" + - " : ( 'a'\n" + - " | 'b'+\n" + - " )\n" + - " ( {3 >= $_p}? '+' e[4]\n" + - " )*\n" + - " ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "start", - "a+b+a", false); - String expecting = ""; - assertEquals(expecting, found); - assertNull(stderrDuringParse); - } - - @Test public void testValidateInDFA() throws Exception { - String grammar = - "grammar T;\n" + - "s : a ';' a;\n" + - // ';' helps us to resynchronize without consuming - // 2nd 'a' reference. We our testing that the DFA also - // throws an exception if the validating predicate fails - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x ; y", false); - String expecting = ""; - assertEquals(expecting, found); - - expecting = - "line 1:0 no viable alternative at input 'x'\n" + - "line 1:4 no viable alternative at input 'y'\n"; - assertEquals(expecting, stderrDuringParse); - } - - // TEST DISAMBIG PREDS - - @Test public void testSimple() throws Exception { - String grammar = - "grammar T;\n" + - "s : a a a;\n" + // do 3x: once in ATN, next in DFA then INT in ATN - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? ID {System.out.println(\"alt 2\");}\n" + - " | INT {System.out.println(\"alt 3\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x y 3", false); - String expecting = - "alt 2\n" + - "alt 2\n" + - "alt 3\n"; - assertEquals(expecting, found); - } - - @Test public void testOrder() throws Exception { - // Under new predicate ordering rules (see antlr/antlr4#29), the first - // alt with an acceptable config (unpredicated, or predicated and evaluates - // to true) is chosen. - String grammar = - "grammar T;\n" + - "s : a {} a;\n" + // do 2x: once in ATN, next in DFA; - // action blocks lookahead from falling off of 'a' - // and looking into 2nd 'a' ref. !ctx dependent pred - "a : ID {System.out.println(\"alt 1\");}\n" + - " | {true}? 
ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x y", false); - String expecting = - "alt 1\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - @Test public void test2UnpredicatedAlts() throws Exception { - // We have n-2 predicates for n alternatives. pick first alt - String grammar = - "grammar T;\n" + - "@header {" + - "import java.util.*;" + - "}" + - "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " a ';' a;\n" + // do 2x: once in ATN, next in DFA - "a : ID {System.out.println(\"alt 1\");}\n" + - " | ID {System.out.println(\"alt 2\");}\n" + - " | {false}? ID {System.out.println(\"alt 3\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x; y", true); - String expecting = - "alt 1\n" + - "alt 1\n"; - assertEquals(expecting, found); - assertEquals("line 1:0 reportAttemptingFullContext d=0 (a), input='x'\n" + - "line 1:0 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='x'\n" + - "line 1:3 reportAttemptingFullContext d=0 (a), input='y'\n" + - "line 1:3 reportAmbiguity d=0 (a): ambigAlts={1, 2}, input='y'\n", - this.stderrDuringParse); - } - - @Test public void test2UnpredicatedAltsAndOneOrthogonalAlt() throws Exception { - String grammar = - "grammar T;\n" + - "@header {" + - "import java.util.*;" + - "}" + - "s : {_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);}\n" + - " a ';' a ';' a;\n" + - "a : INT {System.out.println(\"alt 1\");}\n" + - " | ID {System.out.println(\"alt 2\");}\n" + // must pick this one for ID since pred is false - " | ID {System.out.println(\"alt 3\");}\n" + - " | {false}? ID {System.out.println(\"alt 4\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "34; x; y", true); - String expecting = - "alt 1\n" + - "alt 2\n" + - "alt 2\n"; - assertEquals(expecting, found); - assertEquals("line 1:4 reportAttemptingFullContext d=0 (a), input='x'\n" + - "line 1:4 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='x'\n" + - "line 1:7 reportAttemptingFullContext d=0 (a), input='y'\n" + - "line 1:7 reportAmbiguity d=0 (a): ambigAlts={2, 3}, input='y'\n", - this.stderrDuringParse); - } - - @Test public void testRewindBeforePredEval() throws Exception { - // The parser consumes ID and moves to the 2nd token INT. - // To properly evaluate the predicates after matching ID INT, - // we must correctly see come back to starting index so LT(1) works - String grammar = - "grammar T;\n" + - "s : a a;\n" + - "a : {_input.LT(1).getText().equals(\"x\")}? ID INT {System.out.println(\"alt 1\");}\n" + - " | {_input.LT(1).getText().equals(\"y\")}? ID INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "y 3 x 4", false); - String expecting = - "alt 2\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - @Test public void testNoTruePredsThrowsNoViableAlt() throws Exception { - // checks that we throw exception if all alts - // are covered with a predicate and none succeeds - String grammar = - "grammar T;\n" + - "s : a a;\n" + - "a : {false}? 
ID INT {System.out.println(\"alt 1\");}\n" + - " | {false}? ID INT {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - execParser("T.g4", grammar, "TParser", "TLexer", "s", - "y 3 x 4", false); - String expecting = "line 1:0 no viable alternative at input 'y'\n"; - String result = stderrDuringParse; - assertEquals(expecting, result); - } - - @Test public void testToLeft() throws Exception { - String grammar = - "grammar T;\n" + - "s : a+ ;\n" + - "a : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x x y", false); - String expecting = - "alt 2\n" + - "alt 2\n" + - "alt 2\n"; - assertEquals(expecting, found); - } - - @Test - public void testUnpredicatedPathsInAlt() throws Exception{ - String grammar = - "grammar T;\n" + - "s : a {System.out.println(\"alt 1\");}\n" + - " | b {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "a : {false}? ID INT\n" + - " | ID INT\n" + - " ;\n" + - "b : ID ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x 4", false); - String expecting = - "alt 1\n"; - assertEquals(expecting, found); - - expecting = null; - assertEquals(expecting, stderrDuringParse); - } - - @Test public void testActionHidesPreds() throws Exception { - // can't see preds, resolves to first alt found (1 in this case) - String grammar = - "grammar T;\n" + - "@parser::members {int i;}\n" + - "s : a+ ;\n" + - "a : {i=1;} ID {i==1}? {System.out.println(\"alt 1\");}\n" + - " | {i=2;} ID {i==2}? {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x x y", false); - String expecting = - "alt 1\n" + - "alt 1\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - /** In this case, we use predicates that depend on global information - * like we would do for a symbol table. We simply execute - * the predicates assuming that all necessary information is available. - * The i++ action is done outside of the prediction and so it is executed. - */ - @Test public void testToLeftWithVaryingPredicate() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {int i=0;}\n" + - "s : ({i++; System.out.println(\"i=\"+i);} a)+ ;\n" + - "a : {i % 2 == 0}? ID {System.out.println(\"alt 1\");}\n" + - " | {i % 2 != 0}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "x x y", false); - String expecting = - "i=1\n" + - "alt 2\n" + - "i=2\n" + - "alt 1\n" + - "i=3\n" + - "alt 2\n"; - assertEquals(expecting, found); - } - - /** - * In this case, we're passing a parameter into a rule that uses that - * information to predict the alternatives. This is the special case - * where we know exactly which context we are in. The context stack - * is empty and we have not dipped into the outer context to make a decision. 
- */ - @Test public void testPredicateDependentOnArg() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {int i=0;}\n" + - "s : a[2] a[1];\n" + - "a[int i]" + - " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a b", false); - String expecting = - "alt 2\n" + - "alt 1\n"; - assertEquals(expecting, found); - } - - /** In this case, we have to ensure that the predicates are not - tested during the closure after recognizing the 1st ID. The - closure will fall off the end of 'a' 1st time and reach into the - a[1] rule invocation. It should not execute predicates because it - does not know what the parameter is. The context stack will not - be empty and so they should be ignored. It will not affect - recognition, however. We are really making sure the ATN - simulation doesn't crash with context object issues when it - encounters preds during FOLLOW. - */ - @Test public void testPredicateDependentOnArg2() throws Exception { - String grammar = - "grammar T;\n" + - "s : a[2] a[1];\n" + - "a[int i]" + - " : {$i==1}? ID\n" + - " | {$i==2}? ID\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a b", false); - String expecting = - ""; - assertEquals(expecting, found); - } - - @Test public void testDependentPredNotInOuterCtxShouldBeIgnored() throws Exception { - // uses ID ';' or ID '.' lookahead to solve s. preds not tested. - String grammar = - "grammar T;\n" + - "s : b[2] ';' | b[2] '.' ;\n" + // decision in s drills down to ctx-dependent pred in a; - "b[int i] : a[i] ;\n" + - "a[int i]" + - " : {$i==1}? ID {System.out.println(\"alt 1\");}\n" + - " | {$i==2}? ID {System.out.println(\"alt 2\");}\n" + - " ;" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a;", false); - String expecting = - "alt 2\n"; - assertEquals(expecting, found); - } - - @Test public void testIndependentPredNotPassedOuterCtxToAvoidCastException() throws Exception { - String grammar = - "grammar T;\n" + - "s : b ';' | b '.' ;\n" + - "b : a ;\n" + - "a" + - " : {false}? ID {System.out.println(\"alt 1\");}\n" + - " | {true}? ID {System.out.println(\"alt 2\");}\n" + - " ;" + - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a;", false); - String expecting = - "alt 2\n"; - assertEquals(expecting, found); - } - - /** During a global follow operation, we still collect semantic - * predicates as long as they are not dependent on local context - */ - @Test public void testPredsInGlobalFOLLOW() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {" + - "void f(Object s) {System.out.println(s);}\n" + - "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + - "}\n" + - "s : e {p(true)}? {f(\"parse\");} '!' ;\n" + - "t : e {p(false)}? 
ID ;\n" + - "e : ID | ;\n" + // non-LL(1) so we use ATN - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a!", false); - String expecting = - "eval=true\n" + // now we are parsing - "parse\n"; - assertEquals(expecting, found); - } - - /** We cannot collect predicates that are dependent on local context if - * we are doing a global follow. They appear as if they were not there at all. - */ - @Test public void testDepedentPredsInGlobalFOLLOW() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {" + - "void f(Object s) {System.out.println(s);}\n" + - "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + - "}\n" + - "s : a[99] ;\n" + - "a[int i] : e {p($i==99)}? {f(\"parse\");} '!' ;\n" + - "b[int i] : e {p($i==99)}? ID ;\n" + - "e : ID | ;\n" + // non-LL(1) so we use ATN - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a!", false); - String expecting = - "eval=true\n" + - "parse\n"; - assertEquals(expecting, found); - } - - /** Regular non-forced actions can create side effects used by semantic - * predicates and so we cannot evaluate any semantic predicate - * encountered after having seen a regular action. This includes - * during global follow operations. - */ - @Test public void testActionsHidePredsInGlobalFOLLOW() throws Exception { - String grammar = - "grammar T;\n" + - "@parser::members {" + - "void f(Object s) {System.out.println(s);}\n" + - "boolean p(boolean v) {System.out.println(\"eval=\"+v); return v;}\n" + - "}\n" + - "s : e {} {p(true)}? {f(\"parse\");} '!' ;\n" + - "t : e {} {p(false)}? ID ;\n" + - "e : ID | ;\n" + // non-LL(1) so we use ATN - "ID : 'a'..'z'+ ;\n" + - "INT : '0'..'9'+;\n" + - "WS : (' '|'\\n') -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", - "a!", false); - String expecting = - "eval=true\n" + - "parse\n"; - assertEquals(expecting, found); - } - - @Test public void testPredTestedEvenWhenUnAmbig() throws Exception { - String grammar = - "grammar T;\n" + - "\n" + - "@parser::members {boolean enumKeyword = true;}\n" + - "\n" + - "primary\n" + - " : ID {System.out.println(\"ID \"+$ID.text);}\n" + - " | {!enumKeyword}? 'enum' {System.out.println(\"enum\");}\n" + - " ;\n" + - "\n" + - "ID : [a-z]+ ;\n" + - "\n" + - "WS : [ \\t\\n\\r]+ -> skip ;\n"; - - String found = execParser("T.g4", grammar, "TParser", "TLexer", "primary", - "abc", false); - assertEquals("ID abc\n", found); - - execParser("T.g4", grammar, "TParser", "TLexer", "primary", - "enum", false); - assertEquals("line 1:0 no viable alternative at input 'enum'\n", stderrDuringParse); - } - - /** - * This is a regression test for antlr/antlr4#218 "ANTLR4 EOF Related Bug". - * https://github.com/antlr/antlr4/issues/218 - */ - @Test public void testDisabledAlternative() { - String grammar = - "grammar AnnotProcessor;\n" + - "\n" + - "cppCompilationUnit : content+ EOF;\n" + - "\n" + - "content: anything | {false}? 
.;\n" + - "\n" + - "anything: ANY_CHAR;\n" + - "\n" + - "ANY_CHAR: [_a-zA-Z0-9];\n"; - - String input = "hello"; - String found = execParser("AnnotProcessor.g4", grammar, "AnnotProcessorParser", "AnnotProcessorLexer", "cppCompilationUnit", - input, false); - assertEquals("", found); - assertNull(stderrDuringParse); - } - - /** Loopback doesn't eval predicate at start of alt */ - @Test public void testPredFromAltTestedInLoopBack() { - String grammar = - "grammar T2;\n" + - "\n" + - "file\n" + - "@after {System.out.println($ctx.toStringTree(this));}\n" + - " : para para EOF ;" + - "para: paraContent NL NL ;\n"+ - "paraContent : ('s'|'x'|{_input.LA(2)!=NL}? NL)+ ;\n"+ - "NL : '\\n' ;\n"+ - "S : 's' ;\n"+ - "X : 'x' ;\n"; - - String input = "s\n\n\nx\n"; - String found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file", - input, true); - assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n", found); - assertEquals(stderrDuringParse, "line 5:2 mismatched input '' expecting '\n'\n"); - - input = "s\n\n\nx\n\n"; - found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file", - input, true); - assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x) \\n \\n) )\n", found); - - assertNull(stderrDuringParse); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestSets.java b/tool/test/org/antlr/v4/xtest/TestSets.java deleted file mode 100644 index c55847637..000000000 --- a/tool/test/org/antlr/v4/xtest/TestSets.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -package org.antlr.v4.xtest; - -import org.antlr.v4.tool.ErrorType; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** Test the set stuff in lexer and parser */ -public class TestSets extends BaseTest { - protected boolean debug = false; - - /** Public default constructor used by TestRig */ - public TestSets() { - } - - @Test public void testSeqDoesNotBecomeSet() throws Exception { - // this must return A not I to the parser; calling a nonfragment rule - // from a nonfragment rule does not set the overall token. - String grammar = - "grammar P;\n" + - "a : C {System.out.println(_input.getText());} ;\n" + - "fragment A : '1' | '2';\n" + - "fragment B : '3' '4';\n" + - "C : A | B;\n"; - String found = execParser("P.g4", grammar, "PParser", "PLexer", - "a", "34", debug); - assertEquals("34\n", found); - } - - @Test public void testParserSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : t=('x'|'y') {System.out.println($t.text);} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testParserNotSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "zz", debug); - assertEquals("z\n", found); - } - - @Test public void testParserNotToken() throws Exception { - String grammar = - "grammar T;\n" + - "a : ~'x' 'z' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "zz", debug); - assertEquals("zz\n", found); - } - - @Test public void testParserNotTokenWithLabel() throws Exception { - String grammar = - "grammar T;\n" + - "a : t=~'x' 'z' {System.out.println($t.text);} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "zz", debug); - assertEquals("z\n", found); - } - - @Test public void testRuleAsSet() throws Exception { - String grammar = - "grammar T;\n" + - "a @after {System.out.println(_input.getText());} : 'a' | 'b' |'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "b", debug); - assertEquals("b\n", found); - } - - @Test public void testNotChar() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~'b' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testOptionalSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A? 'c' {System.out.println(_input.getText());} ;\n" + - "A : 'b' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bc", debug); - assertEquals("bc\n", found); - } - - @Test public void testOptionalLexerSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : 'b'? 
'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bc", debug); - assertEquals("bc\n", found); - } - - @Test public void testStarLexerSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : 'b'* 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bbbbc", debug); - assertEquals("bbbbc\n", found); - found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "c", debug); - assertEquals("c\n", found); - } - - @Test public void testPlusLexerSingleElement() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : 'b'+ 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "bbbbc", debug); - assertEquals("bbbbc\n", found); - } - - @Test public void testOptionalSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : ('a'|'b')? 'c' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "ac", debug); - assertEquals("ac\n", found); - } - - @Test public void testStarSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : ('a'|'b')* 'c' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testPlusSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : ('a'|'b')+ 'c' {System.out.println(_input.getText());} ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testLexerOptionalSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : ('a'|'b')? 
'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "ac", debug); - assertEquals("ac\n", found); - } - - @Test public void testLexerStarSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : ('a'|'b')* 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testLexerPlusSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println(_input.getText());} ;\n" + - "A : ('a'|'b')+ 'c' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "abaac", debug); - assertEquals("abaac\n", found); - } - - @Test public void testNotCharSet() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~('b'|'c') ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testNotCharSetWithLabel() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : h=~('b'|'c') ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testNotCharSetWithRuleRef() throws Exception { - // might be a useful feature to add someday - String[] pair = new String[] { - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~('a'|B) ;\n" + - "B : 'b' ;\n", - "error(" + ErrorType.UNSUPPORTED_REFERENCE_IN_LEXER_SET.code + "): T.g4:3:10: rule reference B is not currently supported in a set\n" - }; - super.testErrors(pair, true); - } - - @Test public void testNotCharSetWithString() throws Exception { - // might be a useful feature to add someday - String[] pair = new String[] { - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ~('a'|'aa') ;\n" + - "B : 'b' ;\n", - "error(" + ErrorType.INVALID_LITERAL_IN_LEXER_SET.code + "): T.g4:3:10: multi-character literals are not allowed in lexer sets: 'aa'\n" - }; - super.testErrors(pair, true); - } - - @Test public void testNotCharSetWithRuleRef3() throws Exception { - String grammar = - "grammar T;\n" + - "a : A {System.out.println($A.text);} ;\n" + - "A : ('a'|B) ;\n" + // this doesn't collapse to set but works - "fragment\n" + - "B : ~('a'|'c') ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "x", debug); - assertEquals("x\n", found); - } - - @Test public void testCharSetLiteral() throws Exception { - String grammar = - "grammar T;\n" + - "a : (A {System.out.println($A.text);})+ ;\n" + - "A : [AaBb] ;\n" + - "WS : (' '|'\\n')+ -> skip ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", - "a", "A a B b", debug); - assertEquals("A\n" + - "a\n" + - "B\n" + - "b\n", found); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestSymbolIssues.java b/tool/test/org/antlr/v4/xtest/TestSymbolIssues.java deleted file mode 100644 index bb6d050f5..000000000 --- a/tool/test/org/antlr/v4/xtest/TestSymbolIssues.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.tool.ErrorType; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** */ -public class TestSymbolIssues extends BaseTest { - static String[] A = { - // INPUT - "grammar A;\n" + - "options { opt='sss'; k=3; }\n" + - "\n" + - "@members {foo}\n" + - "@members {bar}\n" + - "@lexer::header {package jj;}\n" + - "@lexer::header {package kk;}\n" + - "\n" + - "a[int i] returns [foo f] : X ID a[3] b[34] c ;\n" + - "b returns [int g] : Y 'y' 'if' a ;\n" + - "c : FJKD ;\n" + - "\n" + - "ID : 'a'..'z'+ ID ;", - // YIELDS - "error(" + ErrorType.ACTION_REDEFINITION.code + "): A.g4:5:1: redefinition of members action\n" + - "error(" + ErrorType.ACTION_REDEFINITION.code + "): A.g4:7:1: redefinition of header action\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): A.g4:2:10: unsupported option opt\n" + - "warning(" + ErrorType.ILLEGAL_OPTION.code + "): A.g4:2:21: unsupported option k\n" + - "error(" + ErrorType.ACTION_REDEFINITION.code + "): A.g4:5:1: redefinition of members action\n" + - "warning(" + ErrorType.IMPLICIT_TOKEN_DEFINITION.code + "): A.g4:9:27: implicit definition of token X in parser\n" + - "warning(" + ErrorType.IMPLICIT_TOKEN_DEFINITION.code + "): A.g4:10:20: implicit definition of token Y in parser\n" + - "warning(" + ErrorType.IMPLICIT_TOKEN_DEFINITION.code + "): A.g4:11:4: implicit definition of token FJKD in parser\n" + - "error(" + ErrorType.RULE_HAS_NO_ARGS.code + "): A.g4:9:37: rule b has no defined parameters\n" + - "error(" + ErrorType.MISSING_RULE_ARGS.code + "): A.g4:10:31: missing arguments(s) on rule reference: a\n" - }; - - static String[] B = { - // INPUT - "parser grammar B;\n" + - "tokens { ID, FOO, X, Y }\n" + - "\n" + - "a : s=ID b+=ID X=ID '.' 
;\n" + - "\n" + - "b : x=ID x+=ID ;\n" + - "\n" + - "s : FOO ;", - // YIELDS - "error(" + ErrorType.LABEL_CONFLICTS_WITH_RULE.code + "): B.g4:4:4: label s conflicts with rule with same name\n" + - "error(" + ErrorType.LABEL_CONFLICTS_WITH_RULE.code + "): B.g4:4:9: label b conflicts with rule with same name\n" + - "error(" + ErrorType.LABEL_CONFLICTS_WITH_TOKEN.code + "): B.g4:4:15: label X conflicts with token with same name\n" + - "error(" + ErrorType.LABEL_TYPE_CONFLICT.code + "): B.g4:6:9: label x type mismatch with previous definition: TOKEN_LIST_LABEL!=TOKEN_LABEL\n" + - "error(" + ErrorType.IMPLICIT_STRING_DEFINITION.code + "): B.g4:4:20: cannot create implicit token for string literal in non-combined grammar: '.'\n" - }; - - static String[] D = { - // INPUT - "parser grammar D;\n" + - "tokens{ID}\n" + - "a[int j] \n" + - " : i=ID j=ID ;\n" + - "\n" + - "b[int i] returns [int i] : ID ;\n" + - "\n" + - "c[int i] returns [String k]\n" + - " : ID ;", - - // YIELDS - "error(" + ErrorType.LABEL_CONFLICTS_WITH_ARG.code + "): D.g4:4:21: label j conflicts with parameter with same name\n" + - "error(" + ErrorType.RETVAL_CONFLICTS_WITH_ARG.code + "): D.g4:6:22: return value i conflicts with parameter with same name\n" - }; - - static String[] E = { - // INPUT - "grammar E;\n" + - "tokens {\n" + - " A, A,\n" + - " B,\n" + - " C\n" + - "}\n" + - "a : A ;\n", - - // YIELDS - "warning(" + ErrorType.TOKEN_NAME_REASSIGNMENT.code + "): E.g4:3:4: token name A is already defined\n" - }; - - @Test public void testA() { super.testErrors(A, false); } - @Test public void testB() { super.testErrors(B, false); } - @Test public void testD() { super.testErrors(D, false); } - @Test public void testE() { super.testErrors(E, false); } - - @Test public void testStringLiteralRedefs() throws Exception { - String grammar = - "lexer grammar L;\n" + - "A : 'a' ;\n" + - "mode X;\n"+ - "B : 'a' ;\n"+ - "mode Y;\n"+ - "C : 'a' ;\n"; - - LexerGrammar g = new LexerGrammar(grammar); - - String expectedTokenIDToTypeMap = "{EOF=-1, A=1, B=2, C=3}"; - String expectedStringLiteralToTypeMap = "{}"; - String expectedTypeToTokenList = "[A, B, C]"; - - assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); - assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); - assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - } - - @Test public void testEmptyLexerModeDetection() throws Exception { - String[] test = { - "lexer grammar L;\n" + - "A : 'a';\n" + - "mode X;\n" + - "fragment B : 'b';", - - "error(" + ErrorType.MODE_WITHOUT_RULES.code + "): L.g4:3:5: lexer mode X must contain at least one non-fragment rule\n" - }; - - testErrors(test, false); - } - - @Test public void testEmptyLexerRuleDetection() throws Exception { - String[] test = { - "lexer grammar L;\n" + - "A : 'a';\n" + - "WS : [ \t]* -> skip;\n" + - "mode X;\n" + - " B : C;\n" + - " fragment C : A | (A C)?;", - - "warning(" + ErrorType.EPSILON_TOKEN.code + "): L.g4:3:0: non-fragment lexer rule WS can match the empty string\n" + - "warning(" + ErrorType.EPSILON_TOKEN.code + "): L.g4:5:2: non-fragment lexer rule B can match the empty string\n" - }; - - testErrors(test, false); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java b/tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java deleted file mode 100644 index ecedabdb3..000000000 --- a/tool/test/org/antlr/v4/xtest/TestTokenPositionOptions.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * [The "BSD license"] - * 
Copyright (c) 2014 Terence Parr - * Copyright (c) 2014 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.xtest; - -import org.antlr.runtime.Token; -import org.antlr.v4.misc.Utils; -import org.antlr.v4.parse.ANTLRParser; -import org.antlr.v4.runtime.misc.IntervalSet; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.ast.GrammarAST; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class TestTokenPositionOptions extends BaseTest { - @Test public void testLeftRecursionRewrite() throws Exception { - Grammar g = new Grammar( - "grammar T;\n" + - "s : e ';' ;\n" + - "e : e '*' e\n" + - " | e '+' e\n" + - " | e '.' ID\n" + - " | '-' e\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" - ); - - String expectedTree = - "(COMBINED_GRAMMAR T (RULES (RULE s (BLOCK (ALT e ';'))) (RULE e (BLOCK (ALT (BLOCK (ALT {} ('-' (ELEMENT_OPTIONS (= tokenIndex 43))) (e (ELEMENT_OPTIONS (= tokenIndex 45) (= p 2)))) (ALT (ID (ELEMENT_OPTIONS (= tokenIndex 49))))) (* (BLOCK (ALT ({precpred(_ctx, 5)}? (ELEMENT_OPTIONS (= p 5))) ('*' (ELEMENT_OPTIONS (= tokenIndex 21))) (e (ELEMENT_OPTIONS (= tokenIndex 23) (= p 6)))) (ALT ({precpred(_ctx, 4)}? (ELEMENT_OPTIONS (= p 4))) ('+' (ELEMENT_OPTIONS (= tokenIndex 29))) (e (ELEMENT_OPTIONS (= tokenIndex 31) (= p 5)))) (ALT ({precpred(_ctx, 3)}? (ELEMENT_OPTIONS (= p 3))) ('.' 
(ELEMENT_OPTIONS (= tokenIndex 37))) (ID (ELEMENT_OPTIONS (= tokenIndex 39)))))))))))"; - assertEquals(expectedTree, g.ast.toStringTree()); - - String expectedElementTokens = - "[@5,11:11='s',<57>,2:0]\n" + - "[@9,15:15='e',<57>,2:4]\n" + - "[@11,17:19='';'',<62>,2:6]\n" + - "[@15,23:23='e',<57>,3:0]\n" + - "[@43,64:66=''-'',<62>,6:4]\n" + - "[@45,68:68='e',<57>,6:8]\n" + - "[@49,74:75='ID',<66>,7:4]\n" + - "[@21,29:31=''*'',<62>,3:6]\n" + - "[@23,33:33='e',<57>,3:10]\n" + - "[@29,41:43=''+'',<62>,4:6]\n" + - "[@31,45:45='e',<57>,4:10]\n" + - "[@37,53:55=''.'',<62>,5:6]\n" + - "[@39,57:58='ID',<66>,5:10]"; - - IntervalSet types = - new IntervalSet(ANTLRParser.TOKEN_REF, - ANTLRParser.STRING_LITERAL, - ANTLRParser.RULE_REF); - List nodes = g.ast.getNodesWithTypePreorderDFS(types); - List tokens = new ArrayList(); - for (GrammarAST node : nodes) { - tokens.add(node.getToken()); - } - assertEquals(expectedElementTokens, Utils.join(tokens.toArray(), "\n")); - } - - @Test public void testLeftRecursionWithLabels() throws Exception { - Grammar g = new Grammar( - "grammar T;\n" + - "s : e ';' ;\n" + - "e : e '*' x=e\n" + - " | e '+' e\n" + - " | e '.' y=ID\n" + - " | '-' e\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" - ); - - String expectedTree = - "(COMBINED_GRAMMAR T (RULES (RULE s (BLOCK (ALT e ';'))) (RULE e (BLOCK (ALT (BLOCK (ALT {} ('-' (ELEMENT_OPTIONS (= tokenIndex 47))) (e (ELEMENT_OPTIONS (= tokenIndex 49) (= p 2)))) (ALT (ID (ELEMENT_OPTIONS (= tokenIndex 53))))) (* (BLOCK (ALT ({precpred(_ctx, 5)}? (ELEMENT_OPTIONS (= p 5))) ('*' (ELEMENT_OPTIONS (= tokenIndex 21))) (= x (e (ELEMENT_OPTIONS (= tokenIndex 25) (= p 6))))) (ALT ({precpred(_ctx, 4)}? (ELEMENT_OPTIONS (= p 4))) ('+' (ELEMENT_OPTIONS (= tokenIndex 31))) (e (ELEMENT_OPTIONS (= tokenIndex 33) (= p 5)))) (ALT ({precpred(_ctx, 3)}? (ELEMENT_OPTIONS (= p 3))) ('.' (ELEMENT_OPTIONS (= tokenIndex 39))) (= y (ID (ELEMENT_OPTIONS (= tokenIndex 43))))))))))))"; - assertEquals(expectedTree, g.ast.toStringTree()); - - String expectedElementTokens = - "[@5,11:11='s',<57>,2:0]\n" + - "[@9,15:15='e',<57>,2:4]\n" + - "[@11,17:19='';'',<62>,2:6]\n" + - "[@15,23:23='e',<57>,3:0]\n" + - "[@47,68:70=''-'',<62>,6:4]\n" + - "[@49,72:72='e',<57>,6:8]\n" + - "[@53,78:79='ID',<66>,7:4]\n" + - "[@21,29:31=''*'',<62>,3:6]\n" + - "[@25,35:35='e',<57>,3:12]\n" + - "[@31,43:45=''+'',<62>,4:6]\n" + - "[@33,47:47='e',<57>,4:10]\n" + - "[@39,55:57=''.'',<62>,5:6]\n" + - "[@43,61:62='ID',<66>,5:12]"; - - IntervalSet types = - new IntervalSet(ANTLRParser.TOKEN_REF, - ANTLRParser.STRING_LITERAL, - ANTLRParser.RULE_REF); - List nodes = g.ast.getNodesWithTypePreorderDFS(types); - List tokens = new ArrayList(); - for (GrammarAST node : nodes) { - tokens.add(node.getToken()); - } - assertEquals(expectedElementTokens, Utils.join(tokens.toArray(), "\n")); - } - - @Test public void testLeftRecursionWithSet() throws Exception { - Grammar g = new Grammar( - "grammar T;\n" + - "s : e ';' ;\n" + - "e : e op=('*'|'/') e\n" + - " | e '+' e\n" + - " | e '.' ID\n" + - " | '-' e\n" + - " | ID\n" + - " ;\n" + - "ID : [a-z]+ ;\n" - ); - - String expectedTree = - "(COMBINED_GRAMMAR T (RULES (RULE s (BLOCK (ALT e ';'))) (RULE e (BLOCK (ALT (BLOCK (ALT {} ('-' (ELEMENT_OPTIONS (= tokenIndex 49))) (e (ELEMENT_OPTIONS (= tokenIndex 51) (= p 2)))) (ALT (ID (ELEMENT_OPTIONS (= tokenIndex 55))))) (* (BLOCK (ALT ({precpred(_ctx, 5)}? 
(ELEMENT_OPTIONS (= p 5))) (= op (SET ('*' (ELEMENT_OPTIONS (= tokenIndex 24))) ('/' (ELEMENT_OPTIONS (= tokenIndex 26))))) (e (ELEMENT_OPTIONS (= tokenIndex 29) (= p 6)))) (ALT ({precpred(_ctx, 4)}? (ELEMENT_OPTIONS (= p 4))) ('+' (ELEMENT_OPTIONS (= tokenIndex 35))) (e (ELEMENT_OPTIONS (= tokenIndex 37) (= p 5)))) (ALT ({precpred(_ctx, 3)}? (ELEMENT_OPTIONS (= p 3))) ('.' (ELEMENT_OPTIONS (= tokenIndex 43))) (ID (ELEMENT_OPTIONS (= tokenIndex 45)))))))))))"; - assertEquals(expectedTree, g.ast.toStringTree()); - - String expectedElementTokens = - "[@5,11:11='s',<57>,2:0]\n" + - "[@9,15:15='e',<57>,2:4]\n" + - "[@11,17:19='';'',<62>,2:6]\n" + - "[@15,23:23='e',<57>,3:0]\n" + - "[@49,73:75=''-'',<62>,6:4]\n" + - "[@51,77:77='e',<57>,6:8]\n" + - "[@55,83:84='ID',<66>,7:4]\n" + - "[@24,33:35=''*'',<62>,3:10]\n" + - "[@26,37:39=''/'',<62>,3:14]\n" + - "[@29,42:42='e',<57>,3:19]\n" + - "[@35,50:52=''+'',<62>,4:6]\n" + - "[@37,54:54='e',<57>,4:10]\n" + - "[@43,62:64=''.'',<62>,5:6]\n" + - "[@45,66:67='ID',<66>,5:10]"; - - IntervalSet types = - new IntervalSet(ANTLRParser.TOKEN_REF, - ANTLRParser.STRING_LITERAL, - ANTLRParser.RULE_REF); - List nodes = g.ast.getNodesWithTypePreorderDFS(types); - List tokens = new ArrayList(); - for (GrammarAST node : nodes) { - tokens.add(node.getToken()); - } - assertEquals(expectedElementTokens, Utils.join(tokens.toArray(), "\n")); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java b/tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java deleted file mode 100644 index 71bff0fee..000000000 --- a/tool/test/org/antlr/v4/xtest/TestTokenStreamRewriter.java +++ /dev/null @@ -1,884 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.LexerInterpreter; -import org.antlr.v4.runtime.TokenStreamRewriter; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -public class TestTokenStreamRewriter extends BaseTest { - - /** Public default constructor used by TestRig */ - public TestTokenStreamRewriter() { - } - - @Test public void testInsertBeforeIndex0() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream("abc")); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(0, "0"); - String result = tokens.getText(); - String expecting = "0abc"; - assertEquals(expecting, result); - } - - @Test public void testInsertAfterLastIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertAfter(2, "x"); - String result = tokens.getText(); - String expecting = "abcx"; - assertEquals(expecting, result); - } - - @Test public void test2InsertBeforeAfterMiddleIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(1, "x"); - tokens.insertAfter(1, "x"); - String result = tokens.getText(); - String expecting = "axbxc"; - assertEquals(expecting, result); - } - - @Test public void testReplaceIndex0() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(0, "x"); - String result = tokens.getText(); - String expecting = "xbc"; - assertEquals(expecting, result); - } - - @Test public void testReplaceLastIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, "x"); - String result = tokens.getText(); - String expecting = "abx"; - assertEquals(expecting, result); - } - - @Test public void testReplaceMiddleIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 
'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(1, "x"); - String result = tokens.getText(); - String expecting = "axc"; - assertEquals(expecting, result); - } - - @Test public void testToStringStartStop() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "MUL : '*';\n" + - "ASSIGN : '=';\n" + - "WS : ' '+;\n"); - // Tokens: 0123456789 - // Input: x = 3 * 0; - String input = "x = 3 * 0;"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(4, 8, "0"); - stream.fill(); -// replace 3 * 0 with 0 - - String result = tokens.getTokenStream().getText(); - String expecting = "x = 3 * 0;"; - assertEquals(expecting, result); - - result = tokens.getText(); - expecting = "x = 0;"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(0, 9)); - expecting = "x = 0;"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(4, 8)); - expecting = "0"; - assertEquals(expecting, result); - } - - @Test public void testToStringStartStop2() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - String input = "x = 3 * 0 + 2 * 0;"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - - String result = tokens.getTokenStream().getText(); - String expecting = "x = 3 * 0 + 2 * 0;"; - assertEquals(expecting, result); - - tokens.replace(4, 8, "0"); - stream.fill(); -// replace 3 * 0 with 0 - result = tokens.getText(); - expecting = "x = 0 + 2 * 0;"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(0, 17)); - expecting = "x = 0 + 2 * 0;"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(4, 8)); - expecting = "0"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(0, 8)); - expecting = "x = 0"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(12, 16)); - expecting = "2 * 0"; - assertEquals(expecting, result); - - tokens.insertAfter(17, "// comment"); - result = tokens.getText(Interval.of(12, 18)); - expecting = "2 * 0;// comment"; - assertEquals(expecting, result); - - result = tokens.getText(Interval.of(0, 8)); - stream.fill(); -// try again after insert at end - expecting = "x = 0"; - assertEquals(expecting, result); - } - - - @Test public void test2ReplaceMiddleIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = 
new TokenStreamRewriter(stream); - tokens.replace(1, "x"); - tokens.replace(1, "y"); - String result = tokens.getText(); - String expecting = "ayc"; - assertEquals(expecting, result); - } - - @Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(0, "_"); - tokens.replace(1, "x"); - tokens.replace(1, "y"); - String result = tokens.getText(); - String expecting = "_ayc"; - assertEquals(expecting, result); - } - - @Test public void testReplaceThenDeleteMiddleIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(1, "x"); - tokens.delete(1); - String result = tokens.getText(); - String expecting = "ac"; - assertEquals(expecting, result); - } - - @Test public void testInsertInPriorReplace() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(0, 2, "x"); - tokens.insertBefore(1, "0"); - Exception exc = null; - try { - tokens.getText(); - } - catch (IllegalArgumentException iae) { - exc = iae; - } - String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<2>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<1>,1:0]..[@2,2:2='c',<3>,1:2]:\"x\">"; - assertNotNull(exc); - assertEquals(expecting, exc.getMessage()); - } - - @Test public void testInsertThenReplaceSameIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(0, "0"); - tokens.replace(0, "x"); - stream.fill(); -// supercedes insert at 0 - String result = tokens.getText(); - String expecting = "0xbc"; - assertEquals(expecting, result); - } - - @Test public void test2InsertMiddleIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(1, "x"); - tokens.insertBefore(1, "y"); - String result = tokens.getText(); - String expecting = "ayxbc"; - assertEquals(expecting, result); - } - - @Test public void test2InsertThenReplaceIndex0() throws Exception { - LexerGrammar g = new LexerGrammar( 
- "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(0, "x"); - tokens.insertBefore(0, "y"); - tokens.replace(0, "z"); - String result = tokens.getText(); - String expecting = "yxzbc"; - assertEquals(expecting, result); - } - - @Test public void testReplaceThenInsertBeforeLastIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, "x"); - tokens.insertBefore(2, "y"); - String result = tokens.getText(); - String expecting = "abyx"; - assertEquals(expecting, result); - } - - @Test public void testInsertThenReplaceLastIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(2, "y"); - tokens.replace(2, "x"); - String result = tokens.getText(); - String expecting = "abyx"; - assertEquals(expecting, result); - } - - @Test public void testReplaceThenInsertAfterLastIndex() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, "x"); - tokens.insertAfter(2, "y"); - String result = tokens.getText(); - String expecting = "abxy"; - assertEquals(expecting, result); - } - - @Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 4, "x"); - tokens.insertBefore(2, "y"); - String result = tokens.getText(); - String expecting = "abyxba"; - assertEquals(expecting, result); - } - - @Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 4, "x"); - tokens.insertBefore(4, "y"); - stream.fill(); // no effect; within range of a replace - Exception exc = null; - try { - 
tokens.getText(); - } - catch (IllegalArgumentException iae) { - exc = iae; - } - String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<3>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"x\">"; - assertNotNull(exc); - assertEquals(expecting, exc.getMessage()); - } - - @Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 4, "x"); - tokens.insertAfter(4, "y"); - String result = tokens.getText(); - String expecting = "abxyba"; - assertEquals(expecting, result); - } - - @Test public void testReplaceAll() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(0, 6, "x"); - String result = tokens.getText(); - String expecting = "x"; - assertEquals(expecting, result); - } - - @Test public void testReplaceSubsetThenFetch() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 4, "xyz"); - String result = tokens.getText(Interval.of(0, 6)); - String expecting = "abxyzba"; - assertEquals(expecting, result); - } - - @Test public void testReplaceThenReplaceSuperset() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 4, "xyz"); - tokens.replace(3, 5, "foo"); - stream.fill(); -// overlaps, error - Exception exc = null; - try { - tokens.getText(); - } - catch (IllegalArgumentException iae) { - exc = iae; - } - String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<3>,1:3]..[@5,5:5='b',<2>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<3>,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">"; - assertNotNull(exc); - assertEquals(expecting, exc.getMessage()); - } - - @Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcccba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 4, "xyz"); - tokens.replace(1, 3, "foo"); - stream.fill(); -// overlap, error - Exception exc = null; - try { - tokens.getText(); - } - catch (IllegalArgumentException 
iae) { - exc = iae; - } - String expecting = "replace op boundaries of ,1:1]..[@3,3:3='c',<3>,1:3]:\"foo\"> overlap with previous ,1:2]..[@4,4:4='c',<3>,1:4]:\"xyz\">"; - assertNotNull(exc); - assertEquals(expecting, exc.getMessage()); - } - - @Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcba"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 2, "xyz"); - tokens.replace(0, 3, "foo"); - String result = tokens.getText(); - String expecting = "fooa"; - assertEquals(expecting, result); - } - - @Test public void testCombineInserts() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(0, "x"); - tokens.insertBefore(0, "y"); - String result = tokens.getText(); - String expecting = "yxabc"; - assertEquals(expecting, result); - } - - @Test public void testCombine3Inserts() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(1, "x"); - tokens.insertBefore(0, "y"); - tokens.insertBefore(1, "z"); - String result = tokens.getText(); - String expecting = "yazxbc"; - assertEquals(expecting, result); - } - - @Test public void testCombineInsertOnLeftWithReplace() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(0, 2, "foo"); - tokens.insertBefore(0, "z"); - stream.fill(); -// combine with left edge of rewrite - String result = tokens.getText(); - String expecting = "zfoo"; - assertEquals(expecting, result); - } - - @Test public void testCombineInsertOnLeftWithDelete() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.delete(0, 2); - tokens.insertBefore(0, "z"); - stream.fill(); -// combine with left edge of rewrite - String result = tokens.getText(); - String expecting = "z"; - stream.fill(); -// make sure combo is not znull - assertEquals(expecting, result); - } - - @Test public void testDisjointInserts() throws Exception { - LexerGrammar g = new LexerGrammar( 
- "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(1, "x"); - tokens.insertBefore(2, "y"); - tokens.insertBefore(0, "z"); - String result = tokens.getText(); - String expecting = "zaxbyc"; - assertEquals(expecting, result); - } - - @Test public void testOverlappingReplace() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(1, 2, "foo"); - tokens.replace(0, 3, "bar"); - stream.fill(); -// wipes prior nested replace - String result = tokens.getText(); - String expecting = "bar"; - assertEquals(expecting, result); - } - - @Test public void testOverlappingReplace2() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(0, 3, "bar"); - tokens.replace(1, 2, "foo"); - stream.fill(); -// cannot split earlier replace - Exception exc = null; - try { - tokens.getText(); - } - catch (IllegalArgumentException iae) { - exc = iae; - } - String expecting = "replace op boundaries of ,1:1]..[@2,2:2='c',<3>,1:2]:\"foo\"> overlap with previous ,1:0]..[@3,3:3='c',<3>,1:3]:\"bar\">"; - assertNotNull(exc); - assertEquals(expecting, exc.getMessage()); - } - - @Test public void testOverlappingReplace3() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(1, 2, "foo"); - tokens.replace(0, 2, "bar"); - stream.fill(); -// wipes prior nested replace - String result = tokens.getText(); - String expecting = "barc"; - assertEquals(expecting, result); - } - - @Test public void testOverlappingReplace4() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(1, 2, "foo"); - tokens.replace(1, 3, "bar"); - stream.fill(); -// wipes prior nested replace - String result = tokens.getText(); - String expecting = "abar"; - assertEquals(expecting, result); - } - - @Test public void testDropIdenticalReplace() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - 
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(1, 2, "foo"); - tokens.replace(1, 2, "foo"); - stream.fill(); -// drop previous, identical - String result = tokens.getText(); - String expecting = "afooc"; - assertEquals(expecting, result); - } - - @Test public void testDropPrevCoveredInsert() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(1, "foo"); - tokens.replace(1, 2, "foo"); - stream.fill(); -// kill prev insert - String result = tokens.getText(); - String expecting = "afoofoo"; - assertEquals(expecting, result); - } - - @Test public void testLeaveAloneDisjointInsert() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(1, "x"); - tokens.replace(2, 3, "foo"); - String result = tokens.getText(); - String expecting = "axbfoo"; - assertEquals(expecting, result); - } - - @Test public void testLeaveAloneDisjointInsert2() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abcc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.replace(2, 3, "foo"); - tokens.insertBefore(1, "x"); - String result = tokens.getText(); - String expecting = "axbfoo"; - assertEquals(expecting, result); - } - - @Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar T;\n"+ - "A : 'a';\n" + - "B : 'b';\n" + - "C : 'c';\n"); - String input = "abc"; - LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input)); - CommonTokenStream stream = new CommonTokenStream(lexEngine); - stream.fill(); - TokenStreamRewriter tokens = new TokenStreamRewriter(stream); - tokens.insertBefore(2, "y"); - tokens.delete(2); - String result = tokens.getText(); - String expecting = "aby"; - assertEquals(expecting, result); - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java b/tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java deleted file mode 100644 index 503b9cc8e..000000000 --- a/tool/test/org/antlr/v4/xtest/TestTokenTypeAssignment.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.Token; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.Set; -import java.util.StringTokenizer; - -import static org.junit.Assert.*; - -public class TestTokenTypeAssignment extends BaseTest { - - @Test - public void testParserSimpleTokens() throws Exception { - Grammar g = new Grammar( - "parser grammar t;\n"+ - "a : A | B;\n" + - "b : C ;"); - String rules = "a, b"; - String tokenNames = "A, B, C"; - checkSymbols(g, rules, tokenNames); - } - - @Test public void testParserTokensSection() throws Exception { - Grammar g = new Grammar( - "parser grammar t;\n" + - "tokens {\n" + - " C,\n" + - " D" + - "}\n"+ - "a : A | B;\n" + - "b : C ;"); - String rules = "a, b"; - String tokenNames = "A, B, C, D"; - checkSymbols(g, rules, tokenNames); - } - - @Test public void testLexerTokensSection() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n" + - "tokens {\n" + - " C,\n" + - " D" + - "}\n"+ - "A : 'a';\n" + - "C : 'c' ;"); - String rules = "A, C"; - String tokenNames = "A, C, D"; - checkSymbols(g, rules, tokenNames); - } - - @Test public void testCombinedGrammarLiterals() throws Exception { - Grammar g = new Grammar( - "grammar t;\n"+ - "a : 'begin' b 'end';\n" + - "b : C ';' ;\n" + - "ID : 'a' ;\n" + - "FOO : 'foo' ;\n" + // "foo" is not a token name - "C : 'c' ;\n"); // nor is 'c' - String rules = "a, b"; - String tokenNames = "C, FOO, ID, 'begin', 'end', ';'"; - checkSymbols(g, rules, tokenNames); - } - - @Test public void testLiteralInParserAndLexer() throws Exception { - // 'x' is token and char in lexer rule - Grammar g = new Grammar( - "grammar t;\n" + - "a : 'x' E ; \n" + - "E: 'x' '0' ;\n"); - - String literals = "['x']"; - String foundLiterals = g.stringLiteralToTypeMap.keySet().toString(); - assertEquals(literals, foundLiterals); - - foundLiterals = g.implicitLexer.stringLiteralToTypeMap.keySet().toString(); - assertEquals("['x']", foundLiterals); // pushed in lexer from parser - - String[] typeToTokenName = g.getTokenDisplayNames(); - Set tokens = new LinkedHashSet(); - for (String t : typeToTokenName) if ( t!=null ) tokens.add(t); - 
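The token-type tests in this deleted file hinge on one behaviour: a string literal referenced from a parser rule of a combined grammar is pushed into the implicit lexer, so both grammars map the literal to the same token type. A hedged recap of what testLiteralInParserAndLexer checks, using the tool's testing-oriented Grammar(String) constructor; the LiteralMapSketch wrapper and main are illustrative only.

import org.antlr.v4.tool.Grammar;

public class LiteralMapSketch {
    public static void main(String[] args) throws Exception {
        Grammar g = new Grammar(
            "grammar t;\n" +
            "a : 'x' E ; \n" +
            "E: 'x' '0' ;\n");
        // The literal is recorded on the combined grammar...
        System.out.println(g.stringLiteralToTypeMap.keySet());               // ['x']
        // ...and pushed down into the implicit lexer with the same type.
        System.out.println(g.implicitLexer.stringLiteralToTypeMap.keySet()); // ['x']
    }
}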
assertEquals("[, 'x', E]", tokens.toString()); - } - - @Test public void testPredDoesNotHideNameToLiteralMapInLexer() throws Exception { - // 'x' is token and char in lexer rule - Grammar g = new Grammar( - "grammar t;\n" + - "a : 'x' X ; \n" + - "X: 'x' {true}?;\n"); // must match as alias even with pred - - assertEquals("{'x'=1}", g.stringLiteralToTypeMap.toString()); - assertEquals("{EOF=-1, X=1}", g.tokenNameToTypeMap.toString()); - - // pushed in lexer from parser - assertEquals("{'x'=1}", g.implicitLexer.stringLiteralToTypeMap.toString()); - assertEquals("{EOF=-1, X=1}", g.implicitLexer.tokenNameToTypeMap.toString()); - } - - @Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception { - Grammar g = new Grammar( - "grammar t;\n"+ - "a : 'a' ;\n" + - "A : 'a' ;\n"); - String rules = "a"; - String tokenNames = "A, 'a'"; - checkSymbols(g, rules, tokenNames); - } - - @Test public void testSetDoesNotMissTokenAliases() throws Exception { - Grammar g = new Grammar( - "grammar t;\n"+ - "a : 'a'|'b' ;\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n"); - String rules = "a"; - String tokenNames = "A, 'a', B, 'b'"; - checkSymbols(g, rules, tokenNames); - } - - // T E S T L I T E R A L E S C A P E S - - @Test public void testParserCharLiteralWithEscape() throws Exception { - Grammar g = new Grammar( - "grammar t;\n"+ - "a : '\\n';\n"); - Set literals = g.stringLiteralToTypeMap.keySet(); - // must store literals how they appear in the antlr grammar - assertEquals("'\\n'", literals.toArray()[0]); - } - - protected void checkSymbols(Grammar g, - String rulesStr, - String allValidTokensStr) - throws Exception - { - String[] typeToTokenName = g.getTokenNames(); - Set tokens = new HashSet(); - for (int i = 0; i < typeToTokenName.length; i++) { - String t = typeToTokenName[i]; - if ( t!=null ) { - if (t.startsWith(Grammar.AUTO_GENERATED_TOKEN_NAME_PREFIX)) { - tokens.add(g.getTokenDisplayName(i)); - } - else { - tokens.add(t); - } - } - } - - // make sure expected tokens are there - StringTokenizer st = new StringTokenizer(allValidTokensStr, ", "); - while ( st.hasMoreTokens() ) { - String tokenName = st.nextToken(); - assertTrue("token "+tokenName+" expected, but was undefined", - g.getTokenType(tokenName) != Token.INVALID_TYPE); - tokens.remove(tokenName); - } - // make sure there are not any others (other than etc...) - for (String tokenName : tokens) { - assertTrue("unexpected token name "+tokenName, - g.getTokenType(tokenName) < Token.MIN_USER_TOKEN_TYPE); - } - - // make sure all expected rules are there - st = new StringTokenizer(rulesStr, ", "); - int n = 0; - while ( st.hasMoreTokens() ) { - String ruleName = st.nextToken(); - assertNotNull("rule "+ruleName+" expected", g.getRule(ruleName)); - n++; - } - //System.out.println("rules="+rules); - // make sure there are no extra rules - assertEquals("number of rules mismatch; expecting "+n+"; found "+g.rules.size(), - n, g.rules.size()); - - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java b/tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java deleted file mode 100644 index 231f3961e..000000000 --- a/tool/test/org/antlr/v4/xtest/TestToolSyntaxErrors.java +++ /dev/null @@ -1,656 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.Tool; -import org.antlr.v4.tool.ErrorType; -import org.junit.Test; - -public class TestToolSyntaxErrors extends BaseTest { - static String[] A = { - // INPUT - "grammar A;\n" + - "", - // YIELDS - "error(" + ErrorType.NO_RULES.code + "): A.g4::: grammar A has no rules\n", - - "A;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:1:0: syntax error: 'A' came as a complete surprise to me\n", - - "grammar ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:1:8: syntax error: ';' came as a complete surprise to me while looking for an identifier\n", - - "grammar A\n" + - "a : ID ;\n", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:0: syntax error: missing SEMI at 'a'\n", - - "grammar A;\n" + - "a : ID ;;\n"+ - "b : B ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:8: syntax error: ';' came as a complete surprise to me\n", - - "grammar A;;\n" + - "a : ID ;\n", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A;.g4:1:10: syntax error: ';' came as a complete surprise to me\n", - - "grammar A;\n" + - "a @init : ID ;\n", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:8: syntax error: mismatched input ':' expecting ACTION while matching rule preamble\n", - - "grammar A;\n" + - "a ( A | B ) D ;\n" + - "b : B ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:3: syntax error: '(' came as a complete surprise to me while matching rule preamble\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:11: syntax error: mismatched input ')' expecting SEMI while matching a rule\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:15: syntax error: mismatched input ';' expecting COLON while matching a lexer rule\n", - }; - - @Test public void testA() { super.testErrors(A, true); } - - @Test public void testExtraColon() { - String[] pair = new String[] { - "grammar A;\n" + - "a : : A ;\n" + - "b : B ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: ':' came as a complete surprise to me while matching alternative\n", - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A \n" + - "b : B ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:0: syntax error: unterminated 
rule (missing ';') detected at 'b :' while looking for rule element\n", - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi2() { - String[] pair = new String[] { - "lexer grammar A;\n" + - "A : 'a' \n" + - "B : 'b' ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:0: syntax error: unterminated rule (missing ';') detected at 'B :' while looking for lexer rule element\n", - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi3() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A \n" + - "b[int i] returns [int y] : B ;", - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:9: syntax error: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n" - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi4() { - String[] pair = new String[] { - "grammar A;\n" + - "a : b \n" + - " catch [Exception e] {...}\n" + - "b : B ;\n", - - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n" - }; - super.testErrors(pair, true); - } - - @Test public void testMissingRuleSemi5() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A \n" + - " catch [Exception e] {...}\n", - - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n" - }; - super.testErrors(pair, true); - } - - @Test public void testBadRulePrequelStart() { - String[] pair = new String[] { - "grammar A;\n" + - "a @ options {k=1;} : A ;\n" + - "b : B ;", - - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: 'options {' came as a complete surprise to me while looking for an identifier\n" - }; - super.testErrors(pair, true); - } - - @Test public void testBadRulePrequelStart2() { - String[] pair = new String[] { - "grammar A;\n" + - "a } : A ;\n" + - "b : B ;", - - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:2: syntax error: '}' came as a complete surprise to me while matching rule preamble\n" - }; - super.testErrors(pair, true); - } - - @Test public void testModeInParser() { - String[] pair = new String[] { - "grammar A;\n" + - "a : A ;\n" + - "mode foo;\n" + - "b : B ;", - - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:4:0: syntax error: 'b' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:4:6: syntax error: mismatched input ';' expecting COLON while matching a lexer rule\n" - }; - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#243 - * "Generate a good message for unterminated strings" - * https://github.com/antlr/antlr4/issues/243 - */ - @Test public void testUnterminatedStringLiteral() { - String[] pair = new String[] { - "grammar A;\n" + - "a : 'x\n" + - " ;\n", - - "error(" + ErrorType.UNTERMINATED_STRING_LITERAL.code + "): A.g4:2:4: unterminated string literal\n" - }; - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#262 - * "Parser Rule Name Starting With an Underscore" - * https://github.com/antlr/antlr4/issues/262 - */ - @Test public void testParserRuleNameStartingWithUnderscore() { - String[] pair = new String[] { - "grammar A;\n" + - "_a : 'x' ;\n", - - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:0: syntax error: '_' came as a complete surprise to me\n" - }; - super.testErrors(pair, true); - } - - /** - * This is a regression 
test for antlr/antlr4#194 - * "NullPointerException on 'options{}' in grammar file" - * https://github.com/antlr/antlr4/issues/194 - */ - @Test public void testEmptyGrammarOptions() { - String[] pair = new String[] { - "grammar A;\n" + - "options {}\n" + - "a : 'x' ;\n", - - "" - }; - super.testErrors(pair, true); - } - - /** - * This is a "related" regression test for antlr/antlr4#194 - * "NullPointerException on 'options{}' in grammar file" - * https://github.com/antlr/antlr4/issues/194 - */ - @Test public void testEmptyRuleOptions() { - String[] pair = new String[] { - "grammar A;\n" + - "a options{} : 'x' ;\n", - - "" - }; - super.testErrors(pair, true); - } - - /** - * This is a "related" regression test for antlr/antlr4#194 - * "NullPointerException on 'options{}' in grammar file" - * https://github.com/antlr/antlr4/issues/194 - */ - @Test public void testEmptyBlockOptions() { - String[] pair = new String[] { - "grammar A;\n" + - "a : (options{} : 'x') ;\n", - - "" - }; - super.testErrors(pair, true); - } - - @Test public void testEmptyTokensBlock() { - String[] pair = new String[] { - "grammar A;\n" + - "tokens {}\n" + - "a : 'x' ;\n", - - "" - }; - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#190 - * "NullPointerException building lexer grammar using bogus 'token' action" - * https://github.com/antlr/antlr4/issues/190 - */ - @Test public void testInvalidLexerCommand() { - String[] pair = new String[] { - "grammar A;\n" + - "tokens{Foo}\n" + - "b : Foo ;\n" + - "X : 'foo' -> popmode;\n" + // "meant" to use -> popMode - "Y : 'foo' -> token(Foo);", // "meant" to use -> type(Foo) - - "error(" + ErrorType.INVALID_LEXER_COMMAND.code + "): A.g4:4:13: lexer command popmode does not exist or is not supported by the current target\n" + - "error(" + ErrorType.INVALID_LEXER_COMMAND.code + "): A.g4:5:13: lexer command token does not exist or is not supported by the current target\n" - }; - super.testErrors(pair, true); - } - - @Test public void testLexerCommandArgumentValidation() { - String[] pair = new String[] { - "grammar A;\n" + - "tokens{Foo}\n" + - "b : Foo ;\n" + - "X : 'foo' -> popMode(Foo);\n" + // "meant" to use -> popMode - "Y : 'foo' -> type;", // "meant" to use -> type(Foo) - - "error(" + ErrorType.UNWANTED_LEXER_COMMAND_ARGUMENT.code + "): A.g4:4:13: lexer command popMode does not take any arguments\n" + - "error(" + ErrorType.MISSING_LEXER_COMMAND_ARGUMENT.code + "): A.g4:5:13: missing argument for lexer command type\n" - }; - super.testErrors(pair, true); - } - - @Test public void testRuleRedefinition() { - String[] pair = new String[] { - "grammar Oops;\n" + - "\n" + - "ret_ty : A ;\n" + - "ret_ty : B ;\n" + - "\n" + - "A : 'a' ;\n" + - "B : 'b' ;\n", - - "error(" + ErrorType.RULE_REDEFINITION.code + "): Oops.g4:4:0: rule ret_ty redefinition; previous at line 3\n" - }; - super.testErrors(pair, true); - } - - @Test public void testEpsilonClosureAnalysis() { - String grammar = - "grammar A;\n" - + "x : ;\n" - + "y1 : x+;\n" - + "y2 : x*;\n" - + "z1 : ('foo' | 'bar'? 'bar2'?)*;\n" - + "z2 : ('foo' | 'bar' 'bar2'? 
| 'bar2')*;\n"; - String expected = - "error(" + ErrorType.EPSILON_CLOSURE.code + "): A.g4:3:0: rule y1 contains a closure with at least one alternative that can match an empty string\n" + - "error(" + ErrorType.EPSILON_CLOSURE.code + "): A.g4:4:0: rule y2 contains a closure with at least one alternative that can match an empty string\n" + - "error(" + ErrorType.EPSILON_CLOSURE.code + "): A.g4:5:0: rule z1 contains a closure with at least one alternative that can match an empty string\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - @Test public void testEpsilonOptionalAnalysis() { - String grammar = - "grammar A;\n" - + "x : ;\n" - + "y : x?;\n" - + "z1 : ('foo' | 'bar'? 'bar2'?)?;\n" - + "z2 : ('foo' | 'bar' 'bar2'? | 'bar2')?;\n"; - String expected = - "warning(" + ErrorType.EPSILON_OPTIONAL.code + "): A.g4:3:0: rule y contains an optional block with at least one alternative that can match an empty string\n" + - "warning(" + ErrorType.EPSILON_OPTIONAL.code + "): A.g4:4:0: rule z1 contains an optional block with at least one alternative that can match an empty string\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#315 - * "Inconsistent lexer error msg for actions" - * https://github.com/antlr/antlr4/issues/315 - */ - @Test public void testActionAtEndOfOneLexerAlternative() { - String grammar = - "grammar A;\n" + - "stat : 'start' CharacterLiteral 'end' EOF;\n" + - "\n" + - "// Lexer\n" + - "\n" + - "CharacterLiteral\n" + - " : '\\'' SingleCharacter '\\''\n" + - " | '\\'' ~[\\r\\n] {notifyErrorListeners(\"unclosed character literal\");}\n" + - " ;\n" + - "\n" + - "fragment\n" + - "SingleCharacter\n" + - " : ~['\\\\\\r\\n]\n" + - " ;\n" + - "\n" + - "WS : [ \\r\\t\\n]+ -> skip ;\n"; - String expected = - ""; - - String[] pair = new String[] { grammar, expected }; - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#308 "NullPointer exception" - * https://github.com/antlr/antlr4/issues/308 - */ - @Test public void testDoubleQuotedStringLiteral() { - String grammar = - "lexer grammar A;\n" - + "WHITESPACE : (\" \" | \"\\t\" | \"\\n\" | \"\\r\" | \"\\f\");\n"; - String expected = - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:14: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:16: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:20: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:21: syntax error: '\\' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:23: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:27: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:28: syntax error: '\\' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:30: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:34: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:35: syntax error: '\\' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:37: syntax error: 
'\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:41: syntax error: '\"' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:42: syntax error: '\\' came as a complete surprise to me\n" + - "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:44: syntax error: '\"' came as a complete surprise to me\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This test ensures that the {@link ErrorType#INVALID_ESCAPE_SEQUENCE} - * error is not reported for escape sequences that are known to be valid. - */ - @Test public void testValidEscapeSequences() { - String grammar = - "lexer grammar A;\n" + - "NORMAL_ESCAPE : '\\b \\t \\n \\f \\r \\\" \\' \\\\';\n" + - "UNICODE_ESCAPE : '\\u0001 \\u00A1 \\u00a1 \\uaaaa \\uAAAA';\n"; - String expected = - ""; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#507 "NullPointerException When - * Generating Code from Grammar". - * https://github.com/antlr/antlr4/issues/507 - */ - @Test public void testInvalidEscapeSequences() { - String grammar = - "lexer grammar A;\n" + - "RULE : 'Foo \\uAABG \\x \\u';\n"; - String expected = - "error(" + ErrorType.INVALID_ESCAPE_SEQUENCE.code + "): A.g4:2:12: invalid escape sequence\n" + - "error(" + ErrorType.INVALID_ESCAPE_SEQUENCE.code + "): A.g4:2:19: invalid escape sequence\n" + - "error(" + ErrorType.INVALID_ESCAPE_SEQUENCE.code + "): A.g4:2:22: invalid escape sequence\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This test ensures the {@link ErrorType#UNRECOGNIZED_ASSOC_OPTION} warning - * is produced as described in the documentation. - */ - @Test public void testUnrecognizedAssocOption() { - String grammar = - "grammar A;\n" + - "x : 'x'\n" + - " | x '+' x // warning 157\n" + - " | x '*' x // ok\n" + - " ;\n"; - String expected = - "warning(" + ErrorType.UNRECOGNIZED_ASSOC_OPTION.code + "): A.g4:3:10: rule x contains an assoc terminal option in an unrecognized location\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This test ensures the {@link ErrorType#FRAGMENT_ACTION_IGNORED} warning - * is produced as described in the documentation. - */ - @Test public void testFragmentActionIgnored() { - String grammar = - "lexer grammar A;\n" + - "X1 : 'x' -> more // ok\n" + - " ;\n" + - "Y1 : 'x' {more();} // ok\n" + - " ;\n" + - "fragment\n" + - "X2 : 'x' -> more // warning 158\n" + - " ;\n" + - "fragment\n" + - "Y2 : 'x' {more();} // warning 158\n" + - " ;\n"; - String expected = - "warning(" + ErrorType.FRAGMENT_ACTION_IGNORED.code + "): A.g4:7:12: fragment rule X2 contains an action or command which can never be executed\n" + - "warning(" + ErrorType.FRAGMENT_ACTION_IGNORED.code + "): A.g4:10:9: fragment rule Y2 contains an action or command which can never be executed\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#500 "Array Index Out Of - * Bounds". 
- * https://github.com/antlr/antlr4/issues/500 - */ - @Test public void testTokenNamedEOF() { - String grammar = - "lexer grammar A;\n" + - "WS : ' ';\n" + - " EOF : 'a';\n"; - String expected = - "error(" + ErrorType.RESERVED_RULE_NAME.code + "): A.g4:3:1: cannot declare a rule with reserved name EOF\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#649 "unknown target causes - * null ptr exception.". - * https://github.com/antlr/antlr4/issues/649 - * Stops before processing the lexer - */ - @Test public void testInvalidLanguageInGrammarWithLexerCommand() throws Exception { - String grammar = - "grammar T;\n" + - "options { language=Foo; }\n" + - "start : 'T' EOF;\n" + - "Something : 'something' -> channel(CUSTOM);"; - String expected = - "error(" + ErrorType.CANNOT_CREATE_TARGET_GENERATOR.code + "): ANTLR cannot generate Foo code as of version " + Tool.VERSION + "\n"; - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#649 "unknown target causes - * null ptr exception.". - * https://github.com/antlr/antlr4/issues/649 - */ - @Test public void testInvalidLanguageInGrammar() throws Exception { - String grammar = - "grammar T;\n" + - "options { language=Foo; }\n" + - "start : 'T' EOF;\n"; - String expected = - "error(" + ErrorType.CANNOT_CREATE_TARGET_GENERATOR.code + "): ANTLR cannot generate Foo code as of version " + Tool.VERSION + "\n"; - - String[] pair = new String[] { - grammar, - expected - }; - - super.testErrors(pair, true); - } - - @Test public void testChannelDefinitionInLexer() throws Exception { - String grammar = - "lexer grammar T;\n" + - "\n" + - "channels {\n" + - " WHITESPACE_CHANNEL,\n" + - " COMMENT_CHANNEL\n" + - "}\n" + - "\n" + - "COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" + - "WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n"; - - String expected = ""; - - String[] pair = { grammar, expected }; - super.testErrors(pair, true); - } - - @Test public void testChannelDefinitionInParser() throws Exception { - String grammar = - "parser grammar T;\n" + - "\n" + - "channels {\n" + - " WHITESPACE_CHANNEL,\n" + - " COMMENT_CHANNEL\n" + - "}\n" + - "\n" + - "start : EOF;\n"; - - String expected = - "error(" + ErrorType.CHANNELS_BLOCK_IN_PARSER_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in parser grammars\n"; - - String[] pair = { grammar, expected }; - super.testErrors(pair, true); - } - - @Test public void testChannelDefinitionInCombined() throws Exception { - String grammar = - "grammar T;\n" + - "\n" + - "channels {\n" + - " WHITESPACE_CHANNEL,\n" + - " COMMENT_CHANNEL\n" + - "}\n" + - "\n" + - "start : EOF;\n" + - "\n" + - "COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" + - "WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n"; - - String expected = - "warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:10:35: rule COMMENT contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n" + - "warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:11:35: rule WHITESPACE contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n" + - "error(" + ErrorType.CHANNELS_BLOCK_IN_COMBINED_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in combined grammars\n"; - - String[] pair = { grammar, expected }; - 
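Every case in this deleted TestToolSyntaxErrors file follows one convention: a String[] pair of { grammarText, expectedErrorOutput }, where the expected text embeds the numeric ErrorType code and BaseTest.testErrors() runs the tool and diffs its diagnostics against it. A hedged sketch of adding one more case in that style; the method name and grammar name are hypothetical, and the empty-tokens-block behaviour it asserts is the one already covered by testEmptyTokensBlock above.

	@Test public void testAnotherEmptyTokensBlock() {
		String[] pair = new String[] {
			"grammar B;\n" +
			"tokens {}\n" +      // empty blocks are tolerated...
			"a : 'x' ;\n",

			""                   // ...so no diagnostics are expected
		};
		super.testErrors(pair, true);
	}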
super.testErrors(pair, true); - } - - /** - * This is a regression test for antlr/antlr4#497 now that antlr/antlr4#309 - * is resolved. - * https://github.com/antlr/antlr4/issues/497 - * https://github.com/antlr/antlr4/issues/309 - */ - @Test public void testChannelDefinitions() throws Exception { - String grammar = - "lexer grammar T;\n" + - "\n" + - "channels {\n" + - " WHITESPACE_CHANNEL,\n" + - " COMMENT_CHANNEL\n" + - "}\n" + - "\n" + - "COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" + - "WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n" + - "NEWLINE: '\\r'? '\\n' -> channel(NEWLINE_CHANNEL);"; - - // WHITESPACE_CHANNEL and COMMENT_CHANNEL are defined, but NEWLINE_CHANNEL is not - String expected = - "warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:10:34: rule NEWLINE contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n"; - - String[] pair = { grammar, expected }; - super.testErrors(pair, true); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestTopologicalSort.java b/tool/test/org/antlr/v4/xtest/TestTopologicalSort.java deleted file mode 100644 index 65df21760..000000000 --- a/tool/test/org/antlr/v4/xtest/TestTopologicalSort.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.xtest; - -import org.antlr.v4.misc.Graph; -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.*; - -/** Test topo sort in GraphNode. 
*/ -public class TestTopologicalSort extends BaseTest { - @Test - public void testFairlyLargeGraph() throws Exception { - Graph g = new Graph(); - g.addEdge("C", "F"); - g.addEdge("C", "G"); - g.addEdge("C", "A"); - g.addEdge("C", "B"); - g.addEdge("A", "D"); - g.addEdge("A", "E"); - g.addEdge("B", "E"); - g.addEdge("D", "E"); - g.addEdge("D", "F"); - g.addEdge("F", "H"); - g.addEdge("E", "F"); - - String expecting = "[H, F, G, E, D, A, B, C]"; - List nodes = g.sort(); - String result = nodes.toString(); - assertEquals(expecting, result); - } - - @Test - public void testCyclicGraph() throws Exception { - Graph g = new Graph(); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "A"); - g.addEdge("C", "D"); - - String expecting = "[D, C, B, A]"; - List nodes = g.sort(); - String result = nodes.toString(); - assertEquals(expecting, result); - } - - @Test - public void testRepeatedEdges() throws Exception { - Graph g = new Graph(); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("A", "B"); // dup - g.addEdge("C", "D"); - - String expecting = "[D, C, B, A]"; - List nodes = g.sort(); - String result = nodes.toString(); - assertEquals(expecting, result); - } - - @Test - public void testSimpleTokenDependence() throws Exception { - Graph g = new Graph(); - g.addEdge("Java.g4", "MyJava.tokens"); // Java feeds off manual token file - g.addEdge("Java.tokens", "Java.g4"); - g.addEdge("Def.g4", "Java.tokens"); // walkers feed off generated tokens - g.addEdge("Ref.g4", "Java.tokens"); - - String expecting = "[MyJava.tokens, Java.g4, Java.tokens, Def.g4, Ref.g4]"; - List nodes = g.sort(); - String result = nodes.toString(); - assertEquals(expecting, result); - } - - @Test - public void testParserLexerCombo() throws Exception { - Graph g = new Graph(); - g.addEdge("JavaLexer.tokens", "JavaLexer.g4"); - g.addEdge("JavaParser.g4", "JavaLexer.tokens"); - g.addEdge("Def.g4", "JavaLexer.tokens"); - g.addEdge("Ref.g4", "JavaLexer.tokens"); - - String expecting = "[JavaLexer.g4, JavaLexer.tokens, JavaParser.g4, Def.g4, Ref.g4]"; - List nodes = g.sort(); - String result = nodes.toString(); - assertEquals(expecting, result); - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java b/tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java deleted file mode 100644 index a38ca66be..000000000 --- a/tool/test/org/antlr/v4/xtest/TestUnbufferedCharStream.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
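Recapping the Graph API the topological-sort tests above drive: addEdge(x, y) records "x depends on y", and sort() returns the nodes with every dependency listed before its dependents, which is how the tool orders grammar and .tokens file processing. A hedged, standalone sketch; the TopoSketch wrapper is illustrative, the raw Graph usage matches the tests, and the printed order is only an example of a valid topological order.

import org.antlr.v4.misc.Graph;
import java.util.List;

public class TopoSketch {
    public static void main(String[] args) {
        Graph g = new Graph();
        g.addEdge("Java.tokens", "Java.g4");   // generated tokens depend on the grammar
        g.addEdge("Def.g4", "Java.tokens");    // walkers depend on the generated tokens file
        g.addEdge("Ref.g4", "Java.tokens");
        List nodes = g.sort();
        System.out.println(nodes);             // e.g. [Java.g4, Java.tokens, Def.g4, Ref.g4]
    }
}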
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonTokenFactory; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.LexerInterpreter; -import org.antlr.v4.runtime.UnbufferedCharStream; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import java.io.Reader; -import java.io.StringReader; - -import static org.junit.Assert.assertEquals; - -public class TestUnbufferedCharStream extends BaseTest { - @Test public void testNoChar() throws Exception { - CharStream input = createStream(""); - assertEquals(IntStream.EOF, input.LA(1)); - assertEquals(IntStream.EOF, input.LA(2)); - } - - /** - * The {@link IntStream} interface does not specify the behavior when the - * EOF symbol is consumed, but {@link UnbufferedCharStream} handles this - * particular case by throwing an {@link IllegalStateException}. - */ - @Test(expected = IllegalStateException.class) - public void testConsumeEOF() throws Exception { - CharStream input = createStream(""); - assertEquals(IntStream.EOF, input.LA(1)); - input.consume(); - input.consume(); - } - - @Test(expected = IllegalArgumentException.class) - public void testNegativeSeek() { - CharStream input = createStream(""); - input.seek(-1); - } - - @Test - public void testSeekPastEOF() { - CharStream input = createStream(""); - assertEquals(0, input.index()); - input.seek(1); - assertEquals(0, input.index()); - } - - /** - * The {@link IntStream} interface does not specify the behavior when marks - * are not released in the reversed order they were created, but - * {@link UnbufferedCharStream} handles this case by throwing an - * {@link IllegalStateException}. - */ - @Test(expected = IllegalStateException.class) - public void testMarkReleaseOutOfOrder() { - CharStream input = createStream(""); - int m1 = input.mark(); - int m2 = input.mark(); - input.release(m1); - } - - /** - * The {@link IntStream} interface does not specify the behavior when a mark - * is released twice, but {@link UnbufferedCharStream} handles this case by - * throwing an {@link IllegalStateException}. - */ - @Test(expected = IllegalStateException.class) - public void testMarkReleasedTwice() { - CharStream input = createStream(""); - int m1 = input.mark(); - input.release(m1); - input.release(m1); - } - - /** - * The {@link IntStream} interface does not specify the behavior when a mark - * is released twice, but {@link UnbufferedCharStream} handles this case by - * throwing an {@link IllegalStateException}. 
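The mark/release tests in this deleted file all rely on the same contract: a mark pins the moving window so earlier characters stay retrievable, getText() only works for regions still in the buffer, and marks must be released in LIFO order. A hedged, self-contained sketch mirroring testGetTextInMarkedRange; the UnbufferedSketch wrapper and main are illustrative only.

import org.antlr.v4.runtime.UnbufferedCharStream;
import org.antlr.v4.runtime.misc.Interval;
import java.io.StringReader;

public class UnbufferedSketch {
    public static void main(String[] args) {
        UnbufferedCharStream input = new UnbufferedCharStream(new StringReader("xyz"));
        input.consume();                        // past 'x'; it may now fall out of the window
        int m = input.mark();                   // pin the buffer from index 1 onward
        input.consume();                        // 'y'
        input.consume();                        // 'z'
        System.out.println(input.getText(new Interval(1, 2)));  // "yz": still buffered thanks to the mark
        input.release(m);                       // release in reverse order of mark()
    }
}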
- */ - @Test(expected = IllegalStateException.class) - public void testNestedMarkReleasedTwice() { - CharStream input = createStream(""); - int m1 = input.mark(); - int m2 = input.mark(); - input.release(m2); - input.release(m2); - } - - /** - * It is not valid to pass a mark to {@link IntStream#seek}, but - * {@link UnbufferedCharStream} creates marks in such a way that this - * invalid usage results in an {@link IllegalArgumentException}. - */ - @Test(expected = IllegalArgumentException.class) - public void testMarkPassedToSeek() { - CharStream input = createStream(""); - int m1 = input.mark(); - input.seek(m1); - } - - @Test(expected = IllegalArgumentException.class) - public void testSeekBeforeBufferStart() { - CharStream input = createStream("xyz"); - input.consume(); - int m1 = input.mark(); - assertEquals(1, input.index()); - input.consume(); - input.seek(0); - } - - @Test(expected = UnsupportedOperationException.class) - public void testGetTextBeforeBufferStart() { - CharStream input = createStream("xyz"); - input.consume(); - int m1 = input.mark(); - assertEquals(1, input.index()); - input.getText(new Interval(0, 1)); - } - - @Test - public void testGetTextInMarkedRange() { - CharStream input = createStream("xyz"); - input.consume(); - int m1 = input.mark(); - assertEquals(1, input.index()); - input.consume(); - input.consume(); - assertEquals("yz", input.getText(new Interval(1, 2))); - } - - @Test - public void testLastChar() { - CharStream input = createStream("abcdef"); - - input.consume(); - assertEquals('a', input.LA(-1)); - - int m1 = input.mark(); - input.consume(); - input.consume(); - input.consume(); - assertEquals('d', input.LA(-1)); - - input.seek(2); - assertEquals('b', input.LA(-1)); - - input.release(m1); - input.seek(3); - assertEquals('c', input.LA(-1)); - // this special case is not required by the IntStream interface, but - // UnbufferedCharStream allows it so we have to make sure the resulting - // state is consistent - input.seek(2); - assertEquals('b', input.LA(-1)); - } - - @Test public void test1Char() throws Exception { - TestingUnbufferedCharStream input = createStream("x"); - assertEquals('x', input.LA(1)); - input.consume(); - assertEquals(IntStream.EOF, input.LA(1)); - String r = input.getRemainingBuffer(); - assertEquals("\uFFFF", r); // shouldn't include x - assertEquals("\uFFFF", input.getBuffer()); // whole buffer - } - - @Test public void test2Char() throws Exception { - TestingUnbufferedCharStream input = createStream("xy"); - assertEquals('x', input.LA(1)); - input.consume(); - assertEquals('y', input.LA(1)); - assertEquals("y", input.getRemainingBuffer()); // shouldn't include x - assertEquals("y", input.getBuffer()); - input.consume(); - assertEquals(IntStream.EOF, input.LA(1)); - assertEquals("\uFFFF", input.getBuffer()); - } - - @Test public void test2CharAhead() throws Exception { - CharStream input = createStream("xy"); - assertEquals('x', input.LA(1)); - assertEquals('y', input.LA(2)); - assertEquals(IntStream.EOF, input.LA(3)); - } - - @Test public void testBufferExpand() throws Exception { - TestingUnbufferedCharStream input = createStream("01234", 2); - assertEquals('0', input.LA(1)); - assertEquals('1', input.LA(2)); - assertEquals('2', input.LA(3)); - assertEquals('3', input.LA(4)); - assertEquals('4', input.LA(5)); - assertEquals("01234", input.getBuffer()); - assertEquals(IntStream.EOF, input.LA(6)); - } - - @Test public void testBufferWrapSize1() throws Exception { - CharStream input = createStream("01234", 1); - assertEquals('0', 
input.LA(1)); - input.consume(); - assertEquals('1', input.LA(1)); - input.consume(); - assertEquals('2', input.LA(1)); - input.consume(); - assertEquals('3', input.LA(1)); - input.consume(); - assertEquals('4', input.LA(1)); - input.consume(); - assertEquals(IntStream.EOF, input.LA(1)); - } - - @Test public void testBufferWrapSize2() throws Exception { - CharStream input = createStream("01234", 2); - assertEquals('0', input.LA(1)); - input.consume(); - assertEquals('1', input.LA(1)); - input.consume(); - assertEquals('2', input.LA(1)); - input.consume(); - assertEquals('3', input.LA(1)); - input.consume(); - assertEquals('4', input.LA(1)); - input.consume(); - assertEquals(IntStream.EOF, input.LA(1)); - } - - @Test public void test1Mark() throws Exception { - TestingUnbufferedCharStream input = createStream("xyz"); - int m = input.mark(); - assertEquals('x', input.LA(1)); - assertEquals('y', input.LA(2)); - assertEquals('z', input.LA(3)); - input.release(m); - assertEquals(IntStream.EOF, input.LA(4)); - assertEquals("xyz\uFFFF", input.getBuffer()); - } - - @Test public void test1MarkWithConsumesInSequence() throws Exception { - TestingUnbufferedCharStream input = createStream("xyz"); - int m = input.mark(); - input.consume(); // x, moves to y - input.consume(); // y - input.consume(); // z, moves to EOF - assertEquals(IntStream.EOF, input.LA(1)); - assertEquals("xyz\uFFFF", input.getBuffer()); - input.release(m); // wipes buffer - assertEquals("\uFFFF", input.getBuffer()); - } - - @Test public void test2Mark() throws Exception { - TestingUnbufferedCharStream input = createStream("xyz", 100); - assertEquals('x', input.LA(1)); - input.consume(); // reset buffer index (p) to 0 - int m1 = input.mark(); - assertEquals('y', input.LA(1)); - input.consume(); - int m2 = input.mark(); - assertEquals("yz", input.getBuffer()); - input.release(m2); // drop to 1 marker - input.consume(); - input.release(m1); // shifts remaining char to beginning - assertEquals(IntStream.EOF, input.LA(1)); - assertEquals("\uFFFF", input.getBuffer()); - } - - @Test public void testAFewTokens() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 3 * 0 + 2 * 0; - TestingUnbufferedCharStream input = createStream("x = 302 * 91 + 20234234 * 0;"); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - // copy text into tokens from char stream - lexEngine.setTokenFactory(new CommonTokenFactory(true)); - CommonTokenStream tokens = new CommonTokenStream(lexEngine); - String result = tokens.LT(1).getText(); - String expecting = "x"; - assertEquals(expecting, result); - tokens.fill(); - expecting = - "[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1], [@2,2:2='=',<4>,1:2]," + - " [@3,3:3=' ',<7>,1:3], [@4,4:6='302',<2>,1:4], [@5,7:7=' ',<7>,1:7]," + - " [@6,8:8='*',<6>,1:8], [@7,9:9=' ',<7>,1:9], [@8,10:11='91',<2>,1:10]," + - " [@9,12:12=' ',<7>,1:12], [@10,13:13='+',<5>,1:13], [@11,14:14=' ',<7>,1:14]," + - " [@12,15:22='20234234',<2>,1:15], [@13,23:23=' ',<7>,1:23]," + - " [@14,24:24='*',<6>,1:24], [@15,25:25=' ',<7>,1:25], [@16,26:26='0',<2>,1:26]," + - " [@17,27:27=';',<3>,1:27], [@18,28:27='',<-1>,1:28]]"; - assertEquals(expecting, tokens.getTokens().toString()); - } - - protected static TestingUnbufferedCharStream createStream(String text) { - return new TestingUnbufferedCharStream(new 
StringReader(text)); - } - - protected static TestingUnbufferedCharStream createStream(String text, int bufferSize) { - return new TestingUnbufferedCharStream(new StringReader(text), bufferSize); - } - - protected static class TestingUnbufferedCharStream extends UnbufferedCharStream { - - public TestingUnbufferedCharStream(Reader input) { - super(input); - } - - public TestingUnbufferedCharStream(Reader input, int bufferSize) { - super(input, bufferSize); - } - - /** For testing. What's in moving window into data stream from - * current index, LA(1) or data[p], to end of buffer? - */ - public String getRemainingBuffer() { - if ( n==0 ) return ""; - return new String(data,p,n-p); - } - - /** For testing. What's in moving window buffer into data stream. - * From 0..p-1 have been consume. - */ - public String getBuffer() { - if ( n==0 ) return ""; - return new String(data,0,n); - } - - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java b/tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java deleted file mode 100644 index fda765562..000000000 --- a/tool/test/org/antlr/v4/xtest/TestUnbufferedTokenStream.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.LexerInterpreter; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.UnbufferedTokenStream; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; - -import java.io.StringReader; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class TestUnbufferedTokenStream extends BaseTest { - @Test public void testLookahead() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 302; - CharStream input = new ANTLRInputStream( - new StringReader("x = 302;") - ); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TokenStream tokens = new UnbufferedTokenStream(lexEngine); - - assertEquals("x", tokens.LT(1).getText()); - assertEquals(" ", tokens.LT(2).getText()); - assertEquals("=", tokens.LT(3).getText()); - assertEquals(" ", tokens.LT(4).getText()); - assertEquals("302", tokens.LT(5).getText()); - assertEquals(";", tokens.LT(6).getText()); - } - - @Test public void testNoBuffering() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 302; - CharStream input = new ANTLRInputStream( - new StringReader("x = 302;") - ); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TestingUnbufferedTokenStream tokens = new TestingUnbufferedTokenStream(lexEngine); - - assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString()); - assertEquals("x", tokens.LT(1).getText()); - tokens.consume(); // move to WS - assertEquals(" ", tokens.LT(1).getText()); - assertEquals("[[@1,1:1=' ',<7>,1:1]]", tokens.getRemainingBuffer().toString()); - tokens.consume(); - assertEquals("=", tokens.LT(1).getText()); - assertEquals("[[@2,2:2='=',<4>,1:2]]", tokens.getRemainingBuffer().toString()); - tokens.consume(); - assertEquals(" ", tokens.LT(1).getText()); - assertEquals("[[@3,3:3=' ',<7>,1:3]]", tokens.getRemainingBuffer().toString()); - tokens.consume(); - assertEquals("302", tokens.LT(1).getText()); - assertEquals("[[@4,4:6='302',<2>,1:4]]", tokens.getRemainingBuffer().toString()); - tokens.consume(); - assertEquals(";", tokens.LT(1).getText()); - assertEquals("[[@5,7:7=';',<3>,1:7]]", tokens.getRemainingBuffer().toString()); - } - - @Test public void testMarkStart() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 302; - CharStream input = new ANTLRInputStream( - new StringReader("x = 302;") - ); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TestingUnbufferedTokenStream tokens = new TestingUnbufferedTokenStream(lexEngine); - - int m = tokens.mark(); - assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString()); - assertEquals("x", tokens.LT(1).getText()); 
- tokens.consume(); // consume x - assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString()); - tokens.consume(); // ' ' - tokens.consume(); // = - tokens.consume(); // ' ' - tokens.consume(); // 302 - tokens.consume(); // ; - assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]," + - " [@2,2:2='=',<4>,1:2], [@3,3:3=' ',<7>,1:3]," + - " [@4,4:6='302',<2>,1:4], [@5,7:7=';',<3>,1:7]," + - " [@6,8:7='',<-1>,1:8]]", - tokens.getBuffer().toString()); - } - - @Test public void testMarkThenRelease() throws Exception { - LexerGrammar g = new LexerGrammar( - "lexer grammar t;\n"+ - "ID : 'a'..'z'+;\n" + - "INT : '0'..'9'+;\n" + - "SEMI : ';';\n" + - "ASSIGN : '=';\n" + - "PLUS : '+';\n" + - "MULT : '*';\n" + - "WS : ' '+;\n"); - // Tokens: 012345678901234567 - // Input: x = 302; - CharStream input = new ANTLRInputStream( - new StringReader("x = 302 + 1;") - ); - LexerInterpreter lexEngine = g.createLexerInterpreter(input); - TestingUnbufferedTokenStream tokens = new TestingUnbufferedTokenStream(lexEngine); - - int m = tokens.mark(); - assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString()); - assertEquals("x", tokens.LT(1).getText()); - tokens.consume(); // consume x - assertEquals("[[@0,0:0='x',<1>,1:0], [@1,1:1=' ',<7>,1:1]]", tokens.getBuffer().toString()); - tokens.consume(); // ' ' - tokens.consume(); // = - tokens.consume(); // ' ' - assertEquals("302", tokens.LT(1).getText()); - tokens.release(m); // "x = 302" is in buffer. will kill buffer - tokens.consume(); // 302 - tokens.consume(); // ' ' - m = tokens.mark(); // mark at the + - assertEquals("+", tokens.LT(1).getText()); - tokens.consume(); // '+' - tokens.consume(); // ' ' - tokens.consume(); // 1 - tokens.consume(); // ; - assertEquals("", tokens.LT(1).getText()); - // we marked at the +, so that should be the start of the buffer - assertEquals("[[@6,8:8='+',<5>,1:8], [@7,9:9=' ',<7>,1:9]," + - " [@8,10:10='1',<2>,1:10], [@9,11:11=';',<3>,1:11]," + - " [@10,12:11='',<-1>,1:12]]", - tokens.getBuffer().toString()); - tokens.release(m); - } - - protected static class TestingUnbufferedTokenStream extends UnbufferedTokenStream { - - public TestingUnbufferedTokenStream(TokenSource tokenSource) { - super(tokenSource); - } - - /** For testing. What's in moving window into token stream from - * current index, LT(1) or tokens[p], to end of buffer? - */ - protected List getRemainingBuffer() { - if ( n==0 ) { - return Collections.emptyList(); - } - - return Arrays.asList(tokens).subList(p, n); - } - - /** For testing. What's in moving window buffer into data stream. - * From 0..p-1 have been consume. - */ - protected List getBuffer() { - if ( n==0 ) { - return Collections.emptyList(); - } - - return Arrays.asList(tokens).subList(0, n); - } - - } -} diff --git a/tool/test/org/antlr/v4/xtest/TestVocabulary.java b/tool/test/org/antlr/v4/xtest/TestVocabulary.java deleted file mode 100644 index 664871339..000000000 --- a/tool/test/org/antlr/v4/xtest/TestVocabulary.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * [The "BSD license"] - * Copyright (c) 2014 Terence Parr - * Copyright (c) 2014 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.junit.Assert; -import org.junit.Test; - -/** - * - * @author Sam Harwell - */ -public class TestVocabulary extends BaseTest { - - @Test - public void testEmptyVocabulary() { - Assert.assertNotNull(VocabularyImpl.EMPTY_VOCABULARY); - Assert.assertEquals("EOF", VocabularyImpl.EMPTY_VOCABULARY.getSymbolicName(Token.EOF)); - Assert.assertEquals("0", VocabularyImpl.EMPTY_VOCABULARY.getDisplayName(Token.INVALID_TYPE)); - } - - @Test - public void testVocabularyFromTokenNames() { - String[] tokenNames = { - "", - "TOKEN_REF", "RULE_REF", "'//'", "'/'", "'*'", "'!'", "ID", "STRING" - }; - - Vocabulary vocabulary = VocabularyImpl.fromTokenNames(tokenNames); - Assert.assertNotNull(vocabulary); - Assert.assertEquals("EOF", vocabulary.getSymbolicName(Token.EOF)); - for (int i = 0; i < tokenNames.length; i++) { - Assert.assertEquals(tokenNames[i], vocabulary.getDisplayName(i)); - - if (tokenNames[i].startsWith("'")) { - Assert.assertEquals(tokenNames[i], vocabulary.getLiteralName(i)); - Assert.assertNull(vocabulary.getSymbolicName(i)); - } - else if (Character.isUpperCase(tokenNames[i].charAt(0))) { - Assert.assertNull(vocabulary.getLiteralName(i)); - Assert.assertEquals(tokenNames[i], vocabulary.getSymbolicName(i)); - } - else { - Assert.assertNull(vocabulary.getLiteralName(i)); - Assert.assertNull(vocabulary.getSymbolicName(i)); - } - } - } - -} diff --git a/tool/test/org/antlr/v4/xtest/TestXPath.java b/tool/test/org/antlr/v4/xtest/TestXPath.java deleted file mode 100644 index 886a04536..000000000 --- a/tool/test/org/antlr/v4/xtest/TestXPath.java +++ /dev/null @@ -1,228 +0,0 @@ -package org.antlr.v4.xtest; - -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.runtime.tree.TerminalNode; -import org.antlr.v4.runtime.tree.xpath.XPath; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public class TestXPath extends BaseTest { - public static final String grammar = - "grammar Expr;\n" + - "prog: func+ ;\n" + - "func: 
'def' ID '(' arg (',' arg)* ')' body ;\n" + - "body: '{' stat+ '}' ;\n" + - "arg : ID ;\n" + - "stat: expr ';' # printExpr\n" + - " | ID '=' expr ';' # assign\n" + - " | 'return' expr ';' # ret\n" + - " | ';' # blank\n" + - " ;\n" + - "expr: expr ('*'|'/') expr # MulDiv\n" + - " | expr ('+'|'-') expr # AddSub\n" + - " | primary # prim\n" + - " ;\n" + - "primary" + - " : INT # int\n" + - " | ID # id\n" + - " | '(' expr ')' # parens\n" + - " ;" + - "\n" + - "MUL : '*' ; // assigns token name to '*' used above in grammar\n" + - "DIV : '/' ;\n" + - "ADD : '+' ;\n" + - "SUB : '-' ;\n" + - "RETURN : 'return' ;\n" + - "ID : [a-zA-Z]+ ; // match identifiers\n" + - "INT : [0-9]+ ; // match integers\n" + - "NEWLINE:'\\r'? '\\n' -> skip; // return newlines to parser (is end-statement signal)\n" + - "WS : [ \\t]+ -> skip ; // toss out whitespace\n"; - public static final String SAMPLE_PROGRAM = - "def f(x,y) { x = 3+4; y; ; }\n" + - "def g(x) { return 1+2*x; }\n"; - - @Test public void testValidPaths() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String xpath[] = { - "/prog/func", // all funcs under prog at root - "/prog/*", // all children of prog at root - "/*/func", // all func kids of any root node - "prog", // prog must be root node - "/prog", // prog must be root node - "/*", // any root - "*", // any root - "//ID", // any ID in tree - "//expr/primary/ID",// any ID child of a primary under any expr - "//body//ID", // any ID under a body - "//'return'", // any 'return' literal in tree, matched by literal name - "//RETURN", // any 'return' literal in tree, matched by symbolic name - "//primary/*", // all kids of any primary - "//func/*/stat", // all stat nodes grandkids of any func node - "/prog/func/'def'", // all def literal kids of func kid of prog - "//stat/';'", // all ';' under any stat node - "//expr/primary/!ID", // anything but ID under primary under any expr node - "//expr/!primary", // anything but primary under any expr node - "//!*", // nothing anywhere - "/!*", // nothing at root - "//expr//ID", // any ID under any expression (tests antlr/antlr4#370) - }; - String expected[] = { - "[func, func]", - "[func, func]", - "[func, func]", - "[prog]", - "[prog]", - "[prog]", - "[prog]", - "[f, x, y, x, y, g, x, x]", - "[y, x]", - "[x, y, x]", - "[return]", - "[return]", - "[3, 4, y, 1, 2, x]", - "[stat, stat, stat, stat]", - "[def, def]", - "[;, ;, ;, ;]", - "[3, 4, 1, 2]", - "[expr, expr, expr, expr, expr, expr]", - "[]", - "[]", - "[y, x]", - }; - - for (int i=0; i nodes = getNodeStrings(SAMPLE_PROGRAM, xpath[i], "prog", "ExprParser", "ExprLexer"); - String result = nodes.toString(); - assertEquals("path "+xpath[i]+" failed", expected[i], result); - } - } - - @Test public void testWeirdChar() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String path = "&"; - String expected = "Invalid tokens or characters at index 0 in path '&'"; - - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); - } - - @Test public void testWeirdChar2() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String path = "//w&e/"; - String expected = "Invalid tokens or characters at index 3 in path '//w&e/'"; - - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); - } - - @Test 
public void testBadSyntax() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String path = "///"; - String expected = "/ at index 2 isn't a valid rule name"; - - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); - } - - @Test public void testMissingWordAtEnd() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String path = "//"; - String expected = "Missing path element at end of path"; - - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); - } - - @Test public void testBadTokenName() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String path = "//Ick"; - String expected = "Ick at index 2 isn't a valid token name"; - - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); - } - - @Test public void testBadRuleName() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - - String path = "/prog/ick"; - String expected = "ick at index 6 isn't a valid rule name"; - - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); - } - - protected void testError(String input, String path, String expected, - String startRuleName, - String parserName, String lexerName) - throws Exception - { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - ParseTree tree = execStartRule(startRuleName, parser); - - IllegalArgumentException e = null; - try { - XPath.findAll(tree, path, parser); - } - catch (IllegalArgumentException iae) { - e = iae; - } - assertNotNull(e); - assertEquals(expected, e.getMessage()); - } - - public List getNodeStrings(String input, String xpath, - String startRuleName, - String parserName, String lexerName) - throws Exception - { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - ParseTree tree = execStartRule(startRuleName, parser); - - List nodes = new ArrayList(); - for (ParseTree t : XPath.findAll(tree, xpath, parser) ) { - if ( t instanceof RuleContext) { - RuleContext r = (RuleContext)t; - nodes.add(parser.getRuleNames()[r.getRuleIndex()]); - } - else { - TerminalNode token = (TerminalNode)t; - nodes.add(token.getText()); - } - } - return nodes; - } -} From 537819ba478c8b0d1c0270c0c300daa837f21ff8 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Mon, 27 Oct 2014 03:38:24 +0800 Subject: [PATCH 23/26] minor changes for Safari support --- .../org/antlr/v4/test/rt/gen/Generator.java | 38 ++++++++++++------- .../ParseTrees/TokenAndRuleContextString.st | 2 +- .../org/antlr/v4/test/rt/java/Java.test.stg | 2 +- .../antlr/v4/test/rt/java/TestLexerExec.java | 26 ++++++------- 4 files changed, 40 insertions(+), 28 deletions(-) diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index b6e062876..b26120556 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -34,6 +34,8 @@ public class Generator { Map configs = new HashMap(); configs.put("Source", readGrammarDir()); // source of test templates configs.put("Java", readJavaDir()); // generated Java tests + configs.put("NodeJS", readNodeJSDir()); // generated NodeJS tests + 
configs.put("Safari", readSafariDir()); // generated Firefox tests configs.put("Firefox", readFirefoxDir()); // generated Firefox tests return configs; } @@ -46,6 +48,16 @@ public class Generator { return new File(new URI(uri)); } + private static File readNodeJSDir() { + // TODO Auto-generated method stub + return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/test/rt/js/node"); + } + + private static File readSafariDir() { + // TODO read from env variable + return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/test/rt/js/safari"); + } + private static File readFirefoxDir() { // TODO read from env variable return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/test/rt/js/firefox"); @@ -1189,18 +1201,18 @@ public class Generator { file.addLexerTest(input, "EOFSuffixInFirstRule", "L", "a", "[@0,0:0='a',<1>,1:0]\n" + "[@1,1:0='',<-1>,1:1]\n", null, 2); - file.addLexerTest(input, "CharSet", "L", "34\r\n 34", + file.addLexerTest(input, "CharSet", "L", "34\n 34", "I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n", null); - file.addLexerTest(input, "CharSetPlus", "L", "34\r\n 34", + "[@1,4:5='34',<1>,2:1]\n" + + "[@2,6:5='',<-1>,2:3]\n", null); + file.addLexerTest(input, "CharSetPlus", "L", "34\n 34", "I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n", null); + "[@1,4:5='34',<1>,2:1]\n" + + "[@2,6:5='',<-1>,2:3]\n", null); file.addLexerTest(input, "CharSetNot", "L", "xaf", "I\n" + "[@0,0:2='xaf',<1>,1:0]\n" + @@ -1211,20 +1223,20 @@ public class Generator { "[@0,0:0='a',<1>,1:0]\n" + "[@1,2:2='x',<1>,1:2]\n" + "[@2,3:2='',<-1>,1:3]\n", null); - file.addLexerTest(input, "CharSetRange", "L", "34\r 34 a2 abc \n ", + file.addLexerTest(input, "CharSetRange", "L", "34\n 34 a2 abc \n ", "I\n" + "I\n" + "ID\n" + "ID\n" + "[@0,0:1='34',<1>,1:0]\n" + - "[@1,4:5='34',<1>,1:4]\n" + - "[@2,7:8='a2',<2>,1:7]\n" + - "[@3,10:12='abc',<2>,1:10]\n" + - "[@4,18:17='',<-1>,2:3]\n", null); - file.addLexerTest(input, "CharSetWithMissingEndRange", "L", "00\r\n", + "[@1,4:5='34',<1>,2:1]\n" + + "[@2,7:8='a2',<2>,2:4]\n" + + "[@3,10:12='abc',<2>,2:7]\n" + + "[@4,18:17='',<-1>,3:3]\n", null); + file.addLexerTest(input, "CharSetWithMissingEndRange", "L", "00\n", "I\n" + "[@0,0:1='00',<1>,1:0]\n" + - "[@1,4:3='',<-1>,2:0]\n", null); + "[@1,3:2='',<-1>,2:0]\n", null); file.addLexerTest(input, "CharSetWithMissingEscapeChar", "L", "34 ", "I\n" + "[@0,0:1='34',<1>,1:0]\n" + diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/TokenAndRuleContextString.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/TokenAndRuleContextString.st index 020e0bb92..8903bd5bb 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/TokenAndRuleContextString.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParseTrees/TokenAndRuleContextString.st @@ -8,5 +8,5 @@ s } : r=a ; a : 'x' { - + } ; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index edd2f23f1..157e53636 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -161,7 +161,7 @@ ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)" -WriteRuleInvocationStack() ::= "System.out.println(getRuleInvocationStack());" 
+RuleInvocationStack() ::= "getRuleInvocationStack()" LL_EXACT_AMBIG_DETECTION() ::= <<_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);>> diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java index 51114b219..3f468806f 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLexerExec.java @@ -341,12 +341,12 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D] -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34", false); + String found = execLexer("L.g4", grammar, "L", "34\n 34", false); assertEquals("I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n", found); + "[@1,4:5='34',<1>,2:1]\n" + + "[@2,6:5='',<-1>,2:3]\n", found); assertNull(this.stderrDuringParse); } @@ -355,12 +355,12 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : '0'..'9'+ {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r\n 34", false); + String found = execLexer("L.g4", grammar, "L", "34\n 34", false); assertEquals("I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + - "[@1,5:6='34',<1>,2:1]\n" + - "[@2,7:6='',<-1>,2:3]\n", found); + "[@1,4:5='34',<1>,2:1]\n" + + "[@2,6:5='',<-1>,2:3]\n", found); assertNull(this.stderrDuringParse); } @@ -397,16 +397,16 @@ public class TestLexerExec extends BaseTest { "I : [0-9]+ {System.out.println(\"I\");} ;\n" + "ID : [a-zA-Z] [a-zA-Z0-9]* {System.out.println(\"ID\");} ;\n" + "WS : [ \\n\\u0009\\r]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34\r 34 a2 abc \n ", false); + String found = execLexer("L.g4", grammar, "L", "34\n 34 a2 abc \n ", false); assertEquals("I\n" + "I\n" + "ID\n" + "ID\n" + "[@0,0:1='34',<1>,1:0]\n" + - "[@1,4:5='34',<1>,1:4]\n" + - "[@2,7:8='a2',<2>,1:7]\n" + - "[@3,10:12='abc',<2>,1:10]\n" + - "[@4,18:17='',<-1>,2:3]\n", found); + "[@1,4:5='34',<1>,2:1]\n" + + "[@2,7:8='a2',<2>,2:4]\n" + + "[@3,10:12='abc',<2>,2:7]\n" + + "[@4,18:17='',<-1>,3:3]\n", found); assertNull(this.stderrDuringParse); } @@ -415,10 +415,10 @@ public class TestLexerExec extends BaseTest { String grammar = "lexer grammar L;\n" + "I : [0-]+ {System.out.println(\"I\");} ;\n" + "WS : [ \\n\\u000D]+ -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "00\r\n", false); + String found = execLexer("L.g4", grammar, "L", "00\n", false); assertEquals("I\n" + "[@0,0:1='00',<1>,1:0]\n" + - "[@1,4:3='',<-1>,2:0]\n", found); + "[@1,3:2='',<-1>,2:0]\n", found); assertNull(this.stderrDuringParse); } From 97a2946a2d14d276ab9031bf15d07572ad5f9d30 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Wed, 29 Oct 2014 04:33:37 +0800 Subject: [PATCH 24/26] latest selenium driver --- tool/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tool/pom.xml b/tool/pom.xml index 6698deb6e..36e46df5f 100644 --- a/tool/pom.xml +++ b/tool/pom.xml @@ -23,7 +23,7 @@ org.seleniumhq.selenium selenium-java - 2.43.1 + 2.44.0 test From 86fc7fd44d07d136fed45762a8656ebf407db7c0 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sat, 1 Nov 2014 01:47:19 +0800 Subject: [PATCH 25/26] Tests generator and generated tests for Java MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As of commit, the generator produces tests for Java, 
Python 2 and 3, NodeJS and Safari. It’s a great relief to see the full set of runtime tests pass identically on all targets. The previous tests are now located at org.antlr.v4.test.tool, the files have been cleaned up to remove duplicates --- .../org/antlr/v4/test/rt/gen/Generator.java | 22 +++++++++++++++++----- ...gatorRuleOverridesLookaheadInDelegate_S.st | 4 +--- .../CompositeParsers/KeywordVSIDOrder.st | 2 +- .../grammars/LeftRecursion/JavaExpressions.st | 8 ++++---- .../MultipleActionsPredicatesOptions.st | 4 ++-- .../rt/gen/grammars/LeftRecursion/SemPred.st | 2 +- .../LeftRecursion/SemPredFailOption.st | 2 +- .../grammars/ParserErrors/ConjuringUpToken.st | 2 +- .../ParserErrors/ConjuringUpTokenFromSet.st | 2 +- .../LabelAliasingAcrossLabeledAlternatives.st | 4 ++-- .../gen/grammars/ParserExec/ReferenceToATN.st | 4 ++-- .../grammars/SemPredEvalLexer/DisableRule.st | 4 ++-- .../grammars/SemPredEvalLexer/IDnotEnum.st | 2 +- .../gen/grammars/SemPredEvalLexer/IDvsEnum.st | 2 +- .../SemPredEvalParser/2UnpredicatedAlts.st | 2 +- .../2UnpredicatedAltsAndOneOrthogonalAlt.st | 2 +- .../ActionsHidePredsInGlobalFOLLOW.st | 4 ++-- .../SemPredEvalParser/DisabledAlternative.st | 2 +- ...edNotPassedOuterCtxToAvoidCastException.st | 4 ++-- .../NoTruePredsThrowsNoViableAlt.st | 4 ++-- .../gen/grammars/SemPredEvalParser/Order.st | 2 +- .../PredTestedEvenWhenUnAmbig.st | 4 ++-- .../SemPredEvalParser/PredsInGlobalFOLLOW.st | 4 ++-- .../gen/grammars/SemPredEvalParser/Simple.st | 4 ++-- .../SemPredEvalParser/SimpleValidate.st | 4 ++-- .../SemPredEvalParser/SimpleValidate2.st | 4 ++-- .../gen/grammars/SemPredEvalParser/ToLeft.st | 4 ++-- .../ToLeftWithVaryingPredicate.st | 2 +- .../UnpredicatedPathsInAlt.st | 2 +- .../SemPredEvalParser/ValidateInDFA.st | 4 ++-- .../org/antlr/v4/test/rt/java/Java.test.stg | 8 ++++++++ .../v4/test/rt/java/TestCompositeParsers.java | 6 ++---- .../v4/test/rt/java/TestLeftRecursion.java | 16 ++++++++-------- .../v4/test/rt/java/TestParserErrors.java | 4 ++-- .../antlr/v4/test/rt/java/TestParserExec.java | 8 ++++---- .../test/rt/java/TestSemPredEvalParser.java | 2 +- 36 files changed, 88 insertions(+), 72 deletions(-) diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index b26120556..c15a95673 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -34,9 +34,11 @@ public class Generator { Map configs = new HashMap(); configs.put("Source", readGrammarDir()); // source of test templates configs.put("Java", readJavaDir()); // generated Java tests + configs.put("Python2", readPython2Dir()); // generated Python2 tests + configs.put("Python3", readPython3Dir()); // generated Python3 tests configs.put("NodeJS", readNodeJSDir()); // generated NodeJS tests configs.put("Safari", readSafariDir()); // generated Firefox tests - configs.put("Firefox", readFirefoxDir()); // generated Firefox tests + // configs.put("Firefox", readFirefoxDir()); // generated Firefox tests return configs; } @@ -48,6 +50,16 @@ public class Generator { return new File(new URI(uri)); } + private static File readPython2Dir() { + // TODO Auto-generated method stub + return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-python2/tool/test/org/antlr/v4/test/rt/py2"); + } + + private static File readPython3Dir() { + // TODO Auto-generated method stub + return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-python3/tool/test/org/antlr/v4/test/rt/py3"); + } + + private static File
readNodeJSDir() { // TODO Auto-generated method stub return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-javascript/tool/test/org/antlr/v4/test/rt/js/node"); @@ -802,12 +814,12 @@ public class Generator { "a >> b", "(s (e (e a) >> (e b)) )\n", "a=b=c", "(s (e (e a) = (e (e b) = (e c))) )\n", "a^b^c", "(s (e (e a) ^ (e (e b) ^ (e c))) )\n", - "(T)x", "(s (e ( (type T) ) (e x)) )\n", - "new A().b", "(s (e (e new (type A) ( )) . b) )\n", - "(T)t.f()", "(s (e (e ( (type T) ) (e (e t) . f)) ( )) )\n", + "(T)x", "(s (e ( (type_ T) ) (e x)) )\n", + "new A().b", "(s (e (e new (type_ A) ( )) . b) )\n", + "(T)t.f()", "(s (e (e ( (type_ T) ) (e (e t) . f)) ( )) )\n", "a.f(x)==T.c", "(s (e (e (e (e a) . f) ( (expressionList (e x)) )) == (e (e T) . c)) )\n", "a.f().g(x,1)", "(s (e (e (e (e (e a) . f) ( )) . g) ( (expressionList (e x) , (e 1)) )) )\n", - "new T[((n-1) * x) + 1]", "(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n"); + "new T[((n-1) * x) + 1]", "(s (e new (type_ T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n"); file.addParserTests(input, "Declarations", "T", "s", "a", "(s (declarator a) )\n", "*a", "(s (declarator * (declarator a)) )\n", diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st index c60a2288d..c00963f02 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/DelegatorRuleOverridesLookaheadInDelegate_S.st @@ -1,7 +1,5 @@ parser grammar S; type_ : 'int' ; decl : type_ ID ';' - | type_ ID init ';' { - - }; + | type_ ID init ';' {}; init : '=' INT; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder.st index 6e137e60e..f0620d567 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/CompositeParsers/KeywordVSIDOrder.st @@ -1,5 +1,5 @@ grammar M; import S; -a : A {}; +a : A {}; A : 'abc' {}; WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/JavaExpressions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/JavaExpressions.st index c23d612af..2cb9f774a 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/JavaExpressions.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/JavaExpressions.st @@ -8,14 +8,14 @@ e : '(' e ')' | 'super' | INT | ID - | type '.' 'class' + | type_ '.' 'class' | e '.' ID | e '.' 'this' | e '.' 'super' '(' expressionList? ')' | e '.' 'new' ID '(' expressionList? ')' - | 'new' type ( '(' expressionList? ')' | ('[' e ']')+) + | 'new' type_ ( '(' expressionList? ')' | ('[' e ']')+) | e '[' e ']' - | '(' type ')' e + | '(' type_ ')' e | e ('++' | '--') | e '(' expressionList? 
')' | ('+'|'-'|'++'|'--') e @@ -46,7 +46,7 @@ e : '(' e ')' |'\<\<=' |'%=') e ; -type: ID +type_: ID | ID '[' ']' | 'int' | 'int' '[' ']' diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st index 91a8b0a65..aa74f8906 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/MultipleActionsPredicatesOptions.st @@ -1,7 +1,7 @@ grammar ; s @after {} : e ; -e : a=e op=('*'|'/') b=e {}{true}? - | a=e op=('+'|'-') b=e {}\{true}?\ +e : a=e op=('*'|'/') b=e {}{}? + | a=e op=('+'|'-') b=e {}\{}?\ | INT {}{} | '(' x=e ')' {}{} ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPred.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPred.st index e568a8944..b95a58025 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPred.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPred.st @@ -1,6 +1,6 @@ grammar ; s @after {} : a ; -a : a {}? ID +a : a {}? ID | ID ; ID : 'a'..'z'+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st index aac2fc7f1..b5eb5f7ec 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/LeftRecursion/SemPredFailOption.st @@ -1,6 +1,6 @@ grammar ; s @after {} : a ; -a : a ID {false}?\ +a : a ID {}?\ | ID ; ID : 'a'..'z'+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpToken.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpToken.st index d7259fee4..c4ba9d33a 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpToken.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpToken.st @@ -1,2 +1,2 @@ grammar ; -a : 'a' x='b' {} 'c' ; \ No newline at end of file +a : 'a' x='b' {} 'c' ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpTokenFromSet.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpTokenFromSet.st index e8f27a064..5030a368d 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpTokenFromSet.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserErrors/ConjuringUpTokenFromSet.st @@ -1,2 +1,2 @@ grammar ; -a : 'a' x=('b'|'c') {} 'd' ; +a : 'a' x=('b'|'c') {} 'd' ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st index 6074d12bf..8e823dd2a 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/LabelAliasingAcrossLabeledAlternatives.st @@ -1,8 +1,8 @@ grammar ; start : a* EOF; a - : label=subrule { } #One - | label='y' { } #Two + : label=subrule {} #One + | label='y' {} #Two ; subrule : 'x'; WS : (' '|'\n') -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st index 6b20ad9af..1099affbc 100644 --- 
a/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/ParserExec/ReferenceToATN.st @@ -1,5 +1,5 @@ grammar ; -a : (ID|ATN)* ATN? {} ; +a : (ID|ATN_)* ATN_? {} ; ID : 'a'..'z'+ ; -ATN : '0'..'9'+; +ATN_ : '0'..'9'+; WS : (' '|'\n') -> skip ; \ No newline at end of file diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/DisableRule.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/DisableRule.st index 90248db25..ec1263a71 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/DisableRule.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/DisableRule.st @@ -1,5 +1,5 @@ lexer grammar ; -E1 : 'enum' { }? ; -E2 : 'enum' { }? ; // winner not E1 or ID +E1 : 'enum' { }? ; +E2 : 'enum' { }? ; // winner not E1 or ID ID : 'a'..'z'+ ; WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDnotEnum.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDnotEnum.st index cac3df8d1..c055aa9db 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDnotEnum.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDnotEnum.st @@ -1,4 +1,4 @@ lexer grammar ; -ENUM : [a-z]+ { }? ; +ENUM : [a-z]+ { }? ; ID : [a-z]+ ; WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDvsEnum.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDvsEnum.st index fc0b0696e..c5180a1ed 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDvsEnum.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalLexer/IDvsEnum.st @@ -1,4 +1,4 @@ lexer grammar ; -ENUM : 'enum' { }? ; +ENUM : 'enum' { }? ; ID : 'a'..'z'+ ; WS : (' '|'\n') -> skip; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAlts.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAlts.st index 3219067ba..0982d1d67 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAlts.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAlts.st @@ -2,7 +2,7 @@ grammar ; s : {} a ';' a; // do 2x: once in ATN, next in DFA a : ID {} | ID {} - | {}? ID {} + | {}? ID {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st index 47bc99281..4ab1601dc 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/2UnpredicatedAltsAndOneOrthogonalAlt.st @@ -3,7 +3,7 @@ s : {} a ';' a ';' a; a : INT {} | ID {} // must pick this one for ID since pred is false | ID {} - | {}? ID {} + | {}? ID {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st index 8c4ced294..8316cf133 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ActionsHidePredsInGlobalFOLLOW.st @@ -2,8 +2,8 @@ grammar ; @members { } -s : e {} {}? {} '!' 
; -t : e {} {}? ID ; +s : e {} {}? {} '!' ; +t : e {} {}? ID ; e : ID | ; // non-LL(1) so we use ATN ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DisabledAlternative.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DisabledAlternative.st index 869d94b27..2cfbbab6c 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DisabledAlternative.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/DisabledAlternative.st @@ -1,5 +1,5 @@ grammar ; cppCompilationUnit : content+ EOF; -content: anything | {}? .; +content: anything | {}? .; anything: ANY_CHAR; ANY_CHAR: [_a-zA-Z0-9]; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st index 1b5baf097..4143ec3b3 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/IndependentPredNotPassedOuterCtxToAvoidCastException.st @@ -2,8 +2,8 @@ grammar ; s : b ';' | b '.' ; b : a ; a - : {}? ID {} - | {}? ID {} + : {}? ID {} + | {}? ID {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st index ae5714552..dd7b05895 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/NoTruePredsThrowsNoViableAlt.st @@ -1,7 +1,7 @@ grammar ; s : a a; -a : {}? ID INT {} - | {}? ID INT {} +a : {}? ID INT {} + | {}? ID INT {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Order.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Order.st index 2b6d59d4f..1d83ac807 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Order.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Order.st @@ -3,7 +3,7 @@ s : a {} a; // do 2x: once in ATN, next in DFA; // action blocks lookahead from falling off of 'a' // and looking into 2nd 'a' ref. !ctx dependent pred a : ID {} - | {}? ID {} + | {}? ID {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st index 8840f393b..7b724ad2f 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredTestedEvenWhenUnAmbig.st @@ -1,8 +1,8 @@ grammar ; -@members {} +@members {} primary : ID {} - | {!}? 'enum' {} + | {}? 
'enum' {} ; ID : [a-z]+ ; WS : [ \t\n\r]+ -> skip ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st index 3a63138a3..27c61d473 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/PredsInGlobalFOLLOW.st @@ -2,8 +2,8 @@ grammar ; @members { } -s : e {}? {} '!' ; -t : e {}? ID ; +s : e {}? {} '!' ; +t : e {}? ID ; e : ID | ; // non-LL(1) so we use ATN ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Simple.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Simple.st index 8570ed205..5285fcf83 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Simple.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/Simple.st @@ -1,7 +1,7 @@ grammar ; s : a a a; // do 3x: once in ATN, next in DFA then INT in ATN -a : {}? ID {} - | {}? ID {} +a : {}? ID {} + | {}? ID {} | INT {} ; ID : 'a'..'z'+ ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate.st index 3b0577eeb..5169d43b6 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate.st @@ -1,7 +1,7 @@ grammar ; s : a ; -a : {}? ID {} - | {}? INT {} +a : {}? ID {} + | {}? INT {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate2.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate2.st index 6a84abcca..606c0083e 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate2.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/SimpleValidate2.st @@ -1,7 +1,7 @@ grammar ; s : a a a; -a : {}? ID {} - | {}? INT {} +a : {}? ID {} + | {}? INT {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeft.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeft.st index bf6e1f899..a69e500a8 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeft.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeft.st @@ -1,7 +1,7 @@ grammar ; s : a+ ; -a : {}? ID {} - | {}? ID {} +a : {}? ID {} + | {}? ID {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st index be00d9913..f67afc57e 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ToLeftWithVaryingPredicate.st @@ -1,7 +1,7 @@ grammar ; @members {} s : ({ - } a)+ ; +} a)+ ; a : {}? ID {} | {}? 
ID {} ; diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st index a53c09554..207df7b63 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/UnpredicatedPathsInAlt.st @@ -2,7 +2,7 @@ grammar ; s : a {} | b {} ; -a : {}? ID INT +a : {}? ID INT | ID INT ; b : ID ID diff --git a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ValidateInDFA.st b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ValidateInDFA.st index 7b153b7c4..47b10e627 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ValidateInDFA.st +++ b/tool/test/org/antlr/v4/test/rt/gen/grammars/SemPredEvalParser/ValidateInDFA.st @@ -3,8 +3,8 @@ s : a ';' a; // ';' helps us to resynchronize without consuming // 2nd 'a' reference. We our testing that the DFA also // throws an exception if the validating predicate fails -a : {}? ID {} - | {}? INT {} +a : {}? ID {} + | {}? INT {} ; ID : 'a'..'z'+ ; INT : '0'..'9'+; diff --git a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg index 157e53636..d52e9fa49 100644 --- a/tool/test/org/antlr/v4/test/rt/java/Java.test.stg +++ b/tool/test/org/antlr/v4/test/rt/java/Java.test.stg @@ -99,10 +99,18 @@ writeln(s) ::= <);>> write(s) ::= <);>> +False() ::= "false" + +True() ::= "true" + +Not(v) ::= "!" + Assert(s) ::= <);>> Cast(t,v) ::= "(())" +Append(a,b) ::= " + " + Concat(a,b) ::= "" DeclareLocal(s,v) ::= "Object = ;" diff --git a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java index 7c4c99f84..c5599b0ce 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestCompositeParsers.java @@ -209,9 +209,7 @@ public class TestCompositeParsers extends BaseTest { String slave_S = "parser grammar S;\n" + "type_ : 'int' ;\n" + "decl : type_ ID ';'\n" + - " | type_ ID init ';' {\n" + - " System.out.print(\"Decl: \" + $text);\n" + - " };\n" + + " | type_ ID init ';' {System.out.print(\"Decl: \" + $text);};\n" + "init : '=' INT;"; mkdir(tmpdir); writeFile(tmpdir, "S.g4", slave_S); @@ -261,7 +259,7 @@ public class TestCompositeParsers extends BaseTest { String grammar = "grammar M;\n" + "import S;\n" + - "a : A {System.out.println(\"M.a: \"+$A);};\n" + + "a : A {System.out.println(\"M.a: \" + $A);};\n" + "A : 'abc' {System.out.println(\"M.A\");};\n" + "WS : (' '|'\\n') -> skip ;"; String found = execParser("M.g4", grammar, "MParser", "MLexer", "a", "abc", false); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java index 6bac42fa3..0709657db 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestLeftRecursion.java @@ -236,14 +236,14 @@ public class TestLeftRecursion extends BaseTest { " | 'super'\n" + " | INT\n" + " | ID\n" + - " | type '.' 'class'\n" + + " | type_ '.' 'class'\n" + " | e '.' ID\n" + " | e '.' 'this'\n" + " | e '.' 'super' '(' expressionList? ')'\n" + " | e '.' 'new' ID '(' expressionList? ')'\n" + - " | 'new' type ( '(' expressionList? ')' | ('[' e ']')+)\n" + + " | 'new' type_ ( '(' expressionList? 
')' | ('[' e ']')+)\n" + " | e '[' e ']'\n" + - " | '(' type ')' e\n" + + " | '(' type_ ')' e\n" + " | e ('++' | '--')\n" + " | e '(' expressionList? ')'\n" + " | ('+'|'-'|'++'|'--') e\n" + @@ -274,7 +274,7 @@ public class TestLeftRecursion extends BaseTest { " |'<<='\n" + " |'%=') e\n" + " ;\n" + - "type: ID \n" + + "type_: ID \n" + " | ID '[' ']'\n" + " | 'int'\n" + " | 'int' '[' ']' \n" + @@ -330,21 +330,21 @@ public class TestLeftRecursion extends BaseTest { @Test public void testJavaExpressions_7() throws Exception { String found = testJavaExpressions("(T)x"); - assertEquals("(s (e ( (type T) ) (e x)) )\n", found); + assertEquals("(s (e ( (type_ T) ) (e x)) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_8() throws Exception { String found = testJavaExpressions("new A().b"); - assertEquals("(s (e (e new (type A) ( )) . b) )\n", found); + assertEquals("(s (e (e new (type_ A) ( )) . b) )\n", found); assertNull(this.stderrDuringParse); } @Test public void testJavaExpressions_9() throws Exception { String found = testJavaExpressions("(T)t.f()"); - assertEquals("(s (e (e ( (type T) ) (e (e t) . f)) ( )) )\n", found); + assertEquals("(s (e (e ( (type_ T) ) (e (e t) . f)) ( )) )\n", found); assertNull(this.stderrDuringParse); } @@ -365,7 +365,7 @@ public class TestLeftRecursion extends BaseTest { @Test public void testJavaExpressions_12() throws Exception { String found = testJavaExpressions("new T[((n-1) * x) + 1]"); - assertEquals("(s (e new (type T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n", found); + assertEquals("(s (e new (type_ T) [ (e (e ( (e (e ( (e (e n) - (e 1)) )) * (e x)) )) + (e 1)) ]) )\n", found); assertNull(this.stderrDuringParse); } diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java index f9c081c50..3c94359e7 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserErrors.java @@ -44,7 +44,7 @@ public class TestParserErrors extends BaseTest { @Test public void testConjuringUpToken() throws Exception { String grammar = "grammar T;\n" + - "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;"; + "a : 'a' x='b' {System.out.println(\"conjured=\" + $x);} 'c' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false); assertEquals("conjured=[@-1,-1:-1='',<2>,1:1]\n", found); assertEquals("line 1:1 missing 'b' at 'c'\n", this.stderrDuringParse); @@ -62,7 +62,7 @@ public class TestParserErrors extends BaseTest { @Test public void testConjuringUpTokenFromSet() throws Exception { String grammar = "grammar T;\n" + - "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;"; + "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\" + $x);} 'd' ;"; String found = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false); assertEquals("conjured=[@-1,-1:-1='',<2>,1:1]\n", found); assertEquals("line 1:1 missing {'b', 'c'} at 'd'\n", this.stderrDuringParse); diff --git a/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java b/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java index a001a0b3b..325bff2cb 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestParserExec.java @@ -317,8 +317,8 @@ public class TestParserExec extends BaseTest { String grammar = "grammar T;\n" + "start : a* EOF;\n" + "a\n" + - " : label=subrule { System.out.println($label.text); } #One\n" 
+ - " | label='y' { System.out.println($label.text); } #Two\n" + + " : label=subrule {System.out.println($label.text);} #One\n" + + " | label='y' {System.out.println($label.text);} #Two\n" + " ;\n" + "subrule : 'x';\n" + "WS : (' '|'\\n') -> skip ;"; @@ -395,9 +395,9 @@ public class TestParserExec extends BaseTest { String testReferenceToATN(String input) throws Exception { String grammar = "grammar T;\n" + - "a : (ID|ATN)* ATN? {System.out.println($text);} ;\n" + + "a : (ID|ATN_)* ATN_? {System.out.println($text);} ;\n" + "ID : 'a'..'z'+ ;\n" + - "ATN : '0'..'9'+;\n" + + "ATN_ : '0'..'9'+;\n" + "WS : (' '|'\\n') -> skip ;"; return execParser("T.g4", grammar, "TParser", "TLexer", "a", input, false); } diff --git a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java index 6bbd98d4d..5f4442351 100644 --- a/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java +++ b/tool/test/org/antlr/v4/test/rt/java/TestSemPredEvalParser.java @@ -216,7 +216,7 @@ public class TestSemPredEvalParser extends BaseTest { String grammar = "grammar T;\n" + "@members {int i = 0;}\n" + "s : ({this.i += 1;\n" + - " System.out.println(\"i=\" + this.i);} a)+ ;\n" + + "System.out.println(\"i=\" + this.i);} a)+ ;\n" + "a : {this.i % 2 == 0}? ID {System.out.println(\"alt 1\");}\n" + " | {this.i % 2 != 0}? ID {System.out.println(\"alt 2\");}\n" + " ;\n" + From 5847fd6995fc5ea6c6736a6615316eb0802344c4 Mon Sep 17 00:00:00 2001 From: ericvergnaud Date: Sun, 9 Nov 2014 13:22:52 +0800 Subject: [PATCH 26/26] preparing for CSharp --- tool/test/org/antlr/v4/test/rt/gen/Generator.java | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tool/test/org/antlr/v4/test/rt/gen/Generator.java b/tool/test/org/antlr/v4/test/rt/gen/Generator.java index c15a95673..c51888c31 100644 --- a/tool/test/org/antlr/v4/test/rt/gen/Generator.java +++ b/tool/test/org/antlr/v4/test/rt/gen/Generator.java @@ -34,10 +34,11 @@ public class Generator { Map configs = new HashMap(); configs.put("Source", readGrammarDir()); // source of test templates configs.put("Java", readJavaDir()); // generated Java tests - configs.put("Python2", readPython2Dir()); // generated Python2 tests - configs.put("Python3", readPython3Dir()); // generated Python3 tests - configs.put("NodeJS", readNodeJSDir()); // generated NodeJS tests - configs.put("Safari", readSafariDir()); // generated Firefox tests + configs.put("CSharp", readCSharpDir()); // generated CSharp tests + // configs.put("Python2", readPython2Dir()); // generated Python2 tests + // configs.put("Python3", readPython3Dir()); // generated Python3 tests + // configs.put("NodeJS", readNodeJSDir()); // generated NodeJS tests + // configs.put("Safari", readSafariDir()); // generated Firefox tests // configs.put("Firefox", readFirefoxDir()); // generated Firefox tests return configs; } @@ -50,6 +51,11 @@ public class Generator { return new File(new URI(uri)); } + private static File readCSharpDir() { + // TODO Auto-generated method stub + return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-csharp/tool/test/org/antlr/v4/test/rt/csharp"); + } + private static File readPython2Dir() { // TODO Auto-generated method stub return new File("/Users/ericvergnaud/Development/antlr4/antlr/antlr4-python2/tool/test/org/antlr/v4/test/rt/py2");