From 7dc17ace6e1624ba08e3e2e322b60aa6617f5666 Mon Sep 17 00:00:00 2001 From: Ivan Kochurkin Date: Sun, 14 May 2017 13:23:53 +0300 Subject: [PATCH 001/102] Distinct error codes, added unit-test. fixes #1865. --- .../org/antlr/v4/test/tool/TestToolSyntaxErrors.java | 11 +++++++++++ tool/src/org/antlr/v4/tool/ErrorType.java | 8 ++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java index 3219c0bad..b19d6c865 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java @@ -8,6 +8,7 @@ package org.antlr.v4.test.tool; import org.antlr.v4.Tool; import org.antlr.v4.tool.ErrorType; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -60,6 +61,16 @@ public class TestToolSyntaxErrors extends BaseJavaToolTest { super.testSetUp(); } + @Test + public void AllErrorCodesDistinct() { + ErrorType[] errorTypes = ErrorType.class.getEnumConstants(); + for (int i = 0; i < errorTypes.length; i++) { + for (int j = i + 1; j < errorTypes.length; j++) { + Assert.assertNotEquals(errorTypes[i].code, errorTypes[j].code); + } + } + } + @Test public void testA() { super.testErrors(A, true); } @Test public void testExtraColon() { diff --git a/tool/src/org/antlr/v4/tool/ErrorType.java b/tool/src/org/antlr/v4/tool/ErrorType.java index 700269fb9..d7ad7ab40 100644 --- a/tool/src/org/antlr/v4/tool/ErrorType.java +++ b/tool/src/org/antlr/v4/tool/ErrorType.java @@ -394,11 +394,11 @@ public enum ErrorType { */ IMPORT_NAME_CLASH(113, " grammar and imported grammar both generate ", ErrorSeverity.ERROR), /** - * Compiler Error 160. + * Compiler Error 114. * *

cannot find tokens file filename

*/ - CANNOT_FIND_TOKENS_FILE_REFD_IN_GRAMMAR(160, "cannot find tokens file ", ErrorSeverity.ERROR), + CANNOT_FIND_TOKENS_FILE_REFD_IN_GRAMMAR(114, "cannot find tokens file ", ErrorSeverity.ERROR), /** * Compiler Warning 118. * @@ -522,7 +522,7 @@ public enum ErrorType { */ USE_OF_BAD_WORD(134, "symbol conflicts with generated code in target language or runtime", ErrorSeverity.ERROR), /** - * Compiler Error 134. + * Compiler Error 183. * *

rule reference rule is not currently supported in a set

* @@ -530,7 +530,7 @@ public enum ErrorType { * Note: This error has the same number as the unrelated error * {@link #USE_OF_BAD_WORD}.

*/ - UNSUPPORTED_REFERENCE_IN_LEXER_SET(134, "rule reference is not currently supported in a set", ErrorSeverity.ERROR), + UNSUPPORTED_REFERENCE_IN_LEXER_SET(183, "rule reference is not currently supported in a set", ErrorSeverity.ERROR), /** * Compiler Error 135. * From 49b462f9607a08dd98e37e672990afacf04f9136 Mon Sep 17 00:00:00 2001 From: Sergey Parshukov Date: Wed, 28 Jun 2017 11:43:50 +0300 Subject: [PATCH 002/102] Go file header complies with standardised Go 'generated code' header --- contributors.txt | 1 + tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/contributors.txt b/contributors.txt index 71e97419e..4de6877cd 100644 --- a/contributors.txt +++ b/contributors.txt @@ -150,3 +150,4 @@ YYYY/MM/DD, github id, Full name, email 2017/05/29, kosak, Corey Kosak, kosak@kosak.com 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com 2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg index b1835e4e7..75112868a 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg @@ -1,5 +1,5 @@ fileHeader(grammarFileName, ANTLRVersion) ::= << -// Generated from by ANTLR . +// Code generated from by ANTLR . DO NOT EDIT. >> ParserFile(file, parser, namedActions, contextSuperClass) ::= << From f33fca678923dbb4999fa6cbb0eb729395d6203d Mon Sep 17 00:00:00 2001 From: Mike Lischke Date: Sat, 1 Jul 2017 19:30:02 +0200 Subject: [PATCH 003/102] XCode build fixes for previous C++ patch. 
--- .../antlrcpp.xcodeproj/project.pbxproj | 112 ++++++- .../Cpp/runtime/src/ANTLRErrorListener.cpp | 5 + .../Cpp/runtime/src/ANTLRErrorStrategy.cpp | 5 + runtime/Cpp/runtime/src/Token.cpp | 5 + runtime/Cpp/runtime/src/TokenSource.cpp | 5 + runtime/Cpp/runtime/src/WritableToken.cpp | 5 + .../Cpp/runtime/src/atn/BlockStartState.cpp | 5 + runtime/Cpp/runtime/src/atn/LexerAction.cpp | 5 + .../runtime/src/atn/ParserATNSimulator.cpp | 12 +- .../Cpp/runtime/src/atn/ParserATNSimulator.h | 275 +++++++++--------- runtime/Cpp/runtime/src/support/Any.cpp | 5 + runtime/Cpp/runtime/src/tree/ErrorNode.cpp | 5 + .../src/tree/IterativeParseTreeWalker.cpp | 31 +- .../runtime/src/tree/ParseTreeListener.cpp | 5 + .../Cpp/runtime/src/tree/ParseTreeVisitor.cpp | 5 + runtime/Cpp/runtime/src/tree/TerminalNode.cpp | 5 + .../Cpp/runtime/src/tree/pattern/Chunk.cpp | 5 + 17 files changed, 309 insertions(+), 186 deletions(-) diff --git a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj index 643c05885..ced55cf90 100644 --- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj +++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj @@ -534,9 +534,6 @@ 276E5F411CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; }; 276E5F421CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; }; 276E5F431CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; }; - 276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; }; - 276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa 
= PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; }; 276E5F471CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; }; 276E5F481CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; }; 276E5F491CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; }; @@ -800,6 +797,45 @@ 27745F081CE49C000067C6A3 /* RuntimeMetaData.h in Headers */ = {isa = PBXBuildFile; fileRef = 27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */; }; 27874F1E1CCB7A0700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; }; 27874F211CCB7B1700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; }; + 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; }; + 2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; }; + 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; }; + 2793DC891F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; }; + 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; }; + 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; }; + 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; + 
2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; + 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; + 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; + 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; + 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; + 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; + 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; + 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; + 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; + 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; + 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; + 2793DC9D1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; }; + 2793DC9E1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; }; + 2793DC9F1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* 
Any.cpp */; }; + 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; }; + 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; }; + 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; }; + 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; }; + 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; }; + 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; }; + 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; }; + 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; }; + 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; }; + 2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; + 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; + 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; + 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; + 2793DCB41F08099C00A84290 /* 
BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; + 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; + 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; + 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; + 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; 2794D8561CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; }; 2794D8571CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; }; 2794D8581CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; }; @@ -1061,7 +1097,6 @@ 276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InterpreterRuleContext.h; sourceTree = ""; wrapsLines = 0; }; 276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IntStream.cpp; sourceTree = ""; }; 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntStream.h; sourceTree = ""; }; - 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IRecognizer.h; sourceTree = ""; }; 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
Lexer.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5CC21CDB57AA003FF4B4 /* Lexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Lexer.h; sourceTree = ""; }; 276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerInterpreter.cpp; sourceTree = ""; wrapsLines = 0; }; @@ -1152,6 +1187,19 @@ 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; }; 278E313E1D9D6534001C28F9 /* Tests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Tests.m; sourceTree = ""; }; 278E31401D9D6534001C28F9 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 2793DC841F08083F00A84290 /* TokenSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TokenSource.cpp; sourceTree = ""; }; + 2793DC881F08087500A84290 /* Chunk.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Chunk.cpp; sourceTree = ""; }; + 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeListener.cpp; sourceTree = ""; }; + 2793DC901F0808A200A84290 /* TerminalNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TerminalNode.cpp; sourceTree = ""; }; + 2793DC941F0808E100A84290 /* ErrorNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorNode.cpp; sourceTree = ""; }; + 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
ParseTreeVisitor.cpp; sourceTree = ""; }; + 2793DC9C1F08090D00A84290 /* Any.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Any.cpp; sourceTree = ""; }; + 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorListener.cpp; sourceTree = ""; }; + 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorStrategy.cpp; sourceTree = ""; }; + 2793DCA21F08095F00A84290 /* Token.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Token.cpp; sourceTree = ""; }; + 2793DCA31F08095F00A84290 /* WritableToken.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WritableToken.cpp; sourceTree = ""; }; + 2793DCB01F08099C00A84290 /* BlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockStartState.cpp; sourceTree = ""; }; + 2793DCB11F08099C00A84290 /* LexerAction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerAction.cpp; sourceTree = ""; }; 2794D8551CE7821B00FADD0F /* antlr4-common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-common.h"; sourceTree = ""; }; 27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-runtime.h"; sourceTree = ""; }; 27B36AC41DACE7AF0069C868 /* RuleContextWithAltNum.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleContextWithAltNum.cpp; sourceTree = ""; }; @@ -1230,7 +1278,9 @@ 276E5CF91CDB57AA003FF4B4 /* tree */, 2794D8551CE7821B00FADD0F /* antlr4-common.h */, 27AC52CF1CE773A80093AAAB /* antlr4-runtime.h 
*/, + 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */, 276E5C0C1CDB57AA003FF4B4 /* ANTLRErrorListener.h */, + 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */, 276E5C0D1CDB57AA003FF4B4 /* ANTLRErrorStrategy.h */, 276E5C0E1CDB57AA003FF4B4 /* ANTLRFileStream.cpp */, 276E5C0F1CDB57AA003FF4B4 /* ANTLRFileStream.h */, @@ -1266,7 +1316,6 @@ 276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */, 276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */, 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */, - 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */, 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */, 276E5CC21CDB57AA003FF4B4 /* Lexer.h */, 276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */, @@ -1295,8 +1344,10 @@ 27B36AC51DACE7AF0069C868 /* RuleContextWithAltNum.h */, 27745EFB1CE49C000067C6A3 /* RuntimeMetaData.cpp */, 27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */, + 2793DCA21F08095F00A84290 /* Token.cpp */, 276E5CF01CDB57AA003FF4B4 /* Token.h */, 276E5CF21CDB57AA003FF4B4 /* TokenFactory.h */, + 2793DC841F08083F00A84290 /* TokenSource.cpp */, 276E5CF41CDB57AA003FF4B4 /* TokenSource.h */, 276E5CF51CDB57AA003FF4B4 /* TokenStream.cpp */, 276E5CF61CDB57AA003FF4B4 /* TokenStream.h */, @@ -1308,6 +1359,7 @@ 276E5D251CDB57AA003FF4B4 /* UnbufferedTokenStream.h */, 276E5D271CDB57AA003FF4B4 /* Vocabulary.cpp */, 276E5D281CDB57AA003FF4B4 /* Vocabulary.h */, + 2793DCA31F08095F00A84290 /* WritableToken.cpp */, 276E5D2A1CDB57AA003FF4B4 /* WritableToken.h */, ); name = runtime; @@ -1350,6 +1402,7 @@ 276E5C321CDB57AA003FF4B4 /* BasicState.h */, 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */, 276E5C341CDB57AA003FF4B4 /* BlockEndState.h */, + 2793DCB01F08099C00A84290 /* BlockStartState.cpp */, 276E5C351CDB57AA003FF4B4 /* BlockStartState.h */, 276E5C371CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp */, 276E5C381CDB57AA003FF4B4 /* ContextSensitivityInfo.h */, @@ -1365,6 +1418,7 @@ 276E5C421CDB57AA003FF4B4 /* EpsilonTransition.h */, 276E5C431CDB57AA003FF4B4 /* ErrorInfo.cpp */, 276E5C441CDB57AA003FF4B4 
/* ErrorInfo.h */, + 2793DCB11F08099C00A84290 /* LexerAction.cpp */, 276E5C451CDB57AA003FF4B4 /* LexerAction.h */, 276E5C461CDB57AA003FF4B4 /* LexerActionExecutor.cpp */, 276E5C471CDB57AA003FF4B4 /* LexerActionExecutor.h */, @@ -1483,6 +1537,7 @@ 276E5CE41CDB57AA003FF4B4 /* support */ = { isa = PBXGroup; children = ( + 2793DC9C1F08090D00A84290 /* Any.cpp */, 27F4A8551D4CEB2A00E067EE /* Any.h */, 276E5CE51CDB57AA003FF4B4 /* Arrays.cpp */, 276E5CE61CDB57AA003FF4B4 /* Arrays.h */, @@ -1504,6 +1559,7 @@ 276E5D061CDB57AA003FF4B4 /* pattern */, 27DB448A1D045537007E790B /* xpath */, 276E5CFA1CDB57AA003FF4B4 /* AbstractParseTreeVisitor.h */, + 2793DC941F0808E100A84290 /* ErrorNode.cpp */, 276E5CFB1CDB57AA003FF4B4 /* ErrorNode.h */, 276E5CFC1CDB57AA003FF4B4 /* ErrorNodeImpl.cpp */, 276E5CFD1CDB57AA003FF4B4 /* ErrorNodeImpl.h */, @@ -1511,11 +1567,14 @@ 27D414511DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.h */, 276566DF1DA93BFB000869BE /* ParseTree.cpp */, 276E5CFE1CDB57AA003FF4B4 /* ParseTree.h */, + 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */, 276E5D001CDB57AA003FF4B4 /* ParseTreeListener.h */, 276E5D021CDB57AA003FF4B4 /* ParseTreeProperty.h */, + 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */, 276E5D031CDB57AA003FF4B4 /* ParseTreeVisitor.h */, 276E5D041CDB57AA003FF4B4 /* ParseTreeWalker.cpp */, 276E5D051CDB57AA003FF4B4 /* ParseTreeWalker.h */, + 2793DC901F0808A200A84290 /* TerminalNode.cpp */, 276E5D181CDB57AA003FF4B4 /* TerminalNode.h */, 276E5D191CDB57AA003FF4B4 /* TerminalNodeImpl.cpp */, 276E5D1A1CDB57AA003FF4B4 /* TerminalNodeImpl.h */, @@ -1529,6 +1588,7 @@ isa = PBXGroup; children = ( 276E5D071CDB57AA003FF4B4 /* Chunk.h */, + 2793DC881F08087500A84290 /* Chunk.cpp */, 276E5D081CDB57AA003FF4B4 /* ParseTreeMatch.cpp */, 276E5D091CDB57AA003FF4B4 /* ParseTreeMatch.h */, 276E5D0A1CDB57AA003FF4B4 /* ParseTreePattern.cpp */, @@ -1707,7 +1767,6 @@ 27DB44CC1D0463DB007E790B /* XPathElement.h in Headers */, 276E5F581CDB57AA003FF4B4 /* LexerNoViableAltException.h 
in Headers */, 276E5D811CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */, 27DB44B61D0463CC007E790B /* XPathLexer.h in Headers */, 276E5FC41CDB57AA003FF4B4 /* guid.h in Headers */, 276E602D1CDB57AA003FF4B4 /* TagChunk.h in Headers */, @@ -1875,7 +1934,6 @@ 276E60141CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 276E5F571CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D801CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */, 276E5FC31CDB57AA003FF4B4 /* guid.h in Headers */, 276E602C1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E941CDB57AA003FF4B4 /* RuleStopState.h in Headers */, @@ -2033,7 +2091,6 @@ 276E60131CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 276E5F561CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D7F1CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */, 276E5FC21CDB57AA003FF4B4 /* guid.h in Headers */, 276E602B1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E931CDB57AA003FF4B4 /* RuleStopState.h in Headers */, @@ -2225,10 +2282,12 @@ 276E60451CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD21CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 276E5F551CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, + 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5E561CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1D1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, 276E5EBC1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 276E5D721CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, + 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */, 276E5E2F1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFF1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, 276E60511CDB57AA003FF4B4 /* Trees.cpp in Sources */, @@ -2256,6 +2315,8 @@ 276E5E921CDB57AA003FF4B4 /* 
RuleStopState.cpp in Sources */, 276E60631CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DDB1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, + 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */, + 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9E1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 276E5EC81CDB57AA003FF4B4 /* Transition.cpp in Sources */, 276E601E1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */, @@ -2263,12 +2324,15 @@ 276E5D481CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, 276E5DC61CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED41CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, + 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, + 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */, 276E5FA31CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6C1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */, 276E60361CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 27DB44D51D0463DB007E790B /* XPathTokenElement.cpp in Sources */, 27DB44D11D0463DB007E790B /* XPathRuleElement.cpp in Sources */, 276E5DED1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, + 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606C1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1C1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60181CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, @@ -2293,7 +2357,9 @@ 276E5D781CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F051CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAE1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, + 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, 276E5D661CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */, + 2793DC9F1F08090D00A84290 /* Any.cpp in Sources */, 276E5FAF1CDB57AA003FF4B4 /* Arrays.cpp in Sources */, 276E5ECE1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 
276E5E861CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, @@ -2301,6 +2367,7 @@ 276E5D9C1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, 276E5FC11CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E801CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, + 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F401CDB57AA003FF4B4 /* IntStream.cpp in Sources */, 276E5F5B1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */, 276E5F6D1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, @@ -2315,6 +2382,7 @@ 27DB44CF1D0463DB007E790B /* XPathRuleAnywhereElement.cpp in Sources */, 276E5E441CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */, 276E5DCC1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */, + 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */, 276E5D5A1CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE61CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 276E5EE01CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, @@ -2333,6 +2401,8 @@ 276E5DC01CDB57AA003FF4B4 /* DecisionState.cpp in Sources */, 276E5E981CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF81CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, + 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */, + 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60121CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E21DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEC1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2365,10 +2435,12 @@ 276E60441CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD11CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 276E5F541CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, + 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5E551CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1C1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, 276E5EBB1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 276E5D711CDB57AA003FF4B4 /* 
ATNDeserializer.cpp in Sources */, + 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */, 276E5E2E1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFE1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, 276E60501CDB57AA003FF4B4 /* Trees.cpp in Sources */, @@ -2396,6 +2468,8 @@ 276E5E911CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */, 276E60621CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DDA1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, + 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */, + 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9D1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 276E5EC71CDB57AA003FF4B4 /* Transition.cpp in Sources */, 276E601D1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */, @@ -2403,12 +2477,15 @@ 276E5D471CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, 276E5DC51CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED31CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, + 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, + 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */, 276E5FA21CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6B1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */, 276E60351CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 27DB44C31D0463DA007E790B /* XPathTokenElement.cpp in Sources */, 27DB44BF1D0463DA007E790B /* XPathRuleElement.cpp in Sources */, 276E5DEC1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, + 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606B1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1B1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60171CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, @@ -2433,7 +2510,9 @@ 276E5D771CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F041CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAD1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp 
in Sources */, + 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, 276E5D651CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */, + 2793DC9E1F08090D00A84290 /* Any.cpp in Sources */, 276E5FAE1CDB57AA003FF4B4 /* Arrays.cpp in Sources */, 276E5ECD1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E851CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, @@ -2441,6 +2520,7 @@ 276E5D9B1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, 276E5FC01CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E7F1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, + 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F3F1CDB57AA003FF4B4 /* IntStream.cpp in Sources */, 276E5F5A1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */, 276E5F6C1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, @@ -2455,6 +2535,7 @@ 27DB44BD1D0463DA007E790B /* XPathRuleAnywhereElement.cpp in Sources */, 276E5E431CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */, 276E5DCB1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */, + 2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */, 276E5D591CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE51CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 276E5EDF1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, @@ -2473,6 +2554,8 @@ 276E5DBF1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */, 276E5E971CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF71CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, + 2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */, + 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60111CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E11DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEB1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2505,10 +2588,12 @@ 276E5DB21CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */, 276E60431CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD01CDB57AA003FF4B4 /* 
ErrorInfo.cpp in Sources */, + 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5F531CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, 276E5E541CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1B1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, 276E5EBA1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, + 2793DC891F08087500A84290 /* Chunk.cpp in Sources */, 276E5D701CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, 276E5E2D1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFD1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, @@ -2536,6 +2621,8 @@ 276E60611CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DD91CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, 27DB449D1D045537007E790B /* XPath.cpp in Sources */, + 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */, + 2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9C1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 27DB44AD1D045537007E790B /* XPathWildcardElement.cpp in Sources */, 276E5EC61CDB57AA003FF4B4 /* Transition.cpp in Sources */, @@ -2543,12 +2630,15 @@ 27DB44A51D045537007E790B /* XPathRuleElement.cpp in Sources */, 276E5F201CDB57AA003FF4B4 /* DiagnosticErrorListener.cpp in Sources */, 276E5D461CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, + 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, + 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */, 276E5DC41CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED21CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, 276E5FA11CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6A1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */, 276E60341CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 276E5DEB1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, + 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606A1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 
276E5F1A1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60161CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, @@ -2573,7 +2663,9 @@ 276E5D761CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F031CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAC1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, + 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, 276E5D641CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */, + 2793DC9D1F08090D00A84290 /* Any.cpp in Sources */, 276E5FAD1CDB57AA003FF4B4 /* Arrays.cpp in Sources */, 276E5ECC1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E841CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, @@ -2581,6 +2673,7 @@ 276E5D9A1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, 276E5FBF1CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E7E1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, + 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F3E1CDB57AA003FF4B4 /* IntStream.cpp in Sources */, 276E5F591CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */, 276E5F6B1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, @@ -2595,6 +2688,7 @@ 276E5D581CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE41CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 27DB44AB1D045537007E790B /* XPathWildcardAnywhereElement.cpp in Sources */, + 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */, 276E5EDE1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, 276E5F021CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */, 276E5D401CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */, @@ -2613,6 +2707,8 @@ 276E5DBE1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */, 276E5E961CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF61CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, + 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */, + 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60101CDB57AA003FF4B4 
/* ParseTreeMatch.cpp in Sources */, 276566E01DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEA1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, diff --git a/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp b/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp index ab0d40328..6ceadb87f 100644 --- a/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp +++ b/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "ANTLRErrorListener.h" antlr4::ANTLRErrorListener::~ANTLRErrorListener() diff --git a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp index 04af575c3..1655a5731 100644 --- a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp +++ b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "ANTLRErrorStrategy.h" antlr4::ANTLRErrorStrategy::~ANTLRErrorStrategy() diff --git a/runtime/Cpp/runtime/src/Token.cpp b/runtime/Cpp/runtime/src/Token.cpp index 06047867a..31266b42d 100644 --- a/runtime/Cpp/runtime/src/Token.cpp +++ b/runtime/Cpp/runtime/src/Token.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "Token.h" antlr4::Token::~Token() { diff --git a/runtime/Cpp/runtime/src/TokenSource.cpp b/runtime/Cpp/runtime/src/TokenSource.cpp index 50b9684ec..6b9d7af2f 100644 --- a/runtime/Cpp/runtime/src/TokenSource.cpp +++ b/runtime/Cpp/runtime/src/TokenSource.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "TokenSource.h" antlr4::TokenSource::~TokenSource() { diff --git a/runtime/Cpp/runtime/src/WritableToken.cpp b/runtime/Cpp/runtime/src/WritableToken.cpp index 2e3b01241..a30cd96f1 100644 --- a/runtime/Cpp/runtime/src/WritableToken.cpp +++ b/runtime/Cpp/runtime/src/WritableToken.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "WritableToken.h" antlr4::WritableToken::~WritableToken() { diff --git a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp index b8ec09440..44cca8f77 100644 --- a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp +++ b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "BlockStartState.h" antlr4::atn::BlockStartState::~BlockStartState() { diff --git a/runtime/Cpp/runtime/src/atn/LexerAction.cpp b/runtime/Cpp/runtime/src/atn/LexerAction.cpp index 5c98cfe43..983ba6d52 100644 --- a/runtime/Cpp/runtime/src/atn/LexerAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerAction.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + #include "LexerAction.h" antlr4::atn::LexerAction::~LexerAction() { diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp index 0d7d9c54d..5e82bbaff 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp @@ -184,7 +184,7 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream throw e; } - if (D->requiresFullContext && mode != PredictionMode::SLL) { + if (D->requiresFullContext && _mode != PredictionMode::SLL) { // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) BitSet conflictingAlts; if (D->predicates.size() != 0) { @@ -283,7 +283,7 @@ dfa::DFAState *ParserATNSimulator::computeTargetState(dfa::DFA &dfa, dfa::DFASta D->isAcceptState = true; D->configs->uniqueAlt = predictedAlt; D->prediction = predictedAlt; - } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(mode, D->configs.get())) { + } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(_mode, D->configs.get())) { // MORE THAN ONE VIABLE ALTERNATIVE D->configs->conflictingAlts = getConflictingAlts(D->configs.get()); D->requiresFullContext = true; @@ -370,7 +370,7 @@ size_t ParserATNSimulator::execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState * predictedAlt = reach->uniqueAlt; break; } - if (mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) { + if (_mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) { predictedAlt = PredictionModeClass::resolvesToJustOneViableAlt(altSubSets); if (predictedAlt != ATN::INVALID_ALT_NUMBER) { break; @@ -1332,11 +1332,11 @@ void ParserATNSimulator::reportAmbiguity(dfa::DFA &dfa, dfa::DFAState * /*D*/, s } void ParserATNSimulator::setPredictionMode(PredictionMode newMode) { - mode = newMode; + _mode = newMode; } atn::PredictionMode ParserATNSimulator::getPredictionMode() { - return mode; + return _mode; } Parser* ParserATNSimulator::getParser() { @@ -1352,6 +1352,6 @@ bool 
ParserATNSimulator::getLrLoopSetting() { } void ParserATNSimulator::InitializeInstanceFields() { - mode = PredictionMode::LL; + _mode = PredictionMode::LL; _startIndex = 0; } diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h index b5c6d98a9..e2a406324 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h @@ -243,20 +243,133 @@ namespace atn { * the input.

*/ class ANTLR4CPP_PUBLIC ParserATNSimulator : public ATNSimulator { - protected: - Parser *const parser; - public: + /// Testing only! + ParserATNSimulator(const ATN &atn, std::vector &decisionToDFA, + PredictionContextCache &sharedContextCache); + + ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, + PredictionContextCache &sharedContextCache); + + virtual void reset() override; + virtual void clearDFA() override; + virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext); + static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT; std::vector &decisionToDFA; + + /** Implements first-edge (loop entry) elimination as an optimization + * during closure operations. See antlr/antlr4#1398. + * + * The optimization is to avoid adding the loop entry config when + * the exit path can only lead back to the same + * StarLoopEntryState after popping context at the rule end state + * (traversing only epsilon edges, so we're still in closure, in + * this same rule). + * + * We need to detect any state that can reach loop entry on + * epsilon w/o exiting rule. We don't have to look at FOLLOW + * links, just ensure that all stack tops for config refer to key + * states in LR rule. + * + * To verify we are in the right situation we must first check + * closure is at a StarLoopEntryState generated during LR removal. + * Then we check that each stack top of context is a return state + * from one of these cases: + * + * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state + * 2. expr op expr. The return state is the block end of internal block of (...)* + * 3. 'between' expr 'and' expr. The return state of 2nd expr reference. + * That state points at block end of internal block of (...)*. + * 4. expr '?' expr ':' expr. The return state points at block end, + * which points at loop entry state. 
+ * + * If any is true for each stack top, then closure does not add a + * config to the current config set for edge[0], the loop entry branch. + * + * Conditions fail if any context for the current config is: + * + * a. empty (we'd fall out of expr to do a global FOLLOW which could + * even be to some weird spot in expr) or, + * b. lies outside of expr or, + * c. lies within expr but at a state not the BlockEndState + * generated during LR removal + * + * Do we need to evaluate predicates ever in closure for this case? + * + * No. Predicates, including precedence predicates, are only + * evaluated when computing a DFA start state. I.e., only before + * the lookahead (but not parser) consumes a token. + * + * There are no epsilon edges allowed in LR rule alt blocks or in + * the "primary" part (ID here). If closure is in + * StarLoopEntryState any lookahead operation will have consumed a + * token as there are no epsilon-paths that lead to + * StarLoopEntryState. We do not have to evaluate predicates + * therefore if we are in the generated StarLoopEntryState of a LR + * rule. Note that when making a prediction starting at that + * decision point, decision d=2, compute-start-state performs + * closure starting at edges[0], edges[1] emanating from + * StarLoopEntryState. That means it is not performing closure on + * StarLoopEntryState during compute-start-state. + * + * How do we know this always gives same prediction answer? + * + * Without predicates, loop entry and exit paths are ambiguous + * upon remaining input +b (in, say, a+b). Either paths lead to + * valid parses. Closure can lead to consuming + immediately or by + * falling out of this call to expr back into expr and loop back + * again to StarLoopEntryState to match +b. In this special case, + * we choose the more efficient path, which is to take the bypass + * path. + * + * The lookahead language has not changed because closure chooses + * one path over the other. 
Both paths lead to consuming the same + * remaining input during a lookahead operation. If the next token + * is an operator, lookahead will enter the choice block with + * operators. If it is not, lookahead will exit expr. Same as if + * closure had chosen to enter the choice block immediately. + * + * Closure is examining one config (some loopentrystate, some alt, + * context) which means it is considering exactly one alt. Closure + * always copies the same alt to any derived configs. + * + * How do we know this optimization doesn't mess up precedence in + * our parse trees? + * + * Looking through expr from left edge of stat only has to confirm + * that an input, say, a+b+c; begins with any valid interpretation + * of an expression. The precedence actually doesn't matter when + * making a decision in stat seeing through expr. It is only when + * parsing rule expr that we must use the precedence to get the + * right interpretation and, hence, parse tree. + */ + bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const; + virtual std::string getRuleName(size_t index); + + virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt, + bool collectPredicates, bool inContext, bool fullCtx); + + void setPredictionMode(PredictionMode newMode); + PredictionMode getPredictionMode(); + + Parser* getParser(); + + virtual std::string getTokenName(size_t t); + + virtual std::string getLookaheadName(TokenStream *input); - private: /// - /// SLL, LL, or LL + exact ambig detection? - PredictionMode mode; - + /// Used for debugging in adaptivePredict around execATN but I cut + /// it out for clarity now that alg. works well. We can leave this + /// "dead" code for a bit. + /// + virtual void dumpDeadEndConfigs(NoViableAltException &nvae); + protected: + Parser *const parser; + /// /// Each prediction operation uses a cache for merge of prediction contexts. /// Don't keep around as it wastes huge amounts of memory. 
The merge cache @@ -273,20 +386,7 @@ namespace atn { size_t _startIndex; ParserRuleContext *_outerContext; dfa::DFA *_dfa; // Reference into the decisionToDFA vector. - - public: - /// Testing only! - ParserATNSimulator(const ATN &atn, std::vector &decisionToDFA, - PredictionContextCache &sharedContextCache); - - ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, - PredictionContextCache &sharedContextCache); - - virtual void reset() override; - virtual void clearDFA() override; - virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext); - - protected: + /// /// Performs ATN simulation to compute a predicted alternative based /// upon the remaining input, but also updates the DFA cache to avoid @@ -350,7 +450,7 @@ namespace atn { // comes back with reach.uniqueAlt set to a valid alt virtual size_t execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0, - TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over + TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over virtual std::unique_ptr computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx); @@ -549,10 +649,10 @@ namespace atn { virtual ATNState *getReachableTarget(Transition *trans, size_t ttype); virtual std::vector> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts, - ATNConfigSet *configs, size_t nalts); + ATNConfigSet *configs, size_t nalts); virtual std::vector getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, - std::vector> altToPred); + std::vector> altToPred); /** * This method is used to improve the localization of error messages by @@ -601,7 +701,7 @@ namespace atn { * identified and {@link #adaptivePredict} should report an error instead. 
*/ size_t getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(ATNConfigSet *configs, - ParserRuleContext *outerContext); + ParserRuleContext *outerContext); virtual size_t getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs); @@ -615,7 +715,7 @@ namespace atn { * prediction, which is where predicates need to evaluate. */ std::pair splitAccordingToSemanticValidity(ATNConfigSet *configs, - ParserRuleContext *outerContext); + ParserRuleContext *outerContext); /// /// Look through a list of predicate/alt pairs, returning alts for the @@ -627,7 +727,6 @@ namespace atn { virtual antlrcpp::BitSet evalSemanticContext(std::vector predPredictions, ParserRuleContext *outerContext, bool complete); - /** * Evaluate a semantic context within a specific parser context. * @@ -672,111 +771,15 @@ namespace atn { virtual void closureCheckingStopState(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon); - + /// Do the actual work of walking epsilon edges. virtual void closure_(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon); - - public: - /** Implements first-edge (loop entry) elimination as an optimization - * during closure operations. See antlr/antlr4#1398. - * - * The optimization is to avoid adding the loop entry config when - * the exit path can only lead back to the same - * StarLoopEntryState after popping context at the rule end state - * (traversing only epsilon edges, so we're still in closure, in - * this same rule). - * - * We need to detect any state that can reach loop entry on - * epsilon w/o exiting rule. We don't have to look at FOLLOW - * links, just ensure that all stack tops for config refer to key - * states in LR rule. - * - * To verify we are in the right situation we must first check - * closure is at a StarLoopEntryState generated during LR removal. 
- * Then we check that each stack top of context is a return state - * from one of these cases: - * - * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state - * 2. expr op expr. The return state is the block end of internal block of (...)* - * 3. 'between' expr 'and' expr. The return state of 2nd expr reference. - * That state points at block end of internal block of (...)*. - * 4. expr '?' expr ':' expr. The return state points at block end, - * which points at loop entry state. - * - * If any is true for each stack top, then closure does not add a - * config to the current config set for edge[0], the loop entry branch. - * - * Conditions fail if any context for the current config is: - * - * a. empty (we'd fall out of expr to do a global FOLLOW which could - * even be to some weird spot in expr) or, - * b. lies outside of expr or, - * c. lies within expr but at a state not the BlockEndState - * generated during LR removal - * - * Do we need to evaluate predicates ever in closure for this case? - * - * No. Predicates, including precedence predicates, are only - * evaluated when computing a DFA start state. I.e., only before - * the lookahead (but not parser) consumes a token. - * - * There are no epsilon edges allowed in LR rule alt blocks or in - * the "primary" part (ID here). If closure is in - * StarLoopEntryState any lookahead operation will have consumed a - * token as there are no epsilon-paths that lead to - * StarLoopEntryState. We do not have to evaluate predicates - * therefore if we are in the generated StarLoopEntryState of a LR - * rule. Note that when making a prediction starting at that - * decision point, decision d=2, compute-start-state performs - * closure starting at edges[0], edges[1] emanating from - * StarLoopEntryState. That means it is not performing closure on - * StarLoopEntryState during compute-start-state. - * - * How do we know this always gives same prediction answer? 
- * - * Without predicates, loop entry and exit paths are ambiguous - * upon remaining input +b (in, say, a+b). Either paths lead to - * valid parses. Closure can lead to consuming + immediately or by - * falling out of this call to expr back into expr and loop back - * again to StarLoopEntryState to match +b. In this special case, - * we choose the more efficient path, which is to take the bypass - * path. - * - * The lookahead language has not changed because closure chooses - * one path over the other. Both paths lead to consuming the same - * remaining input during a lookahead operation. If the next token - * is an operator, lookahead will enter the choice block with - * operators. If it is not, lookahead will exit expr. Same as if - * closure had chosen to enter the choice block immediately. - * - * Closure is examining one config (some loopentrystate, some alt, - * context) which means it is considering exactly one alt. Closure - * always copies the same alt to any derived configs. - * - * How do we know this optimization doesn't mess up precedence in - * our parse trees? - * - * Looking through expr from left edge of stat only has to confirm - * that an input, say, a+b+c; begins with any valid interpretation - * of an expression. The precedence actually doesn't matter when - * making a decision in stat seeing through expr. It is only when - * parsing rule expr that we must use the precedence to get the - * right interpretation and, hence, parse tree. 
- */ - bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const; - virtual std::string getRuleName(size_t index); - - protected: + virtual Ref getEpsilonTarget(Ref const& config, Transition *t, bool collectPredicates, bool inContext, bool fullCtx, bool treatEofAsEpsilon); virtual Ref actionTransition(Ref const& config, ActionTransition *t); - public: - virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt, - bool collectPredicates, bool inContext, bool fullCtx); - - protected: virtual Ref predTransition(Ref const& config, PredicateTransition *pt, bool collectPredicates, bool inContext, bool fullCtx); @@ -832,19 +835,6 @@ namespace atn { virtual antlrcpp::BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet *configs); - public: - virtual std::string getTokenName(size_t t); - - virtual std::string getLookaheadName(TokenStream *input); - - /// - /// Used for debugging in adaptivePredict around execATN but I cut - /// it out for clarity now that alg. works well. We can leave this - /// "dead" code for a bit. - /// - virtual void dumpDeadEndConfigs(NoViableAltException &nvae); - - protected: virtual NoViableAltException noViableAlt(TokenStream *input, ParserRuleContext *outerContext, ATNConfigSet *configs, size_t startIndex); @@ -901,13 +891,10 @@ namespace atn { const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs); // configs that LL not SLL considered conflicting - public: - void setPredictionMode(PredictionMode newMode); - PredictionMode getPredictionMode(); - - Parser* getParser(); - private: + // SLL, LL, or LL + exact ambig detection? + PredictionMode _mode; + static bool getLrLoopSetting(); void InitializeInstanceFields(); }; diff --git a/runtime/Cpp/runtime/src/support/Any.cpp b/runtime/Cpp/runtime/src/support/Any.cpp index 1404343d3..2eec593b0 100644 --- a/runtime/Cpp/runtime/src/support/Any.cpp +++ b/runtime/Cpp/runtime/src/support/Any.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "Any.h" antlrcpp::Any::~Any() diff --git a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp index 685047d20..ade2539af 100644 --- a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp +++ b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "tree/ErrorNode.h" antlr4::tree::ErrorNode::~ErrorNode() { diff --git a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp index 5ce30d3a7..a4b3efd73 100644 --- a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp +++ b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp @@ -1,31 +1,6 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. */ #include "support/CPPUtils.h" diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp index 820962118..ce1229758 100644 --- a/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp +++ b/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "ParseTreeListener.h" antlr4::tree::ParseTreeListener::~ParseTreeListener() { diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp index 5298eee09..a329919c1 100644 --- a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp +++ b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "ParseTreeVisitor.h" antlr4::tree::ParseTreeVisitor::~ParseTreeVisitor() { diff --git a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp index e41ff7e9d..d630469c7 100644 --- a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp +++ b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "tree/TerminalNode.h" antlr4::tree::TerminalNode::~TerminalNode() { diff --git a/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp b/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp index 7997ce867..5320f910b 100644 --- a/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "tree/pattern/Chunk.h" antlr4::tree::pattern::Chunk::~Chunk() { From b7060ba1f5f306247d1e05dc1c97f3ec27796eef Mon Sep 17 00:00:00 2001 From: Mike Lischke Date: Sun, 2 Jul 2017 14:29:54 +0200 Subject: [PATCH 004/102] Visual Studio build fixes for previous C++ patch. VS 2013 doesn't accept certain C++11 constants like std::numeric_limits, so we have to return to the code used before that mentioned patch. 
--- runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj | 16 ++++++- .../runtime/antlr4cpp-vs2013.vcxproj.filters | 44 ++++++++++++++++++- runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj | 16 ++++++- .../runtime/antlr4cpp-vs2015.vcxproj.filters | 44 ++++++++++++++++++- runtime/Cpp/runtime/src/IntStream.h | 2 +- runtime/Cpp/runtime/src/Recognizer.h | 2 +- runtime/Cpp/runtime/src/Token.h | 2 +- .../Cpp/runtime/src/UnbufferedCharStream.cpp | 4 +- runtime/Cpp/runtime/src/antlr4-common.h | 2 + runtime/Cpp/runtime/src/atn/ATNState.h | 2 +- .../Cpp/runtime/src/atn/PredictionContext.h | 3 +- runtime/Cpp/runtime/src/support/Any.cpp | 6 ++- runtime/Cpp/runtime/src/support/Any.h | 2 +- 13 files changed, 130 insertions(+), 15 deletions(-) diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj index 50ab20c8b..80f9ebf77 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj @@ -321,6 +321,8 @@ + + @@ -339,6 +341,7 @@ + @@ -346,6 +349,7 @@ + @@ -412,6 +416,7 @@ + @@ -422,16 +427,23 @@ + + + + + + + @@ -439,6 +451,7 @@ + @@ -454,6 +467,7 @@ + @@ -620,4 +634,4 @@ - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters index d3b301654..499a82ed4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters @@ -938,5 +938,47 @@ Source Files\tree + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\support + + + Source Files\atn + + + Source Files\atn + + + Source Files\tree\pattern + + + Source Files\misc + - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj index e549a78b6..f9bebf6fe 100644 --- 
a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj @@ -334,6 +334,8 @@ + + @@ -352,6 +354,7 @@ + @@ -359,6 +362,7 @@ + @@ -425,6 +429,7 @@ + @@ -435,16 +440,23 @@ + + + + + + + @@ -452,6 +464,7 @@ + @@ -467,6 +480,7 @@ + @@ -633,4 +647,4 @@ - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters index 21eaaf722..26db5b9c4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters @@ -938,5 +938,47 @@ Source Files\tree + + Source Files + + + Source Files + + + Source Files\atn + + + Source Files\atn + + + Source Files\misc + + + Source Files + + + Source Files + + + Source Files + + + Source Files\support + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree\pattern + - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/src/IntStream.h b/runtime/Cpp/runtime/src/IntStream.h index 7c7401074..96d23d56f 100755 --- a/runtime/Cpp/runtime/src/IntStream.h +++ b/runtime/Cpp/runtime/src/IntStream.h @@ -27,7 +27,7 @@ namespace antlr4 { /// class ANTLR4CPP_PUBLIC IntStream { public: - static const size_t EOF = std::numeric_limits::max(); + static const size_t EOF = (size_t)-1; // std::numeric_limits::max(); doesn't work in VS 2013 /// The value returned by when the end of the stream is /// reached. diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h index dbffde2e7..adca6c1d6 100755 --- a/runtime/Cpp/runtime/src/Recognizer.h +++ b/runtime/Cpp/runtime/src/Recognizer.h @@ -11,7 +11,7 @@ namespace antlr4 { class ANTLR4CPP_PUBLIC Recognizer { public: - static const size_t EOF = std::numeric_limits::max(); + static const size_t EOF = (size_t)-1; // std::numeric_limits::max(); doesn't work in VS 2013. 
Recognizer(); Recognizer(Recognizer const&) = delete; diff --git a/runtime/Cpp/runtime/src/Token.h b/runtime/Cpp/runtime/src/Token.h index 2560c7f1b..4e194224d 100755 --- a/runtime/Cpp/runtime/src/Token.h +++ b/runtime/Cpp/runtime/src/Token.h @@ -18,7 +18,7 @@ namespace antlr4 { /// During lookahead operations, this "token" signifies we hit rule end ATN state /// and did not follow it despite needing to. - static const size_t EPSILON = std::numeric_limits::max() - 1; + static const size_t EPSILON = (size_t)-2; static const size_t MIN_USER_TOKEN_TYPE = 1; static const size_t EOF = IntStream::EOF; diff --git a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp index 6a9152b50..53e905c2f 100755 --- a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp @@ -52,7 +52,7 @@ void UnbufferedCharStream::sync(size_t want) { size_t UnbufferedCharStream::fill(size_t n) { for (size_t i = 0; i < n; i++) { - if (_data.size() > 0 && _data.back() == (uint32_t)EOF) { + if (_data.size() > 0 && _data.back() == 0xFFFF) { return i; } @@ -101,7 +101,7 @@ size_t UnbufferedCharStream::LA(ssize_t i) { return EOF; } - if (_data[(size_t)index] == (uint32_t)EOF) { + if (_data[(size_t)index] == 0xFFFF) { return EOF; } diff --git a/runtime/Cpp/runtime/src/antlr4-common.h b/runtime/Cpp/runtime/src/antlr4-common.h index dc0596f1d..316256276 100644 --- a/runtime/Cpp/runtime/src/antlr4-common.h +++ b/runtime/Cpp/runtime/src/antlr4-common.h @@ -63,6 +63,8 @@ typedef std::basic_string<__int32> i32string; typedef i32string UTF32String; + #else + typedef std::u32string UTF32String; #endif #ifdef ANTLR4CPP_EXPORTS diff --git a/runtime/Cpp/runtime/src/atn/ATNState.h b/runtime/Cpp/runtime/src/atn/ATNState.h index a6035b4c6..4b4f72571 100755 --- a/runtime/Cpp/runtime/src/atn/ATNState.h +++ b/runtime/Cpp/runtime/src/atn/ATNState.h @@ -77,7 +77,7 @@ namespace atn { virtual ~ATNState(); static const size_t 
INITIAL_NUM_TRANSITIONS = 4; - static const size_t INVALID_STATE_NUMBER = std::numeric_limits::max(); + static const size_t INVALID_STATE_NUMBER = (size_t)-1; // std::numeric_limits::max(); enum { ATN_INVALID_TYPE = 0, diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.h b/runtime/Cpp/runtime/src/atn/PredictionContext.h index fb053f14a..290642154 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.h @@ -17,7 +17,6 @@ namespace atn { class PredictionContextMergeCache; typedef std::unordered_set, PredictionContextHasher, PredictionContextComparer> PredictionContextCache; - //typedef std::map, Ref>, Ref> PredictionContextMergeCache; class ANTLR4CPP_PUBLIC PredictionContext { public: @@ -31,7 +30,7 @@ namespace atn { // ml: originally Integer.MAX_VALUE, which would be (size_t)-1 for us, but this is already used in places where // -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't // conflict with real return states. 
- static const size_t EMPTY_RETURN_STATE = std::numeric_limits::max() - 9; + static const size_t EMPTY_RETURN_STATE = (size_t)-10; // std::numeric_limits::max() - 9; private: static const size_t INITIAL_HASH = 1; diff --git a/runtime/Cpp/runtime/src/support/Any.cpp b/runtime/Cpp/runtime/src/support/Any.cpp index 2eec593b0..3dd1a94bf 100644 --- a/runtime/Cpp/runtime/src/support/Any.cpp +++ b/runtime/Cpp/runtime/src/support/Any.cpp @@ -5,10 +5,12 @@ #include "Any.h" -antlrcpp::Any::~Any() +using namespace antlrcpp; + +Any::~Any() { delete _ptr; } -antlrcpp::Any::Base::~Base() { +Any::Base::~Base() { } diff --git a/runtime/Cpp/runtime/src/support/Any.h b/runtime/Cpp/runtime/src/support/Any.h index f9559b30d..3d8845c70 100644 --- a/runtime/Cpp/runtime/src/support/Any.h +++ b/runtime/Cpp/runtime/src/support/Any.h @@ -19,7 +19,7 @@ namespace antlrcpp { template using StorageType = typename std::decay::type; -struct Any +struct ANTLR4CPP_PUBLIC Any { bool isNull() const { return _ptr == nullptr; } bool isNotNull() const { return _ptr != nullptr; } From 6319d62409b846a4f4912c127fcb39c0a575e416 Mon Sep 17 00:00:00 2001 From: Mike Lischke Date: Sun, 2 Jul 2017 14:31:01 +0200 Subject: [PATCH 005/102] Don't create VS 2013 packages anymore by default. Can be enabled on demand. --- runtime/Cpp/deploy-windows.cmd | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/runtime/Cpp/deploy-windows.cmd b/runtime/Cpp/deploy-windows.cmd index ec81b5940..5660f26a2 100644 --- a/runtime/Cpp/deploy-windows.cmd +++ b/runtime/Cpp/deploy-windows.cmd @@ -12,7 +12,8 @@ rem Headers xcopy runtime\src\*.h antlr4-runtime\ /s rem Binaries -if exist "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" ( +rem VS 2013 disabled by default. Change the X to a C to enable it. 
+if exist "X:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" ( call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" pushd runtime From 749fa81993d2d215d0175f7779d0d495cc528cdc Mon Sep 17 00:00:00 2001 From: WAKAYAMA Shirou Date: Fri, 7 Jul 2017 13:14:16 +0900 Subject: [PATCH 006/102] [Go] Add exported getter method to Interval. --- runtime/Go/antlr/interval_set.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/runtime/Go/antlr/interval_set.go b/runtime/Go/antlr/interval_set.go index 749ec1cb3..180f94b94 100644 --- a/runtime/Go/antlr/interval_set.go +++ b/runtime/Go/antlr/interval_set.go @@ -23,6 +23,18 @@ func NewInterval(start, stop int) *Interval { return i } +func (i *Interval) GetStart() int { + return i.start +} + +func (i *Interval) GetStop() int { + return i.stop +} + +func (i *Interval) Contains(item int) bool { + return i.contains(item) +} + func (i *Interval) contains(item int) bool { return item >= i.start && item < i.stop } From 9eb7edbf30addf8c39c5cf21636e7e5e7d21ca31 Mon Sep 17 00:00:00 2001 From: WAKAYAMA Shirou Date: Fri, 7 Jul 2017 13:16:41 +0900 Subject: [PATCH 007/102] add to contributors.txt --- contributors.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contributors.txt b/contributors.txt index 63d6d57f5..c6661d56d 100644 --- a/contributors.txt +++ b/contributors.txt @@ -150,4 +150,5 @@ YYYY/MM/DD, github id, Full name, email 2017/05/29, kosak, Corey Kosak, kosak@kosak.com 2017/06/11, erikbra, Erik A. 
Brandstadmoen, erik@brandstadmoen.net 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com -2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com \ No newline at end of file +2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/07/07, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com From 1054e658b2c968ad5d00e35a63a79c7f037e4985 Mon Sep 17 00:00:00 2001 From: WAKAYAMA Shirou Date: Fri, 7 Jul 2017 14:41:01 +0900 Subject: [PATCH 008/102] [Go] change Interval.contains to public instead of add wrapper function. --- runtime/Go/antlr/interval_set.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/runtime/Go/antlr/interval_set.go b/runtime/Go/antlr/interval_set.go index 180f94b94..939f72ec9 100644 --- a/runtime/Go/antlr/interval_set.go +++ b/runtime/Go/antlr/interval_set.go @@ -32,10 +32,6 @@ func (i *Interval) GetStop() int { } func (i *Interval) Contains(item int) bool { - return i.contains(item) -} - -func (i *Interval) contains(item int) bool { return item >= i.start && item < i.stop } @@ -143,7 +139,7 @@ func (i *IntervalSet) contains(item int) bool { return false } for k := 0; k < len(i.intervals); k++ { - if i.intervals[k].contains(item) { + if i.intervals[k].Contains(item) { return true } } From 1ce5f91573b8d67d4b1cbd058d44ab8f73c4505e Mon Sep 17 00:00:00 2001 From: Arshinskiy Mike Date: Sun, 9 Jul 2017 09:39:46 +0200 Subject: [PATCH 009/102] [Go] Implemented TokenStreamRewriter TokenStreamRewriter implementation was missing Ported code from Java version; however, there are couple of deviations due to difference between composition (Go) and inheritance (Java) concepts Ported tests from Swift for LexerA --- contributors.txt | 3 +- runtime/Go/antlr/tokenstream_rewriter.go | 722 ++++++++++++++++++ runtime/Go/antlr/tokenstream_rewriter_test.go | 392 ++++++++++ 3 files changed, 1116 insertions(+), 1 deletion(-) create mode 100644 runtime/Go/antlr/tokenstream_rewriter.go create mode 100644 
runtime/Go/antlr/tokenstream_rewriter_test.go diff --git a/contributors.txt b/contributors.txt index 63d6d57f5..484e8da60 100644 --- a/contributors.txt +++ b/contributors.txt @@ -150,4 +150,5 @@ YYYY/MM/DD, github id, Full name, email 2017/05/29, kosak, Corey Kosak, kosak@kosak.com 2017/06/11, erikbra, Erik A. Brandstadmoen, erik@brandstadmoen.net 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com -2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com \ No newline at end of file +2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com diff --git a/runtime/Go/antlr/tokenstream_rewriter.go b/runtime/Go/antlr/tokenstream_rewriter.go new file mode 100644 index 000000000..32f48514b --- /dev/null +++ b/runtime/Go/antlr/tokenstream_rewriter.go @@ -0,0 +1,722 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. +package antlr + +import ( +"bytes" +"fmt" +) + + +// Useful for rewriting out a buffered input token stream after doing some +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. +// +//

+// You can insert stuff, replace, and delete chunks. Note that the operations +// are done lazily--only if you convert the buffer to a {@link String} with +// {@link TokenStream#getText()}. This is very efficient because you are not +// moving data around all the time. As the buffer of tokens is converted to +// strings, the {@link #getText()} method(s) scan the input token stream and +// check to see if there is an operation at the current index. If so, the +// operation is done and then normal {@link String} rendering continues on the +// buffer. This is like having multiple Turing machine instruction streams +// (programs) operating on a single input tape. :)

+// +//

+// This rewriter makes no modifications to the token stream. It does not ask the +// stream to fill itself up nor does it advance the input cursor. The token +// stream {@link TokenStream#index()} will return the same value before and +// after any {@link #getText()} call.

+// +//

+// The rewriter only works on tokens that you have in the buffer and ignores the +// current input cursor. If you are buffering tokens on-demand, calling +// {@link #getText()} halfway through the input will only do rewrites for those +// tokens in the first half of the file.

+// +//

+// Since the operations are done lazily at {@link #getText}-time, operations do +// not screw up the token index values. That is, an insert operation at token +// index {@code i} does not change the index values for tokens +// {@code i}+1..n-1.

+// +//

+// Because operations never actually alter the buffer, you may always get the +// original token stream back without undoing anything. Since the instructions +// are queued up, you can easily simulate transactions and roll back any changes +// if there is an error just by removing instructions. For example,

+// +//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+// 
+// +//

+// Then in the rules, you can execute (assuming rewriter is visible):

+// +//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+// 
+// +//

+// You can also have multiple "instruction streams" and get multiple rewrites +// from a single pass over the input. Just name the instruction streams and use +// that name again when printing the buffer. This could be useful for generating +// a C file and also its header file--all from the same buffer:

+// +//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+// 
+// +//

+// If you don't use named rewrite streams, a "default" stream is used as the +// first example shows.

+// /augmentation or other manipulations on it. +// +//

+// You can insert stuff, replace, and delete chunks. Note that the operations +// are done lazily--only if you convert the buffer to a {@link String} with +// {@link TokenStream#getText()}. This is very efficient because you are not +// moving data around all the time. As the buffer of tokens is converted to +// strings, the {@link #getText()} method(s) scan the input token stream and +// check to see if there is an operation at the current index. If so, the +// operation is done and then normal {@link String} rendering continues on the +// buffer. This is like having multiple Turing machine instruction streams +// (programs) operating on a single input tape. :)

+// +//

+// This rewriter makes no modifications to the token stream. It does not ask the +// stream to fill itself up nor does it advance the input cursor. The token +// stream {@link TokenStream#index()} will return the same value before and +// after any {@link #getText()} call.

+// +//

+// The rewriter only works on tokens that you have in the buffer and ignores the +// current input cursor. If you are buffering tokens on-demand, calling +// {@link #getText()} halfway through the input will only do rewrites for those +// tokens in the first half of the file.

+// +//

+// Since the operations are done lazily at {@link #getText}-time, operations do +// not screw up the token index values. That is, an insert operation at token +// index {@code i} does not change the index values for tokens +// {@code i}+1..n-1.

+// +//

+// Because operations never actually alter the buffer, you may always get the +// original token stream back without undoing anything. Since the instructions +// are queued up, you can easily simulate transactions and roll back any changes +// if there is an error just by removing instructions. For example,

+// +//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+// 
+// +//

+// Then in the rules, you can execute (assuming rewriter is visible):

+// +//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+// 
+// +//

+// You can also have multiple "instruction streams" and get multiple rewrites +// from a single pass over the input. Just name the instruction streams and use +// that name again when printing the buffer. This could be useful for generating +// a C file and also its header file--all from the same buffer:

+// +//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+// 
+// +//

+// If you don't use named rewrite streams, a "default" stream is used as the +// first example shows.

+// + +const( + Default_Program_Name = "default" + Program_Init_Size = 100 + Min_Token_Index = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instruction_index int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + op_name string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation)GetInstructionIndex() int{ + return op.instruction_index +} + +func (op *BaseRewriteOperation)GetIndex() int{ + return op.index +} + +func (op *BaseRewriteOperation)GetText() string{ + return op.text +} + +func (op *BaseRewriteOperation)GetOpName() string{ + return op.op_name +} + +func (op *BaseRewriteOperation)GetTokens() TokenStream{ + return op.tokens +} + +func (op *BaseRewriteOperation)SetInstructionIndex(val int){ + op.instruction_index = val +} + +func (op *BaseRewriteOperation)SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation)SetText(val string){ + op.text = val +} + +func (op *BaseRewriteOperation)SetOpName(val string){ + op.op_name = val +} + +func (op *BaseRewriteOperation)SetTokens(val TokenStream) { + op.tokens = val +} + + +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.op_name, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, 
text string, stream TokenStream) *InsertBeforeOp{ + return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index, + text:text, + op_name:"InsertBeforeOp", + tokens:stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// Distinguish between insert after/before to do the "insert afters" +// first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". + +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ + return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index+1, + text:text, + tokens:stream, + }} +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct{ + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation:BaseRewriteOperation{ + index:from, + text:text, + op_name:"ReplaceOp", + tokens:stream, + }, + LastIndex:to, + } +} + +func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ + if op.text != ""{ + buffer.WriteString(op.text) + } + return op.LastIndex +1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) + } + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) +} + + +type TokenStreamRewriter struct { + //Our source stream + tokens TokenStream + // You may have multiple, named streams of rewrite operations. + // I'm calling these things "programs." + // Maps String (name) → rewrite (List) + programs map[string][]RewriteOperation + last_rewrite_token_indexes map[string]int +} + +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ + return &TokenStreamRewriter{ + tokens: tokens, + programs: map[string][]RewriteOperation{ + Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), + }, + last_rewrite_token_indexes: map[string]int{}, + } +} + +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ + return tsr.tokens +} + +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! 
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ + is, ok := tsr.programs[program_name] + if ok{ + tsr.programs[program_name] = is[Min_Token_Index:instruction_index] + } +} + +func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ + tsr.Rollback(Default_Program_Name, instruction_index) +} +//Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ + tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included +} + +func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ + tsr.DeleteProgram(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ + // to insert after, just insert before next index (even if past end) + var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ + tsr.InsertAfter(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ + tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +} + +func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ + var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ + tsr.InsertBefore(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ + tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) Replace(program_name string, from, to 
int, text string){ + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) { + tsr.Replace(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){ + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){ + tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){ + tsr.ReplaceToken(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){ + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){ + tsr.Replace(program_name, from, to, "" ) +} + +func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){ + tsr.Delete(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){ + tsr.DeleteDefault(index,index) +} + +func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) { + tsr.ReplaceToken(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){ + tsr.DeleteToken(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int { + i, ok := tsr.last_rewrite_token_indexes[program_name] + if !ok{ + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{ + return tsr.GetLastRewriteTokenIndex(Default_Program_Name) +} + +func 
(tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){ + tsr.last_rewrite_token_indexes[program_name] = i +} + +func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{ + is := make([]RewriteOperation, 0, Program_Init_Size) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){ + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok{ + is = tsr.InitializeProgram(name) + } + return is +} +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter)GetTextDefault() string{ + return tsr.GetText( + Default_Program_Name, + NewInterval(0, tsr.tokens.Size()-1)) +} +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { + rewrites := tsr.programs[program_name] + start := interval.start + stop := interval.stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start,0) + if rewrites == nil || len(rewrites) == 0{ + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())} + } + } + return buf.String() +} + +// We need to combine operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... 
Here are the cases: +// +// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this replace. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the replace range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// Return a map from token index to operation. 
+// +func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{ + // WALK REPLACES + for i:=0; i < len(rewrites); i++{ + op := rewrites[i] + if op == nil{continue} + rop, ok := op.(*ReplaceOp) + if !ok{continue} + // Wipe prior inserts within range + for j:=0; j rop.index && iop.index <=rop.LastIndex{ + // delete insert as it's a no-op. + rewrites[iop.instruction_index] = nil + } + } + } + // Drop any prior replaces contained within + for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{ + // delete replace as it's a no-op. + rewrites[prevop.instruction_index] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint{ + rewrites[prevop.instruction_index] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + println("new rop" + rop.String()) //TODO: remove console write, taken from Java version + }else if !disjoint{ + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i:=0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil{continue} + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok{continue} + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{ + panic("insert op "+iop.String()+" within boundaries of previous "+rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i:=0; i < len(rewrites); i++{ + op := rewrites[i] + if op == nil 
{continue} + if _, ok := m[op.GetIndex()]; ok{ + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + + +/* + Quick fixing Go lack of overloads + */ + +func max(a,b int)int{ + if a>b{ + return a + }else { + return b + } +} +func min(a,b int)int{ + if aaa", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder", + func(r *TokenStreamRewriter){ + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1,"") + }), + NewLexerTest("aa", "

a

a", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", + func(r *TokenStreamRewriter){ + r.InsertBeforeDefault(0, "

") + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "

") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1,"") + }), + NewLexerTest("ab", "

a

!b", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", + func(r *TokenStreamRewriter){ + r.InsertBeforeDefault(0, "

") + r.InsertBeforeDefault(0, "") + r.InsertBeforeDefault(0, "

") + r.InsertAfterDefault(0, "

") + r.InsertAfterDefault(0, "
") + r.InsertAfterDefault(0, "
") + r.InsertBeforeDefault(1, "!") + }), + } + + + for _,c := range tests{ + t.Run(c.description,func(t *testing.T) { + rewriter := prepare_rewriter(c.input) + c.ops(rewriter) + if len(c.expected_exception)>0{ + panic_tester(t, c.expected_exception, rewriter) + }else{ + result := rewriter.GetTextDefault() + if result!=c.expected{ + t.Errorf("Expected:%s | Result: %s", c.expected, result) + } + } + } ) + } +} + + +// Suppress unused import error +var _ = fmt.Printf +var _ = unicode.IsLetter + +var serializedLexerAtn = []uint16{ + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 5, 15, 8, + 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, + 4, 2, 2, 5, 3, 3, 5, 4, 7, 5, 3, 2, 2, 2, 14, 2, 3, 3, 2, 2, 2, 2, 5, 3, + 2, 2, 2, 2, 7, 3, 2, 2, 2, 3, 9, 3, 2, 2, 2, 5, 11, 3, 2, 2, 2, 7, 13, + 3, 2, 2, 2, 9, 10, 7, 99, 2, 2, 10, 4, 3, 2, 2, 2, 11, 12, 7, 100, 2, 2, + 12, 6, 3, 2, 2, 2, 13, 14, 7, 101, 2, 2, 14, 8, 3, 2, 2, 2, 3, 2, 2, +} + +var lexerDeserializer = NewATNDeserializer(nil) +var lexerAtn = lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) + +var lexerChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", +} + +var lexerModeNames = []string{ + "DEFAULT_MODE", +} + +var lexerLiteralNames = []string{ + "", "'a'", "'b'", "'c'", +} + +var lexerSymbolicNames = []string{ + "", "A", "B", "C", +} + +var lexerRuleNames = []string{ + "A", "B", "C", +} + +type LexerA struct { + *BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var lexerDecisionToDFA = make([]*DFA, len(lexerAtn.DecisionToState)) + +func init() { + for index, ds := range lexerAtn.DecisionToState { + lexerDecisionToDFA[index] = NewDFA(ds, index) + } +} + +func NewLexerA(input CharStream) *LexerA { + + l := new(LexerA) + + l.BaseLexer = NewBaseLexer(input) + l.Interpreter = NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, NewPredictionContextCache()) + + l.channelNames = lexerChannelNames + l.modeNames = lexerModeNames 
+ l.RuleNames = lexerRuleNames + l.LiteralNames = lexerLiteralNames + l.SymbolicNames = lexerSymbolicNames + l.GrammarFileName = "LexerA.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// LexerA tokens. +const ( + LexerAA = 1 + LexerAB = 2 + LexerAC = 3 +) + From b36044108a6f106ffeabfd05d3d682d8c174e48a Mon Sep 17 00:00:00 2001 From: WAKAYAMA Shirou Date: Mon, 10 Jul 2017 09:51:07 +0900 Subject: [PATCH 010/102] [Go] Expose Start and Stop of Interval. --- runtime/Go/antlr/common_token_stream.go | 4 +- runtime/Go/antlr/input_stream.go | 2 +- runtime/Go/antlr/interval_set.go | 104 +++++++++++------------- 3 files changed, 51 insertions(+), 59 deletions(-) diff --git a/runtime/Go/antlr/common_token_stream.go b/runtime/Go/antlr/common_token_stream.go index 0121fe8e4..3154e00ac 100644 --- a/runtime/Go/antlr/common_token_stream.go +++ b/runtime/Go/antlr/common_token_stream.go @@ -337,8 +337,8 @@ func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { interval = NewInterval(0, len(c.tokens)-1) } - start := interval.start - stop := interval.stop + start := interval.Start + stop := interval.Stop if start < 0 || stop < 0 { return "" diff --git a/runtime/Go/antlr/input_stream.go b/runtime/Go/antlr/input_stream.go index da9d2f7f4..5ff270f53 100644 --- a/runtime/Go/antlr/input_stream.go +++ b/runtime/Go/antlr/input_stream.go @@ -101,7 +101,7 @@ func (is *InputStream) GetTextFromTokens(start, stop Token) string { } func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.start, i.stop) + return is.GetText(i.Start, i.Stop) } func (*InputStream) GetSourceName() string { diff --git a/runtime/Go/antlr/interval_set.go b/runtime/Go/antlr/interval_set.go index 939f72ec9..510d90911 100644 --- a/runtime/Go/antlr/interval_set.go +++ b/runtime/Go/antlr/interval_set.go @@ -10,41 +10,33 @@ import ( ) type Interval struct { - start int - stop int + Start int + Stop int } /* stop is not included! 
*/ func NewInterval(start, stop int) *Interval { i := new(Interval) - i.start = start - i.stop = stop + i.Start = start + i.Stop = stop return i } -func (i *Interval) GetStart() int { - return i.start -} - -func (i *Interval) GetStop() int { - return i.stop -} - func (i *Interval) Contains(item int) bool { - return item >= i.start && item < i.stop + return item >= i.Start && item < i.Stop } func (i *Interval) String() string { - if i.start == i.stop-1 { - return strconv.Itoa(i.start) + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) } - return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1) + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) } func (i *Interval) length() int { - return i.stop - i.start + return i.Stop - i.Start } type IntervalSet struct { @@ -67,7 +59,7 @@ func (i *IntervalSet) first() int { return TokenInvalidType } - return i.intervals[0].start + return i.intervals[0].Start } func (i *IntervalSet) addOne(v int) { @@ -86,24 +78,24 @@ func (i *IntervalSet) addInterval(v *Interval) { // find insert pos for k, interval := range i.intervals { // distinct range -> insert - if v.stop < interval.start { + if v.Stop < interval.Start { i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) return - } else if v.stop == interval.start { - i.intervals[k].start = v.start + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start return - } else if v.start <= interval.stop { - i.intervals[k] = NewInterval(intMin(interval.start, v.start), intMax(interval.stop, v.stop)) + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) // if not applying to end, merge potential overlaps if k < len(i.intervals)-1 { l := i.intervals[k] r := i.intervals[k+1] // if r contained in l - if l.stop >= r.stop { + if l.Stop >= r.Stop { i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
- } else if l.stop >= r.start { // partial overlap - i.intervals[k] = NewInterval(l.start, r.stop) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) } } @@ -119,7 +111,7 @@ func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { if other.intervals != nil { for k := 0; k < len(other.intervals); k++ { i2 := other.intervals[k] - i.addInterval(NewInterval(i2.start, i2.stop)) + i.addInterval(NewInterval(i2.Start, i2.Stop)) } } return i @@ -157,29 +149,29 @@ func (i *IntervalSet) length() int { } func (i *IntervalSet) removeRange(v *Interval) { - if v.start == v.stop-1 { - i.removeOne(v.start) + if v.Start == v.Stop-1 { + i.removeOne(v.Start) } else if i.intervals != nil { k := 0 for n := 0; n < len(i.intervals); n++ { ni := i.intervals[k] // intervals are ordered - if v.stop <= ni.start { + if v.Stop <= ni.Start { return - } else if v.start > ni.start && v.stop < ni.stop { - i.intervals[k] = NewInterval(ni.start, v.start) - x := NewInterval(v.stop, ni.stop) + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) return - } else if v.start <= ni.start && v.stop >= ni.stop { + } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) 
k = k - 1 // need another pass - } else if v.start < ni.stop { - i.intervals[k] = NewInterval(ni.start, v.start) - } else if v.stop < ni.stop { - i.intervals[k] = NewInterval(v.stop, ni.stop) + } else if v.Start < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + } else if v.Stop < ni.Stop { + i.intervals[k] = NewInterval(v.Stop, ni.Stop) } k++ } @@ -191,21 +183,21 @@ func (i *IntervalSet) removeOne(v int) { for k := 0; k < len(i.intervals); k++ { ki := i.intervals[k] // intervals i ordered - if v < ki.start { + if v < ki.Start { return - } else if v == ki.start && v == ki.stop-1 { + } else if v == ki.Start && v == ki.Stop-1 { // i.intervals.splice(k, 1) i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) return - } else if v == ki.start { - i.intervals[k] = NewInterval(ki.start+1, ki.stop) + } else if v == ki.Start { + i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) return - } else if v == ki.stop-1 { - i.intervals[k] = NewInterval(ki.start, ki.stop-1) + } else if v == ki.Stop-1 { + i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) return - } else if v < ki.stop-1 { - x := NewInterval(ki.start, v) - ki.start = v + 1 + } else if v < ki.Stop-1 { + x := NewInterval(ki.Start, v) + ki.Start = v + 1 // i.intervals.splice(k, 0, x) i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
return @@ -236,14 +228,14 @@ func (i *IntervalSet) toCharString() string { for j := 0; j < len(i.intervals); j++ { v := i.intervals[j] - if v.stop == v.start+1 { - if v.start == TokenEOF { + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { names = append(names, "") } else { - names = append(names, ("'" + string(v.start) + "'")) + names = append(names, ("'" + string(v.Start) + "'")) } } else { - names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'") + names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'") } } if len(names) > 1 { @@ -258,14 +250,14 @@ func (i *IntervalSet) toIndexString() string { names := make([]string, 0) for j := 0; j < len(i.intervals); j++ { v := i.intervals[j] - if v.stop == v.start+1 { - if v.start == TokenEOF { + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { names = append(names, "") } else { - names = append(names, strconv.Itoa(v.start)) + names = append(names, strconv.Itoa(v.Start)) } } else { - names = append(names, strconv.Itoa(v.start)+".."+strconv.Itoa(v.stop-1)) + names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) } } if len(names) > 1 { @@ -278,7 +270,7 @@ func (i *IntervalSet) toIndexString() string { func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { names := make([]string, 0) for _, v := range i.intervals { - for j := v.start; j < v.stop; j++ { + for j := v.Start; j < v.Stop; j++ { names = append(names, i.elementName(literalNames, symbolicNames, j)) } } From 41889939d6be9e964f20dffbc9cac06536d89af3 Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Tue, 11 Jul 2017 17:48:52 -0700 Subject: [PATCH 011/102] Make the Antlr4MojoTest restore state between tests Otherwise, we could pick up stale changes from prior tests. 
--- .../org/antlr/mojo/antlr4/Antlr4MojoTest.java | 58 ++++++++++--------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java index d90728922..6c700ed0b 100644 --- a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java +++ b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java @@ -222,21 +222,20 @@ public class Antlr4MojoTest { assertTrue(Files.exists(genHello)); assertTrue(Files.exists(genTestParser)); assertTrue(Files.exists(genTestLexer)); + byte[] origTestLexerSum = checksum(genTestLexer); + byte[] origTestParserSum = checksum(genTestParser); + byte[] origHelloSum = checksum(genHello); //////////////////////////////////////////////////////////////////////// // 2nd - nothing has been modified, no grammars have to be processed //////////////////////////////////////////////////////////////////////// { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertTrue(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } //////////////////////////////////////////////////////////////////////// @@ -245,16 +244,17 @@ public class Antlr4MojoTest { // modify the grammar to make checksum comparison detect a change try(Change change = Change.of(baseGrammar, "DOT: '.' 
;")) { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertFalse(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } + // Restore file and confirm it was restored. + maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); //////////////////////////////////////////////////////////////////////// // 4th - the lexer grammar changed, the parser grammar has to be processed as well @@ -262,16 +262,17 @@ public class Antlr4MojoTest { // modify the grammar to make checksum comparison detect a change try(Change change = Change.of(lexerGrammar)) { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertFalse(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } + // Restore file and confirm it was restored. 
+ maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); //////////////////////////////////////////////////////////////////////// // 5th - the parser grammar changed, no other grammars have to be processed @@ -279,16 +280,17 @@ public class Antlr4MojoTest { // modify the grammar to make checksum comparison detect a change try(Change change = Change.of(parserGrammar, " t : WS* ;")) { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertFalse(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } + // Restore file and confirm it was restored. + maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } @Test From b09e30c6caf8e44dbddf6c0598480b0c43eb78fb Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Tue, 11 Jul 2017 20:45:28 -0700 Subject: [PATCH 012/102] Make the lexer test possible to pass Adding a newline should actually not change the generated lexer. 
--- .../src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java | 2 +- .../projects/importsStandard/src/main/antlr4/test/TestLexer.g4 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java index 6c700ed0b..f481f32ac 100644 --- a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java +++ b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java @@ -261,7 +261,7 @@ public class Antlr4MojoTest { //////////////////////////////////////////////////////////////////////// // modify the grammar to make checksum comparison detect a change - try(Change change = Change.of(lexerGrammar)) { + try(Change change = Change.of(lexerGrammar, "FOO: 'foo' ;")) { maven.executeMojo(session, project, exec); assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 index 668b76496..c69c925ad 100644 --- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 @@ -3,4 +3,4 @@ lexer grammar TestLexer; import TestBaseLexer; WS : Whitespace+ -> skip; -TEXT : ~[<&]+ ; // match any 16 bit char other than < and & \ No newline at end of file +TEXT : ~[<&]+ ; // match any 16 bit char other than < and & From a9dfca3666bdcd74476ca5aa6f3baa1e396ce6ce Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Tue, 11 Jul 2017 20:58:24 -0700 Subject: [PATCH 013/102] isDependencyChanged: consider equal timestamps as out of date Some systems have low-granularity timestamps, so that file modification dates are rounded to seconds. 
This causes false negatives when detecting if a grammar needs to be recompiled if it changes a second after producing its tokens. This likely only causes an issue for tests that frequently mutate files; real humans are unlikely to compile within 1s of changing a grammar. Still, this seems a cleaner solution that hacking the failing test to use force a different modification time, as there will almost never be false positives. This fixes the failing test after making the test correct. --- .../src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java index dcdc0a29d..0a81f51fb 100644 --- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java +++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java @@ -395,7 +395,7 @@ public class Antlr4Mojo extends AbstractMojo { String tokensFileName = grammarFile.getName().split("\\.")[0] + ".tokens"; File outputFile = new File(outputDirectory, tokensFileName); if ( (! outputFile.exists()) || - outputFile.lastModified() < grammarFile.lastModified() || + outputFile.lastModified() <= grammarFile.lastModified() || dependencies.isDependencyChanged(grammarFile)) { grammarFilesToProcess.add(grammarFile); } From d58e7e31bfcefe41840bc3b86c8eaf06eb9f19eb Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Tue, 11 Jul 2017 15:03:11 -0700 Subject: [PATCH 014/102] GrammarDependencies: include all imports For some reason, the grammar import dependency analysis only included the first import on a line. This does not work so well... 
--- .../org/antlr/mojo/antlr4/GrammarDependencies.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java index 2e9e2472c..d21d1ab7f 100644 --- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java +++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java @@ -216,14 +216,14 @@ class GrammarDependencies { return; for (GrammarAST importDecl : grammar.getAllChildrenWithType(ANTLRParser.IMPORT)) { - Tree id = importDecl.getFirstChildWithType(ANTLRParser.ID); + for (Tree id: importDecl.getAllChildrenWithType(ANTLRParser.ID)) { + // missing id is not valid, but we don't want to prevent the root cause from + // being reported by the ANTLR tool + if (id != null) { + String grammarPath = getRelativePath(grammarFile); - // missing id is not valid, but we don't want to prevent the root cause from - // being reported by the ANTLR tool - if (id != null) { - String grammarPath = getRelativePath(grammarFile); - - graph.addEdge(id.getText() + ".g4", grammarPath); + graph.addEdge(id.getText() + ".g4", grammarPath); + } } } From 7a882f7d5b8b6b6ccb124d68395986104c969a09 Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Tue, 11 Jul 2017 20:43:06 -0700 Subject: [PATCH 015/102] Add test for multiple imports --- .../org/antlr/mojo/antlr4/Antlr4MojoTest.java | 23 +++++++++++++++++-- .../src/main/antlr4/imports/TestBaseLexer.g4 | 5 +--- .../src/main/antlr4/imports/TestBaseLexer2.g4 | 4 ++++ .../src/main/antlr4/test/TestLexer.g4 | 2 +- 4 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 diff --git a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java 
b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java index f481f32ac..da38c582a 100644 --- a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java +++ b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java @@ -202,6 +202,7 @@ public class Antlr4MojoTest { Path genHello = generatedSources.resolve("test/HelloParser.java"); Path baseGrammar = antlrDir.resolve("imports/TestBaseLexer.g4"); + Path baseGrammar2 = antlrDir.resolve("imports/TestBaseLexer2.g4"); Path lexerGrammar = antlrDir.resolve("test/TestLexer.g4"); Path parserGrammar = antlrDir.resolve("test/TestParser.g4"); @@ -257,7 +258,25 @@ public class Antlr4MojoTest { assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); //////////////////////////////////////////////////////////////////////// - // 4th - the lexer grammar changed, the parser grammar has to be processed as well + // 4th - the second imported grammar changed, every dependency has to be processed + //////////////////////////////////////////////////////////////////////// + + // modify the grammar to make checksum comparison detect a change + try(Change change = Change.of(baseGrammar2, "BANG: '!' ;")) { + maven.executeMojo(session, project, exec); + + assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); + } + // Restore file and confirm it was restored. 
+ maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); + + //////////////////////////////////////////////////////////////////////// + // 5th - the lexer grammar changed, the parser grammar has to be processed as well //////////////////////////////////////////////////////////////////////// // modify the grammar to make checksum comparison detect a change @@ -275,7 +294,7 @@ public class Antlr4MojoTest { assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); //////////////////////////////////////////////////////////////////////// - // 5th - the parser grammar changed, no other grammars have to be processed + // 6th - the parser grammar changed, no other grammars have to be processed //////////////////////////////////////////////////////////////////////// // modify the grammar to make checksum comparison detect a change diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 index 5fcc6d353..6c3164de3 100644 --- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 @@ -10,7 +10,4 @@ fragment Whitespace : ' ' | '\n' | '\t' | '\r' ; fragment -Hexdigit : [a-fA-F0-9] ; - -fragment -Digit : [0-9] ; +Hexdigit : [a-fA-F0-9] ; \ No newline at end of file diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 new file mode 100644 index 000000000..18aa0c4f3 --- /dev/null +++ 
b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 @@ -0,0 +1,4 @@ +lexer grammar TestBaseLexer2; + +fragment +Digit : [0-9] ; diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 index c69c925ad..b9c07b3df 100644 --- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 @@ -1,6 +1,6 @@ lexer grammar TestLexer; -import TestBaseLexer; +import TestBaseLexer, TestBaseLexer2; WS : Whitespace+ -> skip; TEXT : ~[<&]+ ; // match any 16 bit char other than < and & From 9cb52345399eaaefa7035624d1de468706f7cdd8 Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Tue, 11 Jul 2017 21:01:08 -0700 Subject: [PATCH 016/102] Sign the contributors.txt --- contributors.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contributors.txt b/contributors.txt index 63d6d57f5..4349b9556 100644 --- a/contributors.txt +++ b/contributors.txt @@ -150,4 +150,5 @@ YYYY/MM/DD, github id, Full name, email 2017/05/29, kosak, Corey Kosak, kosak@kosak.com 2017/06/11, erikbra, Erik A. 
Brandstadmoen, erik@brandstadmoen.net 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com -2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com \ No newline at end of file +2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in From 093009520751d5f4982bb050645703060a678f1e Mon Sep 17 00:00:00 2001 From: Eric Vergnaud Date: Sat, 15 Jul 2017 12:19:54 +0800 Subject: [PATCH 017/102] fix inconsistent naming in target stg --- .../v4/tool/templates/codegen/JavaScript/JavaScript.stg | 8 ++++---- .../antlr/v4/tool/templates/codegen/Python2/Python2.stg | 2 +- .../antlr/v4/tool/templates/codegen/Python3/Python3.stg | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg index fb5883c02..4d04b9f76 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg @@ -36,12 +36,12 @@ * REQUIRED. */ -pythonTypeInitMap ::= [ - "bool":"False", +javascriptTypeInitMap ::= [ + "bool":"false", "int":"0", "float":"0.0", "str":"", - default:"None" // anything other than a primitive type is an object + default:"{}" // anything other than a primitive type is an object ] // args must be , @@ -860,7 +860,7 @@ var serializedATN = [" "}>"].join(""); * must be an object, default value is "null". 
*/ initValue(typeName) ::= << - + >> codeFileExtension() ::= ".js" diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg index b01a76fc4..570f1659f 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg @@ -809,7 +809,7 @@ def serializedATN(): * must be an object, default value is "null". */ initValue(typeName) ::= << - + >> codeFileExtension() ::= ".py" diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg index 081e3f3f1..34e525b85 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg @@ -816,7 +816,7 @@ def serializedATN(): * must be an object, default value is "null". */ initValue(typeName) ::= << - + >> codeFileExtension() ::= ".py" From 8b3da13259024bf4fd9913ebcc7c950339687779 Mon Sep 17 00:00:00 2001 From: vaibhavaingankar09 Date: Mon, 17 Jul 2017 06:45:29 +0000 Subject: [PATCH 018/102] fix to the ATN deserialiser issue on big endian architecture --- contributors.txt | 3 ++- .../runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/contributors.txt b/contributors.txt index 63d6d57f5..ba29c99e4 100644 --- a/contributors.txt +++ b/contributors.txt @@ -150,4 +150,5 @@ YYYY/MM/DD, github id, Full name, email 2017/05/29, kosak, Corey Kosak, kosak@kosak.com 2017/06/11, erikbra, Erik A. 
Brandstadmoen, erik@brandstadmoen.net 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com -2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com \ No newline at end of file +2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs index 9009b9f43..3ce2e87d2 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs @@ -1092,7 +1092,10 @@ nextTransition_continue: ; protected internal Guid ReadUUID() { byte[] d = BitConverter.GetBytes (ReadLong ()); - Array.Reverse(d); + if(BitConverter.IsLittleEndian) + { + Array.Reverse(d); + } short c = (short)ReadInt(); short b = (short)ReadInt(); int a = ReadInt32(); From 2bce3a74d4c4c1fcc33611c63dd6afe195dc4f25 Mon Sep 17 00:00:00 2001 From: Corey Kosak Date: Mon, 17 Jul 2017 17:54:17 -0400 Subject: [PATCH 019/102] Remove the useless visibility attribute from this "enum class" declaration. When compiling under gcc, ANTLR4CPP_PUBLIC macro expands to the following gcc visibility attribute: __attribute__((visibility ("default"))) (when compiling under Windows it expands to the corresponding __declspec attribute) This change was introduced in commit 8ff852640a7ec62d46330f71e274c591ada1fc70 Although the attribute makes perfect sense when applied to a "class" declaration, it makes no sense (has no effect) when applied to an "enum class" declaration. I assume that doing so was unintentional; that when the change was introduced it was it was added mechanically to all "class XXX" instances in the source code, a process which accidentally picked up one "enum class XXX" instance. 
Although it has no effect on the object code, it leads to the following warning when compiling under gcc: /usr/local/include/antlr4-runtime/atn/PredictionMode.h:18:31: error: type attributes ignored after type is already defined [-Werror=attributes] enum class ANTLR4CPP_PUBLIC PredictionMode { This is a problem for people who would like their builds to be warning-free. Happily, this declaration can be safely removed. The "enum class" construct (just like with regular enum) does not cause any linker symbols to be emitted. So having a linker attribute on the type does not actually have any effect. It can therefore be safely removed. --- runtime/Cpp/runtime/src/atn/PredictionMode.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/Cpp/runtime/src/atn/PredictionMode.h b/runtime/Cpp/runtime/src/atn/PredictionMode.h index d3de2e952..726f4cf40 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionMode.h +++ b/runtime/Cpp/runtime/src/atn/PredictionMode.h @@ -15,7 +15,7 @@ namespace atn { * utility methods for analyzing configuration sets for conflicts and/or * ambiguities. */ - enum class ANTLR4CPP_PUBLIC PredictionMode { + enum class PredictionMode { /** * The SLL(*) prediction mode. This prediction mode ignores the current * parser context when making predictions. This is the fastest prediction From c8805ab584908e214d05d55f0f28b760d1ec17cf Mon Sep 17 00:00:00 2001 From: Sam Harwell Date: Tue, 18 Jul 2017 07:27:36 -0500 Subject: [PATCH 020/102] Avoid adding to closureBusy before all ATNConfig properties are set Setting ATNConfig properties can change the hash code of the instance, leading to cases where the closureBusy set places objects in the wrong buckets. While this has not led to known cases of stack overflow, it has led to cases where one or more buckets contains a large number of duplicate objects, and the set's add operation goes from O(1) to O(n). 
--- .../v4/runtime/atn/ParserATNSimulator.java | 31 ++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java index 46840ab7c..d415cdfb2 100755 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java @@ -1541,11 +1541,6 @@ public class ParserATNSimulator extends ATNSimulator { ATNConfig c = getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon); if ( c!=null ) { - if (!t.isEpsilon() && !closureBusy.add(c)) { - // avoid infinite recursion for EOF* and EOF+ - continue; - } - int newDepth = depth; if ( config.state instanceof RuleStopState) { assert !fullCtx; @@ -1555,11 +1550,6 @@ public class ParserATNSimulator extends ATNSimulator { // come in handy and we avoid evaluating context dependent // preds if this is > 0. - if (!closureBusy.add(c)) { - // avoid infinite recursion for right-recursive rules - continue; - } - if (_dfa != null && _dfa.isPrecedenceDfa()) { int outermostPrecedenceReturn = ((EpsilonTransition)t).outermostPrecedenceReturn(); if (outermostPrecedenceReturn == _dfa.atnStartState.ruleIndex) { @@ -1568,15 +1558,28 @@ public class ParserATNSimulator extends ATNSimulator { } c.reachesIntoOuterContext++; + + if (!closureBusy.add(c)) { + // avoid infinite recursion for right-recursive rules + continue; + } + configs.dipsIntoOuterContext = true; // TODO: can remove? 
only care when we add to set per middle of this method assert newDepth > Integer.MIN_VALUE; newDepth--; if ( debug ) System.out.println("dips into outer ctx: "+c); } - else if (t instanceof RuleTransition) { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if (newDepth >= 0) { - newDepth++; + else { + if (!t.isEpsilon() && !closureBusy.add(c)) { + // avoid infinite recursion for EOF* and EOF+ + continue; + } + + if (t instanceof RuleTransition) { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if (newDepth >= 0) { + newDepth++; + } } } From 6e02088e6abb687f4752b3d0bfcf9fe56e8b3992 Mon Sep 17 00:00:00 2001 From: Renata Hodovan Date: Tue, 18 Jul 2017 18:29:36 +0200 Subject: [PATCH 021/102] Python: add public getter to parsers to access syntax error count. ANTLR parsers in Java are allowed to access the number of encountered syntax errors via the getNumberOfSyntaxErrors method. However, the Python variants must use the protected _syntaxErrors member to get this value. The patch defines the same getter for Python targets too. --- runtime/Python2/src/antlr4/Parser.py | 7 +++++++ runtime/Python3/src/antlr4/Parser.py | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/runtime/Python2/src/antlr4/Parser.py b/runtime/Python2/src/antlr4/Parser.py index d88f77918..69abe739b 100644 --- a/runtime/Python2/src/antlr4/Parser.py +++ b/runtime/Python2/src/antlr4/Parser.py @@ -218,6 +218,13 @@ class Parser (Recognizer): self._ctx.exitRule(listener) listener.exitEveryRule(self._ctx) + # Gets the number of syntax errors reported during parsing. This value is + # incremented each time {@link #notifyErrorListeners} is called. 
+ # + # @see #notifyErrorListeners + # + def getNumberOfSyntaxErrors(self): + return self._syntaxErrors def getTokenFactory(self): return self._input.tokenSource._factory diff --git a/runtime/Python3/src/antlr4/Parser.py b/runtime/Python3/src/antlr4/Parser.py index 03f10a438..c461bbdc0 100644 --- a/runtime/Python3/src/antlr4/Parser.py +++ b/runtime/Python3/src/antlr4/Parser.py @@ -227,6 +227,14 @@ class Parser (Recognizer): listener.exitEveryRule(self._ctx) + # Gets the number of syntax errors reported during parsing. This value is + # incremented each time {@link #notifyErrorListeners} is called. + # + # @see #notifyErrorListeners + # + def getNumberOfSyntaxErrors(self): + return self._syntaxErrors + def getTokenFactory(self): return self._input.tokenSource._factory From 8149ff77fadec08fe4b8fb800bd87b0968b8c66d Mon Sep 17 00:00:00 2001 From: Mike Lischke Date: Wed, 19 Jul 2017 10:43:36 +0200 Subject: [PATCH 022/102] Consequently use static_cast for (s)size_t casts. Also fixed XCode tests. 
--- .../Mac/antlrcpp Tests/InputHandlingTests.mm | 10 +-- .../demo/Mac/antlrcpp Tests/MiscClassTests.mm | 69 +++++++++++-------- runtime/Cpp/runtime/src/IntStream.h | 2 +- runtime/Cpp/runtime/src/Recognizer.h | 2 +- runtime/Cpp/runtime/src/Token.h | 2 +- .../Cpp/runtime/src/UnbufferedCharStream.cpp | 22 +++--- .../Cpp/runtime/src/UnbufferedTokenStream.cpp | 12 ++-- .../Cpp/runtime/src/atn/ATNDeserializer.cpp | 3 +- runtime/Cpp/runtime/src/atn/ATNSerializer.cpp | 7 +- runtime/Cpp/runtime/src/atn/ATNState.h | 2 +- runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp | 4 +- .../runtime/src/atn/LexerChannelAction.cpp | 2 +- .../Cpp/runtime/src/atn/LexerCustomAction.cpp | 2 +- .../Cpp/runtime/src/atn/LexerModeAction.cpp | 2 +- .../Cpp/runtime/src/atn/LexerMoreAction.cpp | 2 +- .../runtime/src/atn/LexerPopModeAction.cpp | 2 +- .../runtime/src/atn/LexerPushModeAction.cpp | 2 +- .../Cpp/runtime/src/atn/LexerSkipAction.cpp | 2 +- .../Cpp/runtime/src/atn/LexerTypeAction.cpp | 2 +- .../Cpp/runtime/src/atn/PredictionContext.h | 4 +- .../Cpp/runtime/src/atn/SemanticContext.cpp | 2 +- runtime/Cpp/runtime/src/misc/Interval.cpp | 10 +-- 22 files changed, 91 insertions(+), 76 deletions(-) diff --git a/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm b/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm index 7b13ae83d..647f73fed 100644 --- a/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm +++ b/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm @@ -91,7 +91,7 @@ using namespace antlr4::misc; - (void)testANTLRInputStreamUse { std::string text(u8"🚧Lorem ipsum dolor sit amet🕶"); - std::u32string wtext = utfConverter.from_bytes(text); // Convert to UTF-32. + std::u32string wtext = utf8_to_utf32(text.c_str(), text.c_str() + text.size()); // Convert to UTF-32. 
ANTLRInputStream stream(text); XCTAssertEqual(stream.index(), 0U); XCTAssertEqual(stream.size(), wtext.size()); @@ -116,8 +116,8 @@ using namespace antlr4::misc; XCTAssertEqual(stream.LA(0), 0ULL); for (size_t i = 1; i < wtext.size(); ++i) { - XCTAssertEqual(stream.LA((ssize_t)i), wtext[i - 1]); // LA(1) means: current char. - XCTAssertEqual(stream.LT((ssize_t)i), wtext[i - 1]); // LT is mapped to LA. + XCTAssertEqual(stream.LA(static_cast(i)), wtext[i - 1]); // LA(1) means: current char. + XCTAssertEqual(stream.LT(static_cast(i)), wtext[i - 1]); // LT is mapped to LA. XCTAssertEqual(stream.index(), 0U); // No consumption when looking ahead. } @@ -128,7 +128,7 @@ using namespace antlr4::misc; XCTAssertEqual(stream.index(), wtext.size() / 2); stream.seek(wtext.size() - 1); - for (ssize_t i = 1; i < (ssize_t)wtext.size() - 1; ++i) { + for (ssize_t i = 1; i < static_cast(wtext.size()) - 1; ++i) { XCTAssertEqual(stream.LA(-i), wtext[wtext.size() - i - 1]); // LA(-1) means: previous char. XCTAssertEqual(stream.LT(-i), wtext[wtext.size() - i - 1]); // LT is mapped to LA. XCTAssertEqual(stream.index(), wtext.size() - 1); // No consumption when looking ahead. @@ -150,7 +150,7 @@ using namespace antlr4::misc; misc::Interval interval1(2, 10UL); // From - to, inclusive. std::string output = stream.getText(interval1); - std::string sub = utfConverter.to_bytes(wtext.substr(2, 9)); + std::string sub = utf32_to_utf8(wtext.substr(2, 9)); XCTAssertEqual(output, sub); misc::Interval interval2(200, 10UL); // Start beyond bounds. diff --git a/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm b/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm index 063616a1d..58cac4be4 100644 --- a/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm +++ b/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm @@ -92,7 +92,7 @@ using namespace antlrcpp; // in a deterministic and a random sequence of 100K values each. 
std::set hashs; for (size_t i = 0; i < 100000; ++i) { - std::vector data = { i, (size_t)(i * M_PI), arc4random()}; + std::vector data = { i, static_cast(i * M_PI), arc4random() }; size_t hash = 0; for (auto value : data) hash = MurmurHash::update(hash, value); @@ -103,7 +103,7 @@ using namespace antlrcpp; hashs.clear(); for (size_t i = 0; i < 100000; ++i) { - std::vector data = { i, (size_t)(i * M_PI)}; + std::vector data = { i, static_cast(i * M_PI) }; size_t hash = 0; for (auto value : data) hash = MurmurHash::update(hash, value); @@ -232,19 +232,25 @@ using namespace antlrcpp; { 78, Interval(1000, 1000UL), Interval(20, 100UL), { false, false, true, true, false, true, false, false } }, // It's possible to add more tests with borders that touch each other (e.g. first starts before/on/after second - // and first ends directly before/after second. However, such cases are not handled differently in the Interval class + // and first ends directly before/after second. However, such cases are not handled differently in the Interval + // class // (only adjacent intervals, where first ends directly before second starts and vice versa. So I ommitted them here. 
}; for (auto &entry : testData) { - XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu", entry.runningNumber); + XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu", + entry.runningNumber); + XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu", + entry.runningNumber); XCTAssert(entry.interval1.startsAfter(entry.interval2) == entry.results[2], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu", entry.runningNumber); + XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu", + entry.runningNumber); + XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu", + entry.runningNumber); XCTAssert(entry.interval1.disjoint(entry.interval2) == entry.results[5], @"entry: %zu", entry.runningNumber); XCTAssert(entry.interval1.adjacent(entry.interval2) == entry.results[6], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu", entry.runningNumber); + XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu", + entry.runningNumber); } XCTAssert(Interval().Union(Interval(10, 100UL)) == Interval(-1L, 100)); @@ -327,30 +333,34 @@ using namespace antlrcpp; try { set4.clear(); XCTFail(@"Expected exception"); - } - catch (IllegalStateException &e) { + } catch (IllegalStateException &e) { } try { set4.setReadOnly(false); XCTFail(@"Expected exception"); + } catch (IllegalStateException &e) { } - catch 
(IllegalStateException &e) { - } - - set4 = IntervalSet::of(12345); - XCTAssertEqual(set4.getSingleElement(), 12345); - XCTAssertEqual(set4.getMinElement(), 12345); - XCTAssertEqual(set4.getMaxElement(), 12345); - IntervalSet set5(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50); - XCTAssertEqual(set5.getMinElement(), 5); - XCTAssertEqual(set5.getMaxElement(), 50); - XCTAssertEqual(set5.size(), 10U); - set5.add(12, 18); - XCTAssertEqual(set5.size(), 16U); // (15, 15) replaced by (12, 18) - set5.add(9, 33); - XCTAssertEqual(set5.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33) + try { + set4 = IntervalSet::of(12345); + XCTFail(@"Expected exception"); + } catch (IllegalStateException &e) { + } + + IntervalSet set5 = IntervalSet::of(12345); + XCTAssertEqual(set5.getSingleElement(), 12345); + XCTAssertEqual(set5.getMinElement(), 12345); + XCTAssertEqual(set5.getMaxElement(), 12345); + + IntervalSet set6(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50); + XCTAssertEqual(set6.getMinElement(), 5); + XCTAssertEqual(set6.getMaxElement(), 50); + XCTAssertEqual(set6.size(), 10U); + set6.add(12, 18); + XCTAssertEqual(set6.size(), 16U); // (15, 15) replaced by (12, 18) + set6.add(9, 33); + XCTAssertEqual(set6.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33) XCTAssert(IntervalSet(3, 1, 2, 10).Or(IntervalSet(3, 1, 2, 5)) == IntervalSet(4, 1, 2, 5, 10)); XCTAssert(IntervalSet({ Interval(2, 10UL) }).Or(IntervalSet({ Interval(5, 8UL) })) == IntervalSet({ Interval(2, 10UL) })); @@ -358,8 +368,10 @@ using namespace antlrcpp; XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(7, 55)) == IntervalSet::of(11, 55)); XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(20, 55)) == IntervalSet::of(20, 55)); XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(5, 6)) == IntervalSet::EMPTY_SET); - XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(7, 14UL), 
Interval(21, 55UL) })); - XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) })); + XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) == + IntervalSet({ Interval(7, 14UL), Interval(21, 55UL) })); + XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) == + IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) })); XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(7, 55)) == IntervalSet::of(7, 10)); XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(20, 55)) == IntervalSet::EMPTY_SET); @@ -368,7 +380,8 @@ using namespace antlrcpp; XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(7, 55)) == IntervalSet::of(1, 6)); XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(20, 55)) == IntervalSet::of(1, 10)); - XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) == IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) })); + XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) == + IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) })); XCTAssert(IntervalSet::of(15, 20).subtract(IntervalSet::of(7, 55)) == IntervalSet::EMPTY_SET); } diff --git a/runtime/Cpp/runtime/src/IntStream.h b/runtime/Cpp/runtime/src/IntStream.h index 96d23d56f..9932a9722 100755 --- a/runtime/Cpp/runtime/src/IntStream.h +++ b/runtime/Cpp/runtime/src/IntStream.h @@ -27,7 +27,7 @@ namespace antlr4 { ///
class ANTLR4CPP_PUBLIC IntStream { public: - static const size_t EOF = (size_t)-1; // std::numeric_limits::max(); doesn't work in VS 2013 + static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013 /// The value returned by when the end of the stream is /// reached. diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h index adca6c1d6..8c0bcb0ba 100755 --- a/runtime/Cpp/runtime/src/Recognizer.h +++ b/runtime/Cpp/runtime/src/Recognizer.h @@ -11,7 +11,7 @@ namespace antlr4 { class ANTLR4CPP_PUBLIC Recognizer { public: - static const size_t EOF = (size_t)-1; // std::numeric_limits::max(); doesn't work in VS 2013. + static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013. Recognizer(); Recognizer(Recognizer const&) = delete; diff --git a/runtime/Cpp/runtime/src/Token.h b/runtime/Cpp/runtime/src/Token.h index 4e194224d..a7c1594ff 100755 --- a/runtime/Cpp/runtime/src/Token.h +++ b/runtime/Cpp/runtime/src/Token.h @@ -18,7 +18,7 @@ namespace antlr4 { /// During lookahead operations, this "token" signifies we hit rule end ATN state /// and did not follow it despite needing to. - static const size_t EPSILON = (size_t)-2; + static const size_t EPSILON = static_cast(-2); static const size_t MIN_USER_TOKEN_TYPE = 1; static const size_t EOF = IntStream::EOF; diff --git a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp index 53e905c2f..1f18d3843 100755 --- a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp @@ -89,23 +89,23 @@ size_t UnbufferedCharStream::LA(ssize_t i) { } // We can look back only as many chars as we have buffered. - ssize_t index = (ssize_t)_p + i - 1; + ssize_t index = static_cast(_p) + i - 1; if (index < 0) { throw IndexOutOfBoundsException(); } if (i > 0) { - sync((size_t)i); // No need to sync if we look back. 
+ sync(static_cast(i)); // No need to sync if we look back. } - if ((size_t)index >= _data.size()) { + if (static_cast(index) >= _data.size()) { return EOF; } - if (_data[(size_t)index] == 0xFFFF) { + if (_data[static_cast(index)] == 0xFFFF) { return EOF; } - return _data[(size_t)index]; + return _data[static_cast(index)]; } ssize_t UnbufferedCharStream::mark() { @@ -113,13 +113,13 @@ ssize_t UnbufferedCharStream::mark() { _lastCharBufferStart = _lastChar; } - ssize_t mark = -(ssize_t)_numMarkers - 1; + ssize_t mark = -static_cast(_numMarkers) - 1; _numMarkers++; return mark; } void UnbufferedCharStream::release(ssize_t marker) { - ssize_t expectedMark = -(ssize_t)_numMarkers; + ssize_t expectedMark = -static_cast(_numMarkers); if (marker != expectedMark) { throw IllegalStateException("release() called with an invalid marker."); } @@ -147,16 +147,16 @@ void UnbufferedCharStream::seek(size_t index) { } // index == to bufferStartIndex should set p to 0 - ssize_t i = (ssize_t)index - (ssize_t)getBufferStartIndex(); + ssize_t i = static_cast(index) - static_cast(getBufferStartIndex()); if (i < 0) { throw IllegalArgumentException(std::string("cannot seek to negative index ") + std::to_string(index)); - } else if (i >= (ssize_t)_data.size()) { + } else if (i >= static_cast(_data.size())) { throw UnsupportedOperationException("Seek to index outside buffer: " + std::to_string(index) + " not in " + std::to_string(getBufferStartIndex()) + ".." 
+ std::to_string(getBufferStartIndex() + _data.size())); } - _p = (size_t)i; + _p = static_cast(i); _currentCharIndex = index; if (_p == 0) { _lastChar = _lastCharBufferStart; @@ -189,7 +189,7 @@ std::string UnbufferedCharStream::getText(const misc::Interval &interval) { } } - if (interval.a < (ssize_t)bufferStartIndex || interval.b >= ssize_t(bufferStartIndex + _data.size())) { + if (interval.a < static_cast(bufferStartIndex) || interval.b >= ssize_t(bufferStartIndex + _data.size())) { throw UnsupportedOperationException("interval " + interval.toString() + " outside buffer: " + std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStartIndex + _data.size() - 1)); } diff --git a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp index fb9a59f35..98e952a0a 100755 --- a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp @@ -46,17 +46,17 @@ Token* UnbufferedTokenStream::LT(ssize_t i) } sync(i); - ssize_t index = (ssize_t)_p + i - 1; + ssize_t index = static_cast(_p) + i - 1; if (index < 0) { throw IndexOutOfBoundsException(std::string("LT(") + std::to_string(i) + std::string(") gives negative index")); } - if (index >= (ssize_t)_tokens.size()) { + if (index >= static_cast(_tokens.size())) { assert(_tokens.size() > 0 && _tokens.back()->getType() == EOF); return _tokens.back().get(); } - return _tokens[(size_t)index].get(); + return _tokens[static_cast(index)].get(); } size_t UnbufferedTokenStream::LA(ssize_t i) @@ -113,9 +113,9 @@ void UnbufferedTokenStream::consume() ///
void UnbufferedTokenStream::sync(ssize_t want) { - ssize_t need = ((ssize_t)_p + want - 1) - (ssize_t)_tokens.size() + 1; // how many more elements we need? + ssize_t need = (static_cast(_p) + want - 1) - static_cast(_tokens.size()) + 1; // how many more elements we need? if (need > 0) { - fill((size_t)need); + fill(static_cast(need)); } } @@ -177,7 +177,7 @@ void UnbufferedTokenStream::release(ssize_t marker) if (_p > 0) { // Copy tokens[p]..tokens[n-1] to tokens[0]..tokens[(n-1)-p], reset ptrs // p is last valid token; move nothing if p==n as we have no valid char - _tokens.erase(_tokens.begin(), _tokens.begin() + (ssize_t)_p); + _tokens.erase(_tokens.begin(), _tokens.begin() + static_cast(_p)); _p = 0; } diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp index ea2e79266..c6cceda13 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp @@ -752,6 +752,7 @@ Ref ATNDeserializer::lexerActionFactory(LexerActionType type, int d return std::make_shared(data1); default: - throw IllegalArgumentException("The specified lexer action type " + std::to_string((size_t)type) + " is not valid."); + throw IllegalArgumentException("The specified lexer action type " + std::to_string(static_cast(type)) + + " is not valid."); } } diff --git a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp index 6eec3ed7f..206c74281 100755 --- a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp @@ -58,7 +58,7 @@ std::vector ATNSerializer::serialize() { serializeUUID(data, ATNDeserializer::SERIALIZED_UUID()); // convert grammar type to ATN const to avoid dependence on ANTLRParser - data.push_back((size_t)atn->grammarType); + data.push_back(static_cast(atn->grammarType)); data.push_back(atn->maxTokenType); size_t nedges = 0; @@ -288,7 +288,7 @@ std::vector ATNSerializer::serialize() { if 
(atn->grammarType == ATNType::LEXER) { data.push_back(atn->lexerActions.size()); for (Ref &action : atn->lexerActions) { - data.push_back((size_t)action->getActionType()); + data.push_back(static_cast(action->getActionType())); switch (action->getActionType()) { case LexerActionType::CHANNEL: { @@ -348,7 +348,8 @@ std::vector ATNSerializer::serialize() { default: throw IllegalArgumentException("The specified lexer action type " + - std::to_string((size_t)action->getActionType()) + " is not valid."); + std::to_string(static_cast(action->getActionType())) + + " is not valid."); } } } diff --git a/runtime/Cpp/runtime/src/atn/ATNState.h b/runtime/Cpp/runtime/src/atn/ATNState.h index 4b4f72571..96e8fedb7 100755 --- a/runtime/Cpp/runtime/src/atn/ATNState.h +++ b/runtime/Cpp/runtime/src/atn/ATNState.h @@ -77,7 +77,7 @@ namespace atn { virtual ~ATNState(); static const size_t INITIAL_NUM_TRANSITIONS = 4; - static const size_t INVALID_STATE_NUMBER = (size_t)-1; // std::numeric_limits::max(); + static const size_t INVALID_STATE_NUMBER = static_cast(-1); // std::numeric_limits::max(); enum { ATN_INVALID_TYPE = 0, diff --git a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp index 6f39129e5..d7949cd1e 100755 --- a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp +++ b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp @@ -144,12 +144,12 @@ void LL1Analyzer::_LOOK(ATNState *s, ATNState *stopState, Ref } else if (t->isEpsilon()) { _LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); } else if (t->getSerializationType() == Transition::WILDCARD) { - look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType)); + look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); } else { misc::IntervalSet set = t->label(); if (!set.isEmpty()) { if (is(t)) { - set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType)); + 
set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); } look.addAll(set); } diff --git a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp index dac78fe0c..959beab3d 100755 --- a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp @@ -32,7 +32,7 @@ void LexerChannelAction::execute(Lexer *lexer) { size_t LexerChannelAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _channel); return MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp index 00df7df76..1e977a310 100755 --- a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp @@ -38,7 +38,7 @@ void LexerCustomAction::execute(Lexer *lexer) { size_t LexerCustomAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _ruleIndex); hash = MurmurHash::update(hash, _actionIndex); return MurmurHash::finish(hash, 3); diff --git a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp index bfd6ea9b3..0bda8b7af 100755 --- a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp @@ -33,7 +33,7 @@ void LexerModeAction::execute(Lexer *lexer) { size_t LexerModeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _mode); return 
MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp index e7b01e078..99b2dd99b 100755 --- a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp @@ -34,7 +34,7 @@ void LexerMoreAction::execute(Lexer *lexer) { size_t LexerMoreAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } diff --git a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp index 3d584a3d1..cac0996f4 100755 --- a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp @@ -34,7 +34,7 @@ void LexerPopModeAction::execute(Lexer *lexer) { size_t LexerPopModeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } diff --git a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp index 641537a1b..017abed04 100755 --- a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp @@ -33,7 +33,7 @@ void LexerPushModeAction::execute(Lexer *lexer) { size_t LexerPushModeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _mode); return MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp index 28cda7cc3..01947ce78 100755 --- 
a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp @@ -34,7 +34,7 @@ void LexerSkipAction::execute(Lexer *lexer) { size_t LexerSkipAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } diff --git a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp index c1e054b68..006778adc 100755 --- a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp @@ -33,7 +33,7 @@ void LexerTypeAction::execute(Lexer *lexer) { size_t LexerTypeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _type); return MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.h b/runtime/Cpp/runtime/src/atn/PredictionContext.h index 290642154..9a52e00e5 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.h @@ -27,10 +27,10 @@ namespace atn { /// Represents $ in an array in full context mode, when $ /// doesn't mean wildcard: $ + x = [$,x]. Here, /// $ = EMPTY_RETURN_STATE. - // ml: originally Integer.MAX_VALUE, which would be (size_t)-1 for us, but this is already used in places where + // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where // -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't // conflict with real return states. 
- static const size_t EMPTY_RETURN_STATE = (size_t)-10; // std::numeric_limits::max() - 9; + static const size_t EMPTY_RETURN_STATE = static_cast(-10); // std::numeric_limits::max() - 9; private: static const size_t INITIAL_HASH = 1; diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp index fdc272f84..0531e37f8 100755 --- a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp +++ b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp @@ -82,7 +82,7 @@ int SemanticContext::PrecedencePredicate::compareTo(PrecedencePredicate *o) { size_t SemanticContext::PrecedencePredicate::hashCode() const { size_t hashCode = 1; - hashCode = 31 * hashCode + (size_t)precedence; + hashCode = 31 * hashCode + static_cast(precedence); return hashCode; } diff --git a/runtime/Cpp/runtime/src/misc/Interval.cpp b/runtime/Cpp/runtime/src/misc/Interval.cpp index 325b8621f..97486bf3f 100755 --- a/runtime/Cpp/runtime/src/misc/Interval.cpp +++ b/runtime/Cpp/runtime/src/misc/Interval.cpp @@ -10,16 +10,16 @@ using namespace antlr4::misc; Interval::~Interval() = default; size_t antlr4::misc::numericToSymbol(ssize_t v) { - return (size_t)v; + return static_cast(v); } ssize_t antlr4::misc::symbolToNumeric(size_t v) { - return (ssize_t)v; + return static_cast(v); } Interval const Interval::INVALID; -Interval::Interval() : Interval((ssize_t)-1, -2) { // Need an explicit cast here for VS. +Interval::Interval() : Interval(static_cast(-1), -2) { // Need an explicit cast here for VS. 
} Interval::Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) { @@ -41,8 +41,8 @@ bool Interval::operator == (const Interval &other) const { size_t Interval::hashCode() const { size_t hash = 23; - hash = hash * 31 + (size_t)a; - hash = hash * 31 + (size_t)b; + hash = hash * 31 + static_cast(a); + hash = hash * 31 + static_cast(b); return hash; } From 62184c38eb60e8f65ae01c5a9ba02423a64cf5ed Mon Sep 17 00:00:00 2001 From: Daniel Halperin Date: Wed, 19 Jul 2017 18:02:47 -0700 Subject: [PATCH 023/102] Always refresh grammars with changed dependencies The buildContext.hasDelta function is ignorant of imports. Since we have more advanced dependency analysis, stop relying on hasDelta and instead just refresh grammars where we know the dependencies have changed. --- .../src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java index 0a81f51fb..c0926fe6c 100644 --- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java +++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java @@ -412,10 +412,7 @@ public class Antlr4Mojo extends AbstractMojo { // Iterate each grammar file we were given and add it into the tool's list of // grammars to process.
for (File grammarFile : grammarFiles) { - if (!buildContext.hasDelta(grammarFile)) { - continue; - } - + buildContext.refresh(grammarFile); buildContext.removeMessages(grammarFile); getLog().debug("Grammar file '" + grammarFile.getPath() + "' detected."); From e01af374d2d725e236be8ea5c0e12d7d0b1dab10 Mon Sep 17 00:00:00 2001 From: Venkat Peri Date: Sat, 22 Jul 2017 18:11:37 -0400 Subject: [PATCH 024/102] 'requires' SUPERclass for Lexers in split mode, symbols/modes/channels available on Lexer prototype for use in lexer actions (this.SOME_MODE) like in Java actions, Utils.escapeWhiteSpace uses regex for global replace (was replacing only first occurrence of \n etc). --- runtime/JavaScript/src/antlr4/Utils.js | 10 +++++----- .../tool/templates/codegen/JavaScript/JavaScript.stg | 8 ++++++++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/runtime/JavaScript/src/antlr4/Utils.js b/runtime/JavaScript/src/antlr4/Utils.js index d7627be60..2cb939a66 100644 --- a/runtime/JavaScript/src/antlr4/Utils.js +++ b/runtime/JavaScript/src/antlr4/Utils.js @@ -401,11 +401,11 @@ DoubleDict.prototype.set = function (a, b, o) { function escapeWhitespace(s, escapeSpaces) { - s = s.replace("\t", "\\t"); - s = s.replace("\n", "\\n"); - s = s.replace("\r", "\\r"); + s = s.replace(/\t/g, "\\t") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r"); if (escapeSpaces) { - s = s.replace(" ", "\u00B7"); + s = s.replace(/ /g, "\u00B7"); } return s; } @@ -443,4 +443,4 @@ exports.hashStuff = hashStuff; exports.escapeWhitespace = escapeWhitespace; exports.arrayToString = arrayToString; exports.titleCase = titleCase; -exports.equalArrays = equalArrays; \ No newline at end of file +exports.equalArrays = equalArrays; diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg index fb5883c02..6981ec03f 100644 ---
a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg @@ -160,6 +160,7 @@ Object.defineProperty(.prototype, "atn", { }); .EOF = antlr4.Token.EOF; +.prototype.EOF = antlr4.Token.EOF; . = ;}; separator="\n", wrap, anchor> @@ -802,6 +803,9 @@ var antlr4 = require('antlr4/index'); >> Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= << + +var = require('./').; + @@ -819,14 +823,18 @@ function (input) { .prototype.constructor = ; .EOF = antlr4.Token.EOF; +.prototype.EOF = antlr4.Token.EOF; . = ;}; separator="\n", wrap, anchor> +.prototype. = ;}; separator="\n", wrap, anchor> . = ;}; separator="\n"> +.prototype. = ;}; separator="\n"> . = ;}; separator="\n"> +.prototype. = ;}; separator="\n"> .prototype.channelNames = [ "DEFAULT_TOKEN_CHANNEL", "HIDDEN", "}; separator=", ", wrap, anchor> ]; From 61edb0204dff4e35840d8a35e82e937268d02bb5 Mon Sep 17 00:00:00 2001 From: Venkat Peri Date: Sun, 23 Jul 2017 10:14:34 -0400 Subject: [PATCH 025/102] signed contributors.txt --- contributors.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contributors.txt b/contributors.txt index 63d6d57f5..4187ee0cc 100644 --- a/contributors.txt +++ b/contributors.txt @@ -150,4 +150,5 @@ YYYY/MM/DD, github id, Full name, email 2017/05/29, kosak, Corey Kosak, kosak@kosak.com 2017/06/11, erikbra, Erik A. 
Brandstadmoen, erik@brandstadmoen.net 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com -2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com \ No newline at end of file +2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com From caa5e46def0cdd0a48fd0e2bee64bf2bc90b84ec Mon Sep 17 00:00:00 2001 From: Venkat Peri Date: Mon, 24 Jul 2017 15:09:43 -0400 Subject: [PATCH 026/102] removed proto level stuff --- .../antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg index 6981ec03f..3095dcc29 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg @@ -160,7 +160,6 @@ Object.defineProperty(.prototype, "atn", { }); .EOF = antlr4.Token.EOF; -.prototype.EOF = antlr4.Token.EOF; . = ;}; separator="\n", wrap, anchor> @@ -825,16 +824,13 @@ function (input) { .EOF = antlr4.Token.EOF; .prototype.EOF = antlr4.Token.EOF; . = ;}; separator="\n", wrap, anchor> -.prototype. = ;}; separator="\n", wrap, anchor> . = ;}; separator="\n"> -.prototype. = ;}; separator="\n"> . = ;}; separator="\n"> -.prototype. 
= ;}; separator="\n"> .prototype.channelNames = [ "DEFAULT_TOKEN_CHANNEL", "HIDDEN", "}; separator=", ", wrap, anchor> ]; From e123bb8cea642577b4291285aca7c1cf44f2f192 Mon Sep 17 00:00:00 2001 From: Venkat Peri Date: Mon, 24 Jul 2017 15:11:00 -0400 Subject: [PATCH 027/102] removed proto level stuff (one more) --- .../antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg | 1 - 1 file changed, 1 deletion(-) diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg index 3095dcc29..f143bbd38 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg @@ -822,7 +822,6 @@ function (input) { .prototype.constructor = ; .EOF = antlr4.Token.EOF; -.prototype.EOF = antlr4.Token.EOF; . = ;}; separator="\n", wrap, anchor> From 0803c74eb255fe5e6fe5a6f11d879198d305e715 Mon Sep 17 00:00:00 2001 From: Sam Harwell Date: Wed, 26 Jul 2017 07:21:27 -0500 Subject: [PATCH 028/102] Report InputMismatchException with original context information Fixes #1922 --- .../descriptors/ParserErrorsDescriptors.java | 24 ++++++++++++ .../SemPredEvalParserDescriptors.java | 9 ++++- .../v4/runtime/DefaultErrorStrategy.java | 39 ++++++++++++++++++- .../v4/runtime/InputMismatchException.java | 6 +++ 4 files changed, 74 insertions(+), 4 deletions(-) diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java index 0b53e994e..26352d317 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java @@ -618,4 +618,28 @@ public class ParserErrorsDescriptors { public String grammar; } + + public static class 
ExtraneousInput extends BaseParserTestDescriptor { + public String input = "baa"; + public String output = null; + public String errors = "line 1:0 mismatched input 'b' expecting {, 'a'}\n"; + public String startRule = "file"; + public String grammarName = "T"; + + /** + grammar T; + + member : 'a'; + body : member*; + file : body EOF; + B : 'b'; + */ + @CommentHasStringValue + public String grammar; + + @Override + public boolean ignore(String targetName) { + return !"Java".equals(targetName); + } + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java index fbf6cfbfc..218bdb789 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java @@ -283,11 +283,16 @@ public class SemPredEvalParserDescriptors { public String input = "s\n\n\nx\n"; public String output = "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n"; /** - line 5:0 mismatched input '' expecting ' - ' + line 5:0 mismatched input '' expecting {'s', ' + ', 'x'} */ @CommentHasStringValue public String errors; + + @Override + public boolean ignore(String targetName) { + return !"Java".equals(targetName); + } } public static class PredFromAltTestedInLoopBack_2 extends PredFromAltTestedInLoopBack { diff --git a/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java b/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java index 819538539..02b5ee510 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java +++ b/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java @@ -36,6 +36,21 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy { protected IntervalSet lastErrorStates; + /** + * This field is used to propagate information 
about the lookahead following + * the previous match. Since prediction prefers completing the current rule + * to error recovery efforts, error reporting may occur later than the + * original point where it was discoverable. The original context is used to + * compute the true expected sets as though the reporting occurred as early + * as possible. + */ + protected ParserRuleContext nextTokensContext; + + /** + * @see #nextTokensContext + */ + protected int nextTokensState; + /** * {@inheritDoc} * @@ -225,7 +240,20 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy { // try cheaper subset first; might get lucky. seems to shave a wee bit off IntervalSet nextTokens = recognizer.getATN().nextTokens(s); - if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) { + if (nextTokens.contains(la)) { + // We are sure the token matches + nextTokensContext = null; + nextTokensState = ATNState.INVALID_STATE_NUMBER; + return; + } + + if (nextTokens.contains(Token.EPSILON)) { + if (nextTokensContext == null) { + // It's possible the next token won't match; information tracked + // by sync is restricted for performance. 
+ nextTokensContext = recognizer.getContext(); + nextTokensState = recognizer.getState(); + } return; } @@ -450,7 +478,14 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy { } // even that didn't work; must throw the exception - throw new InputMismatchException(recognizer); + InputMismatchException e; + if (nextTokensContext == null) { + e = new InputMismatchException(recognizer); + } else { + e = new InputMismatchException(recognizer, nextTokensState, nextTokensContext); + } + + throw e; } /** diff --git a/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java b/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java index fc4261558..08ef67c58 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java +++ b/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java @@ -13,4 +13,10 @@ public class InputMismatchException extends RecognitionException { super(recognizer, recognizer.getInputStream(), recognizer._ctx); this.setOffendingToken(recognizer.getCurrentToken()); } + + public InputMismatchException(Parser recognizer, int state, ParserRuleContext ctx) { + super(recognizer, recognizer.getInputStream(), ctx); + this.setOffendingState(state); + this.setOffendingToken(recognizer.getCurrentToken()); + } } From 95338f710ecd201f2a5fe5f451c610aba7c64ae7 Mon Sep 17 00:00:00 2001 From: Matt Hauck Date: Thu, 27 Jul 2017 20:54:59 -0700 Subject: [PATCH 029/102] Ignore Cmake policy CMP0054 --- runtime/Cpp/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 65e704516..07ef35e69 100644 --- a/runtime/Cpp/CMakeLists.txt +++ b/runtime/Cpp/CMakeLists.txt @@ -33,6 +33,7 @@ endif() if(CMAKE_VERSION VERSION_EQUAL "3.3.0" OR CMAKE_VERSION VERSION_GREATER "3.3.0") CMAKE_POLICY(SET CMP0059 OLD) + CMAKE_POLICY(SET CMP0054 OLD) endif() if(CMAKE_SYSTEM_NAME MATCHES "Linux") From f12a71306200160bc5b7af99ade1ae11f0f50510 Mon Sep 17 00:00:00 2001 From: 
Matt Hauck Date: Thu, 27 Jul 2017 20:56:54 -0700 Subject: [PATCH 030/102] sign contributors.txt --- contributors.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/contributors.txt b/contributors.txt index 4b923fd79..97a19126e 100644 --- a/contributors.txt +++ b/contributors.txt @@ -153,3 +153,4 @@ YYYY/MM/DD, github id, Full name, email 2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com 2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in 2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com +2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com From b41782964b8b0d0fd9652ec2bbf876296cd699e0 Mon Sep 17 00:00:00 2001 From: Matt Hauck Date: Thu, 27 Jul 2017 21:37:09 -0700 Subject: [PATCH 031/102] Add MSVC support to c++ cmake build --- runtime/Cpp/CMakeLists.txt | 25 +++++++++++++++++++------ runtime/Cpp/runtime/CMakeLists.txt | 10 +++++++++- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 65e704516..4259a95e7 100644 --- a/runtime/Cpp/CMakeLists.txt +++ b/runtime/Cpp/CMakeLists.txt @@ -61,7 +61,11 @@ if (WITH_DEMO) endif() endif(WITH_DEMO) -set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") +if (MSVC_VERSION) + set(MY_CXX_WARNING_FLAGS " /W4") +else() + set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") +endif() # Initialize CXXFLAGS. 
if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0) @@ -75,11 +79,18 @@ else() set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++11") endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${MY_CXX_WARNING_FLAGS}") -set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}") -set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}") -set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLGAS}") -set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}") +if (MSVC_VERSION) + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 -DNDEBUG ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 -DNDEBUG ${MY_CXX_WARNING_FLGAS}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /O2 /Zi ${MY_CXX_WARNING_FLAGS}") +else() + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLGAS}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}") +endif() # Compiler-specific C++11 activation. 
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") @@ -101,6 +112,8 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND CMAKE_SYSTEM_NAME MATCHES if (WITH_LIBCXX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() +elseif ( MSVC_VERSION GREATER 1800 OR MSVC_VERSION EQUAL 1800 ) + # Visual Studio 2012+ supports c++11 features else () message(FATAL_ERROR "Your C++ compiler does not support C++11.") endif () diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index b2a4fbd02..d8746ea7a 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -44,7 +44,11 @@ elseif(APPLE) target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY}) endif() -set(disabled_compile_warnings "-Wno-overloaded-virtual") +if (MSVC_VERSION) + set(disabled_compile_warnings "/wd4251") +else() + set(disabled_compile_warnings "-Wno-overloaded-virtual") +endif() if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-dollar-in-identifier-extension -Wno-four-char-constants") elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") @@ -57,6 +61,10 @@ if (WIN32) set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS") set(extra_static_compile_flags "-DANTLR4CPP_STATIC") endif(WIN32) +if (MSVC_VERSION) + set(extra_share_compile_flags "${extra_share_compile_flags} /MD") + set(extra_static_compile_flags "${extra_static_compile_flags} /MT") +endif() set_target_properties(antlr4_shared PROPERTIES VERSION ${ANTLR_VERSION} From 11d185f744793046d04837355cadce899c041d86 Mon Sep 17 00:00:00 2001 From: Matt Hauck Date: Thu, 27 Jul 2017 22:08:13 -0700 Subject: [PATCH 032/102] add some missing flags; fix debug build --- runtime/Cpp/CMakeLists.txt | 10 +++++----- runtime/Cpp/runtime/CMakeLists.txt | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 4259a95e7..6dcd113e1 100644 --- a/runtime/Cpp/CMakeLists.txt 
+++ b/runtime/Cpp/CMakeLists.txt @@ -79,12 +79,12 @@ else() set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++11") endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}") if (MSVC_VERSION) - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi ${MY_CXX_WARNING_FLAGS}") - set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 -DNDEBUG ${MY_CXX_WARNING_FLAGS}") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 -DNDEBUG ${MY_CXX_WARNING_FLGAS}") - set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /O2 /Zi ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MP ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLGAS}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /O2 /Oi /Ob2 /Gy /MP /Zi ${MY_CXX_WARNING_FLAGS}") else() set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}") set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}") diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index d8746ea7a..aec8bf7b4 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -62,8 +62,8 @@ if (WIN32) set(extra_static_compile_flags "-DANTLR4CPP_STATIC") endif(WIN32) if (MSVC_VERSION) - set(extra_share_compile_flags "${extra_share_compile_flags} /MD") - set(extra_static_compile_flags "${extra_static_compile_flags} /MT") + target_compile_options(antlr4_shared PRIVATE "/MD$<$:d>") + target_compile_options(antlr4_static PRIVATE "/MT$<$:d>") endif() set_target_properties(antlr4_shared From 
a2fcad61f021f5e3d24480015b5bf7b7cdca1dc9 Mon Sep 17 00:00:00 2001 From: Matt Hauck Date: Fri, 28 Jul 2017 13:12:47 -0700 Subject: [PATCH 033/102] Differentiate windows static lib suffix to avoid being clobbered Since the install target install static and shared libs into same folder, and because on windows a shared lib also outputs a shared .lib file to link against, need to make sure the static/shared .lib files do not clobber each other. --- runtime/Cpp/runtime/CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index aec8bf7b4..dcd21b8b0 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -66,6 +66,11 @@ if (MSVC_VERSION) target_compile_options(antlr4_static PRIVATE "/MT$<$:d>") endif() +set(static_lib_suffix "") +if (MSVC_VERSION) + set(static_lib_suffix "-static") +endif() + set_target_properties(antlr4_shared PROPERTIES VERSION ${ANTLR_VERSION} SOVERSION ${ANTLR_VERSION} @@ -80,7 +85,7 @@ set_target_properties(antlr4_shared set_target_properties(antlr4_static PROPERTIES VERSION ${ANTLR_VERSION} SOVERSION ${ANTLR_VERSION} - OUTPUT_NAME antlr4-runtime + OUTPUT_NAME "antlr4-runtime${static_lib_suffix}" ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}") From ca9409c7bc656833e269ef758398624977363307 Mon Sep 17 00:00:00 2001 From: neatnerd Date: Fri, 28 Jul 2017 23:55:40 +0200 Subject: [PATCH 034/102] Remove duplicate comments --- runtime/Go/antlr/tokenstream_rewriter.go | 115 +++++------------------ 1 file changed, 21 insertions(+), 94 deletions(-) diff --git a/runtime/Go/antlr/tokenstream_rewriter.go b/runtime/Go/antlr/tokenstream_rewriter.go index 32f48514b..ef68519d7 100644 --- a/runtime/Go/antlr/tokenstream_rewriter.go +++ b/runtime/Go/antlr/tokenstream_rewriter.go @@ -9,11 +9,10 @@ import ( ) -// Useful for rewriting out a buffered input token 
stream after doing some -// +// // Useful for rewriting out a buffered input token stream after doing some // augmentation or other manipulations on it. -// + //

// You can insert stuff, replace, and delete chunks. Note that the operations // are done lazily--only if you convert the buffer to a {@link String} with @@ -24,31 +23,31 @@ import ( // operation is done and then normal {@link String} rendering continues on the // buffer. This is like having multiple Turing machine instruction streams // (programs) operating on a single input tape. :)

-// //

+ // This rewriter makes no modifications to the token stream. It does not ask the // stream to fill itself up nor does it advance the input cursor. The token // stream {@link TokenStream#index()} will return the same value before and // after any {@link #getText()} call.

-// + //

// The rewriter only works on tokens that you have in the buffer and ignores the // current input cursor. If you are buffering tokens on-demand, calling // {@link #getText()} halfway through the input will only do rewrites for those // tokens in the first half of the file.

-// + //

// Since the operations are done lazily at {@link #getText}-time, operations do // not screw up the token index values. That is, an insert operation at token // index {@code i} does not change the index values for tokens // {@code i}+1..n-1.

-// + //

// Because operations never actually alter the buffer, you may always get the // original token stream back without undoing anything. Since the instructions // are queued up, you can easily simulate transactions and roll back any changes // if there is an error just by removing instructions. For example,

-// + //
 // CharStream input = new ANTLRFileStream("input");
 // TLexer lex = new TLexer(input);
@@ -57,10 +56,10 @@ import (
 // TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
 // parser.startRule();
 // 
-// + //

// Then in the rules, you can execute (assuming rewriter is visible):

-// + //
 // Token t,u;
 // ...
@@ -68,97 +67,25 @@ import (
 // rewriter.insertAfter(u, "text after u");}
 // System.out.println(rewriter.getText());
 // 
-// + //

// You can also have multiple "instruction streams" and get multiple rewrites // from a single pass over the input. Just name the instruction streams and use // that name again when printing the buffer. This could be useful for generating // a C file and also its header file--all from the same buffer:

-// + //
 // rewriter.insertAfter("pass1", t, "text to put after t");}
 // rewriter.insertAfter("pass2", u, "text after u");}
 // System.out.println(rewriter.getText("pass1"));
 // System.out.println(rewriter.getText("pass2"));
 // 
-// + //

// If you don't use named rewrite streams, a "default" stream is used as the // first example shows.

-// /augmentation or other manipulations on it. -// -//

-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)

-// -//

-// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.

-// -//

-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.

-// -//

-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.

-// -//

-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,

-// -//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-// 
-// -//

-// Then in the rules, you can execute (assuming rewriter is visible):

-// -//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-// 
-// -//

-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:

-// -//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-// 
-// -//

-// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.

-// + + const( Default_Program_Name = "default" @@ -172,12 +99,12 @@ type RewriteOperation interface { // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream SetInstructionIndex(val int) SetIndex(int) SetText(string) @@ -187,7 +114,7 @@ type RewriteOperation interface { type BaseRewriteOperation struct { //Current index of rewrites list - instruction_index int + instruction_index int //Token buffer index index int //Substitution text From dced604c7cfe337a6370b83e3bd25a8f173a1b00 Mon Sep 17 00:00:00 2001 From: Arshinskiy Mike Date: Sat, 29 Jul 2017 22:05:46 +0200 Subject: [PATCH 035/102] Corrected Start/Stop spelling --- runtime/Go/antlr/tokenstream_rewriter.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/Go/antlr/tokenstream_rewriter.go b/runtime/Go/antlr/tokenstream_rewriter.go index ef68519d7..96a03f02a 100644 --- a/runtime/Go/antlr/tokenstream_rewriter.go +++ b/runtime/Go/antlr/tokenstream_rewriter.go @@ -443,8 +443,8 @@ func (tsr *TokenStreamRewriter)GetTextDefault() string{ // instructions given to this rewriter. func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { rewrites := tsr.programs[program_name] - start := interval.start - stop := interval.stop + start := interval.Start + stop := interval.Stop // ensure start/end are in range stop = min(stop, tsr.tokens.Size()-1) start = max(start,0) From 0137218930d4f2b67a7b7b74a25c5417c34fa5bf Mon Sep 17 00:00:00 2001 From: Hanzhou Shi Date: Sat, 29 Jul 2017 17:27:33 -0700 Subject: [PATCH 036/102] Removing dead code, malformed documentations, etc. 
--- .../Sources/Antlr4/ANTLRErrorListener.swift | 4 +- .../Sources/Antlr4/ANTLRErrorStrategy.swift | 12 +-- .../Sources/Antlr4/ANTLRFileStream.swift | 1 - .../Sources/Antlr4/ANTLRInputStream.swift | 69 --------------- .../Sources/Antlr4/BaseErrorListener.swift | 2 +- .../Sources/Antlr4/BufferedTokenStream.swift | 9 -- runtime/Swift/Sources/Antlr4/CharStream.swift | 6 +- .../Sources/Antlr4/DefaultErrorStrategy.swift | 17 ---- runtime/Swift/Sources/Antlr4/IntStream.swift | 11 ++- runtime/Swift/Sources/Antlr4/Lexer.swift | 7 -- .../Sources/Antlr4/ListTokenSource.swift | 17 ---- .../Sources/Antlr4/NoViableAltException.swift | 1 - runtime/Swift/Sources/Antlr4/Parser.swift | 6 +- .../Sources/Antlr4/ParserRuleContext.swift | 8 +- .../Sources/Antlr4/ProxyErrorListener.swift | 1 - .../Sources/Antlr4/RecognitionException.swift | 4 +- runtime/Swift/Sources/Antlr4/Recognizer.swift | 13 +-- .../Swift/Sources/Antlr4/RuleContext.swift | 87 +++++++++---------- .../Swift/Sources/Antlr4/TokenStream.swift | 8 +- runtime/Swift/Sources/Antlr4/atn/ATN.swift | 4 +- .../Sources/Antlr4/atn/ATNSimulator.swift | 24 +---- .../Antlr4/atn/ParserATNSimulator.swift | 10 --- runtime/Swift/Sources/Antlr4/dfa/DFA.swift | 13 +-- .../Swift/Sources/Antlr4/misc/BitSet.swift | 68 +++++---------- .../Swift/Sources/Antlr4/misc/IntSet.swift | 6 +- .../Swift/Sources/Antlr4/tree/ErrorNode.swift | 2 - .../Antlr4/tree/pattern/ParseTreeMatch.swift | 6 +- .../pattern/ParseTreePatternMatcher.swift | 58 +++---------- .../Antlr4/tree/pattern/RuleTagToken.swift | 38 +------- .../Antlr4/tree/pattern/TagChunk.swift | 8 +- .../Antlr4/tree/pattern/TextChunk.swift | 2 +- 31 files changed, 128 insertions(+), 394 deletions(-) diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift index 942986b5e..930665825 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift @@ -10,7 +10,7 @@ public 
protocol ANTLRErrorListener: class { /// messages. This listener's job is simply to emit a computed message, /// though it has enough information to create its own message in many cases. /// - ///

The {@link org.antlr.v4.runtime.RecognitionException} is non-null for all syntax errors except + ///

The {@link RecognitionException} is non-null for all syntax errors except /// when we discover mismatched token errors that we can recover from /// in-line, without returning from the surrounding rule (via the single /// token insertion and deletion mechanism).

@@ -40,7 +40,7 @@ public protocol ANTLRErrorListener: class { _ line: Int, _ charPositionInLine: Int, _ msg: String, - _ e: AnyObject?// RecognitionException? + _ e: AnyObject? ) /// This method is called by the parser when a full-context prediction diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift index 541a2a3b4..3ee9ce972 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift @@ -36,10 +36,10 @@ public protocol ANTLRErrorStrategy { /// for calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} as appropriate.

/// /// - parameter recognizer: the parser instance - /// - org.antlr.v4.runtime.RecognitionException if the error strategy was not able to + /// - throws: _RecognitionException_ if the error strategy was not able to /// recover from the unexpected input symbol @discardableResult - func recoverInline(_ recognizer: Parser) throws -> Token // RecognitionException; + func recoverInline(_ recognizer: Parser) throws -> Token /// This method is called to recover from exception {@code e}. This method is /// called after {@link #reportError} by the default exception handler @@ -49,9 +49,9 @@ public protocol ANTLRErrorStrategy { /// /// - parameter recognizer: the parser instance /// - parameter e: the recognition exception to recover from - /// - org.antlr.v4.runtime.RecognitionException if the error strategy could not recover from + /// - throws: _RecognitionException_ if the error strategy could not recover from /// the recognition exception - func recover(_ recognizer: Parser, _ e: AnyObject) throws // RecognitionException; + func recover(_ recognizer: Parser, _ e: AnyObject) throws /// This method provides the error handler with an opportunity to handle /// syntactic or semantic errors in the input stream before they result in a @@ -67,10 +67,10 @@ public protocol ANTLRErrorStrategy { /// - seealso: org.antlr.v4.runtime.DefaultErrorStrategy#sync /// /// - parameter recognizer: the parser instance - /// - org.antlr.v4.runtime.RecognitionException if an error is detected by the error + /// - throws: _RecognitionException_ if an error is detected by the error /// strategy but cannot be automatically recovered at the current state in /// the parsing process - func sync(_ recognizer: Parser) throws // RecognitionException; + func sync(_ recognizer: Parser) throws /// Tests whether or not recognizer} is in the process of recovering /// from an error. 
In error recovery mode, {@link org.antlr.v4.runtime.Parser#consume} adds diff --git a/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift b/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift index 053b6348d..96623d1d2 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift @@ -10,7 +10,6 @@ public class ANTLRFileStream: ANTLRInputStream { internal var fileName: String public convenience override init(_ fileName: String) { - // throws; IOException self.init(fileName, nil) } diff --git a/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift b/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift index 3b73981fd..f57902d74 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift @@ -39,76 +39,11 @@ public class ANTLRInputStream: CharStream { self.data = data self.n = numberOfActualCharsInArray } - /// public convenience init(_ r : Reader) throws; IOException { - /// self.init(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE); - /// } - /// - /// public convenience init(_ r : Reader, _ initialSize : Int) throws; IOException { - /// self.init(r, initialSize, READ_BUFFER_SIZE); - /// } - /// - /// public init(_ r : Reader, _ initialSize : Int, _ readChunkSize : Int) throws; IOException { - /// load(r, initialSize, readChunkSize); - /// } - /// - /// public convenience init(_ input : InputStream) throws; IOException { - /// self.init(InputStreamReader(input), INITIAL_BUFFER_SIZE); - /// } - /// - /// public convenience init(_ input : InputStream, _ initialSize : Int) throws; IOException { - /// self.init(InputStreamReader(input), initialSize); - /// } - /// - /// public convenience init(_ input : InputStream, _ initialSize : Int, _ readChunkSize : Int) throws; IOException { - /// self.init(InputStreamReader(input), initialSize, readChunkSize); - /// } - /// - /// public func load(r : Reader, _ size : Int, _ readChunkSize : Int) - /// throws; IOException - 
/// { - /// if ( r==nil ) { - /// return; - /// } - /// if ( size<=0 ) { - /// size = INITIAL_BUFFER_SIZE; - /// } - /// if ( readChunkSize<=0 ) { - /// readChunkSize = READ_BUFFER_SIZE; - /// } - /// // print("load "+size+" in chunks of "+readChunkSize); - /// try { - /// // alloc initial buffer size. - /// data = new char[size]; - /// // read all the data in chunks of readChunkSize - /// var numRead : Int=0; - /// var p : Int = 0; - /// do { - /// if ( p+readChunkSize > data.length ) { // overflow? - /// // print("### overflow p="+p+", data.length="+data.length); - /// data = Arrays.copyOf(data, data.length * 2); - /// } - /// numRead = r.read(data, p, readChunkSize); - /// // print("read "+numRead+" chars; p was "+p+" is now "+(p+numRead)); - /// p += numRead; - /// } while (numRead!=-1); // while not EOF - /// // set the actual size of the data available; - /// // EOF subtracted one above in p+=numRead; add one back - /// n = p+1; - /// //print("n="+n); - /// } - /// finally { - /// r.close(); - /// } - /// } - /// Reset the stream so that it's in the same state it was - /// when the object was created *except* the data array is not - /// touched. 
public func reset() { p = 0 } - public func consume() throws { if p >= n { assert(LA(1) == ANTLRInputStream.EOF, "Expected: LA(1)==IntStream.EOF") @@ -124,7 +59,6 @@ public class ANTLRInputStream: CharStream { } } - public func LA(_ i: Int) -> Int { var i = i if i == 0 { @@ -186,7 +120,6 @@ public class ANTLRInputStream: CharStream { } } - public func getText(_ interval: Interval) -> String { let start: Int = interval.a var stop: Int = interval.b @@ -201,7 +134,6 @@ public class ANTLRInputStream: CharStream { return String(data[start ..< (start + count)]) } - public func getSourceName() -> String { guard let name = name , !name.isEmpty else { return ANTLRInputStream.UNKNOWN_SOURCE_NAME @@ -209,7 +141,6 @@ public class ANTLRInputStream: CharStream { return name } - public func toString() -> String { return String(data) } diff --git a/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift b/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift index 5a4292c4e..725fd01a6 100644 --- a/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift @@ -18,7 +18,7 @@ open class BaseErrorListener: ANTLRErrorListener { _ line: Int, _ charPositionInLine: Int, _ msg: String, - _ e: AnyObject?//RecognitionException + _ e: AnyObject? 
) { } diff --git a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift index 8ea114821..abc010c72 100644 --- a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift @@ -69,7 +69,6 @@ public class BufferedTokenStream: TokenStream { return 0 } - public func release(_ marker: Int) { // no resources to release } @@ -108,8 +107,6 @@ public class BufferedTokenStream: TokenStream { if try !skipEofCheck && LA(1) == BufferedTokenStream.EOF { throw ANTLRError.illegalState(msg: "cannot consume EOF") - //RuntimeException("cannot consume EOF") - //throw ANTLRError.IllegalState /* throw IllegalStateException("cannot consume EOF"); */ } if try sync(p + 1) { @@ -159,7 +156,6 @@ public class BufferedTokenStream: TokenStream { return n } - public func get(_ i: Int) throws -> Token { if i < 0 || i >= tokens.count { let index = tokens.count - 1 @@ -394,8 +390,6 @@ public class BufferedTokenStream: TokenStream { try lazyInit() if tokenIndex < 0 || tokenIndex >= tokens.count { throw ANTLRError.indexOutOfBounds(msg: "\(tokenIndex) not in 0..\(tokens.count - 1)") - //RuntimeException("\(tokenIndex) not in 0..\(tokens.count-1)") - //throw ANTLRError.IndexOutOfBounds /* throw IndexOutOfBoundsException(tokenIndex+" not in 0.."+(tokens.count-1)); */ } if tokenIndex == 0 { @@ -447,13 +441,10 @@ public class BufferedTokenStream: TokenStream { } /// Get the text of all tokens in this buffer. 
- - public func getText() throws -> String { return try getText(Interval.of(0, size() - 1)) } - public func getText(_ interval: Interval) throws -> String { let start: Int = interval.a var stop: Int = interval.b diff --git a/runtime/Swift/Sources/Antlr4/CharStream.swift b/runtime/Swift/Sources/Antlr4/CharStream.swift index c4895112e..fa63600ed 100644 --- a/runtime/Swift/Sources/Antlr4/CharStream.swift +++ b/runtime/Swift/Sources/Antlr4/CharStream.swift @@ -14,11 +14,11 @@ public protocol CharStream: IntStream { /// - parameter interval: an interval within the stream /// - returns: the text of the specified interval /// - /// - NullPointerException if {@code interval} is {@code null} - /// - IllegalArgumentException if {@code interval.a < 0}, or if + /// - throws: _ANTLRError.nullPointer_ if {@code interval} is {@code null} + /// - throws: _ANTLRError.illegalArgument_ if {@code interval.a < 0}, or if /// {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or /// past the end of the stream - /// - UnsupportedOperationException if the stream does not support + /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support /// getting the text of the specified interval func getText(_ interval: Interval) -> String } diff --git a/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift index a7221b8dc..50303b2de 100644 --- a/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift @@ -2,8 +2,6 @@ /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. - - /// This is the default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} used for /// error reporting and recovery in ANTLR parsers. @@ -26,11 +24,8 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { internal var lastErrorStates: IntervalSet? - /// {@inheritDoc} - /// ///

The default implementation simply calls {@link #endErrorCondition} to /// ensure that the handler is not in error recovery mode.

- public func reset(_ recognizer: Parser) { endErrorCondition(recognizer) } @@ -43,8 +38,6 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { errorRecoveryMode = true } - /// {@inheritDoc} - public func inErrorRecoveryMode(_ recognizer: Parser) -> Bool { return errorRecoveryMode } @@ -59,15 +52,11 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { lastErrorIndex = -1 } - /// {@inheritDoc} - /// ///

The default implementation simply calls {@link #endErrorCondition}.

- public func reportMatch(_ recognizer: Parser) { endErrorCondition(recognizer) } - /// {@inheritDoc} /// ///

The default implementation returns immediately if the handler is already /// in error recovery mode. Otherwise, it calls {@link #beginErrorCondition} @@ -84,7 +73,6 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { ///

  • All other types: calls {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} to report /// the exception
  • /// - public func reportError(_ recognizer: Parser, _ e: AnyObject) { // if we've already reported an error and have not matched a token @@ -94,7 +82,6 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { return // don't report spurious errors } beginErrorCondition(recognizer) - //TODO: exception handler if (e is NoViableAltException) { try! reportNoViableAlternative(recognizer, e as! NoViableAltException); } else { @@ -112,12 +99,9 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { } } - /// {@inheritDoc} - /// ///

    The default implementation resynchronizes the parser by consuming tokens /// until we find one in the resynchronization set--loosely the set of tokens /// that can follow the current rule.

    - public func recover(_ recognizer: Parser, _ e: AnyObject) throws { // print("recover in "+recognizer.getRuleInvocationStack()+ // " index="+getTokenStream(recognizer).index()+ @@ -507,7 +491,6 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// a CommonToken of the appropriate type. The text will be the token. /// If you change what tokens must be created by the lexer, /// override this method to create the appropriate tokens. - internal func getTokenStream(_ recognizer: Parser) -> TokenStream { return recognizer.getInputStream() as! TokenStream } diff --git a/runtime/Swift/Sources/Antlr4/IntStream.swift b/runtime/Swift/Sources/Antlr4/IntStream.swift index 69af63969..8e7fde515 100644 --- a/runtime/Swift/Sources/Antlr4/IntStream.swift +++ b/runtime/Swift/Sources/Antlr4/IntStream.swift @@ -45,7 +45,7 @@ public protocol IntStream: class { /// filtering streams (e.g. {@link org.antlr.v4.runtime.CommonTokenStream} which distinguishes /// between "on-channel" and "off-channel" tokens). /// - /// - IllegalStateException if an attempt is made to consume the the + /// - throws: _ANTLRError.illegalState_ if an attempt is made to consume the the /// end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling /// {@code consume}). func consume() throws @@ -80,7 +80,7 @@ public protocol IntStream: class { /// calls to {@link #consume consume()} have occurred from the beginning of /// the stream before calling this method.

    /// - /// - UnsupportedOperationException if the stream does not support + /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support /// retrieving the value of the specified symbol func LA(_ i: Int) throws -> Int @@ -173,21 +173,20 @@ public protocol IntStream: class { /// /// - parameter index: The absolute index to seek to. /// - /// - IllegalArgumentException if {@code index} is less than 0 - /// - UnsupportedOperationException if the stream does not support + /// - throws: _ANTLRError.illegalArgument_ if {@code index} is less than 0 + /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support /// seeking to the specified index func seek(_ index: Int) throws /// Returns the total number of symbols in the stream, including a single EOF /// symbol. /// - /// - UnsupportedOperationException if the size of the stream is + /// - throws: _ANTLRError.unsupportedOperation_ if the size of the stream is /// unknown. func size() -> Int /// Gets the name of the underlying symbol source. This method returns a /// non-null, non-empty string. If such a name is not known, this method /// returns {@link #UNKNOWN_SOURCE_NAME}. - func getSourceName() -> String } diff --git a/runtime/Swift/Sources/Antlr4/Lexer.swift b/runtime/Swift/Sources/Antlr4/Lexer.swift index 9c251ba66..2af2140fe 100644 --- a/runtime/Swift/Sources/Antlr4/Lexer.swift +++ b/runtime/Swift/Sources/Antlr4/Lexer.swift @@ -384,9 +384,6 @@ open class Lexer: Recognizer s = "" } switch s { -// case CommonToken.EOF : -// s = ""; -// break; case "\n": s = "\\n" case "\t": @@ -408,11 +405,7 @@ open class Lexer: Recognizer /// a token, so do the easy thing and just kill a character and hope /// it all works out. You can instead use the rule invocation stack /// to do sophisticated error recovery if you are in a fragment rule. 
- //public func recover(re : RecognitionException) { - open func recover(_ re: AnyObject) throws { - //System.out.println("consuming char "+(char)input.LA(1)+" during recovery"); - //re.printStackTrace(); // TODO: Do we lose character or line position information? try _input!.consume() } diff --git a/runtime/Swift/Sources/Antlr4/ListTokenSource.swift b/runtime/Swift/Sources/Antlr4/ListTokenSource.swift index a553642cd..301f5b417 100644 --- a/runtime/Swift/Sources/Antlr4/ListTokenSource.swift +++ b/runtime/Swift/Sources/Antlr4/ListTokenSource.swift @@ -36,7 +36,6 @@ public class ListTokenSource: TokenSource { /// /// - parameter tokens: The collection of {@link org.antlr.v4.runtime.Token} objects to provide as a /// {@link org.antlr.v4.runtime.TokenSource}. - /// - NullPointerException if {@code tokens} is {@code null} public convenience init(_ tokens: Array) { self.init(tokens, nil) } @@ -50,16 +49,12 @@ public class ListTokenSource: TokenSource { /// {@code null}, {@link #getSourceName} will attempt to infer the name from /// the next {@link org.antlr.v4.runtime.Token} (or the previous token if the end of the input has /// been reached). - /// - /// - NullPointerException if {@code tokens} is {@code null} public init(_ tokens: Array, _ sourceName: String?) { self.tokens = tokens self.sourceName = sourceName } - /// {@inheritDoc} - public func getCharPositionInLine() -> Int { if i < tokens.count { return tokens[i].getCharPositionInLine() @@ -92,8 +87,6 @@ public class ListTokenSource: TokenSource { return 0 } - /// {@inheritDoc} - public func nextToken() -> Token { if i >= tokens.count { if eofToken == nil { @@ -121,8 +114,6 @@ public class ListTokenSource: TokenSource { return t } - /// {@inheritDoc} - public func getLine() -> Int { if i < tokens.count { return tokens[i].getLine() @@ -156,8 +147,6 @@ public class ListTokenSource: TokenSource { return 1 } - /// {@inheritDoc} - public func getInputStream() -> CharStream? 
{ if i < tokens.count { return tokens[i].getInputStream() @@ -175,8 +164,6 @@ public class ListTokenSource: TokenSource { return nil } - /// {@inheritDoc} - public func getSourceName() -> String { if sourceName != nil { return sourceName! @@ -189,14 +176,10 @@ public class ListTokenSource: TokenSource { return "List" } - /// {@inheritDoc} - public func setTokenFactory(_ factory: TokenFactory) { self._factory = factory } - /// {@inheritDoc} - public func getTokenFactory() -> TokenFactory { return _factory } diff --git a/runtime/Swift/Sources/Antlr4/NoViableAltException.swift b/runtime/Swift/Sources/Antlr4/NoViableAltException.swift index 8a41f8cca..ebbcd717b 100644 --- a/runtime/Swift/Sources/Antlr4/NoViableAltException.swift +++ b/runtime/Swift/Sources/Antlr4/NoViableAltException.swift @@ -20,7 +20,6 @@ public class NoViableAltException: RecognitionException { * time the error occurred, of course the stream needs to keep a * buffer all of the tokens but later we might not have access to those.) */ - private final var startToken: Token public convenience init(_ recognizer: Parser?) 
throws { diff --git a/runtime/Swift/Sources/Antlr4/Parser.swift b/runtime/Swift/Sources/Antlr4/Parser.swift index 81ce2ac4f..1562551f1 100644 --- a/runtime/Swift/Sources/Antlr4/Parser.swift +++ b/runtime/Swift/Sources/Antlr4/Parser.swift @@ -217,7 +217,7 @@ open class Parser: Recognizer { * @throws org.antlr.v4.runtime.RecognitionException if the current input symbol did not match * a wildcard and the error strategy could not recover from the mismatched * symbol - *///; RecognitionException + */ @discardableResult public func matchWildcard() throws -> Token { var t: Token = try getCurrentToken() @@ -330,7 +330,7 @@ open class Parser: Recognizer { * * @param listener the listener to add * - * @throws NullPointerException if {@code} listener is {@code null} + * @throws _ANTLRError.nullPointer_ if listener is {@code null} */ public func addParseListener(_ listener: ParseTreeListener) { if _parseListeners == nil { @@ -432,7 +432,7 @@ open class Parser: Recognizer { * The ATN with bypass alternatives is expensive to create so we create it * lazily. * - * @throws UnsupportedOperationException if the current parser does not + * @throws _ANTLRError.unsupportedOperation_ if the current parser does not * implement the {@link #getSerializedATN()} method. */ diff --git a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift index 532018058..9950e43f7 100644 --- a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift @@ -26,7 +26,6 @@ * group values such as this aggregate. The getters/setters are there to * satisfy the superclass interface. */ - open class ParserRuleContext: RuleContext { public var visited = false /** If we are debugging or building a parse tree for a visitor, @@ -55,8 +54,6 @@ open class ParserRuleContext: RuleContext { * * This does not trace states visited during prediction. */ -// public List states; - public var start: Token?, stop: Token? 
/** @@ -64,7 +61,6 @@ open class ParserRuleContext: RuleContext { * completed, this is {@code null}. */ public var exception: AnyObject! - //RecognitionException!; public override init() { super.init() @@ -198,7 +194,9 @@ open class ParserRuleContext: RuleContext { override - /** Override to make type more specific */ + /** + * Override to make type more specific + */ open func getParent() -> Tree? { return super.getParent() } diff --git a/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift b/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift index 53f172b13..50bb87be8 100644 --- a/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift @@ -20,7 +20,6 @@ public class ProxyErrorListener: ANTLRErrorListener { self.delegates = delegates } - //_ e : RecognitionException public func syntaxError(_ recognizer: Recognizer, _ offendingSymbol: AnyObject?, _ line: Int, diff --git a/runtime/Swift/Sources/Antlr4/RecognitionException.swift b/runtime/Swift/Sources/Antlr4/RecognitionException.swift index ea3348b6a..97fdc60ba 100644 --- a/runtime/Swift/Sources/Antlr4/RecognitionException.swift +++ b/runtime/Swift/Sources/Antlr4/RecognitionException.swift @@ -12,7 +12,9 @@ */ public class RecognitionException { - /** The {@link org.antlr.v4.runtime.Recognizer} where this exception originated. */ + /** + * The {@link org.antlr.v4.runtime.Recognizer} where this exception originated. + */ private final var recognizer: Recognizer? //Recognizer? 
; diff --git a/runtime/Swift/Sources/Antlr4/Recognizer.swift b/runtime/Swift/Sources/Antlr4/Recognizer.swift index 525e46756..1fb8542f7 100644 --- a/runtime/Swift/Sources/Antlr4/Recognizer.swift +++ b/runtime/Swift/Sources/Antlr4/Recognizer.swift @@ -128,7 +128,6 @@ open class Recognizer { open func getSerializedATN() -> String { RuntimeException("there is no serialized ATN") fatalError() - ///throw ANTLRError.UnsupportedOperation /* throw UnsupportedOperationException("there is no /serialized ATN"); */ } /** For debugging and other purposes, might want the grammar name. @@ -177,9 +176,9 @@ open class Recognizer { _interp = interpreter } - /** What is the error header, normally line/character position information? */ - //public func getErrorHeader(e : RecognitionException - + /** + * What is the error header, normally line/character position information? + */ open func getErrorHeader(_ e: AnyObject) -> String { let line: Int = (e as! RecognitionException).getOffendingToken().getLine() let charPositionInLine: Int = (e as! RecognitionException).getOffendingToken().getCharPositionInLine() @@ -222,7 +221,7 @@ open class Recognizer { } /** - * @exception NullPointerException if {@code listener} is {@code null}. + * @exception ANTLRError.nullPointer if {@code listener} is {@code null}. 
*/ open func addErrorListener(_ listener: ANTLRErrorListener) { @@ -285,22 +284,18 @@ open class Recognizer { fatalError() } - open func setInputStream(_ input: IntStream) throws { RuntimeException(#function + "Must be overridden") } - open func getTokenFactory() -> TokenFactory { RuntimeException(#function + "Must be overridden") fatalError() } - open func setTokenFactory(_ input: TokenFactory) { RuntimeException(#function + "Must be overridden") } - } diff --git a/runtime/Swift/Sources/Antlr4/RuleContext.swift b/runtime/Swift/Sources/Antlr4/RuleContext.swift index ce19d0362..c1e020723 100644 --- a/runtime/Swift/Sources/Antlr4/RuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/RuleContext.swift @@ -159,50 +159,49 @@ open class RuleContext: RuleNode { return visitor.visitChildren(self) } - /* - /** Call this method to view a parse tree in a dialog box visually. */ - public func inspect(parser : Parser) -> Future { - var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null; - return inspect(ruleNames); - } - - public func inspect(ruleNames : Array) -> Future { - var viewer : TreeViewer = TreeViewer(ruleNames, self); - return viewer.open(); - } - - /** Save this tree in a postscript file */ - public func save(parser : Parser, _ fileName : String) - throws; IOException, PrintException - { - var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null; - save(ruleNames, fileName); - } - - /** Save this tree in a postscript file using a particular font name and size */ - public func save(parser : Parser, _ fileName : String, - _ fontName : String, _ fontSize : Int) - throws; IOException - { - var ruleNames : Array = parser != nil ? 
Arrays.asList(parser.getRuleNames()) : null; - save(ruleNames, fileName, fontName, fontSize); - } - - /** Save this tree in a postscript file */ - public func save(ruleNames : Array, _ fileName : String) - throws; IOException, PrintException - { - Trees.writePS(self, ruleNames, fileName); - } - - /** Save this tree in a postscript file using a particular font name and size */ - public func save(ruleNames : Array, _ fileName : String, - _ fontName : String, _ fontSize : Int) - throws; IOException - { - Trees.writePS(self, ruleNames, fileName, fontName, fontSize); - } - */ +// /** Call this method to view a parse tree in a dialog box visually. */ +// public func inspect(parser : Parser) -> Future { +// var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null; +// return inspect(ruleNames); +// } +// +// public func inspect(ruleNames : Array) -> Future { +// var viewer : TreeViewer = TreeViewer(ruleNames, self); +// return viewer.open(); +// } +// +// /** Save this tree in a postscript file */ +// public func save(parser : Parser, _ fileName : String) +// throws; IOException, PrintException +// { +// var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null; +// save(ruleNames, fileName); +// } +// +// /** Save this tree in a postscript file using a particular font name and size */ +// public func save(parser : Parser, _ fileName : String, +// _ fontName : String, _ fontSize : Int) +// throws; IOException +// { +// var ruleNames : Array = parser != nil ? 
Arrays.asList(parser.getRuleNames()) : null; +// save(ruleNames, fileName, fontName, fontSize); +// } +// +// /** Save this tree in a postscript file */ +// public func save(ruleNames : Array, _ fileName : String) +// throws; IOException, PrintException +// { +// Trees.writePS(self, ruleNames, fileName); +// } +// +// /** Save this tree in a postscript file using a particular font name and size */ +// public func save(ruleNames : Array, _ fileName : String, +// _ fontName : String, _ fontSize : Int) +// throws; IOException +// { +// Trees.writePS(self, ruleNames, fileName, fontName, fontSize); +// } + /** Print out a whole tree, not just a node, in LISP format * (root child1 .. childN). Print just a node if this is a leaf. * We have to know the recognizer so we can get rule names. diff --git a/runtime/Swift/Sources/Antlr4/TokenStream.swift b/runtime/Swift/Sources/Antlr4/TokenStream.swift index 92c35e549..7dd72e7ea 100644 --- a/runtime/Swift/Sources/Antlr4/TokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/TokenStream.swift @@ -35,8 +35,8 @@ public protocol TokenStream: IntStream { * of the stream. Unlike {@code seek()}, this method does not adjust * {@code index} to point to a non-ignored symbol.

    * - * @throws IllegalArgumentException if {code index} is less than 0 - * @throws UnsupportedOperationException if the stream does not support + * @throws ANTLRError.illegalArgument if {code index} is less than 0 + * @throws ANTLRError.unsupportedOperation if the stream does not support * retrieving the token at the specified index */ func get(_ index: Int) throws -> Token @@ -66,7 +66,7 @@ public protocol TokenStream: IntStream { * @return The text of all tokens within the specified interval in this * stream. * - * @throws NullPointerException if {@code interval} is {@code null} + * @throws ANTLRError.nullPointer if {@code interval} is {@code null} */ func getText(_ interval: Interval) throws -> String @@ -131,7 +131,7 @@ public protocol TokenStream: IntStream { * @return The text of all tokens lying between the specified {@code start} * and {@code stop} tokens. * - * @throws UnsupportedOperationException if this stream does not support + * @throws ANTLRError.unsupportedOperation if this stream does not support * this method for the specified tokens */ func getText(_ start: Token?, _ stop: Token?) throws -> String diff --git a/runtime/Swift/Sources/Antlr4/atn/ATN.swift b/runtime/Swift/Sources/Antlr4/atn/ATN.swift index e67343fb6..23cd8df2c 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATN.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATN.swift @@ -119,16 +119,14 @@ public class ATN { /// - parameter context: the full parse context /// - returns: The set of potentially valid input symbols which could follow the /// specified state in the specified context. 
- /// - IllegalArgumentException if the ATN does not contain a state with + /// - throws: _ANTLRError.illegalArgument_ if the ATN does not contain a state with /// number {@code stateNumber} public func getExpectedTokens(_ stateNumber: Int, _ context: RuleContext) throws -> IntervalSet { if stateNumber < 0 || stateNumber >= states.count { throw ANTLRError.illegalArgument(msg: "Invalid state number.") - /// throw IllegalArgumentException("Invalid state number."); } var ctx: RuleContext? = context - //TODO: s may be nil let s: ATNState = states[stateNumber]! var following: IntervalSet = try nextTokens(s) if !following.contains(CommonToken.EPSILON) { diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift index c8ef2da90..9bb2f06a7 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift @@ -20,7 +20,6 @@ open class ATNSimulator { /// Must distinguish between missing edge and edge we know leads nowhere - public static let ERROR: DFAState = { let error = DFAState(ATNConfigSet()) error.stateNumber = Int.max @@ -50,11 +49,6 @@ open class ATNSimulator { /// so it's not worth the complexity.

    internal final var sharedContextCache: PredictionContextCache? - //static; { - //ERROR = DFAState(ATNConfigSet()); - // ERROR.stateNumber = Integer.MAX_VALUE; - //} - public init(_ atn: ATN, _ sharedContextCache: PredictionContextCache) { @@ -71,7 +65,7 @@ open class ATNSimulator { /// performance (but not accuracy) of other parsers which are being used /// concurrently. /// - /// - UnsupportedOperationException if the current instance does not + /// - throws: ANTLRError.unsupportedOperation if the current instance does not /// support clearing the DFA. /// /// - 4.3 @@ -100,50 +94,35 @@ open class ATNSimulator { } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#deserialize} instead. - ////@Deprecated public static func deserialize(_ data: [Character]) throws -> ATN { return try ATNDeserializer().deserialize(data) } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#checkCondition(boolean)} instead. - ////@Deprecated public static func checkCondition(_ condition: Bool) throws { try ATNDeserializer().checkCondition(condition) } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#checkCondition(boolean, String)} instead. - ////@Deprecated public static func checkCondition(_ condition: Bool, _ message: String) throws { try ATNDeserializer().checkCondition(condition, message) } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toInt} instead. - ////@Deprecated public func toInt(_ c: Character) -> Int { return toInt(c) } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toInt32} instead. - ////@Deprecated public func toInt32(_ data: [Character], _ offset: Int) -> Int { return toInt32(data, offset) } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toLong} instead. - ////@Deprecated public func toLong(_ data: [Character], _ offset: Int) -> Int64 { return toLong(data, offset) } - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toUUID} instead. 
- ////@Deprecated - //public class func toUUID(data : [Character], _ offset : Int) -> NSUUID { - //return ATNDeserializer.toUUID(data, offset); - //} - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#edgeFactory} instead. - ////@Deprecated - public static func edgeFactory(_ atn: ATN, _ type: Int, _ src: Int, _ trg: Int, _ arg1: Int, _ arg2: Int, _ arg3: Int, @@ -152,7 +131,6 @@ open class ATNSimulator { } /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#stateFactory} instead. - ////@Deprecated public static func stateFactory(_ type: Int, _ ruleIndex: Int) throws -> ATNState { return try ATNDeserializer().stateFactory(type, ruleIndex)! } diff --git a/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift index edc3c38fc..81f55fc35 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift @@ -1924,16 +1924,6 @@ open class ParserATNSimulator: ATNSimulator { } internal static func getUniqueAlt(_ configs: ATNConfigSet) -> Int { - // var alt: Int = ATN.INVALID_ALT_NUMBER - // for c: ATNConfig in configs.configs { - // if alt == ATN.INVALID_ALT_NUMBER { - // alt = c.alt // found first alt - // } else { - // if c.alt != alt { - // return ATN.INVALID_ALT_NUMBER - // } - // } - // } let alt = configs.getUniqueAlt() return alt } diff --git a/runtime/Swift/Sources/Antlr4/dfa/DFA.swift b/runtime/Swift/Sources/Antlr4/dfa/DFA.swift index a966fe25b..0bcd2e970 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/DFA.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/DFA.swift @@ -66,9 +66,8 @@ public class DFA: CustomStringConvertible { /// - returns: The start state corresponding to the specified precedence, or /// {@code null} if no start state exists for the specified precedence. /// - /// - IllegalStateException if this is not a precedence DFA. + /// - throws: _ANTLRError.illegalState_ if this is not a precedence DFA. 
/// - seealso: #isPrecedenceDfa() - ////@SuppressWarnings("null") public final func getPrecedenceStartState(_ precedence: Int) throws -> DFAState? { if !isPrecedenceDfa() { throw ANTLRError.illegalState(msg: "Only precedence DFAs may contain a precedence start state.") @@ -91,9 +90,8 @@ public class DFA: CustomStringConvertible { /// - parameter startState: The start state corresponding to the specified /// precedence. /// - /// - IllegalStateException if this is not a precedence DFA. + /// - throws: _ANTLRError.illegalState_ if this is not a precedence DFA. /// - seealso: #isPrecedenceDfa() - ////@SuppressWarnings({"SynchronizeOnNonFinalField", "null"}) public final func setPrecedenceStartState(_ precedence: Int, _ startState: DFAState) throws { if !isPrecedenceDfa() { throw ANTLRError.illegalState(msg: "Only precedence DFAs may contain a precedence start state.") @@ -121,11 +119,10 @@ public class DFA: CustomStringConvertible { /// - parameter precedenceDfa: {@code true} if this is a precedence DFA; otherwise, /// {@code false} /// - /// - UnsupportedOperationException if {@code precedenceDfa} does not + /// - throws: ANTLRError.unsupportedOperation if {@code precedenceDfa} does not /// match the value of {@link #isPrecedenceDfa} for the current DFA. /// - /// - This method no longer performs any action. - ////@Deprecated + /// - note: This method no longer performs any action. public final func setPrecedenceDfa(_ precedenceDfa: Bool) throws { if precedenceDfa != isPrecedenceDfa() { throw ANTLRError.unsupportedOperation(msg: "The precedenceDfa field cannot change after a DFA is constructed.") @@ -134,7 +131,6 @@ public class DFA: CustomStringConvertible { } /// Return a list of all states in this DFA, ordered by state number. - public func getStates() -> Array { var result: Array = Array(states.keys) @@ -155,7 +151,6 @@ public class DFA: CustomStringConvertible { } /// - Use {@link #toString(org.antlr.v4.runtime.Vocabulary)} instead. 
- ////@Deprecated public func toString(_ tokenNames: [String?]?) -> String { if s0 == nil { return "" diff --git a/runtime/Swift/Sources/Antlr4/misc/BitSet.swift b/runtime/Swift/Sources/Antlr4/misc/BitSet.swift index fcc41e96f..d532a5fb7 100644 --- a/runtime/Swift/Sources/Antlr4/misc/BitSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/BitSet.swift @@ -31,15 +31,15 @@ import Foundation /// ///

    Unless otherwise noted, passing a null parameter to any of the /// methods in a {@code BitSet} will result in a -/// {@code NullPointerException}. +/// {@code ANTLRError.nullPointer}. /// ///

    A {@code BitSet} is not safe for multithreaded use without /// external synchronization. /// -/// - Arthur van Hoff -/// - Michael McCloskey -/// - Martin Buchholz -/// - JDK1.0 +/// - note: Arthur van Hoff +/// - note: Michael McCloskey +/// - note: Martin Buchholz +/// - note: JDK1.0 public class BitSet: Hashable, CustomStringConvertible { /// BitSets are packed into arrays of "words." Currently a word is @@ -119,7 +119,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// {@code nbits-1}. All bits are initially {@code false}. /// /// - parameter nbits: the initial size of the bit set - /// - NegativeArraySizeException if the specified initial size + /// - throws: _ANTLRError.negativeArraySize_ if the specified initial size /// is negative public init(_ nbits: Int) throws { // nbits can't be negative; size 0 is OK @@ -158,7 +158,6 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - returns: a long array containing a little-endian representation /// of all the bits in this bit set - /// - 1.7 public func toLongArray() -> [Int64] { return copyOf(words, wordsInUse) } @@ -214,8 +213,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// current value. 
/// /// - parameter bitIndex: the index of the bit to flip - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative public func flip(_ bitIndex: Int) throws { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -237,10 +235,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - parameter fromIndex: index of the first bit to flip /// - parameter toIndex: index after the last bit to flip - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, + /// - throws: _ANTLRError.IndexOutOfBounds_ if {@code fromIndex} is negative, /// or {@code toIndex} is negative, or {@code fromIndex} is /// larger than {@code toIndex} - /// - 1.4 public func flip(_ fromIndex: Int, _ toIndex: Int) throws { try BitSet.checkRange(fromIndex, toIndex) @@ -280,8 +277,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// Sets the bit at the specified index to {@code true}. 
/// /// - parameter bitIndex: a bit index - /// - IndexOutOfBoundsException if the specified index is negative - /// - JDK1.0 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative public func set(_ bitIndex: Int) throws { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -300,8 +296,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - parameter bitIndex: a bit index /// - parameter value: a boolean value to set - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative public func set(_ bitIndex: Int, _ value: Bool) throws { if value { try set(bitIndex) @@ -315,10 +310,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - parameter fromIndex: index of the first bit to be set /// - parameter toIndex: index after the last bit to be set - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, + /// - throws: _ANTLRError.IndexOutOfBounds_ if {@code fromIndex} is negative, /// or {@code toIndex} is negative, or {@code fromIndex} is /// larger than {@code toIndex} - /// - 1.4 public func set(_ fromIndex: Int, _ toIndex: Int) throws { try BitSet.checkRange(fromIndex, toIndex) @@ -361,10 +355,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// - parameter fromIndex: index of the first bit to be set /// - parameter toIndex: index after the last bit to be set /// - parameter value: value to set the selected bits to - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, + /// - throws: _ANTLRError.IndexOutOfBounds_ if {@code fromIndex} is negative, /// or {@code toIndex} is negative, or {@code fromIndex} is /// larger than {@code toIndex} - /// - 1.4 public func set(_ fromIndex: Int, _ toIndex: Int, _ value: Bool) throws { if value { try set(fromIndex, toIndex) @@ -376,7 +369,7 @@ public class BitSet: Hashable, CustomStringConvertible { 
/// Sets the bit specified by the index to {@code false}. /// /// - parameter bitIndex: the index of the bit to be cleared - /// - IndexOutOfBoundsException if the specified index is negative + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative /// - JDK1.0 public func clear(_ bitIndex: Int) throws { if bitIndex < 0 { @@ -398,10 +391,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - parameter fromIndex: index of the first bit to be cleared /// - parameter toIndex: index after the last bit to be cleared - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, + /// - throws: _ANTLRError.IndexOutOfBounds_ if {@code fromIndex} is negative, /// or {@code toIndex} is negative, or {@code fromIndex} is /// larger than {@code toIndex} - /// - 1.4 public func clear(_ fromIndex: Int, _ toIndex: Int) throws { var toIndex = toIndex try BitSet.checkRange(fromIndex, toIndex) @@ -447,8 +439,6 @@ public class BitSet: Hashable, CustomStringConvertible { } /// Sets all of the bits in this BitSet to {@code false}. 
- /// - /// - 1.4 public func clear() { while wordsInUse > 0 { wordsInUse -= 1 @@ -463,7 +453,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - parameter bitIndex: the bit index /// - returns: the value of the bit with the specified index - /// - IndexOutOfBoundsException if the specified index is negative + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative public func get(_ bitIndex: Int) throws -> Bool { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -483,10 +473,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// - parameter fromIndex: index of the first bit to include /// - parameter toIndex: index after the last bit to include /// - returns: a new {@code BitSet} from a range of this {@code BitSet} - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, + /// - throws: _ANTLRError.IndexOutOfBounds_ if {@code fromIndex} is negative, /// or {@code toIndex} is negative, or {@code fromIndex} is /// larger than {@code toIndex} - /// - 1.4 public func get(_ fromIndex: Int, _ toIndex: Int) throws -> BitSet { var toIndex = toIndex try BitSet.checkRange(fromIndex, toIndex) @@ -562,8 +551,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// - parameter fromIndex: the index to start checking from (inclusive) /// - returns: the index of the next set bit, or {@code -1} if there /// is no such bit - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative public func nextSetBit(_ fromIndex: Int) throws -> Int { if fromIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "fromIndex < 0: \(fromIndex)") @@ -634,8 +622,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - parameter fromIndex: the index to start checking from (inclusive) /// - returns: the index of the next clear bit - /// - IndexOutOfBoundsException if the specified 
index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds if the specified index is negative public func nextClearBit(_ fromIndex: Int) throws -> Int { // Neither spec nor implementation handle bitsets of maximal length. // See 4816253. @@ -681,9 +668,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// - parameter fromIndex: the index to start checking from (inclusive) /// - returns: the index of the previous set bit, or {@code -1} if there /// is no such bit - /// - IndexOutOfBoundsException if the specified index is less + /// - throws: _ANTLRError.IndexOutOfBounds if the specified index is less /// than {@code -1} - /// - 1.7 + /// - note: 1.7 public func previousSetBit(_ fromIndex: Int) throws -> Int { if fromIndex < 0 { if fromIndex == -1 { @@ -721,9 +708,9 @@ public class BitSet: Hashable, CustomStringConvertible { /// - parameter fromIndex: the index to start checking from (inclusive) /// - returns: the index of the previous clear bit, or {@code -1} if there /// is no such bit - /// - IndexOutOfBoundsException if the specified index is less + /// - throws: _ANTLRError.IndexOutOfBounds if the specified index is less /// than {@code -1} - /// - 1.7 + /// - note: 1.7 public func previousClearBit(_ fromIndex: Int) throws -> Int { if fromIndex < 0 { if fromIndex == -1 { @@ -791,7 +778,6 @@ public class BitSet: Hashable, CustomStringConvertible { /// if the {@code BitSet} contains no set bits. /// /// - returns: the logical size of this {@code BitSet} - /// - 1.2 public func length() -> Int { if wordsInUse == 0 { return 0 @@ -805,7 +791,6 @@ public class BitSet: Hashable, CustomStringConvertible { /// to {@code true}. 
/// /// - returns: boolean indicating whether this {@code BitSet} is empty - /// - 1.4 public func isEmpty() -> Bool { return wordsInUse == 0 } @@ -816,7 +801,6 @@ public class BitSet: Hashable, CustomStringConvertible { /// - parameter set: {@code BitSet} to intersect with /// - returns: boolean indicating whether this {@code BitSet} intersects /// the specified {@code BitSet} - /// - 1.4 public func intersects(_ set: BitSet) -> Bool { var i: Int = min(wordsInUse, set.wordsInUse) - 1 while i >= 0 { @@ -831,7 +815,6 @@ public class BitSet: Hashable, CustomStringConvertible { /// Returns the number of bits set to {@code true} in this {@code BitSet}. /// /// - returns: the number of bits set to {@code true} in this {@code BitSet} - /// - 1.4 public func cardinality() -> Int { var sum: Int = 0 for i in 0..= 0; i = try nextSetBit(i + 1) { -// let endOfRun: Int = try nextClearBit(i) -// repeat { -// b.append(", ").append(i) -// } while ++i < endOfRun -// } } } catch { print("BitSet description error") diff --git a/runtime/Swift/Sources/Antlr4/misc/IntSet.swift b/runtime/Swift/Sources/Antlr4/misc/IntSet.swift index 157ab991c..c88e4dbe6 100644 --- a/runtime/Swift/Sources/Antlr4/misc/IntSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/IntSet.swift @@ -12,7 +12,7 @@ public protocol IntSet { /// /// - parameter el: the value to add /// - /// - IllegalStateException if the current set is read-only + /// - throws: _ANTLRError.illegalState_ if the current set is read-only func add(_ el: Int) throws /// Modify the current {@link org.antlr.v4.runtime.misc.IntSet} object to contain all elements that are @@ -22,7 +22,7 @@ public protocol IntSet { /// treated as though it were an empty set. /// - returns: {@code this} (to support chained calls) /// - /// - IllegalStateException if the current set is read-only + /// - throws: _ANTLRError.illegalState_ if the current set is read-only func addAll(_ set: IntSet?) 
throws -> IntSet @@ -122,7 +122,7 @@ public protocol IntSet { /// /// - parameter el: the value to remove /// - /// - IllegalStateException if the current set is read-only + /// - throws: _ANTLRError.illegalState_ if the current set is read-only func remove(_ el: Int) throws /// Return a list containing the elements represented by the current set. The diff --git a/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift b/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift index 9b896fd74..b751f2366 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift @@ -10,8 +10,6 @@ * and deletion as well as during "consume until error recovery set" * upon no viable alternative exceptions. */ -//public class ErrorNodeImpl : TerminalNodeImpl,ErrorNode{ - public class ErrorNode: TerminalNodeImpl { public override init(_ token: Token) { super.init(token) diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift index 3a93ab155..6e70b7982 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift @@ -40,9 +40,9 @@ public class ParseTreeMatch: CustomStringConvertible { * @param mismatchedNode The first node which failed to match the tree * pattern during the matching process. * - * @exception IllegalArgumentException if {@code tree} is {@code null} - * @exception IllegalArgumentException if {@code pattern} is {@code null} - * @exception IllegalArgumentException if {@code labels} is {@code null} + * @exception ANTLRError.ilegalArgument if {@code tree} is {@code null} + * @exception ANTLRError.ilegalArgument if {@code pattern} is {@code null} + * @exception ANTLRError.ilegalArgument if {@code labels} is {@code null} */ public init(_ tree: ParseTree, _ pattern: ParseTreePattern, _ labels: MultiMap, _ mismatchedNode: ParseTree?) 
{ diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift index cb42b5c52..85d0429e2 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift @@ -64,16 +64,6 @@ */ public class ParseTreePatternMatcher { -// public class CannotInvokeStartRule : RuntimeException { -// public convenience init(_ e : Throwable) { -// super.init(e); -// } -// } -// -// // Fixes https://github.com/antlr/antlr4/issues/413 -// // "Tree pattern compilation doesn't check for a complete parse" -// public class StartRuleDoesNotConsumeFullPattern : RuntimeException { -// } /** * This is the backing field for {@link #getLexer()}. @@ -88,7 +78,6 @@ public class ParseTreePatternMatcher { internal var start: String = "<" internal var stop: String = ">" internal var escape: String = "\\" - // e.g., \< and \> must escape BOTH! /** * Constructs a {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} or from a {@link org.antlr.v4.runtime.Lexer} and @@ -109,22 +98,15 @@ public class ParseTreePatternMatcher { * @param stop The stop delimiter. * @param escapeLeft The escape sequence to use for escaping a start or stop delimiter. * - * @exception IllegalArgumentException if {@code start} is {@code null} or empty. - * @exception IllegalArgumentException if {@code stop} is {@code null} or empty. + * @exception ANTLRError.ilegalArgument if {@code start} is {@code null} or empty. + * @exception ANTLRError.ilegalArgument if {@code stop} is {@code null} or empty. 
*/ public func setDelimiters(_ start: String, _ stop: String, _ escapeLeft: String) throws { - //start == nil || if start.isEmpty { throw ANTLRError.illegalArgument(msg: "start cannot be null or empty") - // RuntimeException("start cannot be null or empty") - //throwException() /* throw IllegalArgumentException("start cannot be null or empty"); */ } - //stop == nil || if stop.isEmpty { throw ANTLRError.illegalArgument(msg: "stop cannot be null or empty") - //RuntimeException("stop cannot be null or empty") - - //throwException() /* throw IllegalArgumentException("stop cannot be null or empty"); */ } self.start = start @@ -132,14 +114,17 @@ public class ParseTreePatternMatcher { self.escape = escapeLeft } - /** Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}? */ + /** + * Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}? + */ public func matches(_ tree: ParseTree, _ pattern: String, _ patternRuleIndex: Int) throws -> Bool { let p: ParseTreePattern = try compile(pattern, patternRuleIndex) return try matches(tree, p) } - /** Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a - * compiled pattern instead of a string representation of a tree pattern. + /** + * Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a + * compiled pattern instead of a string representation of a tree pattern. */ public func matches(_ tree: ParseTree, _ pattern: ParseTreePattern) throws -> Bool { let labels: MultiMap = MultiMap() @@ -163,7 +148,6 @@ public class ParseTreePatternMatcher { * node at which the match failed. Pass in a compiled pattern instead of a * string representation of a tree pattern. */ - public func match(_ tree: ParseTree, _ pattern: ParseTreePattern) throws -> ParseTreeMatch { let labels: MultiMap = MultiMap() let mismatchedNode: ParseTree? 
= try matchImpl(tree, pattern.getPatternTree(), labels) @@ -185,29 +169,13 @@ public class ParseTreePatternMatcher { parser.getATNWithBypassAlts(), tokens) - var tree: ParseTree //= nil; - //TODO: exception handler - //try { + var tree: ParseTree parserInterp.setErrorHandler(BailErrorStrategy()) tree = try parserInterp.parse(patternRuleIndex) -// print("pattern tree = "+tree.toStringTree(parserInterp)); -// } -// catch (ParseCancellationException e) { -// throwException() /* throw e.getCause() as RecognitionException; */ -// } -// catch (RecognitionException re) { -// throwException() /* throw re; */ -// } -// catch (Exception e) { -// throwException() /* throw CannotInvokeStartRule(e); */ -// } // Make sure tree pattern compilation checks for a complete parse if try tokens.LA(1) != CommonToken.EOF { throw ANTLRError.illegalState(msg: "Tree pattern compilation doesn't check for a complete parse") - // RuntimeException("Tree pattern compilation doesn't check for a complete parse") - //throw ANTLRException.StartRuleDoesNotConsumeFullPattern - //throwException() /* throw StartRuleDoesNotConsumeFullPattern(); */ } return ParseTreePattern(self, pattern, patternRuleIndex, tree) @@ -217,7 +185,6 @@ public class ParseTreePatternMatcher { * Used to convert the tree pattern string into a series of tokens. The * input stream is reset. */ - public func getLexer() -> Lexer { return lexer } @@ -226,7 +193,6 @@ public class ParseTreePatternMatcher { * Used to collect to the grammar file name, token names, rule names for * used to parse the pattern into a parse tree. */ - public func getParser() -> Parser { return parser } @@ -242,7 +208,6 @@ public class ParseTreePatternMatcher { * was successful. The specific node returned depends on the matching * algorithm used by the implementation, and may be overridden. */ - internal func matchImpl(_ tree: ParseTree, _ patternTree: ParseTree, _ labels: MultiMap) throws -> ParseTree? 
{ @@ -391,12 +356,13 @@ public class ParseTreePatternMatcher { return tokens } - /** Split {@code = ;} into 4 chunks for tokenizing by {@link #tokenize}. */ + /** + * Split {@code = ;} into 4 chunks for tokenizing by {@link #tokenize}. + */ public func split(_ pattern: String) throws -> Array { var p: Int = 0 let n: Int = pattern.length var chunks: Array = Array() - //var buf : StringBuilder = StringBuilder(); // find all start and stop indexes first, then collect var starts: Array = Array() var stops: Array = Array() diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/RuleTagToken.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/RuleTagToken.swift index 45f45caea..c49062a7e 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/RuleTagToken.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/RuleTagToken.swift @@ -35,7 +35,7 @@ public class RuleTagToken: Token, CustomStringConvertible { * @param ruleName The name of the parser rule this rule tag matches. * @param bypassTokenType The bypass token type assigned to the parser rule. * - * @exception IllegalArgumentException if {@code ruleName} is {@code null} + * @exception ANTLRError.illegalArgument if {@code ruleName} is {@code null} * or empty. */ public convenience init(_ ruleName: String, _ bypassTokenType: Int) { @@ -51,7 +51,7 @@ public class RuleTagToken: Token, CustomStringConvertible { * @param label The label associated with the rule tag, or {@code null} if * the rule tag is unlabeled. * - * @exception IllegalArgumentException if {@code ruleName} is {@code null} + * @exception ANTLRError.illegalArgument if {@code ruleName} is {@code null} * or empty. */ public init(_ ruleName: String, _ bypassTokenType: Int, _ label: String?) { @@ -67,7 +67,6 @@ public class RuleTagToken: Token, CustomStringConvertible { * * @return The name of the parser rule associated with this rule tag. 
*/ - public final func getRuleName() -> String { return ruleName } @@ -78,28 +77,21 @@ public class RuleTagToken: Token, CustomStringConvertible { * @return The name of the label associated with the rule tag, or * {@code null} if this is an unlabeled rule tag. */ - public final func getLabel() -> String? { return label } /** - * {@inheritDoc} - * *

    Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.

    */ - public func getChannel() -> Int { return RuleTagToken.DEFAULT_CHANNEL } /** - * {@inheritDoc} - * *

    This method returns the rule tag formatted with {@code <} and {@code >} * delimiters.

    */ - public func getText() -> String? { if label != nil { return "<" + label! + ":" + ruleName + ">" @@ -109,32 +101,23 @@ public class RuleTagToken: Token, CustomStringConvertible { } /** - * {@inheritDoc} - * *

    Rule tag tokens have types assigned according to the rule bypass * transitions created during ATN deserialization.

    */ - public func getType() -> Int { return bypassTokenType } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns 0.

    */ - public func getLine() -> Int { return 0 } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

    */ - public func getCharPositionInLine() -> Int { return -1 } @@ -144,59 +127,42 @@ public class RuleTagToken: Token, CustomStringConvertible { * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

    */ - public func getTokenIndex() -> Int { return -1 } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

    */ - public func getStartIndex() -> Int { return -1 } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

    */ - public func getStopIndex() -> Int { return -1 } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns {@code null}.

    */ - public func getTokenSource() -> TokenSource? { return nil } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns {@code null}.

    */ - public func getInputStream() -> CharStream? { return nil } /** - * {@inheritDoc} - * *

    The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} returns a string of the form * {@code ruleName:bypassTokenType}.

    */ - - public var description: String { return ruleName + ":" + String(bypassTokenType) } diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift index 0538f6dc6..11449b1bd 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift @@ -36,7 +36,7 @@ public class TagChunk: Chunk, CustomStringConvertible { * @param tag The tag, which should be the name of a parser rule or token * type. * - * @exception IllegalArgumentException if {@code tag} is {@code null} or + * @exception ANTLRError.illegalArgument if {@code tag} is {@code null} or * empty. */ public convenience init(_ tag: String) throws { @@ -52,7 +52,7 @@ public class TagChunk: Chunk, CustomStringConvertible { * @param tag The tag, which should be the name of a parser rule or token * type. * - * @exception IllegalArgumentException if {@code tag} is {@code null} or + * @exception ANTLRError.illegalArgument if {@code tag} is {@code null} or * empty. */ public init(_ label: String?, _ tag: String) throws { @@ -70,7 +70,6 @@ public class TagChunk: Chunk, CustomStringConvertible { * * @return The tag for the chunk. */ - public final func getTag() -> String { return tag } @@ -81,7 +80,6 @@ public class TagChunk: Chunk, CustomStringConvertible { * @return The label assigned to this chunk, or {@code null} if no label is * assigned to the chunk. */ - public final func getLabel() -> String? { return label } @@ -91,8 +89,6 @@ public class TagChunk: Chunk, CustomStringConvertible { * are returned in the form {@code label:tag}, and unlabeled tags are * returned as just the tag name. */ - - public var description: String { if label != nil { return label! 
+ ":" + tag diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift index 6d4ed317e..c0b71d888 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift @@ -20,7 +20,7 @@ public class TextChunk: Chunk, CustomStringConvertible { * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.TextChunk} with the specified text. * * @param text The text of this chunk. - * @exception IllegalArgumentException if {@code text} is {@code null}. + * @exception ANTLRError.illegalArgument if {@code text} is {@code null}. */ public init(_ text: String) { self.text = text From 82b2480fe5b6617db0292f4ddbea24b589673f4e Mon Sep 17 00:00:00 2001 From: Hanzhou Shi Date: Sat, 29 Jul 2017 22:53:44 -0700 Subject: [PATCH 037/102] Tweak HTML comments. --- .../Sources/Antlr4/ANTLRErrorListener.swift | 22 ++--- .../Sources/Antlr4/ANTLRErrorStrategy.swift | 22 ++--- .../Sources/Antlr4/BailErrorStrategy.swift | 24 +++--- .../Sources/Antlr4/BufferedTokenStream.swift | 11 ++- .../Sources/Antlr4/ConsoleErrorListener.swift | 12 +-- .../Sources/Antlr4/DefaultErrorStrategy.swift | 32 ++++--- .../Antlr4/DiagnosticErrorListener.swift | 16 ++-- runtime/Swift/Sources/Antlr4/IntStream.swift | 62 +++++-------- runtime/Swift/Sources/Antlr4/Parser.swift | 26 +++--- .../Sources/Antlr4/RuntimeMetaData.swift | 60 ++++++------- .../Swift/Sources/Antlr4/atn/ATNConfig.swift | 2 +- .../Swift/Sources/Antlr4/atn/ATNState.swift | 44 +++++----- .../Sources/Antlr4/atn/LL1Analyzer.swift | 4 +- .../Antlr4/atn/LexerActionExecutor.swift | 2 +- .../Antlr4/atn/ParserATNSimulator.swift | 86 +++++++------------ .../Antlr4/atn/PredictionContext.swift | 23 ----- .../Sources/Antlr4/atn/PredictionMode.swift | 64 +++++++------- .../Sources/Antlr4/atn/SemanticContext.swift | 18 ++-- .../Swift/Sources/Antlr4/misc/BitSet.swift | 13 ++- 
.../Swift/Sources/Antlr4/misc/IntSet.swift | 24 +----- .../Antlr4/tree/pattern/ParseTreeMatch.swift | 21 ++--- .../pattern/ParseTreePatternMatcher.swift | 2 +- .../Antlr4/tree/pattern/TagChunk.swift | 11 +-- 23 files changed, 239 insertions(+), 362 deletions(-) diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift index 930665825..ad8ff9ae3 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift @@ -46,28 +46,28 @@ public protocol ANTLRErrorListener: class { /// This method is called by the parser when a full-context prediction /// results in an ambiguity. /// - ///

    Each full-context prediction which does not result in a syntax error + /// Each full-context prediction which does not result in a syntax error /// will call either {@link #reportContextSensitivity} or - /// {@link #reportAmbiguity}.

    + /// {@link #reportAmbiguity}. /// - ///

    When {@code ambigAlts} is not null, it contains the set of potentially + /// When {@code ambigAlts} is not null, it contains the set of potentially /// viable alternatives identified by the prediction algorithm. When /// {@code ambigAlts} is null, use {@link org.antlr.v4.runtime.atn.ATNConfigSet#getAlts} to obtain the - /// represented alternatives from the {@code configs} argument.

    + /// represented alternatives from the {@code configs} argument. /// - ///

    When {@code exact} is {@code true}, all of the potentially + /// When {@code exact} is {@code true}, __all__ of the potentially /// viable alternatives are truly viable, i.e. this is reporting an exact - /// ambiguity. When {@code exact} is {@code false}, at least two of + /// ambiguity. When {@code exact} is {@code false}, __at least two__ of /// the potentially viable alternatives are viable for the current input, but /// the prediction algorithm terminated as soon as it determined that at - /// least the minimum potentially viable alternative is truly - /// viable.

    + /// least the __minimum__ potentially viable alternative is truly + /// viable. /// - ///

    When the {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction + /// When the {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction /// mode is used, the parser is required to identify exact ambiguities so - /// {@code exact} will always be {@code true}.

    + /// {@code exact} will always be {@code true}. /// - ///

    This method is not used by lexers.

    + /// This method is not used by lexers. /// /// - parameter recognizer: the parser instance /// - parameter dfa: the DFA for the current decision diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift index 3ee9ce972..418228c7f 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift @@ -1,22 +1,24 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// The interface for defining strategies to deal with syntax errors encountered -/// during a parse by ANTLR-generated parsers. We distinguish between three +/// + + +/// +/// The interface for defining strategies to deal with syntax errors +/// encountered during a parse by ANTLR-generated parsers. We distinguish between three /// different kinds of errors: /// -///
      -///
    • The parser could not figure out which path to take in the ATN (none of -/// the available alternatives could possibly match)
    • -///
    • The current input does not match what we were looking for
    • -///
    • A predicate evaluated to false
    • -///
    +/// * The parser could not figure out which path to take in the ATN (none of +/// the available alternatives could possibly match) +/// * The current input does not match what we were looking for +/// * A predicate evaluated to false /// /// Implementations of this interface report syntax errors by calling /// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}. /// ///

    TODO: what to do about lexers

    - public protocol ANTLRErrorStrategy { /// Reset the error handler state for the specified {@code recognizer}. /// - parameter recognizer: the parser instance @@ -29,7 +31,7 @@ public protocol ANTLRErrorStrategy { /// successful result of the match. /// ///

    This method handles the consumption of any tokens - the caller should - /// not call {@link org.antlr.v4.runtime.Parser#consume} after a successful recovery.

    + /// __not__ call {@link org.antlr.v4.runtime.Parser#consume} after a successful recovery.

    /// ///

    Note that the calling code will not report an error if this method /// returns successfully. The error strategy implementation is responsible diff --git a/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift index eca1de705..996b8429a 100644 --- a/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift @@ -3,42 +3,38 @@ /// can be found in the LICENSE.txt file in the project root. - +/// /// This implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} responds to syntax errors /// by immediately canceling the parse operation with a /// {@link org.antlr.v4.runtime.misc.ParseCancellationException}. The implementation ensures that the /// {@link org.antlr.v4.runtime.ParserRuleContext#exception} field is set for all parse tree nodes /// that were not completed prior to encountering the error. /// -///

    -/// This error strategy is useful in the following scenarios.

    +/// This error strategy is useful in the following scenarios. /// -///
      -///
    • Two-stage parsing: This error strategy allows the first +/// * __Two-stage parsing:__ This error strategy allows the first /// stage of two-stage parsing to immediately terminate if an error is /// encountered, and immediately fall back to the second stage. In addition to /// avoiding wasted work by attempting to recover from errors here, the empty /// implementation of {@link org.antlr.v4.runtime.BailErrorStrategy#sync} improves the performance of -/// the first stage.
    • -///
    • Silent validation: When syntax errors are not being +/// the first stage. +/// +/// * __Silent validation:__ When syntax errors are not being /// reported or logged, and the parse result is simply ignored if errors occur, /// the {@link org.antlr.v4.runtime.BailErrorStrategy} avoids wasting work on recovering from errors -/// when the result will be ignored either way.
    • -///
    +/// when the result will be ignored either way. /// -///

    -/// {@code myparser.setErrorHandler(new BailErrorStrategy());}

    +/// {@code myparser.setErrorHandler(new BailErrorStrategy());} /// /// - seealso: org.antlr.v4.runtime.Parser#setErrorHandler(org.antlr.v4.runtime.ANTLRErrorStrategy) - +/// public class BailErrorStrategy: DefaultErrorStrategy { public override init(){} /// Instead of recovering from exception {@code e}, re-throw it wrapped /// in a {@link org.antlr.v4.runtime.misc.ParseCancellationException} so it is not caught by the /// rule function catches. Use {@link Exception#getCause()} to get the /// original {@link org.antlr.v4.runtime.RecognitionException}. - override - public func recover(_ recognizer: Parser, _ e: AnyObject) throws { + override public func recover(_ recognizer: Parser, _ e: AnyObject) throws { var context: ParserRuleContext? = recognizer.getContext() while let contextWrap = context{ contextWrap.exception = e diff --git a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift index abc010c72..57628212c 100644 --- a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift @@ -40,13 +40,12 @@ public class BufferedTokenStream: TokenStream { /// {@link #tokenSource} and added to {@link #tokens}. This field improves /// performance for the following cases: /// - ///