2 corrections (C++ runtime and interpreter data writer)

- An incorrect EOF check in the UnbufferedTokenStream has been fixed (the cast now uses the correct data type, avoiding compiler warnings).
- The interpreter data write function no longer implicitly writes out imported grammars. Grammars are merged and hence already contain everything from their imported grammars. If interpreter data for an imported grammar is required, pass that grammar explicitly to the ANTLR tool.
This commit is contained in:
Mike Lischke 2017-04-23 13:05:27 +02:00
parent 3bffb070b2
commit 126eec7092
3 changed files with 52 additions and 56 deletions

View File

@ -52,7 +52,7 @@ void UnbufferedCharStream::sync(size_t want) {
size_t UnbufferedCharStream::fill(size_t n) {
for (size_t i = 0; i < n; i++) {
if (_data.size() > 0 && _data.back() == 0xFFFF) {
if (_data.size() > 0 && _data.back() == static_cast<storage_type>(EOF)) {
return i;
}
@ -101,7 +101,7 @@ size_t UnbufferedCharStream::LA(ssize_t i) {
return EOF;
}
if (_data[(size_t)index] == 0xFFFF) {
if (_data[(size_t)index] == static_cast<storage_type>(EOF)) {
return EOF;
}

View File

@ -54,8 +54,10 @@ namespace antlr4 {
// UTF-32 encoded.
#if defined(_MSC_VER) && _MSC_VER == 1900
i32string _data; // Custom type for VS 2015.
typedef __int32 storage_type;
#else
std::u32string _data;
typedef char32_t storage_type;
#endif
/// <summary>

View File

@ -698,66 +698,60 @@ public class Tool {
}
private void generateInterpreterData(Grammar g) {
List<Grammar> grammars = new ArrayList<Grammar>();
grammars.add(g);
List<Grammar> imported = g.getAllImportedGrammars();
if ( imported!=null ) grammars.addAll(imported);
for (Grammar ig : grammars) {
StringBuilder content = new StringBuilder();
StringBuilder content = new StringBuilder();
content.append("token literal names:\n");
String[] names = g.getTokenLiteralNames();
for (String name : names) {
content.append(name + "\n");
}
content.append("\n");
content.append("token symbolic names:\n");
names = g.getTokenSymbolicNames();
for (String name : names) {
content.append(name + "\n");
}
content.append("\n");
if ( g.isLexer() ) {
content.append("channel names:\n");
content.append("DEFAULT_TOKEN_CHANNEL\n");
content.append("HIDDEN\n");
for (String channel : g.channelValueToNameList) {
content.append(channel + "\n");
}
content.append("\n");
content.append("token literal names:\n");
String[] names = ig.getTokenLiteralNames();
content.append("mode names:\n");
for (String mode : ((LexerGrammar)g).modes.keySet()) {
content.append(mode + "\n");
}
}
else {
content.append("rule names:\n");
names = g.getRuleNames();
for (String name : names) {
content.append(name + "\n");
}
content.append("\n");
content.append("token symbolic names:\n");
names = ig.getTokenSymbolicNames();
for (String name : names) {
content.append(name + "\n");
}
content.append("\n");
if ( ig.isLexer() ) {
content.append("channel names:\n");
content.append("DEFAULT_TOKEN_CHANNEL\n");
content.append("HIDDEN\n");
for (String channel : ig.channelValueToNameList) {
content.append(channel + "\n");
}
content.append("\n");
content.append("mode names:\n");
for (String mode : ((LexerGrammar)ig).modes.keySet()) {
content.append(mode + "\n");
}
}
else {
content.append("rule names:\n");
names = ig.getRuleNames();
for (String name : names) {
content.append(name + "\n");
}
}
content.append("\n");
IntegerList serializedATN = ATNSerializer.getSerialized(ig.atn);
content.append("atn:\n");
content.append(serializedATN.toString());
}
content.append("\n");
IntegerList serializedATN = ATNSerializer.getSerialized(g.atn);
content.append("atn:\n");
content.append(serializedATN.toString());
try {
Writer fw = getOutputFileWriter(g, g.name + ".interp");
try {
Writer fw = getOutputFileWriter(ig, ig.name + ".interp");
try {
fw.write(content.toString());
}
finally {
fw.close();
}
}
catch (IOException ioe) {
errMgr.toolError(ErrorType.CANNOT_WRITE_FILE, ioe);
fw.write(content.toString());
}
finally {
fw.close();
}
}
catch (IOException ioe) {
errMgr.toolError(ErrorType.CANNOT_WRITE_FILE, ioe);
}
}