diff --git a/tool/playground/T.g b/tool/playground/T.g
index a10247dfe..d5529bf23 100644
--- a/tool/playground/T.g
+++ b/tool/playground/T.g
@@ -1,9 +1,8 @@
 grammar T;
 options {output=AST;}
-tokens {I;D;}
+tokens {I;}

-a : A B+ -> B["foo"]
-  ;
+a : x=A -> $a ;

 b : B | C ;
diff --git a/tool/src/org/antlr/v4/Tool.java b/tool/src/org/antlr/v4/Tool.java
index d4f371e11..bd5a11bae 100644
--- a/tool/src/org/antlr/v4/Tool.java
+++ b/tool/src/org/antlr/v4/Tool.java
@@ -215,26 +215,30 @@ public class Tool {
     }

     public void process(Grammar g) {
+        LexerGrammar lexerg = null;
         GrammarRootAST lexerAST = null;
         if ( g.ast!=null && g.ast.grammarType== ANTLRParser.COMBINED &&
-             !g.ast.hasErrors )
-        {
-            lexerAST = extractImplicitLexer(g); // alters ast
-        }
-        processNonCombinedGrammar(g);
-        if ( g.ast!=null && g.ast.grammarType== ANTLRParser.COMBINED &&
-             !g.ast.hasErrors )
+             !g.ast.hasErrors ) {
+            lexerAST = extractImplicitLexer(g); // alters g.ast
             if ( lexerAST!=null ) {
-                LexerGrammar lexerg = new LexerGrammar(this, lexerAST);
+                lexerg = new LexerGrammar(this, lexerAST);
                 lexerg.fileName = g.fileName;
                 g.implicitLexer = lexerg;
                 lexerg.implicitLexerOwner = g;
-                lexerg.importVocab(g);
+
+//                // copy vocab from combined to implicit lexer
+//                g.importVocab(g.implicitLexerOwner); // TODO: don't need i don't think; done in tool process()
+
                 processNonCombinedGrammar(lexerg);
-                g.importVocab(lexerg);
+                System.out.println("lexer tokens="+lexerg.tokenNameToTypeMap);
+                System.out.println("lexer strings="+lexerg.stringLiteralToTypeMap);
             }
         }
+        if ( g.implicitLexer!=null ) g.importVocab(g.implicitLexer);
+        System.out.println("tokens="+g.tokenNameToTypeMap);
+        System.out.println("strings="+g.stringLiteralToTypeMap);
+        processNonCombinedGrammar(g);
     }

     public void processNonCombinedGrammar(Grammar g) {
@@ -337,24 +341,24 @@ public class Tool {
         GrammarRootAST combinedAST = combinedGrammar.ast;
         //System.out.println("before="+combinedAST.toStringTree());
         GrammarASTAdaptor adaptor = new GrammarASTAdaptor(combinedAST.token.getInputStream());
-        List elements = combinedAST.getChildren();
+        List elements = combinedAST.getChildren();

         // MAKE A GRAMMAR ROOT and ID
         String lexerName = combinedAST.getChild(0).getText()+"Lexer";
         GrammarRootAST lexerAST =
-            new GrammarRootAST(new CommonToken(ANTLRParser.GRAMMAR,"LEXER_GRAMMAR"));
+            new GrammarRootAST(new CommonToken(ANTLRParser.GRAMMAR,"LEXER_GRAMMAR"));
         lexerAST.grammarType = ANTLRParser.LEXER;
         lexerAST.token.setInputStream(combinedAST.token.getInputStream());
-        lexerAST.addChild((org.antlr.v4.tool.GrammarAST)adaptor.create(ANTLRParser.ID, lexerName));
+        lexerAST.addChild((GrammarAST)adaptor.create(ANTLRParser.ID, lexerName));

         // MOVE OPTIONS
-        org.antlr.v4.tool.GrammarAST optionsRoot =
-            (org.antlr.v4.tool.GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.OPTIONS);
+        GrammarAST optionsRoot =
+            (GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.OPTIONS);
         if ( optionsRoot!=null ) {
-            org.antlr.v4.tool.GrammarAST lexerOptionsRoot = (org.antlr.v4.tool.GrammarAST)adaptor.dupNode(optionsRoot);
+            GrammarAST lexerOptionsRoot = (GrammarAST)adaptor.dupNode(optionsRoot);
             lexerAST.addChild(lexerOptionsRoot);
-            List options = optionsRoot.getChildren();
-            for (org.antlr.v4.tool.GrammarAST o : options) {
+            List options = optionsRoot.getChildren();
+            for (GrammarAST o : options) {
                 String optionName = o.getChild(0).getText();
                 if ( !Grammar.doNotCopyOptionsToLexer.contains(optionName) ) {
                     lexerOptionsRoot.addChild(o);
@@ -363,8 +367,8 @@ public class Tool {
         }

         // MOVE lexer:: actions
-        List actionsWeMoved = new ArrayList();
-        for (org.antlr.v4.tool.GrammarAST e : elements) {
+        List actionsWeMoved = new ArrayList();
+        for (GrammarAST e : elements) {
             if ( e.getType()==ANTLRParser.AT ) {
                 if ( e.getChild(0).getText().equals("lexer") ) {
                     lexerAST.addChild(e);
@@ -373,16 +377,16 @@
             }
         }
         elements.removeAll(actionsWeMoved);
-        org.antlr.v4.tool.GrammarAST combinedRulesRoot =
-            (org.antlr.v4.tool.GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.RULES);
+        GrammarAST combinedRulesRoot =
+            (GrammarAST)combinedAST.getFirstChildWithType(ANTLRParser.RULES);
         if ( combinedRulesRoot==null ) return lexerAST;

         // MOVE lexer rules
-        org.antlr.v4.tool.GrammarAST lexerRulesRoot =
-            (org.antlr.v4.tool.GrammarAST)adaptor.create(ANTLRParser.RULES, "RULES");
+        GrammarAST lexerRulesRoot =
+            (GrammarAST)adaptor.create(ANTLRParser.RULES, "RULES");
         lexerAST.addChild(lexerRulesRoot);
-        List rulesWeMoved = new ArrayList();
+        List rulesWeMoved = new ArrayList();
         List rules = combinedRulesRoot.getChildren();
         for (GrammarASTWithOptions r : rules) {
             String ruleName = r.getChild(0).getText();
@@ -396,22 +400,24 @@
         // Will track 'if' from IF : 'if' ; rules to avoid defining new token for 'if'
         Map litAliases =
-            Grammar.getStringLiteralAliasesFromLexerRules(lexerAST);
+            Grammar.getStringLiteralAliasesFromLexerRules(lexerAST);

         if ( nLexicalRules==0 && (litAliases==null||litAliases.size()==0) &&
-             combinedGrammar.stringLiteralToTypeMap.size()==0 )
+             combinedGrammar.stringLiteralToTypeMap.size()==0 )
         {
             // no rules, tokens{}, or 'literals' in grammar
             return null;
         }

-        // add strings from combined grammar (and imported grammars) into to lexer
-        for (String lit : combinedGrammar.stringLiteralToTypeMap.keySet()) {
+        Set stringLiterals = combinedGrammar.getStringLiterals();
+        // add strings from combined grammar (and imported grammars) into lexer
+        // put them first as they are keywords; must resolve ambigs to these rules
+        for (String lit : stringLiterals) {
             if ( litAliases!=null && litAliases.containsKey(lit) ) continue; // already has rule
             // create for each literal: (RULE (BLOCK (ALT ))
             String rname = combinedGrammar.getStringLiteralLexerRuleName(lit);
             // can't use wizard; need special node types
-            org.antlr.v4.tool.GrammarAST litRule = new RuleAST(ANTLRParser.RULE);
+            GrammarAST litRule = new RuleAST(ANTLRParser.RULE);
             BlockAST blk = new BlockAST(ANTLRParser.BLOCK);
             AltAST alt = new AltAST(ANTLRParser.ALT);
             TerminalAST slit = new TerminalAST(new org.antlr.runtime.CommonToken(ANTLRParser.STRING_LITERAL, lit));
@@ -420,10 +426,7 @@
             CommonToken idToken = new CommonToken(ANTLRParser.ID, rname);
             litRule.addChild(new TerminalAST(idToken));
             litRule.addChild(blk);
-            lexerRulesRoot.addChild(litRule);
-
-//            (GrammarAST)
-//                wiz.create("(RULE ID["+rname+"] (BLOCK (ALT STRING_LITERAL["+lit+"])))");
+            lexerRulesRoot.getChildren().add(0, litRule); // add first
         }

         System.out.println("after ="+combinedAST.toStringTree());
diff --git a/tool/src/org/antlr/v4/semantics/BasicSemanticChecks.java b/tool/src/org/antlr/v4/semantics/BasicSemanticChecks.java
index e6f386ba2..d03d9274d 100644
--- a/tool/src/org/antlr/v4/semantics/BasicSemanticChecks.java
+++ b/tool/src/org/antlr/v4/semantics/BasicSemanticChecks.java
@@ -156,7 +156,8 @@ public class BasicSemanticChecks {
         String fullyQualifiedName = nameToken.getInputStream().getSourceName();
         File f = new File(fullyQualifiedName);
         String fileName = f.getName();
-        if ( !Utils.stripFileExtension(fileName).equals(nameToken.getText()) ) {
+        if ( !Utils.stripFileExtension(fileName).equals(nameToken.getText()) &&
+             !fileName.equals(Grammar.GRAMMAR_FROM_STRING_NAME)) {
             g.tool.errMgr.grammarError(ErrorType.FILE_AND_GRAMMAR_NAME_DIFFER,
                                        fileName, nameToken, nameToken.getText(), fileName);
         }
diff --git a/tool/src/org/antlr/v4/semantics/SemanticPipeline.java b/tool/src/org/antlr/v4/semantics/SemanticPipeline.java
index d75c99ee1..693b68082 100644
--- a/tool/src/org/antlr/v4/semantics/SemanticPipeline.java
+++ b/tool/src/org/antlr/v4/semantics/SemanticPipeline.java
@@ -166,39 +166,33 @@ public class SemanticPipeline {
     }

     void assignTokenTypes(Grammar g, CollectSymbols collector, SymbolChecks symcheck) {
-        if ( g.implicitLexerOwner!=null ) {
-            // copy vocab from combined to implicit lexer
-            g.importVocab(g.implicitLexerOwner);
-            System.out.println("tokens="+g.tokenNameToTypeMap);
-            System.out.println("strings="+g.stringLiteralToTypeMap);
-        }
-        else {
-            Grammar G = g.getOutermostGrammar(); // put in root, even if imported
+        Grammar G = g.getOutermostGrammar(); // put in root, even if imported

-            // DEFINE tokens { X='x'; } ALIASES
-            for (GrammarAST alias : collector.tokensDefs) {
-                if ( alias.getType()== ANTLRParser.ASSIGN ) {
-                    String name = alias.getChild(0).getText();
-                    String lit = alias.getChild(1).getText();
-                    G.defineTokenAlias(name, lit);
-                }
+        // DEFINE tokens { X='x'; } ALIASES
+        for (GrammarAST alias : collector.tokensDefs) {
+            if ( alias.getType()== ANTLRParser.ASSIGN ) {
+                String name = alias.getChild(0).getText();
+                String lit = alias.getChild(1).getText();
+                G.defineTokenAlias(name, lit);
             }
-
-            // DEFINE TOKEN TYPES FOR X : 'x' ; RULES
-            Map litAliases = Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
-            if ( litAliases!=null ) {
-                for (String lit : litAliases.keySet()) {
-                    G.defineTokenAlias(litAliases.get(lit), lit);
-                }
-            }
-
-            // DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
-            for (String id : symcheck.tokenIDs) { G.defineTokenName(id); }
-
-            // DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
-            for (String s : collector.strings) { G.defineStringLiteral(s); }
-//            System.out.println("tokens="+G.tokenNameToTypeMap);
-//            System.out.println("strings="+G.stringLiteralToTypeMap);
         }
+
+        // DEFINE TOKEN TYPES FOR X : 'x' ; RULES
+        /* done by previous import
+        Map litAliases = Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
+        if ( litAliases!=null ) {
+            for (String lit : litAliases.keySet()) {
+                G.defineTokenAlias(litAliases.get(lit), lit);
+            }
+        }
+        */
+
+        // DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
+        for (String id : symcheck.tokenIDs) { G.defineTokenName(id); }
+
+        // DEFINE TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
+        for (String s : collector.strings) { G.defineStringLiteral(s); }
+        System.out.println("tokens="+G.tokenNameToTypeMap);
+        System.out.println("strings="+G.stringLiteralToTypeMap);
     }
 }
diff --git a/tool/src/org/antlr/v4/tool/ErrorType.java b/tool/src/org/antlr/v4/tool/ErrorType.java
index cf64aebf6..c787dc353 100644
--- a/tool/src/org/antlr/v4/tool/ErrorType.java
+++ b/tool/src/org/antlr/v4/tool/ErrorType.java
@@ -50,8 +50,8 @@ public enum ErrorType {
     DIR_NOT_FOUND("directory not found: ", ErrorSeverity.ERROR),
     OUTPUT_DIR_IS_FILE("output directory is a file: ", ErrorSeverity.ERROR),
     CANNOT_OPEN_FILE("cannot find or open file: ; reason: ", ErrorSeverity.ERROR),
-    FILE_AND_GRAMMAR_NAME_DIFFER("", ErrorSeverity.ERROR),
-    FILENAME_EXTENSION_ERROR("", ErrorSeverity.ERROR),
+    FILE_AND_GRAMMAR_NAME_DIFFER("grammar name and file name differ", ErrorSeverity.ERROR),
+//    FILENAME_EXTENSION_ERROR("", ErrorSeverity.ERROR),
     INTERNAL_ERROR("internal error: : \n" +
                    "", ErrorSeverity.ERROR),
diff --git a/tool/src/org/antlr/v4/tool/Grammar.java b/tool/src/org/antlr/v4/tool/Grammar.java
index 246563738..9d29055c1 100644
--- a/tool/src/org/antlr/v4/tool/Grammar.java
+++ b/tool/src/org/antlr/v4/tool/Grammar.java
@@ -31,7 +31,7 @@ package org.antlr.v4.tool;

 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.*;
-import org.antlr.runtime.tree.TreeWizard;
+import org.antlr.runtime.tree.*;
 import org.antlr.v4.Tool;
 import org.antlr.v4.misc.*;
 import org.antlr.v4.parse.*;
@@ -40,10 +40,13 @@
 import org.antlr.v4.runtime.Token;
 import org.antlr.v4.runtime.atn.ATN;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.misc.*;
+import org.antlr.v4.semantics.CollectSymbols;

 import java.util.*;

 public class Grammar implements AttributeResolver {
+    public static final String GRAMMAR_FROM_STRING_NAME = "<string>";
+
     public static final Set doNotCopyOptionsToLexer = new HashSet() {
         {
@@ -71,9 +74,10 @@ public class Grammar implements AttributeResolver {
     public String text; // testing only
     public String fileName;

-    /** Was this created from a COMBINED grammar? */
-    public Grammar implicitLexer;
-    public Grammar implicitLexerOwner;
+    /** Was this parser grammar created from a COMBINED grammar? If so,
+     * this is what we derived.
+     */
+    public LexerGrammar implicitLexer;

     /** If we're imported, who imported us? If null, implies grammar is root */
     public Grammar parent;
@@ -86,6 +90,7 @@
     public List indexToRule = new ArrayList();

     int ruleNumber = 0; // used to get rule indexes (0..n-1)
+    int stringLiteralRuleNumber = 0; // used to invent rule names for 'keyword', ';', ... (0..n-1)

     /** The ATN that represents the grammar with edges labelled with tokens
      * or epsilon. It is more suitable to analysis than an AST representation.
@@ -156,14 +161,14 @@
     /** For testing */
     public Grammar(String grammarText) throws org.antlr.runtime.RecognitionException {
-        this("<string>", grammarText, null);
+        this(GRAMMAR_FROM_STRING_NAME, grammarText, null);
     }

     /** For testing */
     public Grammar(String grammarText, ANTLRToolListener listener)
         throws org.antlr.runtime.RecognitionException
     {
-        this("<string>", grammarText, listener);
+        this(GRAMMAR_FROM_STRING_NAME, grammarText, listener);
     }

     /** For testing; only builds trees; no sem anal */
@@ -365,8 +370,7 @@
     }

     public String getStringLiteralLexerRuleName(String lit) {
-        int ttype = getTokenType(lit);
-        return AUTO_GENERATED_TOKEN_NAME_PREFIX +ttype;
+        return AUTO_GENERATED_TOKEN_NAME_PREFIX + stringLiteralRuleNumber++;
     }

     /** Return grammar directly imported by this grammar */
@@ -396,7 +400,6 @@
      */
     public String getTokenDisplayName(int ttype) {
         String tokenName = null;
-        int index=0;
         // inside any target's char range and is lexer grammar?
         if ( isLexer() &&
              ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE )
@@ -411,6 +414,7 @@
             tokenName = typeToTokenList.get(ttype);
             if ( tokenName!=null &&
                  tokenName.startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX) &&
+                 ttype < typeToStringLiteralList.size() &&
                  typeToStringLiteralList.get(ttype)!=null)
             {
                 tokenName = typeToStringLiteralList.get(ttype);
@@ -420,7 +424,7 @@
                 tokenName = String.valueOf(ttype);
             }
         }
-        //System.out.println("getTokenDisplayName ttype="+ttype+", index="+index+", name="+tokenName);
+//        System.out.println("getTokenDisplayName ttype="+ttype+", name="+tokenName);
         return tokenName;
     }

@@ -433,9 +437,12 @@
     public String[] getTokenNames() {
         int numTokens = getMaxTokenType();
         String[] tokenNames = new String[numTokens+1];
-        for (String t : tokenNameToTypeMap.keySet()) {
-            Integer ttype = tokenNameToTypeMap.get(t);
-            if ( ttype>0 ) tokenNames[ttype] = t;
+        for (String tokenName : tokenNameToTypeMap.keySet()) {
+            Integer ttype = tokenNameToTypeMap.get(tokenName);
+            if ( tokenName!=null && tokenName.startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX) ) {
+                tokenName = typeToStringLiteralList.get(ttype);
+            }
+            if ( ttype>0 ) tokenNames[ttype] = tokenName;
         }
         return tokenNames;
     }
@@ -493,14 +500,20 @@
         return maxTokenType;
     }

-    public void importVocab(Grammar g) {
-        this.tokenNameToTypeMap.putAll( g.tokenNameToTypeMap );
-        this.stringLiteralToTypeMap.putAll( g.stringLiteralToTypeMap );
-        int max = Math.max(this.typeToTokenList.size(), g.typeToTokenList.size());
+    public void importVocab(Grammar importG) {
+        for (String tokenName: importG.tokenNameToTypeMap.keySet()) {
+            defineTokenName(tokenName, importG.tokenNameToTypeMap.get(tokenName));
+        }
+        for (String tokenName: importG.stringLiteralToTypeMap.keySet()) {
+            defineStringLiteral(tokenName, importG.stringLiteralToTypeMap.get(tokenName));
+        }
+//        this.tokenNameToTypeMap.putAll( importG.tokenNameToTypeMap );
+//        this.stringLiteralToTypeMap.putAll( importG.stringLiteralToTypeMap );
+        int max = Math.max(this.typeToTokenList.size(), importG.typeToTokenList.size());
         this.typeToTokenList.setSize(max);
-        for (int ttype=0; ttype=typeToStringLiteralList.size() ) {
             typeToStringLiteralList.setSize(ttype+1);
         }
-        typeToStringLiteralList.set(ttype, text);
+        typeToStringLiteralList.set(ttype, lit);

         setTokenForType(ttype, lit);
         return ttype;
@@ -652,6 +669,7 @@
         Map lexerRuleToStringLiteral = new HashMap();

         for (GrammarASTWithOptions r : ruleNodes) {
+            //System.out.println(r.toStringTree());
             String ruleName = r.getChild(0).getText();
             if ( Character.isUpperCase(ruleName.charAt(0)) ) {
                 Map nodes = new HashMap();
@@ -667,6 +685,15 @@
         return lexerRuleToStringLiteral;
     }

+    public Set getStringLiterals() {
+        GrammarASTAdaptor adaptor = new GrammarASTAdaptor();
+        BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(adaptor,ast);
+        CollectSymbols collector = new CollectSymbols(nodes,this);
+        collector.downup(ast); // no side-effects; compute lists
+        return collector.strings;
+    }
+
+
     public void setLookaheadDFA(int decision, DFA lookaheadDFA) {
         decisionDFAs.put(decision, lookaheadDFA);
     }
diff --git a/tool/src/org/antlr/v4/tool/LexerGrammar.java b/tool/src/org/antlr/v4/tool/LexerGrammar.java
index 0fd8feea3..ee262ad73 100644
--- a/tool/src/org/antlr/v4/tool/LexerGrammar.java
+++ b/tool/src/org/antlr/v4/tool/LexerGrammar.java
@@ -37,6 +37,9 @@ import org.stringtemplate.v4.misc.MultiMap;

 public class LexerGrammar extends Grammar {
     public static final String DEFAULT_MODE_NAME = "DEFAULT_MODE";

+    /** The grammar from which this lexer grammar was derived (if implicit) */
+    public Grammar implicitLexerOwner;
+
     /** DEFAULT_MODE rules are added first due to grammar syntax order */
     public MultiMap modes = new MultiMap();
diff --git a/tool/test/org/antlr/v4/test/TestRewriteAST.java b/tool/test/org/antlr/v4/test/TestRewriteAST.java
index 0c41223b1..894401513 100644
--- a/tool/test/org/antlr/v4/test/TestRewriteAST.java
+++ b/tool/test/org/antlr/v4/test/TestRewriteAST.java
@@ -60,6 +60,19 @@ public class TestRewriteAST extends BaseTest {
         assertEquals("abc\n", found);
     }

+    @Test public void testSingleLabeledToken() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : x=ID -> $x;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                                  "a", "abc", debug);
+        assertEquals("abc\n", found);
+    }
+
     @Test public void testSingleTokenToNewNode() throws Exception {
         String grammar =
             "grammar T;\n" +
@@ -86,20 +99,6 @@
         assertEquals("(x INT)\n", found);
     }

-    @Test public void testSingleTokenToNewNode2() throws Exception {
-        // Allow creation of new nodes w/o args.
-        String grammar =
-            "grammar TT;\n" +
-            "options {output=AST;}\n" +
-            "a : ID -> ID[ ];\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("TT.g", grammar, "TTParser", "TTLexer",
-                                  "a", "abc", debug);
-        assertEquals("ID\n", found);
-    }
-
     @Test public void testSingleCharLiteral() throws Exception {
         String grammar =
             "grammar T;\n" +
@@ -199,7 +198,7 @@
             "grammar T;\n" +
             "options {output=AST;}\n" +
             "tokens {DUH;}\n" +
-            "a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;\n" +
+            "a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )* ;\n" +
             "ID : 'a'..'z'+ ;\n" +
             "INT : '0'..'9'+;\n" +
             "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
@@ -234,19 +233,6 @@
         assertEquals("a b\n", found);
     }

-    @Test public void testPositiveClosureSingleToken() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID ID -> ID+ ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                                  "a", "a b", debug);
-        assertEquals("a b\n", found);
-    }
-
     @Test public void testOptionalSingleRule() throws Exception {
         String grammar =
             "grammar T;\n" +
@@ -407,7 +393,7 @@
         String grammar =
             "grammar T;\n" +
             "options {output=AST;}\n" +
-            "a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;\n" +
+            "a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)*) ;\n" +
             "type : 'int' | 'float' ;\n" +
             "ID : 'a'..'z'+ ;\n" +
             "INT : '0'..'9'+;\n" +
@@ -422,7 +408,7 @@
             "grammar T;\n" +
             "options {output=AST;}\n" +
             "tokens {VAR;}\n" +
-            "a : ID (',' ID)*-> ^(VAR ID)+ ;\n" +
+            "a : ID (',' ID)*-> ^(VAR ID)* ;\n" +
             "type : 'int' | 'float' ;\n" +
             "ID : 'a'..'z'+ ;\n" +
             "INT : '0'..'9'+;\n" +
@@ -452,7 +438,7 @@
"grammar T;\n" + "options {output=AST;}\n" + "tokens {VAR;}\n" + - "a : ID (',' ID)*-> ^(VAR[\"var\"] ID)+ ;\n" + + "a : ID (',' ID)*-> ^(VAR[\"var\"] ID)* ;\n" + "type : 'int' | 'float' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -467,7 +453,7 @@ public class TestRewriteAST extends BaseTest { "grammar T;\n" + "options {output=AST;}\n" + "tokens {BLOCK;}\n" + - "a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;\n" + + "a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID*) ;\n" + "type : 'int' | 'float' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -482,7 +468,7 @@ public class TestRewriteAST extends BaseTest { "grammar T;\n" + "options {output=AST;}\n" + "tokens {BLOCK;}\n" + - "a : lc='{' ID+ '}' -> ^(BLOCK[$lc,\"block\"] ID+) ;\n" + + "a : lc='{' ID+ '}' -> ^(BLOCK[$lc,\"block\"] ID*) ;\n" + "type : 'int' | 'float' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -551,7 +537,7 @@ public class TestRewriteAST extends BaseTest { "options {output=AST;}\n" + "tokens {BLOCK;}\n" + "a : b b ;\n" + - "b : ID ( ID (last=ID -> $last)+ ) ';'\n" + // get last ID + "b : ID ( ID (last=ID -> $last)* ) ';'\n" + // get last ID " | INT\n" + // should still get auto AST construction " ;\n" + "ID : 'a'..'z'+ ;\n" + @@ -621,11 +607,11 @@ public class TestRewriteAST extends BaseTest { } @Test public void testCopySemanticsForRules2() throws Exception { - // copy type as a root for each invocation of (...)+ in rewrite + // copy type as a root for each invocation of (...)* in rewrite String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : type ID (',' ID)* ';' -> ^(type ID)+ ;\n" + + "a : type ID (',' ID)* ';' -> ^(type ID)* ;\n" + "type : 'int' ;\n" + "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n"; @@ -636,11 +622,11 @@ public class TestRewriteAST extends BaseTest { @Test public void testCopySemanticsForRules3() throws Exception { // copy type *and* modifier even though it's optional - // for each invocation of (...)+ in rewrite + // for each invocation of (...)* in rewrite String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;\n" + + "a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)* ;\n" + "type : 'int' ;\n" + "modifier : 'public' ;\n" + "ID : 'a'..'z'+ ;\n" + @@ -652,11 +638,11 @@ public class TestRewriteAST extends BaseTest { @Test public void testCopySemanticsForRules3Double() throws Exception { // copy type *and* modifier even though it's optional - // for each invocation of (...)+ in rewrite + // for each invocation of (...)* in rewrite String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;\n" + + "a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)* ^(type modifier? ID)* ;\n" + "type : 'int' ;\n" + "modifier : 'public' ;\n" + "ID : 'a'..'z'+ ;\n" + @@ -668,12 +654,12 @@ public class TestRewriteAST extends BaseTest { @Test public void testCopySemanticsForRules4() throws Exception { // copy type *and* modifier even though it's optional - // for each invocation of (...)+ in rewrite + // for each invocation of (...)* in rewrite String grammar = "grammar T;\n" + "options {output=AST;}\n" + "tokens {MOD;}\n" + - "a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;\n" + + "a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? 
ID)* ;\n" + "type : 'int' ;\n" + "modifier : 'public' ;\n" + "ID : 'a'..'z'+ ;\n" + @@ -688,7 +674,7 @@ public class TestRewriteAST extends BaseTest { "grammar T;\n" + "options {output=AST;}\n" + "tokens {MOD;}\n" + - "a : ID (',' ID)* ';' -> ID+ ID+ ;\n"+ + "a : ID (',' ID)* ';' -> ID* ID* ;\n"+ "ID : 'a'..'z'+ ;\n" + "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n"; String found = execParser("T.g", grammar, "TParser", "TLexer", @@ -728,7 +714,7 @@ public class TestRewriteAST extends BaseTest { String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;\n" + + "a : 'int' ID (',' ID)* ';' -> ^('int' ID*) ;\n" + "op : '+'|'-' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -756,7 +742,7 @@ public class TestRewriteAST extends BaseTest { String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;\n" + + "a : 'int' ID (',' ID)* ';' -> ^('int' ID)* ;\n" + "op : '+'|'-' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -771,7 +757,7 @@ public class TestRewriteAST extends BaseTest { String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;\n" + + "a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)* ;\n" + "op : '+'|'-' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + @@ -993,7 +979,7 @@ public class TestRewriteAST extends BaseTest { String grammar = "grammar T;\n" + "options { output = AST; } \n" + - "a: (INT|ID)+ -> INT+ ID+ ;\n" + + "a: (INT|ID)+ -> INT* ID* ;\n" + "INT: '0'..'9'+;\n" + "ID : 'a'..'z'+;\n" + "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n"; @@ -1049,8 +1035,6 @@ public class TestRewriteAST extends BaseTest { } @Test public void testOptionalSubruleWithoutRealElements() throws Exception { - // copy type *and* modifier even though it's optional - // for each invocation of (...)+ in rewrite String grammar = "grammar T;\n" + "options {output=AST;} \n" + @@ -1074,7 +1058,7 @@ public class TestRewriteAST extends BaseTest { "grammar T;\n" + "options {output=AST;}\n" + "tokens {BLOCK;}\n" + - "a : ID ID INT INT INT -> (ID INT)+;\n"+ + "a : ID ID INT INT INT -> (ID INT)*;\n"+ "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+; \n" + "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n"; @@ -1124,7 +1108,7 @@ public class TestRewriteAST extends BaseTest { String grammar = "grammar T;\n" + "options {output=AST;}\n" + - "a : ID? INT -> ID+ INT ;\n" + + "a : ID? 
INT -> ID* INT ;\n" + "op : '+'|'-' ;\n" + "ID : 'a'..'z'+ ;\n" + "INT : '0'..'9'+;\n" + diff --git a/tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java b/tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java index 8bccda5c6..d1a2c2c6b 100644 --- a/tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java +++ b/tool/test/org/antlr/v4/test/TestTokenTypeAssignment.java @@ -95,7 +95,7 @@ public class TestTokenTypeAssignment extends BaseTest { String[] typeToTokenName = g.getTokenNames(); Set tokens = new HashSet(); for (String t : typeToTokenName) if ( t!=null ) tokens.add(t); - assertEquals("[E]", tokens.toString()); + assertEquals("[E, 'x']", tokens.toString()); } @Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception { @@ -155,7 +155,7 @@ public class TestTokenTypeAssignment extends BaseTest { "tokens { B='}'; }\n"+ "a : A '}' {System.out.println(input);} ;\n"+ "A : 'a' ;\n" + - "B : '}' {/* */} ;\n"+ + "B : '}' ;\n"+ "WS : (' '|'\\n') {skip();} ;"; String found = execParser("P.g", grammar, "PParser", "PLexer", "a", "a}", false); @@ -164,7 +164,7 @@ public class TestTokenTypeAssignment extends BaseTest { protected void checkSymbols(Grammar g, String rulesStr, - String tokensStr) + String allValidTokensStr) throws Exception { Tool antlr = new Tool(); @@ -175,7 +175,7 @@ public class TestTokenTypeAssignment extends BaseTest { for (String t : typeToTokenName) if ( t!=null ) tokens.add(t); // make sure expected tokens are there - StringTokenizer st = new StringTokenizer(tokensStr, ", "); + StringTokenizer st = new StringTokenizer(allValidTokensStr, ", "); while ( st.hasMoreTokens() ) { String tokenName = st.nextToken(); assertTrue("token "+tokenName+" expected, but was undefined",