From 80560efcf84b27ddf20f7a7cc177bcca8306746c Mon Sep 17 00:00:00 2001
From: Terence Parr
Date: Tue, 31 Jul 2012 19:39:31 -0700
Subject: [PATCH] wow. i think i got threaded working. rm readwrite lock, sync add add to shared context and add dfa state

---
 .../v4/runtime/atn/LexerATNSimulator.java    |  16 +-
 .../v4/runtime/atn/ParserATNSimulator.java   | 146 ++++++++----------
 .../runtime/atn/PredictionContextCache.java  |  11 +-
 .../src/org/antlr/v4/runtime/dfa/DFA.java    |   8 -
 .../org/antlr/v4/runtime/dfa/DFAState.java   |   3 +-
 5 files changed, 70 insertions(+), 114 deletions(-)

diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java b/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java
index f7a45b170..b36cf1711 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java
@@ -164,22 +164,10 @@ public class LexerATNSimulator extends ATNSimulator {
 		try {
 			DFA dfa = decisionToDFA[mode];
 			if ( dfa.s0==null ) {
-				dfa.write.lock();
-				try {
-					return matchATN(input);
-				}
-				finally {
-					dfa.write.unlock();
-				}
+				return matchATN(input);
 			}
 			else {
-				dfa.read.lock();
-				try {
-					return execDFA(input, dfa.s0);
-				}
-				finally {
-					dfa.read.unlock();
-				}
+				return execDFA(input, dfa.s0);
 			}
 		}
 		finally {
diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java
index 7a76547a9..47114dc73 100755
--- a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java
@@ -200,30 +200,18 @@ public class ParserATNSimulator extends ATNSimulator {
 		// Now we are certain to have a specific decision's DFA
 		// But, do we still need an initial state?
 		if ( dfa.s0==null ) { // recheck
-			dfa.write.lock();
-			try {
-				if ( dfa.s0==null ) { // recheck
-					return predictATN(dfa, input, outerContext);
-				}
-				// fall through; another thread set dfa.s0 while we waited for lock
-			}
-			finally {
-				dfa.write.unlock();
+			if ( dfa.s0==null ) { // recheck
+				return predictATN(dfa, input, outerContext);
 			}
+			// fall through; another thread set dfa.s0 while we waited for lock
 		}
 
 		// We can start with an existing DFA
 		int m = input.mark();
 		int index = input.index();
 		try {
-			dfa.read.lock();
-			try {
-				int alt = execDFA(dfa, dfa.s0, input, index, outerContext);
-				return alt;
-			}
-			finally {
-				dfa.read.unlock();
-			}
+			int alt = execDFA(dfa, dfa.s0, input, index, outerContext);
+			return alt;
 		}
 		finally {
 			input.seek(index);
@@ -294,35 +282,29 @@ public class ParserATNSimulator extends ATNSimulator {
 			if ( dfa_debug ) System.out.println("DFA state "+s.stateNumber+" LA(1)=="+getLookaheadName(input));
 			if ( s.isCtxSensitive && !SLL ) {
 				if ( dfa_debug ) System.out.println("ctx sensitive state "+outerContext+" in "+s);
-				dfa.read.unlock();
-				dfa.write.lock();
-				try {
-					PredictionContext predictionCtx = PredictionContext.fromRuleContext(outerContext);
+				PredictionContext predictionCtx = PredictionContext.fromRuleContext(outerContext);
+				synchronized (sharedContextCache) {
 					predictionCtx = getCachedContext(predictionCtx);
-					Integer predI = s.contextToPredictedAlt.get(predictionCtx);
-					if ( predI!=null ) {
-						return predI; // ha! quick exit :)
-					}
-					boolean loopsSimulateTailRecursion = true;
-					boolean fullCtx = true;
-					ATNConfigSet s0_closure =
-						computeStartState(dfa.atnStartState, outerContext,
-										  greedy, loopsSimulateTailRecursion,
-										  fullCtx);
-					retry_with_context_from_dfa++;
-					ATNConfigSet fullCtxSet =
-						execATNWithFullContext(dfa, s, s0_closure,
-											   input, startIndex,
-											   outerContext,
-											   ATN.INVALID_ALT_NUMBER,
-											   greedy);
-					s.contextToPredictedAlt.put(predictionCtx, fullCtxSet.uniqueAlt);
-					return fullCtxSet.uniqueAlt;
 				}
-				finally {
-					dfa.read.lock(); // get read lock again
-					dfa.write.unlock(); // now release write lock
+				Integer predI = s.contextToPredictedAlt.get(predictionCtx);
+				if ( predI!=null ) {
+					return predI; // ha! quick exit :)
 				}
+				boolean loopsSimulateTailRecursion = true;
+				boolean fullCtx = true;
+				ATNConfigSet s0_closure =
+					computeStartState(dfa.atnStartState, outerContext,
+									  greedy, loopsSimulateTailRecursion,
+									  fullCtx);
+				retry_with_context_from_dfa++;
+				ATNConfigSet fullCtxSet =
+					execATNWithFullContext(dfa, s, s0_closure,
+										   input, startIndex,
+										   outerContext,
+										   ATN.INVALID_ALT_NUMBER,
+										   greedy);
+				s.contextToPredictedAlt.put(predictionCtx, fullCtxSet.uniqueAlt);
+				return fullCtxSet.uniqueAlt;
 			}
 			if ( s.isAcceptState ) {
 				if ( s.predicates!=null ) {
@@ -353,27 +335,19 @@ public class ParserATNSimulator extends ATNSimulator {
 											   " at DFA state "+s.stateNumber);
 			}
-			dfa.read.unlock();
-			dfa.write.lock();
 			// recheck; another thread might have added edge
 			if ( s.edges == null || t >= s.edges.length || t < -1 || s.edges[t+1] == null ) {
-				try {
-					alt = execATN(dfa, s, input, startIndex, outerContext);
-					// this adds edge even if next state is accept for
-					// same alt; e.g., s0-A->:s1=>2-B->:s2=>2
-					// TODO: This next stuff kills edge, but extra states remain. :(
-					if ( s.isAcceptState && alt!=-1 ) {
-						DFAState d = s.edges[input.LA(1)+1];
-						if ( d.isAcceptState && d.prediction==s.prediction ) {
-							// we can carve it out.
-							s.edges[input.LA(1)+1] = ERROR; // IGNORE really not error
-						}
+				alt = execATN(dfa, s, input, startIndex, outerContext);
+				// this adds edge even if next state is accept for
+				// same alt; e.g., s0-A->:s1=>2-B->:s2=>2
+				// TODO: This next stuff kills edge, but extra states remain. :(
+				if ( s.isAcceptState && alt!=-1 ) {
+					DFAState d = s.edges[input.LA(1)+1];
+					if ( d.isAcceptState && d.prediction==s.prediction ) {
+						// we can carve it out.
+						s.edges[input.LA(1)+1] = ERROR; // IGNORE really not error
 					}
 				}
-				finally {
-					dfa.read.lock(); // get read lock again
-					dfa.write.unlock(); // now release write lock
-				}
 
 				if ( dfa_debug ) {
 					System.out.println("back from DFA update, alt="+alt+", dfa=\n"+dfa.toString(parser.getTokenNames()));
 					//dump(dfa);
@@ -395,10 +369,6 @@ public class ParserATNSimulator extends ATNSimulator {
 				t = input.LA(1);
 			}
 		}
-//		if ( acceptState==null ) {
-//			if ( debug ) System.out.println("!!! no viable alt in dfa");
-//			return -1;
-//		}
 
 		// Before jumping to prediction, check to see if there are
 		// disambiguating predicates to evaluate
@@ -509,6 +479,7 @@ public class ParserATNSimulator extends ATNSimulator {
 		DFAState D = new DFAState(reach);
 		int predictedAlt = getUniqueAlt(reach);
 		if ( predictedAlt!=ATN.INVALID_ALT_NUMBER ) {
+			// NO CONFLICT, UNIQUE PREDICTED ALT
 			D.isAcceptState = true;
 			D.configs.uniqueAlt = predictedAlt;
 			D.prediction = predictedAlt;
@@ -517,12 +488,12 @@ public class ParserATNSimulator extends ATNSimulator {
 			D.configs.conflictingAlts = getConflictingAlts(reach);
 			if ( D.configs.conflictingAlts!=null ) {
 				if ( greedy ) {
-//					int k = input.index() - startIndex + 1; // how much input we used
-//					System.out.println("used k="+k);
+					// CONFLICT, GREEDY (TYPICAL SITUATION)
 					if ( outerContext == ParserRuleContext.EMPTY || // in grammar start rule
 						 !D.configs.dipsIntoOuterContext ||         // didn't fall out of rule
 						 SLL )                                      // not forcing SLL only
 					{
+						// SPECIAL CASE WHERE SLL KNOWS CONFLICT IS AMBIGUITY
 						if ( !D.configs.hasSemanticContext ) {
 							reportAmbiguity(dfa, D, startIndex, input.index(),
 											D.configs.conflictingAlts, D.configs);
@@ -533,6 +504,7 @@ public class ParserATNSimulator extends ATNSimulator {
 						predictedAlt = D.prediction;
 					}
 					else {
+						// SLL CONFLICT; RETRY WITH FULL LL CONTEXT
 						if ( debug ) System.out.println("RETRY with outerContext="+outerContext);
 						// don't look up context in cache now since we're just creating state D
 						loopsSimulateTailRecursion = true;
@@ -550,16 +522,20 @@ public class ParserATNSimulator extends ATNSimulator {
 												   greedy);
 						// not accept state: isCtxSensitive
 						PredictionContext predictionCtx = PredictionContext.fromRuleContext(outerContext);
-						predictionCtx = getCachedContext(predictionCtx);
+						synchronized (sharedContextCache) {
+							predictionCtx = getCachedContext(predictionCtx);
+						}
 						D.isCtxSensitive = true; // always force DFA to ATN simulate
 						predictedAlt = fullCtxSet.uniqueAlt;
 						D.prediction = ATN.INVALID_ALT_NUMBER;
-						D.contextToPredictedAlt.put(predictionCtx, predictedAlt);
+						// TODO: have to cache pred list to test also
+						D.contextToPredictedAlt.put(predictionCtx, predictedAlt); // CACHE
 						addDFAEdge(dfa, previousD, t, D);
 						return predictedAlt; // all done with preds, etc...
 					}
 				}
 				else {
+					// CONFLICT, NONGREEDY (ATYPICAL SITUATION)
 					// upon ambiguity for nongreedy, default to exit branch to avoid inf loop
 					// this handles case where we find ambiguity that stops DFA construction
 					// before a config hits rule stop state. Was leaving prediction blank.
@@ -594,11 +570,9 @@ public class ParserATNSimulator extends ATNSimulator {
 			int nalts = decState.getNumberOfTransitions();
 			// Update DFA so reach becomes accept state with (predicate,alt)
 			// pairs if preds found for conflicting alts
-			IntervalSet conflictingAlts = getConflictingAltsFromConfigSet(D.configs);
-			SemanticContext[] altToPred = getPredsForAmbigAlts(conflictingAlts, D.configs, nalts);
-			if ( altToPred!=null ) {
-				D.predicates = getPredicatePredictions(conflictingAlts, altToPred);
-			}
+			IntervalSet altsToCollectPredsFrom = getConflictingAltsOrUniqueAlt(D.configs);
+			SemanticContext[] altToPred = getPredsForAmbigAlts(altsToCollectPredsFrom, D.configs, nalts);
+			D.predicates = getPredicatePredictions(altsToCollectPredsFrom, altToPred);
 			D.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds
 
 			if ( D.predicates!=null ) {
@@ -1324,7 +1298,7 @@ public class ParserATNSimulator extends ATNSimulator {
 		return ambigAlts;
 	}
 
-	protected IntervalSet getConflictingAltsFromConfigSet(ATNConfigSet configs) {
+	protected IntervalSet getConflictingAltsOrUniqueAlt(ATNConfigSet configs) {
 		IntervalSet conflictingAlts;
 		if ( configs.uniqueAlt!= ATN.INVALID_ALT_NUMBER ) {
 			conflictingAlts = IntervalSet.of(configs.uniqueAlt);
@@ -1462,21 +1436,27 @@ public class ParserATNSimulator extends ATNSimulator {
 		if ( from.edges==null ) {
 			from.edges = new DFAState[atn.maxTokenType+1+1]; // TODO: make adaptive
 		}
-		from.edges[t+1] = to; // connect
+		synchronized (dfa) {
+			from.edges[t+1] = to; // connect
+		}
 		if ( debug ) System.out.println("DFA=\n"+dfa.toString(parser!=null?parser.getTokenNames():null));
 	}
 
 	/** Add D if not there and return D. Return previous if already present. */
 	protected DFAState addDFAState(@NotNull DFA dfa, @NotNull DFAState D) {
-		DFAState existing = dfa.states.get(D);
-		if ( existing!=null ) return existing;
+		synchronized (dfa) {
+			DFAState existing = dfa.states.get(D);
+			if ( existing!=null ) return existing;
 
-		D.stateNumber = dfa.states.size();
-		D.configs.optimizeConfigs(this);
-		D.configs.setReadonly(true);
-		dfa.states.put(D, D);
-		if ( debug ) System.out.println("adding new DFA state: "+D);
-		return D;
+			D.stateNumber = dfa.states.size();
+			synchronized (sharedContextCache) {
+				D.configs.optimizeConfigs(this);
+			}
+			D.configs.setReadonly(true);
+			dfa.states.put(D, D);
+			if ( debug ) System.out.println("adding new DFA state: "+D);
+			return D;
+		}
 	}
 
 	/** See comment on LexerInterpreter.addDFAState. */
diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java
index ee5da0a6a..768034ce9 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java
@@ -9,19 +9,14 @@ import java.util.Map;
  * both lexers and parsers.
  */
 public class PredictionContextCache {
-	protected String name;
 
 	protected Map cache = new HashMap();
 
-	public PredictionContextCache() {
-		this.name = name;
-	}
-
 	/** Add a context to the cache and return it. If the context already exists,
 	 *  return that one instead and do not add a new context to the cache.
 	 *  Protect shared cache from unsafe thread access.
 	 */
-	public synchronized PredictionContext add(PredictionContext ctx) {
+	public PredictionContext add(PredictionContext ctx) {
 		if ( ctx==PredictionContext.EMPTY ) return PredictionContext.EMPTY;
 		PredictionContext existing = cache.get(ctx);
 		if ( existing!=null ) {
@@ -32,11 +27,11 @@ public class PredictionContextCache {
 		return ctx;
 	}
 
-	public synchronized PredictionContext get(PredictionContext ctx) {
+	public PredictionContext get(PredictionContext ctx) {
 		return cache.get(ctx);
 	}
 
-	public synchronized int size() {
+	public int size() {
 		return cache.size();
 	}
 }
diff --git a/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java b/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java
index fdd90a258..6a36e9c64 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/dfa/DFA.java
@@ -42,8 +42,6 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 public class DFA {
 	/** A set of all DFA states. Use Map so we can get old state back
@@ -60,12 +58,6 @@ public class DFA {
 	@NotNull
 	public final DecisionState atnStartState;
 
-	/** Each DFA has a multi-reader, single-writer lock */
-	protected final ReentrantReadWriteLock readWriteLock =
-		new ReentrantReadWriteLock();
-	public final Lock read = readWriteLock.readLock();
-	public final Lock write = readWriteLock.writeLock();
-
 	/** Set of configs for a DFA state with at least one conflict? Mainly used as "return value"
 	 *  from predictATN() for retry.
 	 */
diff --git a/runtime/Java/src/org/antlr/v4/runtime/dfa/DFAState.java b/runtime/Java/src/org/antlr/v4/runtime/dfa/DFAState.java
index 2c6d83696..32057d0e7 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/dfa/DFAState.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/dfa/DFAState.java
@@ -35,6 +35,7 @@ import org.antlr.v4.runtime.atn.PredictionContext;
 import org.antlr.v4.runtime.atn.SemanticContext;
 import org.antlr.v4.runtime.misc.Nullable;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -108,7 +109,7 @@ public class DFAState {
 	public List predicates;
 
 	public Map contextToPredictedAlt =
-		new HashMap();
+		Collections.synchronizedMap(new HashMap());
 
 	/** Map a predicate to a predicted alternative */
 	public static class PredPrediction {
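
The core of the new scheme is visible in addDFAState() and addDFAEdge() above: instead of a per-DFA read/write lock, writers synchronize on the DFA (and on sharedContextCache while canonicalizing contexts) for a short check-then-add, while readers of an already-published DFA take no lock and simply fall back to ATN simulation when an edge is not yet visible to them. Below is a minimal, self-contained sketch of that synchronized get-or-add pattern; the class and member names (StateCache, states, nextStateNumber, getOrAdd) are invented for illustration and are not ANTLR APIs, and the real addDFAState() also renumbers the state, optimizes its configs, and marks them read-only inside the same critical section.

import java.util.HashMap;
import java.util.Map;

// Toy stand-in for the synchronized get-or-add that addDFAState() performs on
// dfa.states. Only the locking pattern mirrors the patch: the whole
// check-then-insert runs under one monitor, so a thread that loses the race
// reuses the winner's instance instead of installing a duplicate.
class StateCache {
	private final Map<String, String> states = new HashMap<String, String>();
	private int nextStateNumber = 0; // mirrors "D.stateNumber = dfa.states.size();"

	public String getOrAdd(String candidate) {
		synchronized (this) {            // the patch locks the DFA object itself
			String existing = states.get(candidate);
			if (existing != null) {
				return existing;         // another thread added it first; share it
			}
			nextStateNumber++;           // numbering happens under the same lock
			states.put(candidate, candidate);
			return candidate;
		}
	}

	public synchronized int size() {
		return states.size();
	}

	public static void main(String[] args) throws InterruptedException {
		final StateCache cache = new StateCache();
		Runnable worker = new Runnable() {
			public void run() {
				for (int i = 0; i < 10000; i++) {
					cache.getOrAdd("state-" + (i % 16));
				}
			}
		};
		Thread t1 = new Thread(worker);
		Thread t2 = new Thread(worker);
		t1.start(); t2.start();
		t1.join(); t2.join();
		// Both threads raced over the same 16 candidate states; exactly 16 survive.
		System.out.println("states cached: " + cache.size());
	}
}

The apparent trade-off versus the ReentrantReadWriteLock deleted from DFA is coarser but writer-only critical sections: a reader that misses a concurrently added state or edge just recomputes it via execATN() and then races to publish it, and the synchronized get-or-add ensures both threads end up sharing one canonical DFAState.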