Fixed #392 -- Fixed bug in memcache setup if arguments are given. Thanks, adrian@exoweb.net

git-svn-id: http://code.djangoproject.com/svn/django/trunk@598 bcc190cf-cafb-0310-a4f2-bffc1f526a37
This commit is contained in:
Adrian Holovaty 2005-09-01 23:09:48 +00:00
parent 29c50bc0d4
commit 582e6a1010
1 changed files with 39 additions and 39 deletions

View File

@ -2,8 +2,8 @@
Caching framework.
This module defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract Cache object, below.
Client code should not access a cache backend directly; instead
@ -12,49 +12,49 @@ settings.CACHE_BACKEND and use that to create and load a cache object.
The CACHE_BACKEND setting is a quasi-URI; examples are:
memcached://127.0.0.1:11211/ A memcached backend; the server is running
memcached://127.0.0.1:11211/ A memcached backend; the server is running
on localhost port 11211.
pgsql://tablename/ A pgsql backend (the pgsql backend uses
pgsql://tablename/ A pgsql backend (the pgsql backend uses
the same database/username as the rest of
the CMS, so only a table name is needed.)
file:///var/tmp/django.cache/ A file-based cache at /var/tmp/django.cache
simple:/// A simple single-process memory cache; you
probably don't want to use this except for
testing. Note that this cache backend is
testing. Note that this cache backend is
NOT threadsafe!
All caches may take arguments; these are given in query-string style. Valid
arguments are:
timeout
timeout
Default timeout, in seconds, to use for the cache. Defaults
to 5 minutes (300 seconds).
max_entries
max_entries
For the simple, file, and database backends, the maximum number of
entries allowed in the cache before it is cleaned. Defaults to
300.
cull_frequency
The fraction of entries that are culled when max_entries is reached.
The actual fraction culled is 1/cull_frequency, so set cull_frequency=3
to cull 1/3 of the entries when max_entries is reached.
A value of 0 for cull_frequency means that the entire cache will be
dumped when max_entries is reached. This makes culling *much* faster
at the expense of more cache misses.
For example:
memcached://127.0.0.1:11211/?timeout=60
pgsql://tablename/?timeout=120&max_entries=500&cull_frequency=4
Invalid arguments are silently ignored, as are invalid values of known
Invalid arguments are silently ignored, as are invalid values of known
arguments.
So far, only the memcached and simple backends have been implemented;
backends using postgres and file-system storage are planned.
"""
@ -79,14 +79,14 @@ class _Cache:
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
def get(self, key, default=None):
    '''
    Return the value stored under key.

    A missing key yields default (None unless overridden). Abstract:
    every concrete backend must override this method.
    '''
    raise NotImplementedError
def set(self, key, value, timeout=None):
'''
Set a value in the cache. If timeout is given, that timeout will be
@ -104,7 +104,7 @@ class _Cache:
'''
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Returns a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
'''
@ -114,7 +114,7 @@ class _Cache:
if val is not None:
d[k] = val
return d
def has_key(self, key):
'''
Returns True if the key is in the cache and has not expired.
@ -132,24 +132,24 @@ except ImportError:
else:
class _MemcachedCache(_Cache):
"""Memcached cache backend."""
def __init__(self, server, params):
    # Let the base class extract the common options (e.g. timeout) ...
    _Cache.__init__(self, params)
    # ... then open a client connection to the single configured server.
    self._cache = memcache.Client([server])
def get(self, key, default=None):
    '''Return the cached value for key, falling back to default on a miss.'''
    # The memcached client signals a miss with None; map that to default.
    cached = self._cache.get(key)
    return default if cached is None else cached
def set(self, key, value, timeout=0):
    '''Store value under key, forwarding timeout to the memcached client.'''
    self._cache.set(key, value, timeout)
def delete(self, key):
    '''Forward the deletion of key straight to the memcached client.'''
    self._cache.delete(key)
def get_many(self, keys):
    '''Fetch all of keys in one round trip via the client's get_multi.'''
    return self._cache.get_multi(keys)
@ -158,27 +158,27 @@ else:
##################################
import time
class _SimpleCache(_Cache):
"""Simple single-process in-memory cache"""
def __init__(self, host, params):
    '''
    Build an empty in-process cache.

    host is accepted for signature parity with the other backends but is
    not used. Recognized params: max_entries (default 300) and
    cull_frequency (default 3); non-integer values fall back to the
    defaults.
    '''
    _Cache.__init__(self, params)
    self._cache = {}
    self._expire_info = {}

    def to_int(raw, fallback):
        # Query-string params arrive as strings; bad values are ignored.
        try:
            return int(raw)
        except (ValueError, TypeError):
            return fallback

    self._max_entries = to_int(params.get('max_entries', 300), 300)
    self._cull_frequency = to_int(params.get('cull_frequency', 3), 3)
def get(self, key, default=None):
now = time.time()
exp = self._expire_info.get(key, now)
@ -188,7 +188,7 @@ class _SimpleCache(_Cache):
return default
else:
return self._cache.get(key, default)
def set(self, key, value, timeout=None):
if len(self._cache) >= self._max_entries:
self._cull()
@ -196,7 +196,7 @@ class _SimpleCache(_Cache):
timeout = self.default_timeout
self._cache[key] = value
self._expire_info[key] = time.time() + timeout
def delete(self, key):
try:
del self._cache[key]
@ -206,7 +206,7 @@ class _SimpleCache(_Cache):
del self._expire_info[key]
except KeyError:
pass
def has_key(self, key):
    '''
    Return True if key is present in the cache dict.

    NOTE(review): unlike the abstract contract, this does not consult
    self._expire_info, so an expired-but-uncollected key still reports
    True; get() is what enforces expiry on read.
    '''
    # `in` is the idiomatic membership test (dict.has_key() is
    # deprecated and was removed in Python 3); behavior is identical.
    return key in self._cache
@ -219,7 +219,7 @@ class _SimpleCache(_Cache):
for k in doomed:
self.delete(k)
##########################################
##########################################
# Read settings and load a cache backend #
##########################################
@ -238,12 +238,12 @@ def get_cache(backend_uri):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
if scheme not in _BACKENDS.keys():
raise InvalidCacheBackendError("%r is not a valid cache backend" % scheme)
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[:qpos]
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):