2006-02-24 14:07:01 +08:00
|
|
|
"Database cache backend."
|
2011-07-13 17:35:51 +08:00
|
|
|
import base64
|
|
|
|
import time
|
2006-02-24 14:07:01 +08:00
|
|
|
from datetime import datetime
|
2011-07-13 17:35:51 +08:00
|
|
|
|
2006-02-24 14:07:01 +08:00
|
|
|
try:
|
|
|
|
import cPickle as pickle
|
|
|
|
except ImportError:
|
|
|
|
import pickle
|
|
|
|
|
2011-11-20 18:17:12 +08:00
|
|
|
from django.conf import settings
|
2011-07-13 17:35:51 +08:00
|
|
|
from django.core.cache.backends.base import BaseCache
|
|
|
|
from django.db import connections, router, transaction, DatabaseError
|
2011-11-20 18:17:12 +08:00
|
|
|
from django.utils import timezone
|
2011-07-13 17:35:51 +08:00
|
|
|
|
|
|
|
|
2010-08-05 10:13:32 +08:00
|
|
|
class Options(object):
    """Stand-in for a Django model's ``_meta`` options class.

    Database routers expect to inspect ``model._meta``; this carries just
    enough metadata that cache operations can be routed like operations
    on a regular model.
    """

    def __init__(self, table):
        # Identity of the pseudo-model.
        self.db_table = table
        self.app_label = 'django_cache'
        self.module_name = 'cacheentry'
        self.object_name = 'CacheEntry'
        # Human-readable names, mirroring a real model's options.
        self.verbose_name = 'cache entry'
        self.verbose_name_plural = 'cache entries'
        # Flags a router may check before deciding where to send queries.
        self.abstract = False
        self.managed = True
        self.proxy = False
|
|
|
|
|
2010-12-21 23:19:19 +08:00
|
|
|
class BaseDatabaseCache(BaseCache):
    """Common setup shared by the database cache backends."""

    def __init__(self, table, params):
        """Remember the cache table and build a router-friendly fake model.

        ``table`` is the name of the database table holding cache rows;
        ``params`` are the usual BaseCache options.
        """
        BaseCache.__init__(self, params)
        self._table = table

        # Routers take a model class; build one on the fly that quacks
        # like a model just enough (an ``_meta`` Options) to be routed.
        self.cache_model_class = type(
            'CacheEntry', (object,), {'_meta': Options(table)})
|
|
|
|
|
2010-12-21 23:19:19 +08:00
|
|
|
class DatabaseCache(BaseDatabaseCache):
    """Cache backend storing entries as rows in a database table.

    Each row holds (cache_key, base64-encoded pickled value, expires).
    Reads go to the router's "read" database; writes issue raw SQL on
    the router's "write" database, bypassing the ORM.
    """

    # This class uses cursors provided by the database connection. This means
    # it reads expiration values as aware or naive datetimes depending on the
    # value of USE_TZ. They must be compared to aware or naive representations
    # of "now" respectively.

    # But it bypasses the ORM for write operations. As a consequence, aware
    # datetimes aren't made naive for databases that don't support time zones.
    # We work around this problem by always using naive datetimes when writing
    # expiration values, in UTC when USE_TZ = True and in local time otherwise.

    def get(self, key, default=None, version=None):
        """Return the value for ``key``, or ``default`` on a miss.

        An expired row counts as a miss and is deleted lazily here.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT cache_key, value, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        row = cursor.fetchone()
        if row is None:
            return default
        now = timezone.now()
        if row[2] < now:
            # Expired: remove the stale row and report a miss.
            # NOTE(review): ``table`` was quoted with the read
            # connection's ops but is reused for this write-side DELETE —
            # assumes both connections quote identically; confirm.
            db = router.db_for_write(self.cache_model_class)
            cursor = connections[db].cursor()
            cursor.execute("DELETE FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            transaction.commit_unless_managed(using=db)
            return default
        # Values are stored as base64-encoded pickles; process_clob
        # handles backends (e.g. Oracle) that return CLOB objects.
        value = connections[db].ops.process_clob(row[1])
        return pickle.loads(base64.decodestring(value))

    def set(self, key, value, timeout=None, version=None):
        """Unconditionally store ``value`` under ``key``."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)

    def add(self, key, value, timeout=None, version=None):
        """Store ``value`` only if ``key`` is absent or expired.

        Returns True if the value was stored, False otherwise.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)

    def _base_set(self, mode, key, value, timeout=None):
        """Shared implementation for set() and add().

        ``mode`` is 'set' (overwrite) or 'add' (only replace expired
        rows). Returns True on success, False if the write failed; a
        concurrent-write failure is tolerated silently (see below).
        """
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        # Drop microseconds so comparisons against DB-stored datetimes
        # (whole-second precision on some backends) behave consistently.
        now = now.replace(microsecond=0)
        # Per the class comment: expiration is always written as a naive
        # datetime — UTC when USE_TZ is on, local time otherwise.
        if settings.USE_TZ:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        encoded = base64.encodestring(pickled).strip()
        cursor.execute("SELECT cache_key, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            # UPDATE when the row exists and either we're overwriting
            # ('set') or the existing row has expired ('add').
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                               "WHERE cache_key = %%s" % table,
                               [encoded, connections[db].ops.value_to_db_datetime(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                               "VALUES (%%s, %%s, %%s)" % table,
                               [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True

    def delete(self, key, version=None):
        """Remove ``key`` from the cache; a missing key is not an error."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
        transaction.commit_unless_managed(using=db)

    def has_key(self, key, version=None):
        """Return True if ``key`` exists and has not expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        # Build a naive "now" (UTC or local per USE_TZ) because the
        # comparison happens in SQL against the naively-stored expires.
        if settings.USE_TZ:
            now = datetime.utcnow()
        else:
            now = datetime.now()
        now = now.replace(microsecond=0)
        cursor.execute("SELECT cache_key FROM %s "
                       "WHERE cache_key = %%s and expires > %%s" % table,
                       [key, connections[db].ops.value_to_db_datetime(now)])
        return cursor.fetchone() is not None

    def _cull(self, db, cursor, now):
        """Shrink the table when it exceeds ``_max_entries``.

        First drops expired rows; if still over the limit, deletes the
        oldest 1/_cull_frequency of keys (by cache_key order). A
        _cull_frequency of 0 means "wipe everything".
        """
        if self._cull_frequency == 0:
            self.clear()
        else:
            # When USE_TZ is True, 'now' will be an aware datetime in UTC.
            now = now.replace(tzinfo=None)
            table = connections[db].ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connections[db].ops.value_to_db_datetime(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                # NOTE(review): relies on Python 2 integer division
                # (file imports cPickle); under Python 3 this would
                # yield a float — confirm before porting.
                cull_num = num / self._cull_frequency
                if connections[db].vendor == 'oracle':
                    # Oracle doesn't support LIMIT + OFFSET
                    cursor.execute("""SELECT cache_key FROM
(SELECT ROW_NUMBER() OVER (ORDER BY cache_key) AS counter, cache_key FROM %s)
WHERE counter > %%s AND COUNTER <= %%s""" % table, [cull_num, cull_num + 1])
                else:
                    # This isn't standard SQL, it's likely to break
                    # with some non officially supported databases
                    cursor.execute("SELECT cache_key FROM %s "
                                   "ORDER BY cache_key "
                                   "LIMIT 1 OFFSET %%s" % table, [cull_num])
                # Delete every key strictly below the cut-off key just
                # fetched by whichever vendor branch ran above.
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key < %%s" % table,
                               [cursor.fetchone()[0]])

    def clear(self):
        """Remove every entry from the cache table."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute('DELETE FROM %s' % table)
|
2010-12-21 23:19:19 +08:00
|
|
|
|
|
|
|
# For backwards compatibility
class CacheClass(DatabaseCache):
    # Deprecated alias: settings that still reference ``CacheClass``
    # keep working and get the DatabaseCache behavior unchanged.
    pass
|