a cache for chewed-up 'file change' data structures, which are basically just
a different way of storing a revision delta.  the cache improves lookup times
10x over bazaar's xml revision structure, though, so currently still worth
doing.

once a revision is committed in bazaar, it never changes, so once we have
cached a change, it's good forever.
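
for example (an illustrative sketch; `history` stands in for whatever object
provides `get_file_changes_uncached`, and the cache path is made up):

    cache = FileChangeCache(history, '/var/cache/loggerhead')
    changes = cache.get_file_changes(entries)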

import cPickle
import marshal
import os
import tempfile
import zlib

from loggerhead import util
from loggerhead.lockfile import LockFile

with_lock = util.with_lock('_lock', 'ChangeCache')

SQLITE_INTERFACE = os.environ.get('SQLITE_INTERFACE', 'sqlite')

if SQLITE_INTERFACE == 'pysqlite2':
    try:
        # python 2.5+ ships pysqlite2 in the stdlib as sqlite3
        from sqlite3 import dbapi2
    except ImportError:
        from pysqlite2 import dbapi2
    _param_marker = '?'
elif SQLITE_INTERFACE == 'sqlite':
    import sqlite as dbapi2
    _param_marker = '%s'
else:
    raise AssertionError("bad sqlite interface %r!?" % SQLITE_INTERFACE)

_select_stmt = ("select data from revisiondata where revid = ?"
                ).replace('?', _param_marker)
_insert_stmt = ("insert into revisiondata (revid, data) "
                "values (?, ?)").replace('?', _param_marker)

# We take an optimistic approach to concurrency here: we might do work twice
# in the case of races, but not crash or corrupt data.

def safe_init_db(filename, init_sql):
    # To avoid races around creating the database, we create the db in
    # a temporary file and rename it into the ultimate location.
    fd, temp_path = tempfile.mkstemp(dir=os.path.dirname(filename))
    os.close(fd)
    con = dbapi2.connect(temp_path)
    cur = con.cursor()
    cur.execute(init_sql)
    con.commit()
    con.close()
    os.rename(temp_path, filename)
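
# If two processes race through safe_init_db, each initializes the schema in
# its own temporary file and the loser's rename simply replaces the winner's
# file with an identical one: duplicated work, but never a half-created
# database.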


class FakeShelf(object):

    def __init__(self, filename):
        create_table = not os.path.exists(filename)
        if create_table:
            safe_init_db(
                filename, "create table RevisionData "
                "(revid binary primary key, data binary)")
        self.connection = dbapi2.connect(filename)
        self.cursor = self.connection.cursor()

    def _create_table(self, filename):
        con = dbapi2.connect(filename)
        cur = con.cursor()
        cur.execute(
            "create table RevisionData "
            "(revid binary primary key, data binary)")
        con.commit()
        con.close()

    def _serialize(self, obj):
        return dbapi2.Binary(cPickle.dumps(obj, protocol=2))

    def _unserialize(self, data):
        return cPickle.loads(str(data))

    def get(self, revid):
        self.cursor.execute(_select_stmt, (revid,))
        filechange = self.cursor.fetchone()
        if filechange is None:
            return None
        else:
            return self._unserialize(filechange[0])

    def add(self, revid_obj_pairs):
        for (r, d) in revid_obj_pairs:
            try:
                self.cursor.execute(_insert_stmt, (r, self._serialize(d)))
            except dbapi2.IntegrityError:
                # If another thread or process attempted to add the same
                # revid, we assume it stored the same value and carry on
                # with our day.
                pass
        self.connection.commit()
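
# FakeShelf usage sketch (illustrative only; the path and revids are made up):
#
#     shelf = FakeShelf('/var/cache/loggerhead/filechanges.sql')
#     shelf.add([('revid-1', ['README'])])
#     shelf.get('revid-1')    # -> ['README']
#     shelf.get('revid-2')    # -> None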


class FileChangeCache(object):

    def __init__(self, history, cache_path):
        self.history = history

        if not os.path.exists(cache_path):
            os.mkdir(cache_path)

        self._changes_filename = os.path.join(cache_path, 'filechanges.sql')

        # use a lockfile since the cache folder could be shared across
        # different processes.
        self._lock = LockFile(os.path.join(cache_path, 'filechange-lock'))

    @with_lock
    def get_file_changes(self, entries):
        out = []
        missing_entries = []
        missing_entry_indices = []
        cache = FakeShelf(self._changes_filename)
        for entry in entries:
            changes = cache.get(entry.revid)
            if changes is not None:
                out.append(changes)
            else:
                missing_entries.append(entry)
                # remember which output slot to backfill once the uncached
                # lookup returns, so the result order matches `entries`
                missing_entry_indices.append(len(out))
                out.append(None)
        if missing_entries:
            missing_changes = self.history.get_file_changes_uncached(
                missing_entries)
            revid_changes_pairs = []
            for i, entry, changes in zip(
                    missing_entry_indices, missing_entries, missing_changes):
                revid_changes_pairs.append((entry.revid, changes))
                out[i] = changes
            cache.add(revid_changes_pairs)
        return out


class RevInfoDiskCache(object):
    """Like `RevInfoMemoryCache` but backed in a sqlite DB."""

    # note: the statements below use the dbapi2 '?' marker directly, so this
    # class assumes the pysqlite2/sqlite3 interface; under the legacy
    # 'sqlite' bindings they would need the same _param_marker substitution
    # as _select_stmt above.

    def __init__(self, cache_path):
        if not os.path.exists(cache_path):
            os.mkdir(cache_path)
        filename = os.path.join(cache_path, 'revinfo.sql')
        create_table = not os.path.exists(filename)
        if create_table:
            safe_init_db(
                filename, "create table Data "
                "(key binary primary key, revid binary, data binary)")
        self.connection = dbapi2.connect(filename)
        self.cursor = self.connection.cursor()

    def get(self, key, revid):
        self.cursor.execute(
            "select revid, data from data where key = ?",
            (dbapi2.Binary(key),))
        row = self.cursor.fetchone()
        if row is None:
            return None
        elif str(row[0]) != revid:
            # a row for this key exists, but it was cached against a
            # different (stale) revid
            return None
        else:
            return marshal.loads(zlib.decompress(row[1]))

    def set(self, key, revid, data):
        try:
            self.cursor.execute(
                'delete from data where key = ?', (dbapi2.Binary(key), ))
            blob = zlib.compress(marshal.dumps(data))
            self.cursor.execute(
                "insert into data (key, revid, data) values (?, ?, ?)",
                map(dbapi2.Binary, [key, revid, blob]))
            self.connection.commit()
        except dbapi2.IntegrityError:
            # If another thread or process attempted to set the same key, we
            # don't care too much -- it's only a cache after all!
            pass
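

# RevInfoDiskCache usage sketch (illustrative only; the path, key and revids
# are made up, and the data just has to be marshal-able):
#
#     cache = RevInfoDiskCache('/var/cache/loggerhead')
#     cache.set('some-key', 'revid-1', ['cached', 'rev', 'info'])
#     cache.get('some-key', 'revid-1')    # -> ['cached', 'rev', 'info']
#     cache.get('some-key', 'revid-2')    # -> None (stale revid)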