"""
a cache for chewed-up "change" data structures, which are basically just a
different way of storing a revision. the cache improves lookup times 10x
over bazaar's xml revision structure, though, so currently still worth doing.

once a revision is committed in bazaar, it never changes, so once we have
cached a change, it's good forever.
"""

import cPickle
import os
import tempfile

from loggerhead import util
from loggerhead.lockfile import LockFile

with_lock = util.with_lock('_lock', 'ChangeCache')

try:
    from sqlite3 import dbapi2
except ImportError:
    from pysqlite2 import dbapi2

# We take an optimistic approach to concurrency here: we might do work twice
# in the case of races, but not crash or corrupt data.
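#
# For instance (an illustrative scenario spelled out here for clarity, not
# extra behaviour in this module): two processes can both notice that the
# database file is missing, each build a fresh table in its own temporary
# file, and each rename it into place. Since os.rename() atomically
# replaces the target on POSIX, readers always see a complete database;
# the only cost of the race is that the table was built twice.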


class FakeShelf(object):

    def __init__(self, filename):
        create_table = not os.path.exists(filename)
        if create_table:
            # To avoid races around creating the database, we create the db
            # in a temporary file and rename it into the ultimate location.
            fd, path = tempfile.mkstemp(dir=os.path.dirname(filename))
            os.close(fd)
            self._create_table(path)
            os.rename(path, filename)
        self.connection = dbapi2.connect(filename)
        self.cursor = self.connection.cursor()

    def _create_table(self, filename):
        con = dbapi2.connect(filename)
        cur = con.cursor()
        cur.execute(
            "create table RevisionData "
            "(revid binary primary key, data binary)")
        con.commit()
        con.close()

    def _serialize(self, obj):
        return dbapi2.Binary(cPickle.dumps(obj, protocol=2))

    def _unserialize(self, data):
        return cPickle.loads(str(data))
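
    # Round-trip sketch (illustrative only): for a picklable value such as
    # a dict of file changes, _unserialize(_serialize(obj)) == obj.
    # Wrapping the pickle in dbapi2.Binary stores it as a BLOB, so sqlite
    # never tries to interpret the raw pickle bytes as text.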

    def get(self, revid):
        self.cursor.execute(
            "select data from revisiondata where revid = ?", (revid,))
        filechange = self.cursor.fetchone()
        if filechange is None:
            return None
        return self._unserialize(filechange[0])

    def add(self, revid_obj_pairs):
        for (r, d) in revid_obj_pairs:
            self.cursor.execute(
                "insert into revisiondata (revid, data) values (?, ?)",
                (r, self._serialize(d)))
        self.connection.commit()
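
# A usage sketch for FakeShelf (the path, revid, and changes_object below
# are hypothetical, for illustration only):
#
#     shelf = FakeShelf('/var/cache/loggerhead/filechanges.sql')
#     shelf.add([('some-revid', changes_object)])
#     shelf.get('some-revid')     # -> changes_object
#     shelf.get('missing-revid')  # -> None; nothing cached under that key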


class FileChangeCache(object):

    def __init__(self, history, cache_path):
        self.history = history

        if not os.path.exists(cache_path):
            os.mkdir(cache_path)

        self._changes_filename = os.path.join(cache_path, 'filechanges.sql')

        # Use a lockfile since the cache folder could be shared across
        # different processes.
        self._lock = LockFile(os.path.join(cache_path, 'filechange-lock'))

    @with_lock
    def get_file_changes(self, entries):
        out = []
        missing_entries = []
        missing_entry_indices = []
        cache = FakeShelf(self._changes_filename)
        for entry in entries:
            changes = cache.get(entry.revid)
            if changes is not None:
                out.append(changes)
            else:
                missing_entries.append(entry)
                missing_entry_indices.append(len(out))
                out.append(None)
        if missing_entries:
            missing_changes = self.history.get_file_changes_uncached(
                missing_entries)
            revid_changes_pairs = []
            for i, entry, changes in zip(
                missing_entry_indices, missing_entries, missing_changes):
                revid_changes_pairs.append((entry.revid, changes))
                out[i] = changes
            cache.add(revid_changes_pairs)
        return out
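
# A usage sketch for FileChangeCache (the history object and entries are
# hypothetical, for illustration only; each entry is expected to carry a
# .revid attribute):
#
#     cache = FileChangeCache(history, '/var/cache/loggerhead')
#     changes = cache.get_file_changes(entries)
#     # changes[i] corresponds to entries[i]: cached results come straight
#     # from sqlite, and the misses are computed once via
#     # history.get_file_changes_uncached() and stored for next time.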