import bisect
import datetime
import logging
import re
import threading
import time

from StringIO import StringIO

from loggerhead import search
from loggerhead import util
from loggerhead.util import decorator
from loggerhead.wholehistory import compute_whole_history_data

import bzrlib.branch
import bzrlib.bundle.serializer
import bzrlib.delta
import bzrlib.diff
import bzrlib.errors
import bzrlib.lru_cache
import bzrlib.progress
import bzrlib.revision
import bzrlib.textfile
import bzrlib.tsort
import bzrlib.ui

with_branch_lock = util.with_lock('_lock', 'branch')


def with_bzrlib_read_lock(unbound):
    def bzrlib_read_locked(self, *args, **kw):
        #self.log.debug('-> %r bzr lock', id(threading.currentThread()))
        self._branch.repository.lock_read()
        try:
            return unbound(self, *args, **kw)
        finally:
            self._branch.repository.unlock()
            #self.log.debug('<- %r bzr lock', id(threading.currentThread()))
    return bzrlib_read_locked
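
# A minimal usage sketch for the decorator above (the method name is
# hypothetical; it assumes a class with a `_branch` attribute, such as
# History below):
#
#   class Example(object):
#       @with_bzrlib_read_lock
#       def read_something(self, revid):
#           # runs with the repository read-locked; the lock is released
#           # even if the call raises.
#           return self._branch.repository.get_revision(revid)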


# bzrlib's UIFactory is not thread-safe
uihack = threading.local()


class ThreadSafeUIFactory(bzrlib.ui.SilentUIFactory):

    def nested_progress_bar(self):
        if getattr(uihack, '_progress_bar_stack', None) is None:
            uihack._progress_bar_stack = bzrlib.progress.ProgressBarStack(
                klass=bzrlib.progress.DummyProgress)
        return uihack._progress_bar_stack.get_nested()

bzrlib.ui.ui_factory = ThreadSafeUIFactory()


def _process_side_by_side_buffers(line_list, delete_list, insert_list):
    while len(delete_list) < len(insert_list):
        delete_list.append((None, '', 'context'))
    while len(insert_list) < len(delete_list):
        insert_list.append((None, '', 'context'))
    while len(delete_list) > 0:
        d = delete_list.pop(0)
        i = insert_list.pop(0)
        line_list.append(util.Container(old_lineno=d[0], new_lineno=i[0],
                                        old_line=d[1], new_line=i[1],
                                        old_type=d[2], new_type=i[2]))


def _make_side_by_side(chunk_list):
    """
    turn a normal unified-style diff (post-processed by parse_delta) into a
    side-by-side diff structure.  the new structure is::

        chunks: list(
            diff: list(
                old_lineno: int,
                new_lineno: int,
                old_line: str,
                new_line: str,
                type: str('context' or 'changed'),
            )
        )
    """
    out_chunk_list = []
    for chunk in chunk_list:
        line_list = []
        delete_list, insert_list = [], []
        for line in chunk.diff:
            if line.type == 'context':
                if len(delete_list) or len(insert_list):
                    _process_side_by_side_buffers(line_list, delete_list,
                                                  insert_list)
                    delete_list, insert_list = [], []
                line_list.append(util.Container(old_lineno=line.old_lineno,
                                                new_lineno=line.new_lineno,
                                                old_line=line.line,
                                                new_line=line.line,
                                                old_type=line.type,
                                                new_type=line.type))
            elif line.type == 'delete':
                delete_list.append((line.old_lineno, line.line, line.type))
            elif line.type == 'insert':
                insert_list.append((line.new_lineno, line.line, line.type))
        if len(delete_list) or len(insert_list):
            _process_side_by_side_buffers(line_list, delete_list, insert_list)
        out_chunk_list.append(util.Container(diff=line_list))
    return out_chunk_list
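
# Worked example of the transformation above, with hypothetical data: a
# chunk containing one deleted and one inserted line,
#
#   [Container(old_lineno=3, new_lineno=None, type='delete', line='old'),
#    Container(old_lineno=None, new_lineno=3, type='insert', line='new')]
#
# is paired up into a single side-by-side row:
#
#   [Container(old_lineno=3, new_lineno=3, old_line='old', new_line='new',
#              old_type='delete', new_type='insert')]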


def is_branch(folder):
    try:
        bzrlib.branch.Branch.open(folder)
        return True
    except:
        return False


class _RevListToTimestamps(object):
    """This takes a list of revisions, and allows you to bisect by date"""

    __slots__ = ['revid_list', 'repository']

    def __init__(self, revid_list, repository):
        self.revid_list = revid_list
        self.repository = repository

    def __getitem__(self, index):
        """Get the date of the index'd item"""
        return datetime.datetime.fromtimestamp(self.repository.get_revision(
            self.revid_list[index]).timestamp)

    def __len__(self):
        return len(self.revid_list)
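
# Usage sketch: because _RevListToTimestamps presents a revid list as a
# sequence of datetimes, the stdlib bisect module can binary-search it by
# date without computing every timestamp up front (hypothetical variables):
#
#   revid_list.reverse()   # oldest first, so the timestamps ascend
#   index = bisect.bisect(_RevListToTimestamps(revid_list, repository), date)
#
# get_revision_history_since() below relies on exactly this trick.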


class FileChangeReporter(object):

    def __init__(self, old_inv, new_inv):
        self.added = []
        self.modified = []
        self.renamed = []
        self.removed = []
        self.text_changes = []
        self.old_inv = old_inv
        self.new_inv = new_inv

    def revid(self, inv, file_id):
        try:
            return inv[file_id].revision
        except bzrlib.errors.NoSuchId:
            return 'null:'

    def report(self, file_id, paths, versioned, renamed, modified,
               exe_change, kind):
        if modified not in ('unchanged', 'kind changed'):
            if versioned == 'removed':
                filename = rich_filename(paths[0], kind[0])
            else:
                filename = rich_filename(paths[1], kind[1])
            self.text_changes.append(util.Container(
                filename=filename, file_id=file_id,
                old_revision=self.revid(self.old_inv, file_id),
                new_revision=self.revid(self.new_inv, file_id)))
        if versioned == 'added':
            self.added.append(util.Container(
                filename=rich_filename(paths[1], kind),
                file_id=file_id, kind=kind[1]))
        elif versioned == 'removed':
            self.removed.append(util.Container(
                filename=rich_filename(paths[0], kind),
                file_id=file_id, kind=kind[0]))
        elif renamed:
            self.renamed.append(util.Container(
                old_filename=rich_filename(paths[0], kind[0]),
                new_filename=rich_filename(paths[1], kind[1]),
                file_id=file_id,
                text_modified=modified == 'modified'))
        else:
            self.modified.append(util.Container(
                filename=rich_filename(paths[1], kind),
                file_id=file_id))
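
# Usage sketch, mirroring file_changes_for_revision_ids() further below: the
# reporter is fed to bzrlib's change iterator and afterwards holds the
# categorized results (the tree variables here are hypothetical):
#
#   reporter = FileChangeReporter(old_tree.inventory, new_tree.inventory)
#   bzrlib.delta.report_changes(new_tree.iter_changes(old_tree), reporter)
#   # now inspect reporter.added / .removed / .renamed / .modified /
#   # .text_changes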


class RevInfoMemoryCache(object):
    """A store that validates values against the revids they were stored with.

    We use a unique key for each branch.

    The reason for not just using the revid as the key is so that when a new
    value is provided for a branch, we replace the old value used for the
    branch.

    There is another implementation of the same interface in
    loggerhead.changecache.RevInfoDiskCache.
    """

    def __init__(self, cache):
        self._cache = cache

    def get(self, key, revid):
        """Return the data associated with `key`, subject to a revid check.

        If a value was stored under `key`, with the same revid, return it.
        Otherwise return None.
        """
        cached = self._cache.get(key)
        if cached is None:
            return None
        stored_revid, data = cached
        if revid == stored_revid:
            return data
        else:
            return None

    def set(self, key, revid, data):
        """Store `data` under `key`, to be checked against `revid` on get().
        """
        self._cache[key] = (revid, data)
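
# Usage sketch with hypothetical values: the revid acts as a validity token,
# so an entry stored against an old branch tip simply reads as a miss:
#
#   cache = RevInfoMemoryCache({})
#   cache.set('branch-key', 'revid-2', rev_info_data)
#   cache.get('branch-key', 'revid-2')   # -> rev_info_data
#   cache.get('branch-key', 'revid-3')   # -> None (the tip has moved on)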


class History(object):
    """Decorate a branch to provide information for rendering.

    History objects are expected to be short lived -- when serving a request
    for a particular branch, open it, read-lock it, wrap a History object
    around it, serve the request, throw the History object away, unlock the
    branch and throw it away.

    :ivar _file_change_cache: An object that caches information about the
        files that changed between two revisions.
    :ivar _rev_info: A list of information about revisions.  This is by far
        the most cryptic data structure in loggerhead.  At the top level, it
        is a list of 3-tuples [(merge-info, where-merged, parents)].
        `merge-info` is (seq, revid, merge_depth, revno_str, end_of_merge) --
        like a merged sorted list, but the revno is stringified.
        `where-merged` is a tuple of revisions that have this revision as a
        non-lefthand parent.  Finally, `parents` is just the usual list of
        parents of this revision.
    :ivar _rev_indices: A dictionary mapping each revision id to the index of
        the information about it in _rev_info.
    :ivar _revno_revid: A dictionary mapping stringified revnos to revision
        ids.
    """
def _load_whole_history_data(self, caches, cache_key):
240
"""Set the attributes relating to the whole history of the branch.
242
:param caches: a list of caches with interfaces like
243
`RevInfoMemoryCache` and be ordered from fastest to slowest.
244
:param cache_key: the key to use with the caches.
246
self._rev_indices = None
247
self._rev_info = None
250
def update_missed_caches():
251
for cache in missed_caches:
252
cache.set(cache_key, self.last_revid, self._rev_info)
254
data = cache.get(cache_key, self.last_revid)
256
self._rev_info = data
257
update_missed_caches()
260
missed_caches.append(cache)
262
whole_history_data = compute_whole_history_data(self._branch)
263
self._rev_info, self._rev_indices = whole_history_data
264
update_missed_caches()
266
if self._rev_indices is not None:
267
self._revno_revid = {}
268
for ((_, revid, _, revno_str, _), _, _) in self._rev_info:
269
self._revno_revid[revno_str] = revid
271
self._revno_revid = {}
272
self._rev_indices = {}
273
for ((seq, revid, _, revno_str, _), _, _) in self._rev_info:
274
self._rev_indices[revid] = seq
275
self._revno_revid[revno_str] = revid

    def __init__(self, branch, whole_history_data_cache, file_cache=None,
                 revinfo_disk_cache=None, cache_key=None):
        assert branch.is_locked(), (
            "Can only construct a History object with a read-locked branch.")
        if file_cache is not None:
            self._file_change_cache = file_cache
            file_cache.history = self
        else:
            self._file_change_cache = None
        self._change_cache = None
        self._index = None
        self._lock = threading.RLock()
        self._branch = branch
        self._inventory_cache = {}
        self._branch_nick = self._branch.get_config().get_nickname()
        self.log = logging.getLogger('loggerhead.%s' % (self._branch_nick,))

        self.last_revid = branch.last_revision()

        caches = [RevInfoMemoryCache(whole_history_data_cache)]
        if revinfo_disk_cache:
            caches.append(revinfo_disk_cache)
        self._load_whole_history_data(caches, cache_key)

    def out_of_date(self):
        # the branch may have been upgraded on disk, in which case we're
        # stale.
        newly_opened = bzrlib.branch.Branch.open(self._branch.base)
        if self._branch.__class__ is not newly_opened.__class__:
            return True
        if self._branch.repository.__class__ is not \
                newly_opened.repository.__class__:
            return True
        return self._branch.last_revision() != self.last_revid

    def use_cache(self, cache):
        self._change_cache = cache

    def use_file_cache(self, cache):
        self._file_change_cache = cache

    def use_search_index(self, index):
        self._index = index

    @property
    def has_revisions(self):
        return not bzrlib.revision.is_null(self.last_revid)

    def close(self):
        # called when a new history object needs to be created, because the
        # branch history has changed.  we need to immediately close and stop
        # using our caches, because a new history object will be created to
        # replace us, using the same cache files.
        # (may also be called during server shutdown.)
        if self._change_cache is not None:
            self._change_cache.close()
            self._change_cache = None
        if self._index is not None:
            self._index.close()
            self._index = None

    def flush_cache(self):
        if self._change_cache is None:
            return
        self._change_cache.flush()

    def check_rebuild(self):
        if self._change_cache is not None:
            self._change_cache.check_rebuild()
        if self._index is not None:
            self._index.check_rebuild()

    def get_config(self):
        return self._branch.get_config()

    def get_revno(self, revid):
        if revid not in self._rev_indices:
            # ghost parent?
            return 'unknown'
        seq = self._rev_indices[revid]
        revno = self._rev_info[seq][0][3]
        return revno

    def get_revids_from(self, revid_list, start_revid):
        """
        Yield the mainline (wrt start_revid) revisions that merged each
        requested revision.
        """
        if revid_list is None:
            revid_list = [r[0][1] for r in self._rev_info]
        revid_set = set(revid_list)
        revid = start_revid

        def introduced_revisions(revid):
            r = set([revid])
            seq = self._rev_indices[revid]
            md = self._rev_info[seq][0][2]
            i = seq + 1
            while i < len(self._rev_info) and self._rev_info[i][0][2] > md:
                r.add(self._rev_info[i][0][1])
                i += 1
            return r

        while True:
            if bzrlib.revision.is_null(revid):
                return
            if introduced_revisions(revid) & revid_set:
                yield revid
            parents = self._rev_info[self._rev_indices[revid]][2]
            if len(parents) == 0:
                return
            revid = parents[0]
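
    # Behaviour sketch with hypothetical ids, reusing the _rev_info example
    # near the top of the class: asking which mainline revisions merged the
    # side-branch commit yields only the mainline tip,
    #
    #   list(history.get_revids_from(['rev-1.1.1'], 'rev-2'))
    #   # -> ['rev-2'], because 'rev-1.1.1' entered the mainline via the
    #   #    merge that created 'rev-2'.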

    def get_short_revision_history_by_fileid(self, file_id):
        # FIXME: would be awesome if we could get, for a folder, the list of
        # revisions where items within that folder changed.
        try:
            # FIXME: Workaround for bzr versions prior to 1.6b3.
            # Remove me eventually pretty please  :)
            w = self._branch.repository.weave_store.get_weave(
                file_id, self._branch.repository.get_transaction())
            w_revids = w.versions()
            revids = [r for r in self._rev_indices if r in w_revids]
        except AttributeError:
            possible_keys = [(file_id, revid) for revid in self._rev_indices]
            get_parent_map = self._branch.repository.texts.get_parent_map
            # We chunk the requests as this works better with GraphIndex.
            # See _filter_revisions_touching_file_id in bzrlib/log.py
            # for more information.
            revids = []
            chunk_size = 1000
            for start in xrange(0, len(possible_keys), chunk_size):
                next_keys = possible_keys[start:start + chunk_size]
                revids += [k[1] for k in get_parent_map(next_keys)]
            del possible_keys, next_keys
        return revids

    def get_revision_history_since(self, revid_list, date):
        # if a user asks for revisions starting at 01-sep, they mean
        # inclusive, so start at midnight on 02-sep.
        date = date + datetime.timedelta(days=1)
        # our revid list is sorted in REVERSE date order,
        # so go thru some hoops here...
        revid_list.reverse()
        index = bisect.bisect(_RevListToTimestamps(revid_list,
                                                   self._branch.repository),
                              date)
        if index == 0:
            return []
        revid_list.reverse()
        index = -index
        return revid_list[index:]

    def get_revision_history_matching(self, revid_list, text):
        self.log.debug('searching %d revisions for %r',
                       len(revid_list), text)
        z = time.time()
        # this is going to be painfully slow. :(
        out = []
        text = text.lower()
        for revid in revid_list:
            change = self.get_changes([revid])[0]
            if text in change.comment.lower():
                out.append(revid)
        self.log.debug('searched %d revisions for %r in %r secs',
                       len(revid_list), text, time.time() - z)
        return out

    def get_revision_history_matching_indexed(self, revid_list, text):
        self.log.debug('searching %d revisions for %r',
                       len(revid_list), text)
        z = time.time()
        if self._index is None:
            return self.get_revision_history_matching(revid_list, text)
        out = self._index.find(text, revid_list)
        self.log.debug('searched %d revisions for %r in %r secs: %d results',
                       len(revid_list), text, time.time() - z, len(out))
        # put them in some coherent order :)
        out_set = set(out)
        out = [r[0][1] for r in self._rev_info if r[0][1] in out_set]
        return out

    def get_search_revid_list(self, query, revid_list):
        """
        given a "quick-search" query, try a few obvious possible meanings:

            - revision id or # ("128.1.3")
            - date (US style "mm/dd/yy", earth style "dd-mm-yy", or
              iso style "yyyy-mm-dd")
            - comment text as a fallback

        and return a revid list that matches.
        """

    def simplify_merge_point_list(self, revids):
        """if a revision is already merged, don't show further merge points"""
        d = {}
        for revid in revids:
            revno = self.get_revno(revid)
            revnol = revno.split(".")
            revnos = ".".join(revnol[:-2])
            revnolast = int(revnol[-1])
            if revnos in d:
                m = d[revnos][0]
                if revnolast < m:
                    d[revnos] = (revnolast, revid)
            else:
                d[revnos] = (revnolast, revid)
        return [d[revnos][1] for revnos in d.keys()]

    def add_branch_nicks(self, change):
        """
        given a 'change', fill in the branch nicks on all parents and merge
        points.
        """
        fetch_set = set()
        for p in change.parents:
            fetch_set.add(p.revid)
        for p in change.merge_points:
            fetch_set.add(p.revid)
        p_changes = self.get_changes(list(fetch_set))
        p_change_dict = dict([(c.revid, c) for c in p_changes])
        for p in change.parents:
            if p.revid in p_change_dict:
                p.branch_nick = p_change_dict[p.revid].branch_nick
            else:
                p.branch_nick = '(missing)'
        for p in change.merge_points:
            if p.revid in p_change_dict:
                p.branch_nick = p_change_dict[p.revid].branch_nick
            else:
                p.branch_nick = '(missing)'

    def get_changes(self, revid_list):
        """Return a list of changes objects for the given revids.

        Revisions not present and NULL_REVISION will be ignored.
        """
        if self._change_cache is None:
            changes = self.get_changes_uncached(revid_list)
        else:
            changes = self._change_cache.get_changes(revid_list)
        if len(changes) == 0:
            return changes

        # some data needs to be recalculated each time, because it may
        # change as new revisions are added.
        for change in changes:
            merge_revids = self.simplify_merge_point_list(
                self.get_merge_point_list(change.revid))
            change.merge_points = [
                util.Container(revid=r, revno=self.get_revno(r))
                for r in merge_revids]
            if len(change.parents) > 0:
                if isinstance(change.parents[0], util.Container):
                    # old cache stored a potentially-bogus revno
                    change.parents = [
                        util.Container(revid=p.revid,
                                       revno=self.get_revno(p.revid))
                        for p in change.parents]
                else:
                    change.parents = [
                        util.Container(revid=r, revno=self.get_revno(r))
                        for r in change.parents]
            change.revno = self.get_revno(change.revid)

        parity = 0
        for change in changes:
            change.parity = parity
            parity ^= 1

        return changes

    def get_file(self, file_id, revid):
        "returns (path, filename, data)"
        inv = self.get_inventory(revid)
        inv_entry = inv[file_id]
        rev_tree = self._branch.repository.revision_tree(inv_entry.revision)
        path = inv.id2path(file_id)
        if not path.startswith('/'):
            path = '/' + path
        return path, inv_entry.name, rev_tree.get_file_text(file_id)

    def file_changes_for_revision_ids(self, old_revid, new_revid):
        """Return a nested data structure containing the changes in a delta::

            added: list((filename, file_id)),
            renamed: list((old_filename, new_filename, file_id)),
            removed: list((filename, file_id)),
            modified: list(
                filename: str,
                file_id: str,
            ),
            text_changes: list((filename, file_id)),
        """
        repo = self._branch.repository
        if (bzrlib.revision.is_null(old_revid) or
                bzrlib.revision.is_null(new_revid)):
            old_tree, new_tree = map(
                repo.revision_tree, [old_revid, new_revid])
        else:
            old_tree, new_tree = repo.revision_trees([old_revid, new_revid])

        reporter = FileChangeReporter(old_tree.inventory, new_tree.inventory)

        bzrlib.delta.report_changes(new_tree.iter_changes(old_tree), reporter)

        return util.Container(
            added=sorted(reporter.added, key=lambda x: x.filename),
            renamed=sorted(reporter.renamed, key=lambda x: x.new_filename),
            removed=sorted(reporter.removed, key=lambda x: x.filename),
            modified=sorted(reporter.modified, key=lambda x: x.filename),
            text_changes=sorted(reporter.text_changes,
                                key=lambda x: x.filename))

    def _parse_diffs(self, old_tree, new_tree, delta):
        """
        Return a list of processed diffs, in the format::

            list(
                filename: str,
                file_id: str,
                chunks: list(
                    diff: list(
                        old_lineno: int,
                        new_lineno: int,
                        type: str('context', 'delete', or 'insert'),
                        line: str,
                    ),
                ),
            )
        """
        process = []
        out = []

        for old_path, new_path, fid, kind, text_modified, meta_modified in \
                delta.renamed:
            if text_modified:
                process.append((old_path, new_path, fid, kind))
        for path, fid, kind, text_modified, meta_modified in delta.modified:
            process.append((path, path, fid, kind))

        for old_path, new_path, fid, kind in process:
            old_lines = old_tree.get_file_lines(fid)
            new_lines = new_tree.get_file_lines(fid)
            buffer = StringIO()
            if old_lines != new_lines:
                try:
                    bzrlib.diff.internal_diff(old_path, old_lines,
                                              new_path, new_lines, buffer)
                except bzrlib.errors.BinaryFile:
                    diff = ''
                else:
                    diff = buffer.getvalue()
            else:
                diff = ''
            out.append(util.Container(filename=rich_filename(new_path, kind),
                                      file_id=fid,
                                      chunks=self._process_diff(diff)))

        return out

    def _process_diff(self, diff):
        # doesn't really need to be a method; could be static.
        chunks = []
        chunk = None
        for line in diff.splitlines():
            if len(line) == 0:
                continue
            if line.startswith('+++ ') or line.startswith('--- '):
                continue
            if line.startswith('@@ '):
                # new chunk
                if chunk is not None:
                    chunks.append(chunk)
                chunk = util.Container()
                chunk.diff = []
                lines = [int(x.split(',')[0][1:])
                         for x in line.split(' ')[1:3]]
                old_lineno = lines[0]
                new_lineno = lines[1]
            elif line.startswith(' '):
                chunk.diff.append(util.Container(
                    old_lineno=old_lineno, new_lineno=new_lineno,
                    type='context', line=util.fixed_width(line[1:])))
                old_lineno += 1
                new_lineno += 1
            elif line.startswith('+'):
                chunk.diff.append(util.Container(
                    old_lineno=None, new_lineno=new_lineno,
                    type='insert', line=util.fixed_width(line[1:])))
                new_lineno += 1
            elif line.startswith('-'):
                chunk.diff.append(util.Container(
                    old_lineno=old_lineno, new_lineno=None,
                    type='delete', line=util.fixed_width(line[1:])))
                old_lineno += 1
            else:
                chunk.diff.append(util.Container(
                    old_lineno=None, new_lineno=None,
                    type='unknown', line=util.fixed_width(repr(line))))
        if chunk is not None:
            chunks.append(chunk)
        return chunks
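
    # Example of the parsing above on a hypothetical two-line hunk: the
    # header '@@ -1,1 +1,2 @@' followed by ' same' and '+added' yields one
    # chunk whose .diff list is
    #
    #   [Container(old_lineno=1, new_lineno=1, type='context', line='same'),
    #    Container(old_lineno=None, new_lineno=2, type='insert',
    #              line='added')]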

    def parse_delta(self, delta):
        """
        Return a nested data structure containing the changes in a delta::

            added: list((filename, file_id)),
            renamed: list((old_filename, new_filename, file_id)),
            removed: list((filename, file_id)),
            modified: list(
                filename: str,
                file_id: str,
            ),
        """
        added = []
        modified = []
        renamed = []
        removed = []

        for path, fid, kind in delta.added:
            added.append((rich_filename(path, kind), fid))

        for path, fid, kind, text_modified, meta_modified in delta.modified:
            modified.append(util.Container(
                filename=rich_filename(path, kind), file_id=fid))

        for old_path, new_path, fid, kind, text_modified, meta_modified in \
                delta.renamed:
            renamed.append((rich_filename(old_path, kind),
                            rich_filename(new_path, kind), fid))
            if meta_modified or text_modified:
                modified.append(util.Container(
                    filename=rich_filename(new_path, kind), file_id=fid))

        for path, fid, kind in delta.removed:
            removed.append((rich_filename(path, kind), fid))

        return util.Container(added=added, renamed=renamed, removed=removed,
                              modified=modified)

    def add_side_by_side(self, changes):
        # FIXME: this is a rotten API.
        for change in changes:
            for m in change.changes.modified:
                m.sbs_chunks = _make_side_by_side(m.chunks)

    def get_filelist(self, inv, file_id, sort_type=None):
        """
        return the list of all files (and their attributes) within a given
        path subtree.
        """
        dir_ie = inv[file_id]
        path = inv.id2path(file_id)
        file_list = []

        revid_set = set()
        for filename, entry in dir_ie.children.iteritems():
            revid_set.add(entry.revision)

        change_dict = {}
        for change in self.get_changes(list(revid_set)):
            change_dict[change.revid] = change

        for filename, entry in dir_ie.children.iteritems():
            pathname = filename
            if entry.kind == 'directory':
                pathname += '/'

            revid = entry.revision

            file = util.Container(
                filename=filename, executable=entry.executable,
                kind=entry.kind, pathname=pathname, file_id=entry.file_id,
                size=entry.text_size, revid=revid, change=change_dict[revid])
            file_list.append(file)

        if sort_type == 'filename' or sort_type is None:
            file_list.sort(key=lambda x: x.filename)
        elif sort_type == 'size':
            file_list.sort(key=lambda x: x.size)
        elif sort_type == 'date':
            file_list.sort(key=lambda x: x.change.date)

        parity = 0
        for file in file_list:
            file.parity = parity
            parity ^= 1

        return file_list

    _BADCHARS_RE = re.compile(ur'[\x00-\x08\x0b\x0e-\x1f]')

    def annotate_file(self, file_id, revid):
        z = time.time()
        lineno = 1
        parity = 0

        file_revid = self.get_inventory(revid)[file_id].revision

        # because we cache revision metadata ourselves, it's actually much
        # faster to call 'annotate_iter' on the weave directly than it is to
        # ask bzrlib to annotate for us.
        w = self._branch.repository.weave_store.get_weave(
            file_id, self._branch.repository.get_transaction())

        revid_set = set()
        for line_revid, text in w.annotate_iter(file_revid):
            revid_set.add(line_revid)
            if self._BADCHARS_RE.match(text):
                # bail out; this isn't displayable text
                yield util.Container(parity=0, lineno=1, status='same',
                                     text='(This is a binary file.)',
                                     change=util.Container())
                return
        change_cache = dict([(c.revid, c)
                             for c in self.get_changes(list(revid_set))])

        last_line_revid = None
        for line_revid, text in w.annotate_iter(file_revid):
            if line_revid == last_line_revid:
                # remember which lines have a new revno and which don't
                status = 'same'
            else:
                status = 'changed'
                parity ^= 1
                last_line_revid = line_revid
                change = change_cache[line_revid]
                trunc_revno = change.revno
                if len(trunc_revno) > 10:
                    trunc_revno = trunc_revno[:9] + '...'

            yield util.Container(parity=parity, lineno=lineno, status=status,
                                 change=change, text=util.fixed_width(text))
            lineno += 1

        self.log.debug('annotate: %r secs' % (time.time() - z,))

    def get_bundle(self, revid, compare_revid=None):
        if compare_revid is None:
            parents = self._rev_info[self._rev_indices[revid]][2]
            if len(parents) > 0:
                compare_revid = parents[0]
        s = StringIO()
        bzrlib.bundle.serializer.write_bundle(
            self._branch.repository, revid, compare_revid, s)
        return s.getvalue()