import bisect
import datetime
import logging
import re
import threading
import time

from StringIO import StringIO

import bzrlib.branch
import bzrlib.bundle.serializer
import bzrlib.delta
import bzrlib.diff
import bzrlib.errors
import bzrlib.foreign
import bzrlib.progress
import bzrlib.revision
import bzrlib.tsort
import bzrlib.ui
from bzrlib import tag

from loggerhead import search
from loggerhead import util
from loggerhead.util import decorator
from loggerhead.wholehistory import compute_whole_history_data
52
with_branch_lock = util.with_lock('_lock', 'branch')
56
def with_bzrlib_read_lock(unbound):
    """Decorator: hold a repository read-lock around the wrapped method.

    The wrapped object must expose ``self._branch.repository``.  The lock
    is always released, even when the wrapped callable raises; the merged
    version had ``unlock()`` after ``return``, making it unreachable.
    """
    def bzrlib_read_locked(self, *args, **kw):
        #self.log.debug('-> %r bzr lock', id(threading.currentThread()))
        self._branch.repository.lock_read()
        try:
            return unbound(self, *args, **kw)
        finally:
            self._branch.repository.unlock()
            #self.log.debug('<- %r bzr lock', id(threading.currentThread()))
    return bzrlib_read_locked
68
# bzrlib's UIFactory is not thread-safe, so each thread gets its own
# progress-bar stack via this thread-local.
uihack = threading.local()


class ThreadSafeUIFactory(bzrlib.ui.SilentUIFactory):
    """A SilentUIFactory that keeps one progress-bar stack per thread."""

    def nested_progress_bar(self):
        # Lazily create the per-thread stack; DummyProgress keeps it silent.
        if getattr(uihack, '_progress_bar_stack', None) is None:
            uihack._progress_bar_stack = bzrlib.progress.ProgressBarStack(
                klass=bzrlib.progress.DummyProgress)
        return uihack._progress_bar_stack.get_nested()

# Install the thread-safe factory globally so bzrlib uses it.
bzrlib.ui.ui_factory = ThreadSafeUIFactory()
80
def _process_side_by_side_buffers(line_list, delete_list, insert_list):
    """Pair buffered delete/insert lines into side-by-side rows.

    The shorter buffer is padded with empty 'context' entries so deletes
    and inserts line up; both buffers are fully consumed and the paired
    rows are appended to ``line_list`` as ``util.Container`` objects.
    """
    while len(delete_list) < len(insert_list):
        delete_list.append((None, '', 'context'))
    while len(insert_list) < len(delete_list):
        insert_list.append((None, '', 'context'))
    while len(delete_list) > 0:
        d = delete_list.pop(0)
        i = insert_list.pop(0)
        line_list.append(util.Container(old_lineno=d[0], new_lineno=i[0],
                                        old_line=d[1], new_line=i[1],
                                        old_type=d[2], new_type=i[2]))
93
def _make_side_by_side(chunk_list):
    """
    turn a normal unified-style diff (post-processed by parse_delta) into a
    side-by-side diff structure.  the new structure is::

        chunks: list(
            diff: list(
                old_lineno: int,
                new_lineno: int,
                old_line: str,
                new_line: str,
                type: str('context' or 'changed'),
            )
        )
    """
    # NOTE(review): the accumulator initializations below were lost in the
    # merge (out_chunk_list/line_list were referenced but never bound).
    out_chunk_list = []
    for chunk in chunk_list:
        line_list = []
        delete_list, insert_list = [], []
        for line in chunk.diff:
            if line.type == 'context':
                # A context row flushes any pending delete/insert run first.
                if len(delete_list) or len(insert_list):
                    _process_side_by_side_buffers(line_list, delete_list,
                                                  insert_list)
                    delete_list, insert_list = [], []
                line_list.append(util.Container(old_lineno=line.old_lineno,
                                                new_lineno=line.new_lineno,
                                                old_line=line.line,
                                                new_line=line.line,
                                                old_type=line.type,
                                                new_type=line.type))
            elif line.type == 'delete':
                delete_list.append((line.old_lineno, line.line, line.type))
            elif line.type == 'insert':
                insert_list.append((line.new_lineno, line.line, line.type))
        if len(delete_list) or len(insert_list):
            _process_side_by_side_buffers(line_list, delete_list, insert_list)
        out_chunk_list.append(util.Container(diff=line_list))
    return out_chunk_list
50
130
def is_branch(folder):
101
183
def __getitem__(self, index):
102
184
"""Get the date of the index'd item"""
103
return datetime.datetime.fromtimestamp(self.repository.get_revision(
104
self.revid_list[index]).timestamp)
185
return datetime.datetime.fromtimestamp(self.repository.get_revision(self.revid_list[index]).timestamp)
106
187
def __len__(self):
107
188
return len(self.revid_list)
109
class FileChangeReporter(object):
    """Collect per-file change information from a tree-delta report.

    Accumulates added/modified/renamed/removed entries plus text_changes;
    used as the reporter callback object for bzrlib.delta.report_changes.
    """

    def __init__(self, old_inv, new_inv):
        # NOTE(review): the four list initializers were lost in the merge;
        # restored because report() appends to all of them.
        self.added = []
        self.modified = []
        self.renamed = []
        self.removed = []
        self.text_changes = []
        self.old_inv = old_inv
        self.new_inv = new_inv

    def revid(self, inv, file_id):
        """Return the revision that last touched file_id, or 'null:'."""
        try:
            return inv[file_id].revision
        except bzrlib.errors.NoSuchId:
            return 'null:'

    # NOTE(review): the 'exe_change, kind' continuation of this signature
    # was lost in the merge; restored from upstream loggerhead — confirm.
    def report(self, file_id, paths, versioned, renamed, modified,
               exe_change, kind):
        """Record one file change (callback for report_changes)."""
        if modified not in ('unchanged', 'kind changed'):
            if versioned == 'removed':
                filename = rich_filename(paths[0], kind[0])
            else:
                filename = rich_filename(paths[1], kind[1])
            self.text_changes.append(util.Container(
                filename=filename, file_id=file_id,
                old_revision=self.revid(self.old_inv, file_id),
                new_revision=self.revid(self.new_inv, file_id)))
        if versioned == 'added':
            self.added.append(util.Container(
                filename=rich_filename(paths[1], kind),
                file_id=file_id, kind=kind[1]))
        elif versioned == 'removed':
            self.removed.append(util.Container(
                filename=rich_filename(paths[0], kind),
                file_id=file_id, kind=kind[0]))
        elif renamed:
            self.renamed.append(util.Container(
                old_filename=rich_filename(paths[0], kind[0]),
                new_filename=rich_filename(paths[1], kind[1]),
                file_id=file_id,
                text_modified=modified == 'modified'))
        else:
            self.modified.append(util.Container(
                filename=rich_filename(paths[1], kind),
                file_id=file_id))
156
# The lru_cache is not thread-safe, so we need a lock around it for
# all accesses.
rev_info_memory_cache_lock = threading.RLock()

class RevInfoMemoryCache(object):
    """A store that validates values against the revids they were stored with.

    We use a unique key for each branch.

    The reason for not just using the revid as the key is so that when a new
    value is provided for a branch, we replace the old value used for the
    branch.

    There is another implementation of the same interface in
    loggerhead.changecache.RevInfoDiskCache.
    """

    def __init__(self, cache):
        # `cache` is any dict-like object (e.g. an LRU cache).
        self._cache = cache

    def get(self, key, revid):
        """Return the data associated with `key`, subject to a revid check.

        If a value was stored under `key`, with the same revid, return it.
        Otherwise return None.
        """
        rev_info_memory_cache_lock.acquire()
        try:
            cached = self._cache.get(key)
        finally:
            rev_info_memory_cache_lock.release()
        if cached is None:
            return None
        stored_revid, data = cached
        if revid == stored_revid:
            return data
        else:
            return None

    def set(self, key, revid, data):
        """Store `data` under `key`, to be checked against `revid` on get().
        """
        rev_info_memory_cache_lock.acquire()
        try:
            self._cache[key] = (revid, data)
        finally:
            rev_info_memory_cache_lock.release()
204
# Used to store locks that prevent multiple threads from building a
# revision graph for the same branch at the same time, because that can
# cause severe performance issues that are so bad that the system seems
# to hang.
revision_graph_locks = {}
# Guards creation of entries in revision_graph_locks.
revision_graph_check_lock = threading.Lock()
211
class History(object):
212
"""Decorate a branch to provide information for rendering.
214
History objects are expected to be short lived -- when serving a request
215
for a particular branch, open it, read-lock it, wrap a History object
216
around it, serve the request, throw the History object away, unlock the
217
branch and throw it away.
219
:ivar _rev_info: A list of information about revisions. This is by far
220
the most cryptic data structure in loggerhead. At the top level, it
221
is a list of 3-tuples [(merge-info, where-merged, parents)].
222
`merge-info` is (seq, revid, merge_depth, revno_str, end_of_merge) --
223
like a merged sorted list, but the revno is stringified.
224
`where-merged` is a tuple of revisions that have this revision as a
225
non-lefthand parent. Finally, `parents` is just the usual list of
226
parents of this revision.
227
:ivar _rev_indices: A dictionary mapping each revision id to the index of
228
the information about it in _rev_info.
229
:ivar _revno_revid: A dictionary mapping stringified revnos to revision
233
def _load_whole_history_data(self, caches, cache_key):
234
"""Set the attributes relating to the whole history of the branch.
236
:param caches: a list of caches with interfaces like
237
`RevInfoMemoryCache` and be ordered from fastest to slowest.
238
:param cache_key: the key to use with the caches.
240
self._rev_indices = None
241
self._rev_info = None
244
def update_missed_caches():
245
for cache in missed_caches:
246
cache.set(cache_key, self.last_revid, self._rev_info)
248
# Theoretically, it's possible for two threads to race in creating
249
# the Lock() object for their branch, so we put a lock around
250
# creating the per-branch Lock().
251
revision_graph_check_lock.acquire()
253
if cache_key not in revision_graph_locks:
254
revision_graph_locks[cache_key] = threading.Lock()
256
revision_graph_check_lock.release()
258
revision_graph_locks[cache_key].acquire()
261
data = cache.get(cache_key, self.last_revid)
263
self._rev_info = data
264
update_missed_caches()
267
missed_caches.append(cache)
269
whole_history_data = compute_whole_history_data(self._branch)
270
self._rev_info, self._rev_indices = whole_history_data
271
update_missed_caches()
273
revision_graph_locks[cache_key].release()
275
if self._rev_indices is not None:
276
self._revno_revid = {}
277
for ((_, revid, _, revno_str, _), _, _) in self._rev_info:
278
self._revno_revid[revno_str] = revid
280
self._revno_revid = {}
281
self._rev_indices = {}
282
for ((seq, revid, _, revno_str, _), _, _) in self._rev_info:
283
self._rev_indices[revid] = seq
284
self._revno_revid[revno_str] = revid
286
def __init__(self, branch, whole_history_data_cache,
287
revinfo_disk_cache=None, cache_key=None):
288
assert branch.is_locked(), (
289
"Can only construct a History object with a read-locked branch.")
191
class History (object):
194
self._file_change_cache = None
195
self._lock = threading.RLock()
198
def from_branch(cls, branch, name=None):
290
201
self._branch = branch
291
self._branch_tags = None
292
self._inventory_cache = {}
293
self._branch_nick = self._branch.get_config().get_nickname()
294
self.log = logging.getLogger('loggerhead.%s' % (self._branch_nick,))
296
self.last_revid = branch.last_revision()
298
caches = [RevInfoMemoryCache(whole_history_data_cache)]
299
if revinfo_disk_cache:
300
caches.append(revinfo_disk_cache)
301
self._load_whole_history_data(caches, cache_key)
202
self._last_revid = self._branch.last_revision()
205
name = self._branch.nick
207
self.log = logging.getLogger('loggerhead.%s' % (name,))
209
graph = branch.repository.get_graph()
210
parent_map = dict(((key, value) for key, value in
211
graph.iter_ancestry([self._last_revid]) if value is not None))
213
self._revision_graph = self._strip_NULL_ghosts(parent_map)
214
self._full_history = []
215
self._revision_info = {}
216
self._revno_revid = {}
217
if bzrlib.revision.is_null(self._last_revid):
218
self._merge_sort = []
220
self._merge_sort = bzrlib.tsort.merge_sort(
221
self._revision_graph, self._last_revid, generate_revno=True)
223
for (seq, revid, merge_depth, revno, end_of_merge) in self._merge_sort:
224
self._full_history.append(revid)
225
revno_str = '.'.join(str(n) for n in revno)
226
self._revno_revid[revno_str] = revid
227
self._revision_info[revid] = (
228
seq, revid, merge_depth, revno_str, end_of_merge)
231
self._where_merged = {}
233
for revid in self._revision_graph.keys():
234
if self._revision_info[revid][2] == 0:
236
for parent in self._revision_graph[revid]:
237
self._where_merged.setdefault(parent, set()).add(revid)
239
self.log.info('built revision graph cache: %r secs' % (time.time() - z,))
243
def _strip_NULL_ghosts(revision_graph):
245
Copied over from bzrlib meant as a temporary workaround deprecated
249
# Filter ghosts, and null:
250
if bzrlib.revision.NULL_REVISION in revision_graph:
251
del revision_graph[bzrlib.revision.NULL_REVISION]
252
for key, parents in revision_graph.items():
253
revision_graph[key] = tuple(parent for parent in parents if parent
255
return revision_graph
258
def from_folder(cls, path, name=None):
259
b = bzrlib.branch.Branch.open(path)
262
return cls.from_branch(b, name)
267
def out_of_date(self):
268
# the branch may have been upgraded on disk, in which case we're stale.
269
newly_opened = bzrlib.branch.Branch.open(self._branch.base)
270
if self._branch.__class__ is not \
271
newly_opened.__class__:
273
if self._branch.repository.__class__ is not \
274
newly_opened.repository.__class__:
276
return self._branch.last_revision() != self._last_revid
278
def use_file_cache(self, cache):
279
self._file_change_cache = cache
304
282
def has_revisions(self):
305
283
return not bzrlib.revision.is_null(self.last_revid)
285
last_revid = property(lambda self: self._last_revid, None, None)
307
288
def get_config(self):
308
289
return self._branch.get_config()
310
292
def get_revno(self, revid):
311
if revid not in self._rev_indices:
293
if revid not in self._revision_info:
314
seq = self._rev_indices[revid]
315
revno = self._rev_info[seq][0][3]
296
seq, revid, merge_depth, revno_str, end_of_merge = self._revision_info[revid]
299
def get_revision_history(self):
300
return self._full_history
318
302
def get_revids_from(self, revid_list, start_revid):
321
305
revid in revid_list.
323
307
if revid_list is None:
324
# Just yield the mainline, starting at start_revid
326
is_null = bzrlib.revision.is_null
327
while not is_null(revid):
329
parents = self._rev_info[self._rev_indices[revid]][2]
308
revid_list = self._full_history
334
309
revid_set = set(revid_list)
335
310
revid = start_revid
337
311
def introduced_revisions(revid):
339
seq = self._rev_indices[revid]
340
md = self._rev_info[seq][0][2]
313
seq, revid, md, revno, end_of_merge = self._revision_info[revid]
342
while i < len(self._rev_info) and self._rev_info[i][0][2] > md:
343
r.add(self._rev_info[i][0][1])
315
while i < len(self._merge_sort) and self._merge_sort[i][2] > md:
316
r.add(self._merge_sort[i][1])
347
320
if bzrlib.revision.is_null(revid):
349
rev_introduced = introduced_revisions(revid)
350
matching = rev_introduced.intersection(revid_set)
352
# We don't need to look for these anymore.
353
revid_set.difference_update(matching)
322
if introduced_revisions(revid) & revid_set:
355
parents = self._rev_info[self._rev_indices[revid]][2]
324
parents = self._revision_graph[revid]
356
325
if len(parents) == 0:
358
327
revid = parents[0]
360
330
def get_short_revision_history_by_fileid(self, file_id):
331
# wow. is this really the only way we can get this list? by
332
# man-handling the weave store directly? :-0
361
333
# FIXME: would be awesome if we could get, for a folder, the list of
362
# revisions where items within that folder changed.i
363
possible_keys = [(file_id, revid) for revid in self._rev_indices]
364
get_parent_map = self._branch.repository.texts.get_parent_map
365
# We chunk the requests as this works better with GraphIndex.
366
# See _filter_revisions_touching_file_id in bzrlib/log.py
367
# for more information.
370
for start in xrange(0, len(possible_keys), chunk_size):
371
next_keys = possible_keys[start:start + chunk_size]
372
revids += [k[1] for k in get_parent_map(next_keys)]
373
del possible_keys, next_keys
334
# revisions where items within that folder changed.
335
w = self._branch.repository.weave_store.get_weave(file_id, self._branch.repository.get_transaction())
336
w_revids = w.versions()
337
revids = [r for r in self._full_history if r in w_revids]
376
341
def get_revision_history_since(self, revid_list, date):
377
342
# if a user asks for revisions starting at 01-sep, they mean inclusive,
378
343
# so start at midnight on 02-sep.
379
344
date = date + datetime.timedelta(days=1)
380
# our revid list is sorted in REVERSE date order,
381
# so go thru some hoops here...
345
# our revid list is sorted in REVERSE date order, so go thru some hoops here...
382
346
revid_list.reverse()
383
index = bisect.bisect(_RevListToTimestamps(revid_list,
384
self._branch.repository),
347
index = bisect.bisect(_RevListToTimestamps(revid_list, self._branch.repository), date)
388
350
revid_list.reverse()
390
352
return revid_list[index:]
392
355
def get_search_revid_list(self, query, revid_list):
394
357
given a "quick-search" query, try a few obvious possible meanings:
396
359
- revision id or # ("128.1.3")
397
- date (US style "mm/dd/yy", earth style "dd-mm-yy", or \
398
iso style "yyyy-mm-dd")
360
- date (US style "mm/dd/yy", earth style "dd-mm-yy", or iso style "yyyy-mm-dd")
399
361
- comment text as a fallback
401
363
and return a revid list that matches.
700
619
return [self._change_from_revision(rev) for rev in rev_list]
621
def _get_deltas_for_revisions_with_trees(self, revisions):
622
"""Produce a list of revision deltas.
624
Note that the input is a sequence of REVISIONS, not revision_ids.
625
Trees will be held in memory until the generator exits.
626
Each delta is relative to the revision's lefthand predecessor.
627
(This is copied from bzrlib.)
629
required_trees = set()
630
for revision in revisions:
631
required_trees.add(revision.revid)
632
required_trees.update([p.revid for p in revision.parents[:1]])
633
trees = dict((t.get_revision_id(), t) for
634
t in self._branch.repository.revision_trees(required_trees))
636
self._branch.repository.lock_read()
638
for revision in revisions:
639
if not revision.parents:
640
old_tree = self._branch.repository.revision_tree(
641
bzrlib.revision.NULL_REVISION)
643
old_tree = trees[revision.parents[0].revid]
644
tree = trees[revision.revid]
645
ret.append(tree.changes_from(old_tree))
648
self._branch.repository.unlock()
702
650
def _change_from_revision(self, revision):
704
652
Given a bzrlib Revision, return a processed "change" for use in
655
commit_time = datetime.datetime.fromtimestamp(revision.timestamp)
657
parents = [util.Container(revid=r, revno=self.get_revno(r)) for r in revision.parent_ids]
707
659
message, short_message = clean_message(revision.message)
709
if self._branch_tags is None:
710
self._branch_tags = self._branch.tags.get_reverse_tag_dict()
713
if revision.revision_id in self._branch_tags:
714
# tag.sort_* functions expect (tag, data) pairs, so we generate them,
715
# and then strip them
716
tags = [(t, None) for t in self._branch_tags[revision.revision_id]]
717
sort_func = getattr(tag, 'sort_natural', None)
718
if sort_func is None:
721
sort_func(self._branch, tags)
722
revtags = u', '.join([t[0] for t in tags])
725
662
'revid': revision.revision_id,
726
'date': datetime.datetime.fromtimestamp(revision.timestamp),
727
'utc_date': datetime.datetime.utcfromtimestamp(revision.timestamp),
728
'committer': revision.committer,
729
'authors': revision.get_apparent_authors(),
664
'author': revision.committer,
730
665
'branch_nick': revision.properties.get('branch-nick', None),
731
666
'short_comment': short_message,
732
667
'comment': revision.message,
733
668
'comment_clean': [util.html_clean(s) for s in message],
734
669
'parents': revision.parent_ids,
735
'bugs': [bug.split()[0] for bug in revision.properties.get('bugs', '').splitlines()],
738
if isinstance(revision, bzrlib.foreign.ForeignRevision):
739
foreign_revid, mapping = (
740
revision.foreign_revid, revision.mapping)
741
elif ":" in revision.revision_id:
743
foreign_revid, mapping = \
744
bzrlib.foreign.foreign_vcs_registry.parse_revision_id(
745
revision.revision_id)
746
except bzrlib.errors.InvalidRevisionId:
751
if foreign_revid is not None:
752
entry["foreign_vcs"] = mapping.vcs.abbreviation
753
entry["foreign_revid"] = mapping.vcs.show_foreign_revid(foreign_revid)
754
671
return util.Container(entry)
756
def get_file_changes(self, entry):
758
old_revid = entry.parents[0].revid
673
def get_file_changes_uncached(self, entries):
674
delta_list = self._get_deltas_for_revisions_with_trees(entries)
676
return [self.parse_delta(delta) for delta in delta_list]
679
def get_file_changes(self, entries):
680
if self._file_change_cache is None:
681
return self.get_file_changes_uncached(entries)
760
old_revid = bzrlib.revision.NULL_REVISION
761
return self.file_changes_for_revision_ids(old_revid, entry.revid)
763
def add_changes(self, entry):
764
changes = self.get_file_changes(entry)
765
entry.changes = changes
683
return self._file_change_cache.get_file_changes(entries)
685
def add_changes(self, entries):
686
changes_list = self.get_file_changes(entries)
688
for entry, changes in zip(entries, changes_list):
689
entry.changes = changes
692
def get_change_with_diff(self, revid, compare_revid=None):
693
change = self.get_changes([revid])[0]
695
if compare_revid is None:
697
compare_revid = change.parents[0].revid
699
compare_revid = 'null:'
701
rev_tree1 = self._branch.repository.revision_tree(compare_revid)
702
rev_tree2 = self._branch.repository.revision_tree(revid)
703
delta = rev_tree2.changes_from(rev_tree1)
705
change.changes = self.parse_delta(delta)
706
change.changes.modified = self._parse_diffs(rev_tree1, rev_tree2, delta)
767
711
def get_file(self, file_id, revid):
768
"""Returns (path, filename, file contents)"""
712
"returns (path, filename, data)"
769
713
inv = self.get_inventory(revid)
770
714
inv_entry = inv[file_id]
771
715
rev_tree = self._branch.repository.revision_tree(inv_entry.revision)
774
718
path = '/' + path
775
719
return path, inv_entry.name, rev_tree.get_file_text(file_id)
777
def file_changes_for_revision_ids(self, old_revid, new_revid):
721
def _parse_diffs(self, old_tree, new_tree, delta):
723
Return a list of processed diffs, in the format::
732
type: str('context', 'delete', or 'insert'),
741
for old_path, new_path, fid, kind, text_modified, meta_modified in delta.renamed:
743
process.append((old_path, new_path, fid, kind))
744
for path, fid, kind, text_modified, meta_modified in delta.modified:
745
process.append((path, path, fid, kind))
747
for old_path, new_path, fid, kind in process:
748
old_lines = old_tree.get_file_lines(fid)
749
new_lines = new_tree.get_file_lines(fid)
751
if old_lines != new_lines:
753
bzrlib.diff.internal_diff(old_path, old_lines,
754
new_path, new_lines, buffer)
755
except bzrlib.errors.BinaryFile:
758
diff = buffer.getvalue()
761
out.append(util.Container(filename=rich_filename(new_path, kind), file_id=fid, chunks=self._process_diff(diff), raw_diff=diff))
765
def _process_diff(self, diff):
766
# doesn't really need to be a method; could be static.
769
for line in diff.splitlines():
772
if line.startswith('+++ ') or line.startswith('--- '):
774
if line.startswith('@@ '):
776
if chunk is not None:
778
chunk = util.Container()
780
lines = [int(x.split(',')[0][1:]) for x in line.split(' ')[1:3]]
781
old_lineno = lines[0]
782
new_lineno = lines[1]
783
elif line.startswith(' '):
784
chunk.diff.append(util.Container(old_lineno=old_lineno, new_lineno=new_lineno,
785
type='context', line=util.fixed_width(line[1:])))
788
elif line.startswith('+'):
789
chunk.diff.append(util.Container(old_lineno=None, new_lineno=new_lineno,
790
type='insert', line=util.fixed_width(line[1:])))
792
elif line.startswith('-'):
793
chunk.diff.append(util.Container(old_lineno=old_lineno, new_lineno=None,
794
type='delete', line=util.fixed_width(line[1:])))
797
chunk.diff.append(util.Container(old_lineno=None, new_lineno=None,
798
type='unknown', line=util.fixed_width(repr(line))))
799
if chunk is not None:
803
def parse_delta(self, delta):
779
805
Return a nested data structure containing the changes in a delta::
788
text_changes: list((filename, file_id)),
790
repo = self._branch.repository
791
if (bzrlib.revision.is_null(old_revid) or
792
bzrlib.revision.is_null(new_revid)):
793
old_tree, new_tree = map(
794
repo.revision_tree, [old_revid, new_revid])
796
old_tree, new_tree = repo.revision_trees([old_revid, new_revid])
798
reporter = FileChangeReporter(old_tree.inventory, new_tree.inventory)
800
bzrlib.delta.report_changes(new_tree.iter_changes(old_tree), reporter)
802
return util.Container(
803
added=sorted(reporter.added, key=lambda x: x.filename),
804
renamed=sorted(reporter.renamed, key=lambda x: x.new_filename),
805
removed=sorted(reporter.removed, key=lambda x: x.filename),
806
modified=sorted(reporter.modified, key=lambda x: x.filename),
807
text_changes=sorted(reporter.text_changes,
808
key=lambda x: x.filename))
820
for path, fid, kind in delta.added:
821
added.append((rich_filename(path, kind), fid))
823
for path, fid, kind, text_modified, meta_modified in delta.modified:
824
modified.append(util.Container(filename=rich_filename(path, kind), file_id=fid))
826
for old_path, new_path, fid, kind, text_modified, meta_modified in delta.renamed:
827
renamed.append((rich_filename(old_path, kind), rich_filename(new_path, kind), fid))
828
if meta_modified or text_modified:
829
modified.append(util.Container(filename=rich_filename(new_path, kind), file_id=fid))
831
for path, fid, kind in delta.removed:
832
removed.append((rich_filename(path, kind), fid))
834
return util.Container(added=added, renamed=renamed, removed=removed, modified=modified)
837
def add_side_by_side(changes):
838
# FIXME: this is a rotten API.
839
for change in changes:
840
for m in change.changes.modified:
841
m.sbs_chunks = _make_side_by_side(m.chunks)
844
def get_filelist(self, inv, file_id, sort_type=None):
846
return the list of all files (and their attributes) within a given
850
dir_ie = inv[file_id]
851
path = inv.id2path(file_id)
856
for filename, entry in dir_ie.children.iteritems():
857
revid_set.add(entry.revision)
860
for change in self.get_changes(list(revid_set)):
861
change_dict[change.revid] = change
863
for filename, entry in dir_ie.children.iteritems():
865
if entry.kind == 'directory':
868
revid = entry.revision
870
file = util.Container(
871
filename=filename, executable=entry.executable, kind=entry.kind,
872
pathname=pathname, file_id=entry.file_id, size=entry.text_size,
873
revid=revid, change=change_dict[revid])
874
file_list.append(file)
876
if sort_type == 'filename' or sort_type is None:
877
file_list.sort(key=lambda x: x.filename)
878
elif sort_type == 'size':
879
file_list.sort(key=lambda x: x.size)
880
elif sort_type == 'date':
881
file_list.sort(key=lambda x: x.change.date)
884
for file in file_list:
891
_BADCHARS_RE = re.compile(ur'[\x00-\x08\x0b\x0e-\x1f]')
894
def annotate_file(self, file_id, revid):
899
file_revid = self.get_inventory(revid)[file_id].revision
901
tree = self._branch.repository.revision_tree(file_revid)
904
for line_revid, text in tree.annotate_iter(file_id):
905
revid_set.add(line_revid)
906
if self._BADCHARS_RE.match(text):
907
# bail out; this isn't displayable text
908
yield util.Container(parity=0, lineno=1, status='same',
909
text='(This is a binary file.)',
910
change=util.Container())
912
change_cache = dict([(c.revid, c) \
913
for c in self.get_changes(list(revid_set))])
915
last_line_revid = None
916
for line_revid, text in tree.annotate_iter(file_id):
917
if line_revid == last_line_revid:
918
# remember which lines have a new revno and which don't
923
last_line_revid = line_revid
924
change = change_cache[line_revid]
925
trunc_revno = change.revno
926
if len(trunc_revno) > 10:
927
trunc_revno = trunc_revno[:9] + '...'
929
yield util.Container(parity=parity, lineno=lineno, status=status,
930
change=change, text=util.fixed_width(text))
933
self.log.debug('annotate: %r secs' % (time.time() - z,))
936
def get_bundle(self, revid, compare_revid=None):
937
if compare_revid is None:
938
parents = self._revision_graph[revid]
940
compare_revid = parents[0]
944
bzrlib.bundle.serializer.write_bundle(self._branch.repository, revid, compare_revid, s)