# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
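
    # Illustrative calling sequence (a sketch, not executed here; 'repo' and
    # 'branch' are hypothetical stand-ins):
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   ... record entries via record_entry_contents() or
    #       record_iter_changes() ...
    #   builder.finish_inventory()
    #   new_rev_id = builder.commit('message')
    #
    # commit() reads self.inv_sha1, which finish_inventory() is responsible
    # for setting, so finish_inventory() must be called first.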

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            # add_inventory_by_delta returns (validator, new_inv); only the
            # validator is wanted here.
            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents
                )
        return self._new_revision_id
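
    # Which branch finish_inventory() takes is decided by the recording API
    # used: record_iter_changes() sets self.new_inventory to None, selecting
    # the add_inventory_by_delta() path, while record_entry_contents()
    # populates self.new_inventory and so selects the add_inventory() path.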

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository cannot support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
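
    # Sketch of the delete-recording protocol (illustrative only; 'builder'
    # is a hypothetical CommitBuilder instance):
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('some/path', 'some-file-id')
    #   delta = builder.get_basis_delta()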

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r).", (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # Working data:
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        else:
            merged_ids = {}
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    # Wants a path hint?
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
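
    # Note: record_iter_changes is a generator (see the yield above), so its
    # side effects only happen as it is iterated; callers are expected to
    # consume it fully, e.g.:
    #   list(builder.record_iter_changes(tree, basis_id, iter_changes))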

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
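
    # The keys used above follow the convention documented on Repository:
    # each text key is a (file_id, revision_id) two-tuple, and the parent
    # keys pair the same file_id with each parent revision in 'parents'.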


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.

    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """
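
    # For example (illustrative values only): ('rev-1',) is a revision,
    # inventory or signature key, while ('file-1', 'rev-1') is a text key.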

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate. Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None
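
    # Write groups bound all data insertion: start_write_group() opens one,
    # commit_write_group() makes the accrued data durable, and
    # abort_write_group() (above) abandons it. Note that abort is not a
    # rollback - data already handed over may still have become visible.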

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network
        operations may not be possible or may error and if possible should not
        be attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count
            repository.lock_read()
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
            up in the inventory store.
        :param config: If None no digital signature will be created.
            If supplied, its signature_needed method will be used
            to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))
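
    # The revision is stored through the same VersionedFiles interface as
    # everything else: serialised to a string, split into lines, and added
    # under the (revision_id,) key with one key per parent revision.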

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        offer.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        #
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        if not locked:
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        if not locked:
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result
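
    # Example of the returned dictionary (illustrative values only):
    #   {'committers': 2, 'firstrev': (1200000000.0, 0),
    #    'latestrev': (1230000000.0, 3600), 'revisions': 10}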

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.add_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        if self.has_same_location(source) and fetch_spec is None:
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
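
    # Typical use (a sketch; 'repo_a' and 'repo_b' are hypothetical):
    #   repo_b.fetch(repo_a, revision_id='some-rev-id')
    # copies 'some-rev-id' and all content it requires into repo_b.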

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result
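
    # Note the pairing: get_commit_builder() starts a write group here, and
    # CommitBuilder.commit() / CommitBuilder.abort() close it again via
    # commit_write_group() / abort_write_group().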
1568
if (self.control_files._lock_count == 1 and
1569
self.control_files._lock_mode == 'w'):
1570
if self._write_group is not None:
1571
self.abort_write_group()
1572
self.control_files.unlock()
1573
raise errors.BzrError(
1574
'Must end write groups before releasing write locks.')
1575
self.control_files.unlock()
1576
if self.control_files._lock_count == 0:
1577
self._inventory_entry_cache.clear()
1578
for repo in self._fallback_repositories:
1582
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """
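    # Illustrative sketch (not in the original source) of the write group
    # protocol described above; the same pattern is used by
    # install_revisions() later in this module. `repo` is assumed to be
    # write-locked already:
    #
    #   repo.start_write_group()
    #   try:
    #       pass  # insert data: add texts, inventories, revisions ...
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()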
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendant repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo
    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()
    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]
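    # Illustrative sketch (not in the original source): assuming `repo` is a
    # read-locked Repository and `rev_id` names a present revision, the
    # TreeDelta against its left-hand parent can be inspected like this:
    #
    #   delta = repo.get_revision_delta(rev_id)
    #   for path, file_id, kind in delta.added:
    #       print 'added', path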
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()
    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid
    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result
    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys
    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()
    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
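    # Illustrative sketch (not in the original source): fetching the raw
    # bytes of two file versions, keyed by caller-chosen identifiers.
    # `repo`, the file ids and the revision ids are assumed to exist:
    #
    #   wanted = [('f1-id', 'rev-1', 'first'), ('f2-id', 'rev-2', 'second')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(wanted):
    #       text = ''.join(bytes_iterator)
    #       print identifier, len(text)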
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()
    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
pb.update("Calculating text parents", processed_texts, text_count)
2068
for offset in xrange(batch_count):
2069
to_query = revision_order[offset * batch_size:(offset + 1) *
2073
for rev_tree in self.revision_trees(to_query):
2074
revision_id = rev_tree.get_revision_id()
2075
parent_ids = ancestors[revision_id]
2076
for text_key in revision_keys[revision_id]:
2077
pb.update("Calculating text parents", processed_texts)
2078
processed_texts += 1
2079
candidate_parents = []
2080
for parent_id in parent_ids:
2081
parent_text_key = (text_key[0], parent_id)
2083
check_parent = parent_text_key not in \
2084
revision_keys[parent_id]
2086
# the parent parent_id is a ghost:
2087
check_parent = False
2088
# truncate the derived graph against this ghost.
2089
parent_text_key = None
2091
# look at the parent commit details inventories to
2092
# determine possible candidates in the per file graph.
2095
inv = inventory_cache[parent_id]
2097
inv = self.revision_tree(parent_id).inventory
2098
inventory_cache[parent_id] = inv
2100
parent_entry = inv[text_key[0]]
2101
except (KeyError, errors.NoSuchId):
2103
if parent_entry is not None:
2105
text_key[0], parent_entry.revision)
2107
parent_text_key = None
2108
if parent_text_key is not None:
2109
candidate_parents.append(
2110
text_key_cache[parent_text_key])
2111
parent_heads = text_graph.heads(candidate_parents)
2112
new_parents = list(parent_heads)
2113
new_parents.sort(key=lambda x:candidate_parents.index(x))
2114
if new_parents == []:
2115
new_parents = [NULL_REVISION]
2116
text_index[text_key] = new_parents
2118
for text_key in invalid_keys:
2119
text_index[text_key] = [NULL_REVISION]
2122
    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revisions.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result
    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            chunks = text_chunks.pop(key)
            yield ''.join(chunks), key[-1]
    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id])
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry
        """
        return self.get_revision(revision_id).inventory_sha1
    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid).  If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost.  Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
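    # Illustrative sketch (not in the original source): resolving revno 10
    # when revno 15 is already known, assuming `repo` holds that history:
    #
    #   found, info = repo.get_rev_id_for_revno(10, (15, 'revid-at-15'))
    #   if found:
    #       rev_id = info
    #   else:
    #       closest_revno, closest_revid = info  # history ran out early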
    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            try:
                parents = graph.get_parent_map([next_id])[next_id]
            except KeyError:
                raise errors.RevisionNotPresent(next_id, self)
            yield next_id
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]
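    # Illustrative sketch (not in the original source): walking the mainline
    # back from a tip revision, assuming `repo` contains `tip_rev_id`:
    #
    #   for rev_id in repo.iter_reverse_revision_history(tip_rev_id):
    #       print rev_id  # tip first, then its left-hand ancestors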
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler
    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if topo_sorted:
            parent_map = graph.get_parent_map(keys)
            keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call it's reasonable to
        implicitly lock for the user.
        """
    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
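    # Illustrative sketch (not in the original source): ghosts are simply
    # absent from the result, so callers can detect them by key difference.
    # `repo` and the revision ids are assumptions here:
    #
    #   wanted = ['rev-a', 'rev-b']
    #   parent_map = repo.get_parent_map(wanted)
    #   ghosts = set(wanted) - set(parent_map)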
    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)
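    # Illustrative sketch (not in the original source): signing a revision
    # and checking the result. `repo` is assumed to be write-locked;
    # LoopbackGPGStrategy is bzrlib's non-gpg test strategy and ignores its
    # constructor argument:
    #
    #   from bzrlib import gpg
    #   repo.sign_revision('some-revid', gpg.LoopbackGPGStrategy(None))
    #   assert repo.has_signature_for_revision_id('some-revid')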
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)
    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                    % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
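# Illustrative sketch (not in the original source): copying a handful of
# revisions from `source_repo` into a write-locked `target_repo` using the
# helper above; the revision ids are assumed to exist in the source:
#
#   revs = [source_repo.get_revision(r) for r in ('rev-1', 'rev-2')]
#   triples = [(rev, source_repo.revision_tree(rev.revision_id), None)
#              for rev in revs]
#   install_revisions(target_repo, triples, num_revisions=len(triples))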
def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)
class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)
network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # emit.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
return "<%s>" % self.__class__.__name__
2842
def __eq__(self, other):
2843
# format objects are generally stateless
2844
return isinstance(other, self.__class__)
2846
def __ne__(self, other):
2847
return not self == other
2850
def find_format(klass, a_bzrdir):
2851
"""Return the format for the repository object in a_bzrdir.
2853
This is used by bzr native formats that have a "format" file in
2854
the repository. Other methods may be used by different types of
2858
transport = a_bzrdir.get_repository_transport(None)
2859
format_string = transport.get("format").read()
2860
return format_registry.get(format_string)
2861
except errors.NoSuchFile:
2862
raise errors.NoRepositoryPresent(a_bzrdir)
2864
raise errors.UnknownFormatError(format=format_string,
2868
def register_format(klass, format):
2869
format_registry.register(format.get_format_string(), format)
2872
def unregister_format(klass, format):
2873
format_registry.remove(format.get_format_string())
2876
def get_default_format(klass):
2877
"""Return the current default format."""
2878
from bzrlib import bzrdir
2879
return bzrdir.format_registry.make_bzrdir('default').repository_format
2881
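    # Illustrative sketch (not in the original source): looking up a format
    # instance in the registry by its on-disk format string, for example the
    # 2a format registered near the bottom of this module:
    #
    #   fmt = format_registry.get(
    #       'Bazaar repository format 2a (needs bzr 1.16 or later)\n')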
    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)
    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)
class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()
# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directory's
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
    )
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
    )

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7',
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )
# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        pb=pb, find_ghosts=find_ghosts)
def _walk_to_common_revisions(self, revision_ids):
3178
"""Walk out from revision_ids in source to revisions target has.
3180
:param revision_ids: The start point for the search.
3181
:return: A set of revision ids.
3183
target_graph = self.target.get_graph()
3184
revision_ids = frozenset(revision_ids)
3185
missing_revs = set()
3186
source_graph = self.source.get_graph()
3187
# ensure we don't pay silly lookup costs.
3188
searcher = source_graph._make_breadth_first_searcher(revision_ids)
3189
null_set = frozenset([_mod_revision.NULL_REVISION])
3190
searcher_exhausted = False
3194
# Iterate the searcher until we have enough next_revs
3195
while len(next_revs) < self._walk_to_common_revisions_batch_size:
3197
next_revs_part, ghosts_part = searcher.next_with_ghosts()
3198
next_revs.update(next_revs_part)
3199
ghosts.update(ghosts_part)
3200
except StopIteration:
3201
searcher_exhausted = True
3203
# If there are ghosts in the source graph, and the caller asked for
3204
# them, make sure that they are present in the target.
3205
# We don't care about other ghosts as we can't fetch them and
3206
# haven't been asked to.
3207
ghosts_to_check = set(revision_ids.intersection(ghosts))
3208
revs_to_get = set(next_revs).union(ghosts_to_check)
3210
have_revs = set(target_graph.get_parent_map(revs_to_get))
3211
# we always have NULL_REVISION present.
3212
have_revs = have_revs.union(null_set)
3213
# Check if the target is missing any ghosts we need.
3214
ghosts_to_check.difference_update(have_revs)
3216
# One of the caller's revision_ids is a ghost in both the
3217
# source and the target.
3218
raise errors.NoSuchRevision(
3219
self.source, ghosts_to_check.pop())
3220
missing_revs.update(next_revs - have_revs)
3221
# Because we may have walked past the original stop point, make
3222
# sure everything is stopped
3223
stop_revs = searcher.find_seen_ancestors(have_revs)
3224
searcher.stop_searching_any(stop_revs)
3225
if searcher_exhausted:
3227
return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
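
    # Usage sketch (illustrative; 'source_repo' and 'target_repo' are
    # hypothetical repository instances, not names defined in this module).
    # Callers normally obtain the best optimiser via InterObject.get() and
    # forward to it:
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   missing = inter.search_missing_revision_ids(find_ghosts=True)
    #   inter.fetch(revision_id=None, find_ghosts=False)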

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all revisions needed to satisfy revision_id in source,
        # but we don't want to stat every file here and there.
        # So we want all revisions the target needs in order to satisfy
        # revision_id checked, but not those that we already have locally.
        # The first thing is to get a subset of the revisions that
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # This is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # Since we used get_ancestry to determine source_ids, we are
            # assured all referenced revisions are present: they are installed
            # in topological order, and the tip was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # Since we used get_ancestry to determine source_ids, we are
            # assured all referenced revisions are present: they are installed
            # in topological order, and the tip was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        return True

    def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        possible_trees = [(parent_id, cache[parent_id])
                          for parent_id in parent_ids
                          if parent_id in cache]
        if len(possible_trees) == 0:
            # There either aren't any parents, or the parents aren't in the
            # cache, so just use the last converted tree
            possible_trees.append((basis_id, cache[basis_id]))
        deltas = []
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
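
    # The selection above is a simple heuristic: each candidate basis is
    # scored by delta length, and sorting the (len, basis_id, delta) tuples
    # puts the smallest delta first. Sketch of the idea (hypothetical data):
    #
    #   deltas = [(12, 'rev-a', delta_a), (3, 'rev-b', delta_b)]
    #   deltas.sort()
    #   deltas[0][1:]  # -> ('rev-b', delta_b), the cheapest basis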

    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = parent_root_id
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id:
                # With stacking we _might_ want to refer to a non-local
                # revision, but this code path only applies when we have the
                # full content available, so ghosts really are ghosts, not just
                # the edge of local data.
                parent_keys.append((parent_id,))
            else:
                # root_id may be in the parent anyway.
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # ghost, can't refer to it.
                    pass
                else:
                    try:
                        parent_keys.append((tree.inventory[root_id].revision,))
                    except errors.NoSuchId:
                        # root_id is not in this parent tree.
                        pass
        g = graph.Graph(self.source.revisions)
        heads = g.heads(parent_keys)
        selected_keys = []
        for key in parent_keys:
            if key in heads and key not in selected_keys:
                selected_keys.append(key)
        return tuple([(root_id,) + key for key in selected_keys])

    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        for tree in self.source.revision_trees(revision_ids):
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           basis_id, cache)
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Find text entries that need to be copied
            for old_path, new_path, file_id, entry in delta:
                if new_path is not None:
                    if not new_path:
                        # This is the root entry.
                        if not self.target.supports_rich_root():
                            # The target doesn't support rich root, so we don't
                            # copy it.
                            continue
                        if self._converting_to_rich_root:
                            # This can't be copied normally, we have to insert
                            # it specially.
                            root_keys_to_create.add((file_id, entry.revision))
                            continue
                    text_keys.add((file_id, entry.revision))
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        # Copy the referenced file texts.
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = self._new_root_data_stream(root_keys_to_create,
                                                     parent_map)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            for parent_tree in self.source.revision_trees(parent_ids):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                basis_id, delta = self._get_delta_for_revision(
                    parent_tree, parents_parents, basis_id, cache)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressBar
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.target.abort_write_group()
                raise
            else:
                self.target.commit_write_group()
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        first_rev = self.source.get_revision(revision_ids[0])
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
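
# Usage sketch (hypothetical 'repo' and 'target_format' objects): a format
# upgrade with this converter boils down to:
#
#   converter = CopyConverter(target_format)
#   pb = ui.ui_factory.nested_progress_bar()
#   try:
#       converter.convert(repo, pb)
#   finally:
#       pb.finished()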


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
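
# Behaviour sketch: named entities come from _unescape_map, while numeric
# character references are decoded and UTF-8 encoded. For example
# (illustrative only):
#
#   _unescape_xml('a &amp; b')   # -> 'a & b'
#   _unescape_xml('&#65;&#66;')  # -> 'AB'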


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct. dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        if progress_bar is not None:
            progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # The text is missing from the repository.
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
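
# Illustrative effect on a small graph (hypothetical revision ids): parents
# that are ghosts (absent keys) or null: are dropped from each entry:
#
#   _strip_NULL_ghosts({'r2': ('r1', 'ghost'), 'r1': ('null:',)})
#   # -> {'r2': ('r1',), 'r1': ()}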


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format, is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
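
    # Protocol sketch (hypothetical driver code): a caller inserts a stream,
    # and if the sink reports missing keys it fetches just those and resumes:
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       extra = source.get_stream_for_missing_keys(missing)
    #       sink.insert_stream(extra, src_format, tokens)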

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                    ('texts', self.target_repo.texts),
                    ('inventories', self.target_repo.inventories),
                    ('revisions', self.target_repo.revisions),
                    ('signatures', self.target_repo.signatures),
                    ('chk_bytes', self.target_repo.chk_bytes),
                    ):
                if versioned_file is None:
                    continue
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group.)
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        self.target_repo.commit_write_group()
        return [], set()

    def _extract_and_insert_inventories(self, substream, serializer):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)
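
    # Sketch (hypothetical objects): callers negate this result to build the
    # include_delta_closure flag for get_record_stream:
    #
    #   source = StreamSource(from_repo, to_format)
    #   want_fulltexts = not source.delta_on_metadata()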

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # the same problem.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # NB: This currently reopens the inventory weave in source;
                # using a single stream interface instead would avoid this.
                from_weave = self.from_repository.inventories
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
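
    # Key shapes (sketch): missing_keys entries are prefixed tuples such as
    # ('texts', file_id, revision_id) or ('inventories', revision_id), so the
    # bucketing above turns, e.g. (hypothetical ids):
    #
    #   set([('texts', 'f-1', 'rev-2'), ('inventories', 'rev-1')])
    #
    # into {'texts': set([('f-1', 'rev-2')]),
    #       'inventories': set([('rev-1',)]), ...}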

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks
            and (from_format._serializer == self.to_format._serializer)):
            # Both sides support chks, and they use the same serializer, so it
            # is safe to transmit the chk pages and inventory pages across
            # the wire as-is.
            return self._get_chk_inventory_stream(revision_ids)
        elif (not from_format.supports_chks):
            # Source repository doesn't support chks. So we can transmit the
            # inventories 'as-is' and either they are just accepted on the
            # target, or the Sink will properly convert it.
            return self._get_simple_inventory_stream(revision_ids)
        else:
            # XXX: Hack to make not-chk->chk fetch: copy the inventories as
            # inventories. Note that this should probably be done somehow
            # as part of bzrlib.repository.StreamSink. Except JAM couldn't
            # figure out how a non-chk repository could possibly handle
            # deserializing an inventory stream from a chk repo, as it
            # doesn't have a way to understand individual pages.
            return self._get_convertable_inventory_stream(revision_ids)

    def _get_simple_inventory_stream(self, revision_ids):
        from_weave = self.from_repository.inventories
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(),
            not self.delta_on_metadata()))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work.
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        # ATM, this will always insert the records as fulltexts, and
        # requires that you can hang on to records once you have gone
        # on to the next one. Further, it causes the target to
        # recompress the data. Testing shows it to be faster than
        # requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _get_convertable_inventory_stream(self, revision_ids):
        # XXX: One of source or target is using chks, and they don't have
        # compatible serializations. The StreamSink code expects to be
        # able to convert on the target, so we need to put
        # bytes-on-the-wire that can be converted.
        yield ('inventories', self._stream_invs_as_fulltexts(revision_ids))

    def _stream_invs_as_fulltexts(self, revision_ids):
        from_repo = self.from_repository
        from_serializer = from_repo._format._serializer
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        for inv in self.from_repository.iter_inventories(revision_ids):
            # XXX: This is a bit hackish, but it works. Basically,
            # CHKSerializer 'accidentally' supports
            # read/write_inventory_to_string, even though that is never
            # the format that is stored on disk. It *does* give us a
            # single string representation for an inventory, so live with
            # it for now.
            # This would be far better if we had a 'serialized inventory
            # delta' form. Then we could use 'inventory._make_delta', and
            # transmit that. This would both be faster to generate, and
            # result in fewer bytes-on-the-wire.
            as_bytes = from_serializer.write_inventory_to_string(inv)
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, as_bytes)


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
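

# Usage sketch (hypothetical 'repo' and 'branch' objects): the cache is seeded
# with a known tip and extended on demand; asking for index 5 walks at most
# that far back:
#
#   cache = [branch.last_revision()]
#   _iter_for_revno(repo, cache, stop_index=5)
#   # cache now holds up to 6 revision ids, newest first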