1
# Copyright (C) 2005-2011 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
from __future__ import absolute_import
19
from .lazy_import import lazy_import
20
lazy_import(globals(), """
31
revision as _mod_revision,
32
testament as _mod_testament,
35
from breezy.bundle import serializer
36
from breezy.i18n import gettext
44
from .decorators import needs_read_lock, needs_write_lock, only_raises
45
from .inter import InterObject
46
from .lock import _RelockDebugMixin, LogicalLockResult
52
log_exception_quietly, note, mutter, mutter_callsite, warning)
55
# Old formats display a warning, but only once
56
_deprecation_warning_done = False
59
class IsInWriteGroupError(errors.InternalBzrError):
    """Raised when refresh_data is called on a repository inside a write group.

    :ivar repo: The repository being refreshed.
    """

    _fmt = "May not refresh_data of repo %(repo)s while in a write group."

    def __init__(self, repo):
        errors.InternalBzrError.__init__(self, repo=repo)
67
class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # whether this commit builder supports the record_entry_contents interface
    supports_record_entry_contents = False
    # whether this commit builder will automatically update the branch that is
    # being committed to
    updates_branch = False

    def __init__(self, repository, parents, config_stack, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that can not be natively
            represented, when pushing to a foreign VCS
        """
        self._config_stack = config_stack
        self._lossy = lossy

        if committer is None:
            self._committer = self._config_stack.get('email')
        elif not isinstance(committer, unicode):
            self._committer = committer.decode() # throw if non-ascii
        else:
            self._committer = committer

        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        raise NotImplementedError(self.any_changes)

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in viewitems(revprops):
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        raise NotImplementedError(self.commit)

    def abort(self):
        """Abort the commit that is being built.
        """
        raise NotImplementedError(self.abort)

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a
        RevisionTree representing the newly committed tree. This is
        preferred to calling Repository.revision_tree() because that may
        require deserializing the inventory, while we already have a copy in
        memory.
        """
        raise NotImplementedError(self.revision_tree)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        raise NotImplementedError(self.finish_inventory)

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._committer, self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        raise NotImplementedError(self.will_record_deletes)

    def record_iter_changes(self, tree, basis_revision_id, iter_changes):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        raise NotImplementedError(self.record_iter_changes)
229
class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository.

    :ivar repository_token: The token obtained from the underlying lock, or
        None.
    :ivar unlock: A callable which will unlock the lock.
    """

    def __init__(self, unlock, repository_token):
        LogicalLockResult.__init__(self, unlock)
        self.repository_token = repository_token

    def __repr__(self):
        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
            self.unlock)
246
######################################################################
250
class Repository(controldir.ControlComponent, _RelockDebugMixin):
251
"""Repository holding history for one or more branches.
253
The repository holds and retrieves historical information including
254
revisions and file history. It's normally accessed only by the Branch,
255
which views a particular line of development through that history.
257
See VersionedFileRepository in breezy.vf_repository for the
258
base class for most Bazaar repositories.
261
def abort_write_group(self, suppress_errors=False):
262
"""Commit the contents accrued within the current write group.
264
:param suppress_errors: if true, abort_write_group will catch and log
265
unexpected errors that happen during the abort, rather than
266
allowing them to propagate. Defaults to False.
268
:seealso: start_write_group.
270
if self._write_group is not self.get_transaction():
271
# has an unlock or relock occured ?
274
'(suppressed) mismatched lock context and write group. %r, %r',
275
self._write_group, self.get_transaction())
277
raise errors.BzrError(
278
'mismatched lock context and write group. %r, %r' %
279
(self._write_group, self.get_transaction()))
281
self._abort_write_group()
282
except Exception as exc:
283
self._write_group = None
284
if not suppress_errors:
286
mutter('abort_write_group failed')
287
log_exception_quietly()
288
note(gettext('bzr: ERROR (ignored): %s'), exc)
289
self._write_group = None
291
def _abort_write_group(self):
292
"""Template method for per-repository write group cleanup.
294
This is called during abort before the write group is considered to be
295
finished and should cleanup any internal state accrued during the write
296
group. There is no requirement that data handed to the repository be
297
*not* made available - this is not a rollback - but neither should any
298
attempt be made to ensure that data added is fully commited. Abort is
299
invoked when an error has occured so futher disk or network operations
300
may not be possible or may error and if possible should not be
304
def add_fallback_repository(self, repository):
305
"""Add a repository to use for looking up data not held locally.
307
:param repository: A repository.
309
raise NotImplementedError(self.add_fallback_repository)
311
def _check_fallback_repository(self, repository):
312
"""Check that this repository can fallback to repository safely.
314
Raise an error if not.
316
:param repository: A repository to fallback to.
318
return InterRepository._assert_same_model(self, repository)
320
def all_revision_ids(self):
321
"""Returns a list of all the revision ids in the repository.
323
This is conceptually deprecated because code should generally work on
324
the graph reachable from a particular revision, and ignore any other
325
revisions that might be present. There is no direct replacement
328
if 'evil' in debug.debug_flags:
329
mutter_callsite(2, "all_revision_ids is linear with history.")
330
return self._all_revision_ids()
332
def _all_revision_ids(self):
333
"""Returns a list of all the revision ids in the repository.
335
These are in as much topological order as the underlying store can
338
raise NotImplementedError(self._all_revision_ids)
340
def break_lock(self):
341
"""Break a lock if one is present from another instance.
343
Uses the ui factory to ask for confirmation if the lock may be from
346
self.control_files.break_lock()
349
def create(controldir):
350
"""Construct the current default format repository in controldir."""
351
return RepositoryFormat.get_default_format().initialize(controldir)
353
def __init__(self, _format, controldir, control_files):
354
"""instantiate a Repository.
356
:param _format: The format of the repository on disk.
357
:param controldir: The ControlDir of the repository.
358
:param control_files: Control files to use for locking, etc.
360
# In the future we will have a single api for all stores for
361
# getting file texts, inventories and revisions, then
362
# this construct will accept instances of those things.
363
super(Repository, self).__init__()
364
self._format = _format
365
# the following are part of the public API for Repository:
366
self.bzrdir = controldir
367
self.control_files = control_files
369
self._write_group = None
370
# Additional places to query for data.
371
self._fallback_repositories = []
374
def user_transport(self):
375
return self.bzrdir.user_transport
378
def control_transport(self):
379
return self._transport
382
if self._fallback_repositories:
383
return '%s(%r, fallback_repositories=%r)' % (
384
self.__class__.__name__,
386
self._fallback_repositories)
388
return '%s(%r)' % (self.__class__.__name__,
391
def _has_same_fallbacks(self, other_repo):
392
"""Returns true if the repositories have the same fallbacks."""
393
my_fb = self._fallback_repositories
394
other_fb = other_repo._fallback_repositories
395
if len(my_fb) != len(other_fb):
397
for f, g in zip(my_fb, other_fb):
398
if not f.has_same_location(g):
402
def has_same_location(self, other):
403
"""Returns a boolean indicating if this repository is at the same
404
location as another repository.
406
This might return False even when two repository objects are accessing
407
the same physical repository via different URLs.
409
if self.__class__ is not other.__class__:
411
return (self.control_url == other.control_url)
413
def is_in_write_group(self):
414
"""Return True if there is an open write group.
416
:seealso: start_write_group.
418
return self._write_group is not None
421
return self.control_files.is_locked()
423
def is_write_locked(self):
424
"""Return True if this object is write locked."""
425
return self.is_locked() and self.control_files._lock_mode == 'w'
427
def lock_write(self, token=None):
428
"""Lock this repository for writing.
430
This causes caching within the repository obejct to start accumlating
431
data during reads, and allows a 'write_group' to be obtained. Write
432
groups must be used for actual data insertion.
434
A token should be passed in if you know that you have locked the object
435
some other way, and need to synchronise this object's state with that
438
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
440
:param token: if this is already locked, then lock_write will fail
441
unless the token matches the existing lock.
442
:returns: a token if this instance supports tokens, otherwise None.
443
:raises TokenLockingNotSupported: when a token is given but this
444
instance doesn't support using token locks.
445
:raises MismatchedToken: if the specified token doesn't match the token
446
of the existing lock.
447
:seealso: start_write_group.
448
:return: A RepositoryWriteLockResult.
450
locked = self.is_locked()
451
token = self.control_files.lock_write(token=token)
453
self._warn_if_deprecated()
455
for repo in self._fallback_repositories:
456
# Writes don't affect fallback repos
459
return RepositoryWriteLockResult(self.unlock, token)
462
"""Lock the repository for read operations.
464
:return: An object with an unlock method which will release the lock
467
locked = self.is_locked()
468
self.control_files.lock_read()
470
self._warn_if_deprecated()
472
for repo in self._fallback_repositories:
475
return LogicalLockResult(self.unlock)
477
def get_physical_lock_status(self):
478
return self.control_files.get_physical_lock_status()
480
def leave_lock_in_place(self):
481
"""Tell this repository not to release the physical lock when this
484
If lock_write doesn't return a token, then this method is not supported.
486
self.control_files.leave_in_place()
488
def dont_leave_lock_in_place(self):
489
"""Tell this repository to release the physical lock when this
490
object is unlocked, even if it didn't originally acquire it.
492
If lock_write doesn't return a token, then this method is not supported.
494
self.control_files.dont_leave_in_place()
497
def gather_stats(self, revid=None, committers=None):
498
"""Gather statistics from a revision id.
500
:param revid: The revision id to gather statistics from, if None, then
501
no revision specific statistics are gathered.
502
:param committers: Optional parameter controlling whether to grab
503
a count of committers from the revision specific statistics.
504
:return: A dictionary of statistics. Currently this contains:
505
committers: The number of committers if requested.
506
firstrev: A tuple with timestamp, timezone for the penultimate left
507
most ancestor of revid, if revid is not the NULL_REVISION.
508
latestrev: A tuple with timestamp, timezone for revid, if revid is
509
not the NULL_REVISION.
510
revisions: The total revision count in the repository.
511
size: An estimate disk size of the repository in bytes.
514
if revid and committers:
515
result['committers'] = 0
516
if revid and revid != _mod_revision.NULL_REVISION:
517
graph = self.get_graph()
519
all_committers = set()
520
revisions = [r for (r, p) in graph.iter_ancestry([revid])
521
if r != _mod_revision.NULL_REVISION]
524
# ignore the revisions in the middle - just grab first and last
525
revisions = revisions[0], revisions[-1]
526
for revision in self.get_revisions(revisions):
527
if not last_revision:
528
last_revision = revision
530
all_committers.add(revision.committer)
531
first_revision = revision
533
result['committers'] = len(all_committers)
534
result['firstrev'] = (first_revision.timestamp,
535
first_revision.timezone)
536
result['latestrev'] = (last_revision.timestamp,
537
last_revision.timezone)
540
def find_branches(self, using=False):
541
"""Find branches underneath this repository.
543
This will include branches inside other branches.
545
:param using: If True, list only branches using this repository.
547
if using and not self.is_shared():
548
return self.bzrdir.list_branches()
549
class Evaluator(object):
552
self.first_call = True
554
def __call__(self, controldir):
555
# On the first call, the parameter is always the controldir
556
# containing the current repo.
557
if not self.first_call:
559
repository = controldir.open_repository()
560
except errors.NoRepositoryPresent:
563
return False, ([], repository)
564
self.first_call = False
565
value = (controldir.list_branches(), None)
569
for branches, repository in controldir.ControlDir.find_bzrdirs(
570
self.user_transport, evaluate=Evaluator()):
571
if branches is not None:
573
if not using and repository is not None:
574
ret.extend(repository.find_branches())
578
def search_missing_revision_ids(self, other,
579
find_ghosts=True, revision_ids=None, if_present_ids=None,
581
"""Return the revision ids that other has that this does not.
583
These are returned in topological order.
585
revision_ids: only return revision ids included by revision_id.
587
return InterRepository.get(other, self).search_missing_revision_ids(
588
find_ghosts=find_ghosts, revision_ids=revision_ids,
589
if_present_ids=if_present_ids, limit=limit)
593
"""Open the repository rooted at base.
595
For instance, if the repository is at URL/.bzr/repository,
596
Repository.open(URL) -> a Repository instance.
598
control = controldir.ControlDir.open(base)
599
return control.open_repository()
601
def copy_content_into(self, destination, revision_id=None):
602
"""Make a complete copy of the content in self into destination.
604
This is a destructive operation! Do not use it on existing
607
return InterRepository.get(self, destination).copy_content(revision_id)
609
def commit_write_group(self):
610
"""Commit the contents accrued within the current write group.
612
:seealso: start_write_group.
614
:return: it may return an opaque hint that can be passed to 'pack'.
616
if self._write_group is not self.get_transaction():
617
# has an unlock or relock occured ?
618
raise errors.BzrError('mismatched lock context %r and '
620
(self.get_transaction(), self._write_group))
621
result = self._commit_write_group()
622
self._write_group = None
625
def _commit_write_group(self):
626
"""Template method for per-repository write group cleanup.
628
This is called before the write group is considered to be
629
finished and should ensure that all data handed to the repository
630
for writing during the write group is safely committed (to the
631
extent possible considering file system caching etc).
634
def suspend_write_group(self):
635
"""Suspend a write group.
637
:raise UnsuspendableWriteGroup: If the write group can not be
639
:return: List of tokens
641
raise errors.UnsuspendableWriteGroup(self)
643
def refresh_data(self):
644
"""Re-read any data needed to synchronise with disk.
646
This method is intended to be called after another repository instance
647
(such as one used by a smart server) has inserted data into the
648
repository. On all repositories this will work outside of write groups.
649
Some repository formats (pack and newer for breezy native formats)
650
support refresh_data inside write groups. If called inside a write
651
group on a repository that does not support refreshing in a write group
652
IsInWriteGroupError will be raised.
656
def resume_write_group(self, tokens):
657
if not self.is_write_locked():
658
raise errors.NotWriteLocked(self)
659
if self._write_group:
660
raise errors.BzrError('already in a write group')
661
self._resume_write_group(tokens)
662
# so we can detect unlock/relock - the write group is now entered.
663
self._write_group = self.get_transaction()
665
def _resume_write_group(self, tokens):
666
raise errors.UnsuspendableWriteGroup(self)
668
def fetch(self, source, revision_id=None, find_ghosts=False):
669
"""Fetch the content required to construct revision_id from source.
671
If revision_id is None, then all content is copied.
673
fetch() may not be used when the repository is in a write group -
674
either finish the current write group before using fetch, or use
675
fetch before starting the write group.
677
:param find_ghosts: Find and copy revisions in the source that are
678
ghosts in the target (and not reachable directly by walking out to
679
the first-present revision in target from revision_id).
680
:param revision_id: If specified, all the content needed for this
681
revision ID will be copied to the target. Fetch will determine for
682
itself which content needs to be copied.
684
if self.is_in_write_group():
685
raise errors.InternalBzrError(
686
"May not fetch while in a write group.")
687
# fast path same-url fetch operations
688
# TODO: lift out to somewhere common with RemoteRepository
689
# <https://bugs.launchpad.net/bzr/+bug/401646>
690
if (self.has_same_location(source)
691
and self._has_same_fallbacks(source)):
692
# check that last_revision is in 'from' and then return a
694
if (revision_id is not None and
695
not _mod_revision.is_null(revision_id)):
696
self.get_revision(revision_id)
698
inter = InterRepository.get(source, self)
699
return inter.fetch(revision_id=revision_id, find_ghosts=find_ghosts)
701
def create_bundle(self, target, base, fileobj, format=None):
702
return serializer.write_bundle(self, target, base, fileobj, format)
704
def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
705
timezone=None, committer=None, revprops=None,
706
revision_id=None, lossy=False):
707
"""Obtain a CommitBuilder for this repository.
709
:param branch: Branch to commit to.
710
:param parents: Revision ids of the parents of the new revision.
711
:param config_stack: Configuration stack to use.
712
:param timestamp: Optional timestamp recorded for commit.
713
:param timezone: Optional timezone for timestamp.
714
:param committer: Optional committer to set for commit.
715
:param revprops: Optional dictionary of revision properties.
716
:param revision_id: Optional revision id.
717
:param lossy: Whether to discard data that can not be natively
718
represented, when pushing to a foreign VCS
720
raise NotImplementedError(self.get_commit_builder)
722
@only_raises(errors.LockNotHeld, errors.LockBroken)
724
if (self.control_files._lock_count == 1 and
725
self.control_files._lock_mode == 'w'):
726
if self._write_group is not None:
727
self.abort_write_group()
728
self.control_files.unlock()
729
raise errors.BzrError(
730
'Must end write groups before releasing write locks.')
731
self.control_files.unlock()
732
if self.control_files._lock_count == 0:
733
for repo in self._fallback_repositories:
737
def clone(self, controldir, revision_id=None):
738
"""Clone this repository into controldir using the current format.
740
Currently no check is made that the format of this repository and
741
the bzrdir format are compatible. FIXME RBC 20060201.
743
:return: The newly created destination repository.
745
# TODO: deprecate after 0.16; cloning this with all its settings is
746
# probably not very useful -- mbp 20070423
747
dest_repo = self._create_sprouting_repo(
748
controldir, shared=self.is_shared())
749
self.copy_content_into(dest_repo, revision_id)
752
def start_write_group(self):
753
"""Start a write group in the repository.
755
Write groups are used by repositories which do not have a 1:1 mapping
756
between file ids and backend store to manage the insertion of data from
757
both fetch and commit operations.
759
A write lock is required around the start_write_group/commit_write_group
760
for the support of lock-requiring repository formats.
762
One can only insert data into a repository inside a write group.
766
if not self.is_write_locked():
767
raise errors.NotWriteLocked(self)
768
if self._write_group:
769
raise errors.BzrError('already in a write group')
770
self._start_write_group()
771
# so we can detect unlock/relock - the write group is now entered.
772
self._write_group = self.get_transaction()
774
def _start_write_group(self):
775
"""Template method for per-repository write group startup.
777
This is called before the write group is considered to be
782
def sprout(self, to_bzrdir, revision_id=None):
783
"""Create a descendent repository for new development.
785
Unlike clone, this does not copy the settings of the repository.
787
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
788
dest_repo.fetch(self, revision_id=revision_id)
791
def _create_sprouting_repo(self, a_bzrdir, shared):
792
if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
793
# use target default format.
794
dest_repo = a_bzrdir.create_repository()
796
# Most control formats need the repository to be specifically
797
# created, but on some old all-in-one formats it's not needed
799
dest_repo = self._format.initialize(a_bzrdir, shared=shared)
800
except errors.UninitializableFormat:
801
dest_repo = a_bzrdir.open_repository()
805
def has_revision(self, revision_id):
806
"""True if this repository has a copy of the revision."""
807
return revision_id in self.has_revisions((revision_id,))
810
def has_revisions(self, revision_ids):
811
"""Probe to find out the presence of multiple revisions.
813
:param revision_ids: An iterable of revision_ids.
814
:return: A set of the revision_ids that were present.
816
raise NotImplementedError(self.has_revisions)
819
def get_revision(self, revision_id):
820
"""Return the Revision object for a named revision."""
821
return self.get_revisions([revision_id])[0]
823
def get_revision_reconcile(self, revision_id):
824
"""'reconcile' helper routine that allows access to a revision always.
826
This variant of get_revision does not cross check the weave graph
827
against the revision one as get_revision does: but it should only
828
be used by reconcile, or reconcile-alike commands that are correcting
829
or testing the revision graph.
831
raise NotImplementedError(self.get_revision_reconcile)
833
def get_revisions(self, revision_ids):
834
"""Get many revisions at once.
836
Repositories that need to check data on every revision read should
837
subclass this method.
839
raise NotImplementedError(self.get_revisions)
841
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
842
"""Produce a generator of revision deltas.
844
Note that the input is a sequence of REVISIONS, not revision_ids.
845
Trees will be held in memory until the generator exits.
846
Each delta is relative to the revision's lefthand predecessor.
848
:param specific_fileids: if not None, the result is filtered
849
so that only those file-ids, their parents and their
850
children are included.
852
# Get the revision-ids of interest
853
required_trees = set()
854
for revision in revisions:
855
required_trees.add(revision.revision_id)
856
required_trees.update(revision.parent_ids[:1])
858
# Get the matching filtered trees. Note that it's more
859
# efficient to pass filtered trees to changes_from() rather
860
# than doing the filtering afterwards. changes_from() could
861
# arguably do the filtering itself but it's path-based, not
862
# file-id based, so filtering before or afterwards is
864
if specific_fileids is None:
865
trees = dict((t.get_revision_id(), t) for
866
t in self.revision_trees(required_trees))
868
trees = dict((t.get_revision_id(), t) for
869
t in self._filtered_revision_trees(required_trees,
872
# Calculate the deltas
873
for revision in revisions:
874
if not revision.parent_ids:
875
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
877
old_tree = trees[revision.parent_ids[0]]
878
yield trees[revision.revision_id].changes_from(old_tree)
881
def get_revision_delta(self, revision_id, specific_fileids=None):
882
"""Return the delta for one revision.
884
The delta is relative to the left-hand predecessor of the
887
:param specific_fileids: if not None, the result is filtered
888
so that only those file-ids, their parents and their
889
children are included.
891
r = self.get_revision(revision_id)
892
return list(self.get_deltas_for_revisions([r],
893
specific_fileids=specific_fileids))[0]
896
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
897
signature = gpg_strategy.sign(plaintext)
898
self.add_signature_text(revision_id, signature)
900
def add_signature_text(self, revision_id, signature):
901
"""Store a signature text for a revision.
903
:param revision_id: Revision id of the revision
904
:param signature: Signature text.
906
raise NotImplementedError(self.add_signature_text)
908
def iter_files_bytes(self, desired_files):
909
"""Iterate through file versions.
911
Files will not necessarily be returned in the order they occur in
912
desired_files. No specific order is guaranteed.
914
Yields pairs of identifier, bytes_iterator. identifier is an opaque
915
value supplied by the caller as part of desired_files. It should
916
uniquely identify the file version in the caller's context. (Examples:
917
an index number or a TreeTransform trans_id.)
919
:param desired_files: a list of (file_id, revision_id, identifier)
922
raise NotImplementedError(self.iter_files_bytes)
924
def get_rev_id_for_revno(self, revno, known_pair):
925
"""Return the revision id of a revno, given a later (revno, revid)
926
pair in the same history.
928
:return: if found (True, revid). If the available history ran out
929
before reaching the revno, then this returns
930
(False, (closest_revno, closest_revid)).
932
known_revno, known_revid = known_pair
933
partial_history = [known_revid]
934
distance_from_known = known_revno - revno
935
if distance_from_known < 0:
937
'requested revno (%d) is later than given known revno (%d)'
938
% (revno, known_revno))
941
self, partial_history, stop_index=distance_from_known)
942
except errors.RevisionNotPresent as err:
943
if err.revision_id == known_revid:
944
# The start revision (known_revid) wasn't found.
946
# This is a stacked repository with no fallbacks, or a there's a
947
# left-hand ghost. Either way, even though the revision named in
948
# the error isn't in this repo, we know it's the next step in this
950
partial_history.append(err.revision_id)
951
if len(partial_history) <= distance_from_known:
952
# Didn't find enough history to get a revid for the revno.
953
earliest_revno = known_revno - len(partial_history) + 1
954
return (False, (earliest_revno, partial_history[-1]))
955
if len(partial_history) - 1 > distance_from_known:
956
raise AssertionError('_iter_for_revno returned too much history')
957
return (True, partial_history[-1])
960
"""Return True if this repository is flagged as a shared repository."""
961
raise NotImplementedError(self.is_shared)
964
def reconcile(self, other=None, thorough=False):
965
"""Reconcile this repository."""
966
from .reconcile import RepoReconciler
967
reconciler = RepoReconciler(self, thorough=thorough)
968
reconciler.reconcile()
971
def _refresh_data(self):
972
"""Helper called from lock_* to ensure coherency with disk.
974
The default implementation does nothing; it is however possible
975
for repositories to maintain loaded indices across multiple locks
976
by checking inside their implementation of this method to see
977
whether their indices are still valid. This depends of course on
978
the disk format being validatable in this manner. This method is
979
also called by the refresh_data() public interface to cause a refresh
980
to occur while in a write lock so that data inserted by a smart server
981
push operation is visible on the client's instance of the physical
986
def revision_tree(self, revision_id):
987
"""Return Tree for a revision on this branch.
989
`revision_id` may be NULL_REVISION for the empty tree revision.
991
raise NotImplementedError(self.revision_tree)
993
def revision_trees(self, revision_ids):
994
"""Return Trees for revisions in this repository.
996
:param revision_ids: a sequence of revision-ids;
997
a revision-id may not be None or 'null:'
999
raise NotImplementedError(self.revision_trees)
1001
def pack(self, hint=None, clean_obsolete_packs=False):
1002
"""Compress the data within the repository.
1004
This operation only makes sense for some repository types. For other
1005
types it should be a no-op that just returns.
1007
This stub method does not require a lock, but subclasses should use
1008
@needs_write_lock as this is a long running call it's reasonable to
1009
implicitly lock for the user.
1011
:param hint: If not supplied, the whole repository is packed.
1012
If supplied, the repository may use the hint parameter as a
1013
hint for the parts of the repository to pack. A hint can be
1014
obtained from the result of commit_write_group(). Out of
1015
date hints are simply ignored, because concurrent operations
1016
can obsolete them rapidly.
1018
:param clean_obsolete_packs: Clean obsolete packs immediately after
1022
def get_transaction(self):
1023
return self.control_files.get_transaction()
1025
def get_parent_map(self, revision_ids):
1026
"""See graph.StackedParentsProvider.get_parent_map"""
1027
raise NotImplementedError(self.get_parent_map)
1029
def _get_parent_map_no_fallbacks(self, revision_ids):
1030
"""Same as Repository.get_parent_map except doesn't query fallbacks."""
1031
# revisions index works in keys; this just works in revisions
1032
# therefore wrap and unwrap
1035
for revision_id in revision_ids:
1036
if revision_id == _mod_revision.NULL_REVISION:
1037
result[revision_id] = ()
1038
elif revision_id is None:
1039
raise ValueError('get_parent_map(None) is not valid')
1041
query_keys.append((revision_id ,))
1042
vf = self.revisions.without_fallbacks()
1043
for (revision_id,), parent_keys in viewitems(
1044
vf.get_parent_map(query_keys)):
1046
result[revision_id] = tuple([parent_revid
1047
for (parent_revid,) in parent_keys])
1049
result[revision_id] = (_mod_revision.NULL_REVISION,)
1052
def _make_parents_provider(self):
1053
if not self._format.supports_external_lookups:
1055
return graph.StackedParentsProvider(_LazyListJoin(
1056
[self._make_parents_provider_unstacked()],
1057
self._fallback_repositories))
1059
def _make_parents_provider_unstacked(self):
1060
return graph.CallableToParentsProviderAdapter(
1061
self._get_parent_map_no_fallbacks)
1064
def get_known_graph_ancestry(self, revision_ids):
1065
"""Return the known graph for a set of revision ids and their ancestors.
1067
raise NotImplementedError(self.get_known_graph_ancestry)
1069
def get_file_graph(self):
1070
"""Return the graph walker for files."""
1071
raise NotImplementedError(self.get_file_graph)
1073
def get_graph(self, other_repository=None):
1074
"""Return the graph walker for this repository format"""
1075
parents_provider = self._make_parents_provider()
1076
if (other_repository is not None and
1077
not self.has_same_location(other_repository)):
1078
parents_provider = graph.StackedParentsProvider(
1079
[parents_provider, other_repository._make_parents_provider()])
1080
return graph.Graph(parents_provider)
1083
def set_make_working_trees(self, new_value):
1084
"""Set the policy flag for making working trees when creating branches.
1086
This only applies to branches that use this repository.
1088
The default is 'True'.
1089
:param new_value: True to restore the default, False to disable making
1092
raise NotImplementedError(self.set_make_working_trees)
1094
def make_working_trees(self):
1095
"""Returns the policy for making working trees on new branches."""
1096
raise NotImplementedError(self.make_working_trees)
1099
def sign_revision(self, revision_id, gpg_strategy):
1100
testament = _mod_testament.Testament.from_revision(self, revision_id)
1101
plaintext = testament.as_short_text()
1102
self.store_revision_signature(gpg_strategy, plaintext, revision_id)
1105
def verify_revision_signature(self, revision_id, gpg_strategy):
1106
"""Verify the signature on a revision.
1108
:param revision_id: the revision to verify
1109
:gpg_strategy: the GPGStrategy object to used
1111
:return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
1113
if not self.has_signature_for_revision_id(revision_id):
1114
return gpg.SIGNATURE_NOT_SIGNED, None
1115
signature = self.get_signature_text(revision_id)
1117
testament = _mod_testament.Testament.from_revision(self, revision_id)
1118
plaintext = testament.as_short_text()
1120
return gpg_strategy.verify(signature, plaintext)
1123
def verify_revision_signatures(self, revision_ids, gpg_strategy):
1124
"""Verify revision signatures for a number of revisions.
1126
:param revision_id: the revision to verify
1127
:gpg_strategy: the GPGStrategy object to used
1128
:return: Iterator over tuples with revision id, result and keys
1130
for revid in revision_ids:
1131
(result, key) = self.verify_revision_signature(revid, gpg_strategy)
1132
yield revid, result, key
1134
def has_signature_for_revision_id(self, revision_id):
1135
"""Query for a revision signature for revision_id in the repository."""
1136
raise NotImplementedError(self.has_signature_for_revision_id)
1138
def get_signature_text(self, revision_id):
1139
"""Return the text for a signature."""
1140
raise NotImplementedError(self.get_signature_text)
1142
def check(self, revision_ids=None, callback_refs=None, check_repo=True):
1143
"""Check consistency of all history of given revision_ids.
1145
Different repository implementations should override _check().
1147
:param revision_ids: A non-empty list of revision_ids whose ancestry
1148
will be checked. Typically the last revision_id of a branch.
1149
:param callback_refs: A dict of check-refs to resolve and callback
1150
the check/_check method on the items listed as wanting the ref.
1152
:param check_repo: If False do not check the repository contents, just
1153
calculate the data callback_refs requires and call them back.
1155
return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
1156
check_repo=check_repo)
1158
def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
1159
raise NotImplementedError(self.check)
1161
def _warn_if_deprecated(self, branch=None):
1162
if not self._format.is_deprecated():
1164
global _deprecation_warning_done
1165
if _deprecation_warning_done:
1169
conf = config.GlobalStack()
1171
conf = branch.get_config_stack()
1172
if 'format_deprecation' in conf.get('suppress_warnings'):
1174
warning("Format %s for %s is deprecated -"
1175
" please use 'brz upgrade' to get better performance"
1176
% (self._format, self.bzrdir.transport.base))
1178
_deprecation_warning_done = True
1180
def supports_rich_root(self):
1181
return self._format.rich_root_data
1183
def _check_ascii_revisionid(self, revision_id, method):
1184
"""Private helper for ascii-only repositories."""
1185
# weave repositories refuse to store revisionids that are non-ascii.
1186
if revision_id is not None:
1187
# weaves require ascii revision ids.
1188
if isinstance(revision_id, unicode):
1190
revision_id.encode('ascii')
1191
except UnicodeEncodeError:
1192
raise errors.NonAsciiRevisionId(method, self)
1195
revision_id.decode('ascii')
1196
except UnicodeDecodeError:
1197
raise errors.NonAsciiRevisionId(method, self)
1200
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
    """Repository format registry."""

    def get_default(self):
        """Return the current default format."""
        return controldir.format_registry.make_bzrdir('default').repository_format
1208
network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = RepositoryFormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
1225
#####################################################################
1226
# Repository Formats
1228
class RepositoryFormat(controldir.ControlComponentFormat):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * a optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the controldir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False
    # Does the repository storage understand references to trees?
    supports_tree_reference = None
    # Is the format experimental ?
    experimental = False
    # Does this repository format escape funky characters, or does it create
    # files with similar names as the versioned files in its contents on disk
    # ?
    supports_funky_characters = None
    # Does this repository format support leaving locks?
    supports_leaving_lock = None
    # Does this format support the full VersionedFiles interface?
    supports_full_versioned_files = None
    # Does this format support signing revision signatures?
    supports_revision_signatures = True
    # Can the revision graph have incorrect parents?
    revision_graph_can_have_wrong_parents = None
    # Does this format support rich root data?
    rich_root_data = None
    # Does this format support explicitly versioned directories?
    supports_versioned_directories = None
    # Can other repositories be nested into one of this format?
    supports_nesting_repositories = None
    # Is it possible for revisions to be present without being referenced
    # somewhere ?
    supports_unreferenced_revisions = None

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def initialize(self, controldir, shared=False):
        """Initialize a repository of this format in controldir.

        :param controldir: The controldir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repository are not
        compatible the controldir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def is_deprecated(self):
        """Is this format deprecated?

        Deprecated formats may trigger a user-visible warning recommending
        the user to upgrade. They are still fully supported.
        """
        return False

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        if self.rich_root_data and not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format,
                from_format=self)
        if (self.supports_tree_reference and
            not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format,
                from_format=self)

    def open(self, controldir, _found=False):
        """Return an instance of this format for a controldir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)

    def _run_post_repo_init_hooks(self, repository, controldir, shared):
        from .controldir import ControlDir, RepoInitHookParams
        hooks = ControlDir.hooks['post_repo_init']
        if not hooks:
            return
        params = RepoInitHookParams(repository, self, controldir, shared)
        for hook in hooks:
            hook(params)
1387
# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in breezy.bzr.knitrepo now.  When an instance of one of these is
# needed, it's constructed directly by the ControlDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
format_registry.register_lazy(
    'Bazaar development format 8\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
1475
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(
                self.source.make_working_trees())
        except NotImplementedError:
            # Not every target supports the working-tree policy; best effort.
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :return: None.
        """
        raise NotImplementedError(self.fetch)

    @needs_read_lock
    def search_missing_revision_ids(
            self, find_ghosts=True, revision_ids=None, if_present_ids=None,
            limit=None):
        """Return the revision ids that source has that target does not.

        :param revision_ids: return revision ids included by these
            revision_ids.  NoSuchRevision will be raised if any of these
            revisions are not present.
        :param if_present_ids: like revision_ids, but will not cause
            NoSuchRevision if any of these are absent, instead they will simply
            not be in the result.  This is useful for e.g. finding revisions
            to fetch for tags, which may reference absent revisions.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :param limit: Maximum number of revisions to return, topologically
            ordered, earliest first.
        :return: A breezy.graph.SearchResult.
        """
        raise NotImplementedError(self.search_missing_revision_ids)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories as e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")
1565
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        pb = ui.ui_factory.nested_progress_bar()
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        pb.update(gettext('Moving repository to repository.backup'))
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        pb.update(gettext('Creating new repository'))
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            pb.update(gettext('Copying content'))
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        pb.update(gettext('Deleting old repository content'))
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note(gettext('repository converted'))
        pb.finished()
1613
def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in viewitems(revision_graph):
        # Drop any parent that is not itself present in the graph (a ghost).
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
1624
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    graph = repo.get_graph()
    iterator = graph.iter_lefthand_ancestry(start_revision,
        (_mod_revision.NULL_REVISION,))
    try:
        # skip the last revision in the list
        next(iterator)
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = next(iterator)
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
1658
class _LazyListJoin(object):
1659
"""An iterable yielding the contents of many lists as one list.
1661
Each iterator made from this will reflect the current contents of the lists
1662
at the time the iterator is made.
1664
This is used by Repository's _make_parents_provider implementation so that
1667
pp = repo._make_parents_provider() # uses a list of fallback repos
1668
pp.add_fallback_repository(other_repo) # appends to that list
1669
result = pp.get_parent_map(...)
1670
# The result will include revs from other_repo
1673
def __init__(self, *list_parts):
1674
self.list_parts = list_parts
1678
for list_part in self.list_parts:
1679
full_list.extend(list_part)
1680
return iter(full_list)
1683
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,