# Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from .lazy_import import lazy_import
18
lazy_import(globals(), """
27
revision as _mod_revision,
30
from breezy.i18n import gettext
38
from .decorators import only_raises
39
from .inter import InterObject
40
from .lock import _RelockDebugMixin, LogicalLockResult
42
log_exception_quietly, note, mutter, mutter_callsite, warning)
45
# Old formats display a warning, but only once
46
_deprecation_warning_done = False
class IsInWriteGroupError(errors.InternalBzrError):
    """Raised when refresh_data is called on a repository in a write group."""

    _fmt = "May not refresh_data of repo %(repo)s while in a write group."

    def __init__(self, repo):
        errors.InternalBzrError.__init__(self, repo=repo)
class CannotSetRevisionId(errors.BzrError):
    """Raised when a repository format cannot record user-supplied revids."""

    _fmt = "Repository format does not support setting revision ids."
class FetchResult(object):
    """Result of a fetch operation.

    :ivar revidmap: For lossy fetches, map from source revid to target revid.
    :ivar total_fetched: Number of revisions fetched
    """

    def __init__(self, total_fetched=None, revidmap=None):
        self.total_fetched = total_fetched
        self.revidmap = revidmap
class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # whether this commit builder will automatically update the branch that is
    # being committed to
    updates_branch = False

    def __init__(self, repository, parents, config_stack, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that can not be natively
            represented, when pushing to a foreign VCS
        """
        self._config_stack = config_stack
        self._lossy = lossy

        if committer is None:
            self._committer = self._config_stack.get('email')
        elif not isinstance(committer, str):
            self._committer = committer.decode()  # throw if non-ascii
        else:
            self._committer = committer

        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed(revision_id)

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occured.
        """
        raise NotImplementedError(self.any_changes)

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        # TODO(jelmer): Make this repository-format specific
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        """Check that all revision property values are valid unicode strings."""
        for key, value in revprops.items():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, str):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            # TODO(jelmer): Make this repository-format specific
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        raise NotImplementedError(self.commit)

    def abort(self):
        """Abort the commit that is being built."""
        raise NotImplementedError(self.abort)

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a
        RevisionTree representing the newly committed tree. This is
        preferred to calling Repository.revision_tree() because that may
        require deserializing the inventory, while we already have a copy in
        memory.
        """
        raise NotImplementedError(self.revision_tree)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        raise NotImplementedError(self.finish_inventory)

    def _generate_revision_if_needed(self, revision_id):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if not self.repository._format.supports_setting_revision_ids:
            if revision_id is not None:
                raise CannotSetRevisionId()
            return
        if revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self._new_revision_id = revision_id
            self.random_revid = False

    def record_iter_changes(self, tree, basis_revision_id, iter_changes):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :return: A generator of (relpath, fs_hash) tuples for use with
            the tree's observed sha1 recording.
        """
        raise NotImplementedError(self.record_iter_changes)
class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository.

    :ivar repository_token: The token obtained from the underlying lock, or
        None.
    :ivar unlock: A callable which will unlock the lock.
    """

    def __init__(self, unlock, repository_token):
        LogicalLockResult.__init__(self, unlock)
        self.repository_token = repository_token

    def __repr__(self):
        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
                                                      self.unlock)
class WriteGroup(object):
    """Context manager that manages a write group.

    Raising an exception will result in the write group being aborted.
    """

    def __init__(self, repository, suppress_errors=False):
        self.repository = repository
        self._suppress_errors = suppress_errors

    def __enter__(self):
        self.repository.start_write_group()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Abort on any exception, otherwise commit; never swallow the
        # exception itself (returns None, so it propagates).
        if exc_type:
            self.repository.abort_write_group(self._suppress_errors)
        else:
            self.repository.commit_write_group()
######################################################################
# Repositories
class Repository(controldir.ControlComponent, _RelockDebugMixin):
271
"""Repository holding history for one or more branches.
273
The repository holds and retrieves historical information including
274
revisions and file history. It's normally accessed only by the Branch,
275
which views a particular line of development through that history.
277
See VersionedFileRepository in breezy.vf_repository for the
278
base class for most Bazaar repositories.
281
# Does this repository implementation support random access to
282
# items in the tree, or just bulk fetching/pushing of data?
283
supports_random_access = True
285
def abort_write_group(self, suppress_errors=False):
286
"""Commit the contents accrued within the current write group.
288
:param suppress_errors: if true, abort_write_group will catch and log
289
unexpected errors that happen during the abort, rather than
290
allowing them to propagate. Defaults to False.
292
:seealso: start_write_group.
294
if self._write_group is not self.get_transaction():
295
# has an unlock or relock occured ?
298
'(suppressed) mismatched lock context and write group. %r, %r',
299
self._write_group, self.get_transaction())
301
raise errors.BzrError(
302
'mismatched lock context and write group. %r, %r' %
303
(self._write_group, self.get_transaction()))
305
self._abort_write_group()
306
except Exception as exc:
307
self._write_group = None
308
if not suppress_errors:
310
mutter('abort_write_group failed')
311
log_exception_quietly()
312
note(gettext('brz: ERROR (ignored): %s'), exc)
313
self._write_group = None
315
def _abort_write_group(self):
316
"""Template method for per-repository write group cleanup.
318
This is called during abort before the write group is considered to be
319
finished and should cleanup any internal state accrued during the write
320
group. There is no requirement that data handed to the repository be
321
*not* made available - this is not a rollback - but neither should any
322
attempt be made to ensure that data added is fully commited. Abort is
323
invoked when an error has occured so futher disk or network operations
324
may not be possible or may error and if possible should not be
328
def add_fallback_repository(self, repository):
329
"""Add a repository to use for looking up data not held locally.
331
:param repository: A repository.
333
raise NotImplementedError(self.add_fallback_repository)
335
def _check_fallback_repository(self, repository):
336
"""Check that this repository can fallback to repository safely.
338
Raise an error if not.
340
:param repository: A repository to fallback to.
342
return InterRepository._assert_same_model(self, repository)
344
def all_revision_ids(self):
345
"""Returns a list of all the revision ids in the repository.
347
This is conceptually deprecated because code should generally work on
348
the graph reachable from a particular revision, and ignore any other
349
revisions that might be present. There is no direct replacement
352
if 'evil' in debug.debug_flags:
353
mutter_callsite(2, "all_revision_ids is linear with history.")
354
return self._all_revision_ids()
356
def _all_revision_ids(self):
357
"""Returns a list of all the revision ids in the repository.
359
These are in as much topological order as the underlying store can
362
raise NotImplementedError(self._all_revision_ids)
364
def break_lock(self):
365
"""Break a lock if one is present from another instance.
367
Uses the ui factory to ask for confirmation if the lock may be from
370
self.control_files.break_lock()
373
def create(controldir):
374
"""Construct the current default format repository in controldir."""
375
return RepositoryFormat.get_default_format().initialize(controldir)
377
def __init__(self, _format, controldir, control_files):
378
"""instantiate a Repository.
380
:param _format: The format of the repository on disk.
381
:param controldir: The ControlDir of the repository.
382
:param control_files: Control files to use for locking, etc.
384
# In the future we will have a single api for all stores for
385
# getting file texts, inventories and revisions, then
386
# this construct will accept instances of those things.
387
super(Repository, self).__init__()
388
self._format = _format
389
# the following are part of the public API for Repository:
390
self.controldir = controldir
391
self.control_files = control_files
393
self._write_group = None
394
# Additional places to query for data.
395
self._fallback_repositories = []
398
def user_transport(self):
399
return self.controldir.user_transport
402
def control_transport(self):
403
return self._transport
406
if self._fallback_repositories:
407
return '%s(%r, fallback_repositories=%r)' % (
408
self.__class__.__name__,
410
self._fallback_repositories)
412
return '%s(%r)' % (self.__class__.__name__,
415
def _has_same_fallbacks(self, other_repo):
416
"""Returns true if the repositories have the same fallbacks."""
417
my_fb = self._fallback_repositories
418
other_fb = other_repo._fallback_repositories
419
if len(my_fb) != len(other_fb):
421
for f, g in zip(my_fb, other_fb):
422
if not f.has_same_location(g):
426
def has_same_location(self, other):
427
"""Returns a boolean indicating if this repository is at the same
428
location as another repository.
430
This might return False even when two repository objects are accessing
431
the same physical repository via different URLs.
433
if self.__class__ is not other.__class__:
435
return (self.control_url == other.control_url)
437
def is_in_write_group(self):
438
"""Return True if there is an open write group.
440
:seealso: start_write_group.
442
return self._write_group is not None
445
return self.control_files.is_locked()
447
def is_write_locked(self):
448
"""Return True if this object is write locked."""
449
return self.is_locked() and self.control_files._lock_mode == 'w'
451
def lock_write(self, token=None):
452
"""Lock this repository for writing.
454
This causes caching within the repository obejct to start accumlating
455
data during reads, and allows a 'write_group' to be obtained. Write
456
groups must be used for actual data insertion.
458
A token should be passed in if you know that you have locked the object
459
some other way, and need to synchronise this object's state with that
462
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
464
:param token: if this is already locked, then lock_write will fail
465
unless the token matches the existing lock.
466
:returns: a token if this instance supports tokens, otherwise None.
467
:raises TokenLockingNotSupported: when a token is given but this
468
instance doesn't support using token locks.
469
:raises MismatchedToken: if the specified token doesn't match the token
470
of the existing lock.
471
:seealso: start_write_group.
472
:return: A RepositoryWriteLockResult.
474
locked = self.is_locked()
475
token = self.control_files.lock_write(token=token)
477
self._warn_if_deprecated()
479
for repo in self._fallback_repositories:
480
# Writes don't affect fallback repos
483
return RepositoryWriteLockResult(self.unlock, token)
486
"""Lock the repository for read operations.
488
:return: An object with an unlock method which will release the lock
491
locked = self.is_locked()
492
self.control_files.lock_read()
494
self._warn_if_deprecated()
496
for repo in self._fallback_repositories:
499
return LogicalLockResult(self.unlock)
501
def get_physical_lock_status(self):
502
return self.control_files.get_physical_lock_status()
504
def leave_lock_in_place(self):
505
"""Tell this repository not to release the physical lock when this
508
If lock_write doesn't return a token, then this method is not supported.
510
self.control_files.leave_in_place()
512
def dont_leave_lock_in_place(self):
513
"""Tell this repository to release the physical lock when this
514
object is unlocked, even if it didn't originally acquire it.
516
If lock_write doesn't return a token, then this method is not supported.
518
self.control_files.dont_leave_in_place()
520
def gather_stats(self, revid=None, committers=None):
521
"""Gather statistics from a revision id.
523
:param revid: The revision id to gather statistics from, if None, then
524
no revision specific statistics are gathered.
525
:param committers: Optional parameter controlling whether to grab
526
a count of committers from the revision specific statistics.
527
:return: A dictionary of statistics. Currently this contains:
528
committers: The number of committers if requested.
529
firstrev: A tuple with timestamp, timezone for the penultimate left
530
most ancestor of revid, if revid is not the NULL_REVISION.
531
latestrev: A tuple with timestamp, timezone for revid, if revid is
532
not the NULL_REVISION.
533
revisions: The total revision count in the repository.
534
size: An estimate disk size of the repository in bytes.
536
with self.lock_read():
538
if revid and committers:
539
result['committers'] = 0
540
if revid and revid != _mod_revision.NULL_REVISION:
541
graph = self.get_graph()
543
all_committers = set()
544
revisions = [r for (r, p) in graph.iter_ancestry([revid])
545
if r != _mod_revision.NULL_REVISION]
548
# ignore the revisions in the middle - just grab first and last
549
revisions = revisions[0], revisions[-1]
550
for revision in self.get_revisions(revisions):
551
if not last_revision:
552
last_revision = revision
554
all_committers.add(revision.committer)
555
first_revision = revision
557
result['committers'] = len(all_committers)
558
result['firstrev'] = (first_revision.timestamp,
559
first_revision.timezone)
560
result['latestrev'] = (last_revision.timestamp,
561
last_revision.timezone)
564
def find_branches(self, using=False):
565
"""Find branches underneath this repository.
567
This will include branches inside other branches.
569
:param using: If True, list only branches using this repository.
571
if using and not self.is_shared():
572
for branch in self.controldir.list_branches():
576
class Evaluator(object):
579
self.first_call = True
581
def __call__(self, controldir):
582
# On the first call, the parameter is always the controldir
583
# containing the current repo.
584
if not self.first_call:
586
repository = controldir.open_repository()
587
except errors.NoRepositoryPresent:
590
return False, ([], repository)
591
self.first_call = False
592
value = (controldir.list_branches(), None)
595
for branches, repository in controldir.ControlDir.find_controldirs(
596
self.user_transport, evaluate=Evaluator()):
597
if branches is not None:
598
for branch in branches:
600
if not using and repository is not None:
601
for branch in repository.find_branches():
604
def search_missing_revision_ids(self, other,
605
find_ghosts=True, revision_ids=None, if_present_ids=None,
607
"""Return the revision ids that other has that this does not.
609
These are returned in topological order.
611
revision_ids: only return revision ids included by revision_id.
613
with self.lock_read():
614
return InterRepository.get(other, self).search_missing_revision_ids(
615
find_ghosts=find_ghosts, revision_ids=revision_ids,
616
if_present_ids=if_present_ids, limit=limit)
620
"""Open the repository rooted at base.
622
For instance, if the repository is at URL/.bzr/repository,
623
Repository.open(URL) -> a Repository instance.
625
control = controldir.ControlDir.open(base)
626
return control.open_repository()
628
def copy_content_into(self, destination, revision_id=None):
629
"""Make a complete copy of the content in self into destination.
631
This is a destructive operation! Do not use it on existing
634
return InterRepository.get(self, destination).copy_content(revision_id)
636
def commit_write_group(self):
637
"""Commit the contents accrued within the current write group.
639
:seealso: start_write_group.
641
:return: it may return an opaque hint that can be passed to 'pack'.
643
if self._write_group is not self.get_transaction():
644
# has an unlock or relock occured ?
645
raise errors.BzrError('mismatched lock context %r and '
647
(self.get_transaction(), self._write_group))
648
result = self._commit_write_group()
649
self._write_group = None
652
def _commit_write_group(self):
653
"""Template method for per-repository write group cleanup.
655
This is called before the write group is considered to be
656
finished and should ensure that all data handed to the repository
657
for writing during the write group is safely committed (to the
658
extent possible considering file system caching etc).
661
def suspend_write_group(self):
662
"""Suspend a write group.
664
:raise UnsuspendableWriteGroup: If the write group can not be
666
:return: List of tokens
668
raise errors.UnsuspendableWriteGroup(self)
670
def refresh_data(self):
671
"""Re-read any data needed to synchronise with disk.
673
This method is intended to be called after another repository instance
674
(such as one used by a smart server) has inserted data into the
675
repository. On all repositories this will work outside of write groups.
676
Some repository formats (pack and newer for breezy native formats)
677
support refresh_data inside write groups. If called inside a write
678
group on a repository that does not support refreshing in a write group
679
IsInWriteGroupError will be raised.
683
def resume_write_group(self, tokens):
684
if not self.is_write_locked():
685
raise errors.NotWriteLocked(self)
686
if self._write_group:
687
raise errors.BzrError('already in a write group')
688
self._resume_write_group(tokens)
689
# so we can detect unlock/relock - the write group is now entered.
690
self._write_group = self.get_transaction()
692
def _resume_write_group(self, tokens):
693
raise errors.UnsuspendableWriteGroup(self)
695
def fetch(self, source, revision_id=None, find_ghosts=False, lossy=False):
696
"""Fetch the content required to construct revision_id from source.
698
If revision_id is None, then all content is copied.
700
fetch() may not be used when the repository is in a write group -
701
either finish the current write group before using fetch, or use
702
fetch before starting the write group.
704
:param find_ghosts: Find and copy revisions in the source that are
705
ghosts in the target (and not reachable directly by walking out to
706
the first-present revision in target from revision_id).
707
:param revision_id: If specified, all the content needed for this
708
revision ID will be copied to the target. Fetch will determine for
709
itself which content needs to be copied.
710
:return: A FetchResult object
712
if self.is_in_write_group():
713
raise errors.InternalBzrError(
714
"May not fetch while in a write group.")
715
# fast path same-url fetch operations
716
# TODO: lift out to somewhere common with RemoteRepository
717
# <https://bugs.launchpad.net/bzr/+bug/401646>
718
if (self.has_same_location(source)
719
and self._has_same_fallbacks(source)):
720
# check that last_revision is in 'from' and then return a
722
if (revision_id is not None and
723
not _mod_revision.is_null(revision_id)):
724
self.get_revision(revision_id)
726
inter = InterRepository.get(source, self)
728
revision_id=revision_id, find_ghosts=find_ghosts, lossy=lossy)
730
def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
731
timezone=None, committer=None, revprops=None,
732
revision_id=None, lossy=False):
733
"""Obtain a CommitBuilder for this repository.
735
:param branch: Branch to commit to.
736
:param parents: Revision ids of the parents of the new revision.
737
:param config_stack: Configuration stack to use.
738
:param timestamp: Optional timestamp recorded for commit.
739
:param timezone: Optional timezone for timestamp.
740
:param committer: Optional committer to set for commit.
741
:param revprops: Optional dictionary of revision properties.
742
:param revision_id: Optional revision id.
743
:param lossy: Whether to discard data that can not be natively
744
represented, when pushing to a foreign VCS
746
raise NotImplementedError(self.get_commit_builder)
748
@only_raises(errors.LockNotHeld, errors.LockBroken)
750
if (self.control_files._lock_count == 1 and
751
self.control_files._lock_mode == 'w'):
752
if self._write_group is not None:
753
self.abort_write_group()
754
self.control_files.unlock()
755
raise errors.BzrError(
756
'Must end write groups before releasing write locks.')
757
self.control_files.unlock()
758
if self.control_files._lock_count == 0:
759
for repo in self._fallback_repositories:
762
def clone(self, controldir, revision_id=None):
763
"""Clone this repository into controldir using the current format.
765
Currently no check is made that the format of this repository and
766
the bzrdir format are compatible. FIXME RBC 20060201.
768
:return: The newly created destination repository.
770
with self.lock_read():
771
# TODO: deprecate after 0.16; cloning this with all its settings is
772
# probably not very useful -- mbp 20070423
773
dest_repo = self._create_sprouting_repo(
774
controldir, shared=self.is_shared())
775
self.copy_content_into(dest_repo, revision_id)
778
def start_write_group(self):
779
"""Start a write group in the repository.
781
Write groups are used by repositories which do not have a 1:1 mapping
782
between file ids and backend store to manage the insertion of data from
783
both fetch and commit operations.
785
A write lock is required around the
786
start_write_group/commit_write_group for the support of lock-requiring
789
One can only insert data into a repository inside a write group.
793
if not self.is_write_locked():
794
raise errors.NotWriteLocked(self)
795
if self._write_group:
796
raise errors.BzrError('already in a write group')
797
self._start_write_group()
798
# so we can detect unlock/relock - the write group is now entered.
799
self._write_group = self.get_transaction()
801
def _start_write_group(self):
802
"""Template method for per-repository write group startup.
804
This is called before the write group is considered to be
808
def sprout(self, to_bzrdir, revision_id=None):
809
"""Create a descendent repository for new development.
811
Unlike clone, this does not copy the settings of the repository.
813
with self.lock_read():
814
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
815
dest_repo.fetch(self, revision_id=revision_id)
818
def _create_sprouting_repo(self, a_controldir, shared):
820
a_controldir._format, self.controldir._format.__class__):
821
# use target default format.
822
dest_repo = a_controldir.create_repository()
824
# Most control formats need the repository to be specifically
825
# created, but on some old all-in-one formats it's not needed
827
dest_repo = self._format.initialize(
828
a_controldir, shared=shared)
829
except errors.UninitializableFormat:
830
dest_repo = a_controldir.open_repository()
833
def has_revision(self, revision_id):
834
"""True if this repository has a copy of the revision."""
835
with self.lock_read():
836
return revision_id in self.has_revisions((revision_id,))
838
def has_revisions(self, revision_ids):
839
"""Probe to find out the presence of multiple revisions.
841
:param revision_ids: An iterable of revision_ids.
842
:return: A set of the revision_ids that were present.
844
raise NotImplementedError(self.has_revisions)
846
def get_revision(self, revision_id):
847
"""Return the Revision object for a named revision."""
848
with self.lock_read():
849
return self.get_revisions([revision_id])[0]
851
def get_revision_reconcile(self, revision_id):
852
"""'reconcile' helper routine that allows access to a revision always.
854
This variant of get_revision does not cross check the weave graph
855
against the revision one as get_revision does: but it should only
856
be used by reconcile, or reconcile-alike commands that are correcting
857
or testing the revision graph.
859
raise NotImplementedError(self.get_revision_reconcile)
861
def get_revisions(self, revision_ids):
862
"""Get many revisions at once.
864
Repositories that need to check data on every revision read should
865
subclass this method.
868
for revid, rev in self.iter_revisions(revision_ids):
870
raise errors.NoSuchRevision(self, revid)
872
return [revs[revid] for revid in revision_ids]
874
def iter_revisions(self, revision_ids):
875
"""Iterate over revision objects.
877
:param revision_ids: An iterable of revisions to examine. None may be
878
passed to request all revisions known to the repository. Note that
879
not all repositories can find unreferenced revisions; for those
880
repositories only referenced ones will be returned.
881
:return: An iterator of (revid, revision) tuples. Absent revisions (
882
those asked for but not available) are returned as (revid, None).
883
N.B.: Revisions are not necessarily yielded in order.
885
raise NotImplementedError(self.iter_revisions)
887
def get_revision_delta(self, revision_id):
888
"""Return the delta for one revision.
890
The delta is relative to the left-hand predecessor of the
893
with self.lock_read():
894
r = self.get_revision(revision_id)
895
return list(self.get_revision_deltas([r]))[0]
897
def get_revision_deltas(self, revisions, specific_files=None):
898
"""Produce a generator of revision deltas.
900
Note that the input is a sequence of REVISIONS, not revision ids.
901
Trees will be held in memory until the generator exits.
902
Each delta is relative to the revision's lefthand predecessor.
904
specific_files should exist in the first revision.
906
:param specific_files: if not None, the result is filtered
907
so that only those files, their parents and their
908
children are included.
910
from .tree import InterTree
911
# Get the revision-ids of interest
912
required_trees = set()
913
for revision in revisions:
914
required_trees.add(revision.revision_id)
915
required_trees.update(revision.parent_ids[:1])
918
t.get_revision_id(): t
919
for t in self.revision_trees(required_trees)}
921
# Calculate the deltas
922
for revision in revisions:
923
if not revision.parent_ids:
924
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
926
old_tree = trees[revision.parent_ids[0]]
927
intertree = InterTree.get(old_tree, trees[revision.revision_id])
928
yield intertree.compare(specific_files=specific_files)
929
if specific_files is not None:
931
p for p in intertree.find_source_paths(
932
specific_files).values()
935
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
936
raise NotImplementedError(self.store_revision_signature)
938
def add_signature_text(self, revision_id, signature):
939
"""Store a signature text for a revision.
941
:param revision_id: Revision id of the revision
942
:param signature: Signature text.
944
raise NotImplementedError(self.add_signature_text)
946
def iter_files_bytes(self, desired_files):
947
"""Iterate through file versions.
949
Files will not necessarily be returned in the order they occur in
950
desired_files. No specific order is guaranteed.
952
Yields pairs of identifier, bytes_iterator. identifier is an opaque
953
value supplied by the caller as part of desired_files. It should
954
uniquely identify the file version in the caller's context. (Examples:
955
an index number or a TreeTransform trans_id.)
957
:param desired_files: a list of (file_id, revision_id, identifier)
960
raise NotImplementedError(self.iter_files_bytes)
962
def get_rev_id_for_revno(self, revno, known_pair):
963
"""Return the revision id of a revno, given a later (revno, revid)
964
pair in the same history.
966
:return: if found (True, revid). If the available history ran out
967
before reaching the revno, then this returns
968
(False, (closest_revno, closest_revid)).
970
known_revno, known_revid = known_pair
971
partial_history = [known_revid]
972
distance_from_known = known_revno - revno
973
if distance_from_known < 0:
974
raise errors.RevnoOutOfBounds(revno, (0, known_revno))
977
self, partial_history, stop_index=distance_from_known)
978
except errors.RevisionNotPresent as err:
979
if err.revision_id == known_revid:
980
# The start revision (known_revid) wasn't found.
981
raise errors.NoSuchRevision(self, known_revid)
982
# This is a stacked repository with no fallbacks, or a there's a
983
# left-hand ghost. Either way, even though the revision named in
984
# the error isn't in this repo, we know it's the next step in this
986
partial_history.append(err.revision_id)
987
if len(partial_history) <= distance_from_known:
988
# Didn't find enough history to get a revid for the revno.
989
earliest_revno = known_revno - len(partial_history) + 1
990
return (False, (earliest_revno, partial_history[-1]))
991
if len(partial_history) - 1 > distance_from_known:
992
raise AssertionError('_iter_for_revno returned too much history')
993
return (True, partial_history[-1])
996
"""Return True if this repository is flagged as a shared repository."""
997
raise NotImplementedError(self.is_shared)
999
def reconcile(self, other=None, thorough=False):
    """Reconcile this repository.

    :param other: unused; kept for interface compatibility.
    :param thorough: if True, perform a more exhaustive reconcile.
    """
    raise NotImplementedError(self.reconcile)
1003
def _refresh_data(self):
1004
"""Helper called from lock_* to ensure coherency with disk.
1006
The default implementation does nothing; it is however possible
1007
for repositories to maintain loaded indices across multiple locks
1008
by checking inside their implementation of this method to see
1009
whether their indices are still valid. This depends of course on
1010
the disk format being validatable in this manner. This method is
1011
also called by the refresh_data() public interface to cause a refresh
1012
to occur while in a write lock so that data inserted by a smart server
1013
push operation is visible on the client's instance of the physical
1017
def revision_tree(self, revision_id):
    """Return Tree for a revision on this branch.

    `revision_id` may be NULL_REVISION for the empty tree revision.
    """
    raise NotImplementedError(self.revision_tree)
1024
def revision_trees(self, revision_ids):
    """Return Trees for revisions in this repository.

    :param revision_ids: a sequence of revision-ids;
        a revision-id may not be None or b'null:'
    """
    raise NotImplementedError(self.revision_trees)
1032
def pack(self, hint=None, clean_obsolete_packs=False):
    """Compress the data within the repository.

    This operation only makes sense for some repository types.  For other
    types it should be a no-op that just returns.

    This stub method does not require a lock, but subclasses should use
    self.write_lock as this is a long running call it's reasonable to
    implicitly lock for the user.

    :param hint: If not supplied, the whole repository is packed.
        If supplied, the repository may use the hint parameter as a
        hint for the parts of the repository to pack.  A hint can be
        obtained from the result of commit_write_group().  Out of
        date hints are simply ignored, because concurrent operations
        can obsolete them rapidly.

    :param clean_obsolete_packs: Clean obsolete packs immediately after
        the pack operation.
    """
    # Default: no-op; pack-capable formats override this.
1053
def get_transaction(self):
    """Return the current active transaction from the lockable files."""
    return self.control_files.get_transaction()
1056
def get_parent_map(self, revision_ids):
    """See graph.StackedParentsProvider.get_parent_map"""
    raise NotImplementedError(self.get_parent_map)
1060
def _get_parent_map_no_fallbacks(self, revision_ids):
    """Same as Repository.get_parent_map except doesn't query fallbacks."""
    # revisions index works in keys; this just works in revisions
    # therefore wrap and unwrap
    query_keys = []
    result = {}
    for revision_id in revision_ids:
        if revision_id == _mod_revision.NULL_REVISION:
            # NULL_REVISION trivially has no parents.
            result[revision_id] = ()
        elif revision_id is None:
            raise ValueError('get_parent_map(None) is not valid')
        else:
            query_keys.append((revision_id,))
    vf = self.revisions.without_fallbacks()
    for (revision_id,), parent_keys in (
            vf.get_parent_map(query_keys).items()):
        if parent_keys:
            result[revision_id] = tuple([parent_revid
                                         for (parent_revid,) in parent_keys])
        else:
            # A revision with no recorded parents is a child of NULL_REVISION.
            result[revision_id] = (_mod_revision.NULL_REVISION,)
    return result
1083
def _make_parents_provider(self):
1084
if not self._format.supports_external_lookups:
1086
return graph.StackedParentsProvider(_LazyListJoin(
1087
[self._make_parents_provider_unstacked()],
1088
self._fallback_repositories))
1090
def _make_parents_provider_unstacked(self):
    """Return a parents provider that ignores fallback repositories."""
    return graph.CallableToParentsProviderAdapter(
        self._get_parent_map_no_fallbacks)
1094
def get_known_graph_ancestry(self, revision_ids):
    """Return the known graph for a set of revision ids and their ancestors.
    """
    raise NotImplementedError(self.get_known_graph_ancestry)
1099
def get_file_graph(self):
    """Return the graph walker for files."""
    raise NotImplementedError(self.get_file_graph)
1103
def get_graph(self, other_repository=None):
    """Return the graph walker for this repository format"""
    parents_provider = self._make_parents_provider()
    if (other_repository is not None and
            not self.has_same_location(other_repository)):
        # Consult the other repository's parents too, after our own.
        parents_provider = graph.StackedParentsProvider(
            [parents_provider, other_repository._make_parents_provider()])
    return graph.Graph(parents_provider)
1112
def set_make_working_trees(self, new_value):
    """Set the policy flag for making working trees when creating branches.

    This only applies to branches that use this repository.

    The default is 'True'.
    :param new_value: True to restore the default, False to disable making
        working trees.
    """
    raise NotImplementedError(self.set_make_working_trees)
1123
def make_working_trees(self):
    """Returns the policy for making working trees on new branches."""
    raise NotImplementedError(self.make_working_trees)
1127
def sign_revision(self, revision_id, gpg_strategy):
    """Create a GPG signature for revision_id using gpg_strategy.

    Abstract: concrete repository formats must implement this.
    """
    raise NotImplementedError(self.sign_revision)
1130
def verify_revision_signature(self, revision_id, gpg_strategy):
    """Verify the signature on a revision.

    :param revision_id: the revision to verify
    :gpg_strategy: the GPGStrategy object to used

    :return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
    """
    raise NotImplementedError(self.verify_revision_signature)
1140
def verify_revision_signatures(self, revision_ids, gpg_strategy):
    """Verify revision signatures for a number of revisions.

    :param revision_id: the revision to verify
    :gpg_strategy: the GPGStrategy object to used
    :return: Iterator over tuples with revision id, result and keys
    """
    with self.lock_read():
        for revid in revision_ids:
            (result, key) = self.verify_revision_signature(revid, gpg_strategy)
            yield revid, result, key
1152
def has_signature_for_revision_id(self, revision_id):
    """Query for a revision signature for revision_id in the repository."""
    raise NotImplementedError(self.has_signature_for_revision_id)
1156
def get_signature_text(self, revision_id):
    """Return the text for a signature."""
    raise NotImplementedError(self.get_signature_text)
1160
def check(self, revision_ids=None, callback_refs=None, check_repo=True):
    """Check consistency of all history of given revision_ids.

    Different repository implementations should override _check().

    :param revision_ids: A non-empty list of revision_ids whose ancestry
        will be checked.  Typically the last revision_id of a branch.
    :param callback_refs: A dict of check-refs to resolve and callback
        the check/_check method on the items listed as wanting the ref.
        see breezy.check.
    :param check_repo: If False do not check the repository contents, just
        calculate the data callback_refs requires and call them back.
    """
    return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
                       check_repo=check_repo)
1176
def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
1177
raise NotImplementedError(self.check)
1179
def _warn_if_deprecated(self, branch=None):
1180
if not self._format.is_deprecated():
1182
global _deprecation_warning_done
1183
if _deprecation_warning_done:
1187
conf = config.GlobalStack()
1189
conf = branch.get_config_stack()
1190
if 'format_deprecation' in conf.get('suppress_warnings'):
1192
warning("Format %s for %s is deprecated -"
1193
" please use 'brz upgrade' to get better performance"
1194
% (self._format, self.controldir.transport.base))
1196
_deprecation_warning_done = True
1198
def supports_rich_root(self):
    """Return True if this repository's format stores rich root data."""
    return self._format.rich_root_data
1201
def _check_ascii_revisionid(self, revision_id, method):
1202
"""Private helper for ascii-only repositories."""
1203
# weave repositories refuse to store revisionids that are non-ascii.
1204
if revision_id is not None:
1205
# weaves require ascii revision ids.
1206
if isinstance(revision_id, str):
1208
revision_id.encode('ascii')
1209
except UnicodeEncodeError:
1210
raise errors.NonAsciiRevisionId(method, self)
1213
revision_id.decode('ascii')
1214
except UnicodeDecodeError:
1215
raise errors.NonAsciiRevisionId(method, self)
1218
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
    """Repository format registry."""

    def get_default(self):
        """Return the current default format."""
        return controldir.format_registry.make_controldir('default').repository_format
1226
network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = RepositoryFormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
1243
#####################################################################
1244
# Repository Formats
1246
class RepositoryFormat(controldir.ControlComponentFormat):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * a optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingcontroldir - the controldir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False
    # Does the repository storage understand references to trees?
    supports_tree_reference = None
    # Is the format experimental ?
    experimental = False
    # Does this repository format escape funky characters, or does it create
    # files with similar names as the versioned files in its contents on disk
    # ?
    supports_funky_characters = None
    # Does this repository format support leaving locks?
    supports_leaving_lock = None
    # Does this format support the full VersionedFiles interface?
    supports_full_versioned_files = None
    # Does this format support signing revision signatures?
    supports_revision_signatures = True
    # Can the revision graph have incorrect parents?
    revision_graph_can_have_wrong_parents = None
    # Does this format support setting revision ids?
    supports_setting_revision_ids = True
    # Does this format support rich root data?
    rich_root_data = None
    # Does this format support explicitly versioned directories?
    supports_versioned_directories = None
    # Can other repositories be nested into one of this format?
    supports_nesting_repositories = None
    # Is it possible for revisions to be present without being referenced
    # somewhere ?
    supports_unreferenced_revisions = None
    # Does this format store the current Branch.nick in a revision when
    # creating commits?
    supports_storing_branch_nick = True
    # Does the format support overriding the transport to use
    supports_overriding_transport = True
    # Does the format support setting custom revision properties?
    supports_custom_revision_properties = True
    # Does the format record per-file revision metadata?
    records_per_file_revision = True

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def initialize(self, controldir, shared=False):
        """Initialize a repository of this format in controldir.

        :param controldir: The controldir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repository are not
        compatible the controldir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def is_deprecated(self):
        """Is this format deprecated?

        Deprecated formats may trigger a user-visible warning recommending
        the user to upgrade. They are still fully supported.
        """
        return False

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        """Raise BadConversionTarget if target_format loses data this
        format records (rich roots or tree references)."""
        if self.rich_root_data and not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format,
                from_format=self)
        if (self.supports_tree_reference
                and not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format,
                from_format=self)

    def open(self, controldir, _found=False):
        """Return an instance of this format for a controldir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)

    def _run_post_repo_init_hooks(self, repository, controldir, shared):
        from .controldir import ControlDir, RepoInitHookParams
        hooks = ControlDir.hooks['post_repo_init']
        if not hooks:
            return
        params = RepoInitHookParams(repository, self, controldir, shared)
        for hook in hooks:
            hook(params)
1416
# formats which have no format string are not discoverable or independently
1417
# creatable on disk, so are not registered in format_registry. They're
1418
# all in breezy.bzr.knitreponow. When an instance of one of these is
1419
# needed, it's constructed directly by the ControlDir. Non-native formats where
1420
# the repository is not separately opened are similar.
1422
format_registry.register_lazy(
    b'Bazaar-NG Knit Repository Format 1',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    b'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    b'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    b'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )
format_registry.register_lazy(
    b'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    (b"Bazaar development format 2 with subtree support "
     b"(needs bzr.dev from before 1.8)\n"),
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
format_registry.register_lazy(
    b'Bazaar development format 8\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
1504
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        with self.lock_write():
            try:
                self.target.set_make_working_trees(
                    self.source.make_working_trees())
            except (NotImplementedError, errors.RepositoryUpgradeRequired):
                # Best-effort: not all targets can record this policy.
                pass
            self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, find_ghosts=False, lossy=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :return: FetchResult
        """
        raise NotImplementedError(self.fetch)

    def search_missing_revision_ids(
            self, find_ghosts=True, revision_ids=None, if_present_ids=None,
            limit=None):
        """Return the revision ids that source has that target does not.

        :param revision_ids: return revision ids included by these
            revision_ids.  NoSuchRevision will be raised if any of these
            revisions are not present.
        :param if_present_ids: like revision_ids, but will not cause
            NoSuchRevision if any of these are absent, instead they will simply
            not be in the result.  This is useful for e.g. finding revisions
            to fetch for tags, which may reference absent revisions.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :param limit: Maximum number of revisions to return, topologically
            ordered
        :return: A breezy.graph.SearchResult.
        """
        raise NotImplementedError(self.search_missing_revision_ids)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories as e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                                                  "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                                                  "different serializers")
1592
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        with ui.ui_factory.nested_progress_bar() as pb:
            # this is only useful with metadir layouts - separated repo content.
            # trigger an assertion if not such
            repo._format.get_format_string()
            self.repo_dir = repo.controldir
            pb.update(gettext('Moving repository to repository.backup'))
            self.repo_dir.transport.move('repository', 'repository.backup')
            backup_transport = self.repo_dir.transport.clone(
                'repository.backup')
            repo._format.check_conversion_target(self.target_format)
            self.source_repo = repo._format.open(self.repo_dir,
                                                 _found=True,
                                                 _override_transport=backup_transport)
            pb.update(gettext('Creating new repository'))
            converted = self.target_format.initialize(self.repo_dir,
                                                      self.source_repo.is_shared())
            with converted.lock_write():
                pb.update(gettext('Copying content'))
                self.source_repo.copy_content_into(converted)
            pb.update(gettext('Deleting old repository content'))
            self.repo_dir.transport.delete_tree('repository.backup')
            ui.ui_factory.note(gettext('repository converted'))
1637
def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients.

    Removes NULL_REVISION and any parent references to revisions not present
    in the graph, mutating and returning revision_graph.
    """
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        # Drop parents that aren't themselves keys of the graph (ghosts).
        revision_graph[key] = tuple(parent for parent in parents if parent
                                    in revision_graph)
    return revision_graph
1648
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    graph = repo.get_graph()
    iterator = graph.iter_lefthand_ancestry(start_revision,
                                            (_mod_revision.NULL_REVISION,))
    try:
        # skip the last revision in the list
        next(iterator)
        while True:
            if (stop_index is not None and
                    len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = next(iterator)
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history to walk; leave the cache as extended so far.
        return
1682
class _LazyListJoin(object):
1683
"""An iterable yielding the contents of many lists as one list.
1685
Each iterator made from this will reflect the current contents of the lists
1686
at the time the iterator is made.
1688
This is used by Repository's _make_parents_provider implementation so that
1691
pp = repo._make_parents_provider() # uses a list of fallback repos
1692
pp.add_fallback_repository(other_repo) # appends to that list
1693
result = pp.get_parent_map(...)
1694
# The result will include revs from other_repo
1697
def __init__(self, *list_parts):
1698
self.list_parts = list_parts
1702
for list_part in self.list_parts:
1703
full_list.extend(list_part)
1704
return iter(full_list)
1707
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,