1
# Copyright (C) 2005-2011 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
from .lazy_import import lazy_import
18
lazy_import(globals(), """
27
revision as _mod_revision,
30
from breezy.i18n import gettext
38
from .decorators import only_raises
39
from .inter import InterObject
40
from .lock import _RelockDebugMixin, LogicalLockResult
42
log_exception_quietly, note, mutter, mutter_callsite, warning)
45
# Old formats display a warning, but only once
46
_deprecation_warning_done = False
49
class IsInWriteGroupError(errors.InternalBzrError):
    """Raised when refresh_data is called on a repository inside a write group."""

    _fmt = "May not refresh_data of repo %(repo)s while in a write group."

    def __init__(self, repo):
        errors.InternalBzrError.__init__(self, repo=repo)
57
class CannotSetRevisionId(errors.BzrError):
    """Raised when a repository format cannot store caller-supplied revision ids."""

    _fmt = "Repository format does not support setting revision ids."
62
class FetchResult(object):
    """Result of a fetch operation.

    :ivar revidmap: For lossy fetches, map from source revid to target revid.
    :ivar total_fetched: Number of revisions fetched
    """

    def __init__(self, total_fetched=None, revidmap=None):
        self.total_fetched = total_fetched
        self.revidmap = revidmap
74
class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # whether this commit builder will automatically update the branch that is
    # being committed to
    updates_branch = False

    def __init__(self, repository, parents, config_stack, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that can not be natively
            represented, when pushing to a foreign VCS
        """
        self._config_stack = config_stack
        self._lossy = lossy

        if committer is None:
            self._committer = self._config_stack.get('email')
        elif not isinstance(committer, str):
            self._committer = committer.decode()  # throw if non-ascii
        else:
            self._committer = committer

        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed(revision_id)

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occured.
        """
        raise NotImplementedError(self.any_changes)

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        # TODO(jelmer): Make this repository-format specific
        # The XML serializers cannot round-trip '\r', so reject it outright.
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.items():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, str):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            # TODO(jelmer): Make this repository-format specific
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        raise NotImplementedError(self.commit)

    def abort(self):
        """Abort the commit that is being built."""
        raise NotImplementedError(self.abort)

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a
        RevisionTree representing the newly committed tree. This is
        preferred to calling Repository.revision_tree() because that may
        require deserializing the inventory, while we already have a copy in
        memory.
        """
        raise NotImplementedError(self.revision_tree)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        raise NotImplementedError(self.finish_inventory)

    def _generate_revision_if_needed(self, revision_id):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if not self.repository._format.supports_setting_revision_ids:
            if revision_id is not None:
                raise CannotSetRevisionId()
            return
        if revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self._new_revision_id = revision_id
            self.random_revid = False

    def record_iter_changes(self, tree, basis_revision_id, iter_changes):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on beefore record_iter_changes sees the item.
        :return: A generator of (relpath, fs_hash) tuples for use with
            the tree's observed-sha cache.
        """
        raise NotImplementedError(self.record_iter_changes)
227
class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository.

    :ivar repository_token: The token obtained from the underlying lock, or
        None.
    :ivar unlock: A callable which will unlock the lock.
    """

    def __init__(self, unlock, repository_token):
        LogicalLockResult.__init__(self, unlock)
        self.repository_token = repository_token

    def __repr__(self):
        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
                                                      self.unlock)
244
class WriteGroup(object):
    """Context manager that manages a write group.

    Raising an exception will result in the write group being aborted.
    """

    def __init__(self, repository, suppress_errors=False):
        self.repository = repository
        self._suppress_errors = suppress_errors

    def __enter__(self):
        self.repository.start_write_group()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type:
            self.repository.abort_write_group(self._suppress_errors)
            # Returning a falsy value lets the original exception propagate.
            return False
        else:
            self.repository.commit_write_group()
266
######################################################################
270
class Repository(controldir.ControlComponent, _RelockDebugMixin):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    See VersionedFileRepository in breezy.vf_repository for the
    base class for most Bazaar repositories.
    """

    # Does this repository implementation support random access to
    # items in the tree, or just bulk fetching/pushing of data?
    supports_random_access = True
285
def abort_write_group(self, suppress_errors=False):
286
"""Commit the contents accrued within the current write group.
288
:param suppress_errors: if true, abort_write_group will catch and log
289
unexpected errors that happen during the abort, rather than
290
allowing them to propagate. Defaults to False.
292
:seealso: start_write_group.
294
if self._write_group is not self.get_transaction():
295
# has an unlock or relock occured ?
298
'(suppressed) mismatched lock context and write group. %r, %r',
299
self._write_group, self.get_transaction())
301
raise errors.BzrError(
302
'mismatched lock context and write group. %r, %r' %
303
(self._write_group, self.get_transaction()))
305
self._abort_write_group()
306
except Exception as exc:
307
self._write_group = None
308
if not suppress_errors:
310
mutter('abort_write_group failed')
311
log_exception_quietly()
312
note(gettext('brz: ERROR (ignored): %s'), exc)
313
self._write_group = None
315
def _abort_write_group(self):
316
"""Template method for per-repository write group cleanup.
318
This is called during abort before the write group is considered to be
319
finished and should cleanup any internal state accrued during the write
320
group. There is no requirement that data handed to the repository be
321
*not* made available - this is not a rollback - but neither should any
322
attempt be made to ensure that data added is fully commited. Abort is
323
invoked when an error has occured so futher disk or network operations
324
may not be possible or may error and if possible should not be
328
def add_fallback_repository(self, repository):
329
"""Add a repository to use for looking up data not held locally.
331
:param repository: A repository.
333
raise NotImplementedError(self.add_fallback_repository)
335
def _check_fallback_repository(self, repository):
336
"""Check that this repository can fallback to repository safely.
338
Raise an error if not.
340
:param repository: A repository to fallback to.
342
return InterRepository._assert_same_model(self, repository)
344
def all_revision_ids(self):
345
"""Returns a list of all the revision ids in the repository.
347
This is conceptually deprecated because code should generally work on
348
the graph reachable from a particular revision, and ignore any other
349
revisions that might be present. There is no direct replacement
352
if 'evil' in debug.debug_flags:
353
mutter_callsite(2, "all_revision_ids is linear with history.")
354
return self._all_revision_ids()
356
def _all_revision_ids(self):
357
"""Returns a list of all the revision ids in the repository.
359
These are in as much topological order as the underlying store can
362
raise NotImplementedError(self._all_revision_ids)
364
def break_lock(self):
365
"""Break a lock if one is present from another instance.
367
Uses the ui factory to ask for confirmation if the lock may be from
370
self.control_files.break_lock()
373
def create(controldir):
374
"""Construct the current default format repository in controldir."""
375
return RepositoryFormat.get_default_format().initialize(controldir)
377
def __init__(self, _format, controldir, control_files):
378
"""instantiate a Repository.
380
:param _format: The format of the repository on disk.
381
:param controldir: The ControlDir of the repository.
382
:param control_files: Control files to use for locking, etc.
384
# In the future we will have a single api for all stores for
385
# getting file texts, inventories and revisions, then
386
# this construct will accept instances of those things.
387
super(Repository, self).__init__()
388
self._format = _format
389
# the following are part of the public API for Repository:
390
self.controldir = controldir
391
self.control_files = control_files
393
self._write_group = None
394
# Additional places to query for data.
395
self._fallback_repositories = []
398
def user_transport(self):
399
return self.controldir.user_transport
402
def control_transport(self):
403
return self._transport
406
if self._fallback_repositories:
407
return '%s(%r, fallback_repositories=%r)' % (
408
self.__class__.__name__,
410
self._fallback_repositories)
412
return '%s(%r)' % (self.__class__.__name__,
415
def _has_same_fallbacks(self, other_repo):
416
"""Returns true if the repositories have the same fallbacks."""
417
my_fb = self._fallback_repositories
418
other_fb = other_repo._fallback_repositories
419
if len(my_fb) != len(other_fb):
421
for f, g in zip(my_fb, other_fb):
422
if not f.has_same_location(g):
426
def has_same_location(self, other):
427
"""Returns a boolean indicating if this repository is at the same
428
location as another repository.
430
This might return False even when two repository objects are accessing
431
the same physical repository via different URLs.
433
if self.__class__ is not other.__class__:
435
return (self.control_url == other.control_url)
437
def is_in_write_group(self):
438
"""Return True if there is an open write group.
440
:seealso: start_write_group.
442
return self._write_group is not None
445
return self.control_files.is_locked()
447
def is_write_locked(self):
448
"""Return True if this object is write locked."""
449
return self.is_locked() and self.control_files._lock_mode == 'w'
451
def lock_write(self, token=None):
452
"""Lock this repository for writing.
454
This causes caching within the repository obejct to start accumlating
455
data during reads, and allows a 'write_group' to be obtained. Write
456
groups must be used for actual data insertion.
458
A token should be passed in if you know that you have locked the object
459
some other way, and need to synchronise this object's state with that
462
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
464
:param token: if this is already locked, then lock_write will fail
465
unless the token matches the existing lock.
466
:returns: a token if this instance supports tokens, otherwise None.
467
:raises TokenLockingNotSupported: when a token is given but this
468
instance doesn't support using token locks.
469
:raises MismatchedToken: if the specified token doesn't match the token
470
of the existing lock.
471
:seealso: start_write_group.
472
:return: A RepositoryWriteLockResult.
474
locked = self.is_locked()
475
token = self.control_files.lock_write(token=token)
477
self._warn_if_deprecated()
479
for repo in self._fallback_repositories:
480
# Writes don't affect fallback repos
483
return RepositoryWriteLockResult(self.unlock, token)
486
"""Lock the repository for read operations.
488
:return: An object with an unlock method which will release the lock
491
locked = self.is_locked()
492
self.control_files.lock_read()
494
self._warn_if_deprecated()
496
for repo in self._fallback_repositories:
499
return LogicalLockResult(self.unlock)
501
def get_physical_lock_status(self):
502
return self.control_files.get_physical_lock_status()
504
def leave_lock_in_place(self):
505
"""Tell this repository not to release the physical lock when this
508
If lock_write doesn't return a token, then this method is not supported.
510
self.control_files.leave_in_place()
512
def dont_leave_lock_in_place(self):
513
"""Tell this repository to release the physical lock when this
514
object is unlocked, even if it didn't originally acquire it.
516
If lock_write doesn't return a token, then this method is not supported.
518
self.control_files.dont_leave_in_place()
520
def gather_stats(self, revid=None, committers=None):
521
"""Gather statistics from a revision id.
523
:param revid: The revision id to gather statistics from, if None, then
524
no revision specific statistics are gathered.
525
:param committers: Optional parameter controlling whether to grab
526
a count of committers from the revision specific statistics.
527
:return: A dictionary of statistics. Currently this contains:
528
committers: The number of committers if requested.
529
firstrev: A tuple with timestamp, timezone for the penultimate left
530
most ancestor of revid, if revid is not the NULL_REVISION.
531
latestrev: A tuple with timestamp, timezone for revid, if revid is
532
not the NULL_REVISION.
533
revisions: The total revision count in the repository.
534
size: An estimate disk size of the repository in bytes.
536
with self.lock_read():
538
if revid and committers:
539
result['committers'] = 0
540
if revid and revid != _mod_revision.NULL_REVISION:
541
graph = self.get_graph()
543
all_committers = set()
544
revisions = [r for (r, p) in graph.iter_ancestry([revid])
545
if r != _mod_revision.NULL_REVISION]
548
# ignore the revisions in the middle - just grab first and last
549
revisions = revisions[0], revisions[-1]
550
for revision in self.get_revisions(revisions):
551
if not last_revision:
552
last_revision = revision
554
all_committers.add(revision.committer)
555
first_revision = revision
557
result['committers'] = len(all_committers)
558
result['firstrev'] = (first_revision.timestamp,
559
first_revision.timezone)
560
result['latestrev'] = (last_revision.timestamp,
561
last_revision.timezone)
564
def find_branches(self, using=False):
565
"""Find branches underneath this repository.
567
This will include branches inside other branches.
569
:param using: If True, list only branches using this repository.
571
if using and not self.is_shared():
572
for branch in self.controldir.list_branches():
576
class Evaluator(object):
579
self.first_call = True
581
def __call__(self, controldir):
582
# On the first call, the parameter is always the controldir
583
# containing the current repo.
584
if not self.first_call:
586
repository = controldir.open_repository()
587
except errors.NoRepositoryPresent:
590
return False, ([], repository)
591
self.first_call = False
592
value = (controldir.list_branches(), None)
595
for branches, repository in controldir.ControlDir.find_controldirs(
596
self.user_transport, evaluate=Evaluator()):
597
if branches is not None:
598
for branch in branches:
600
if not using and repository is not None:
601
for branch in repository.find_branches():
604
def search_missing_revision_ids(self, other,
605
find_ghosts=True, revision_ids=None, if_present_ids=None,
607
"""Return the revision ids that other has that this does not.
609
These are returned in topological order.
611
revision_ids: only return revision ids included by revision_id.
613
with self.lock_read():
614
return InterRepository.get(other, self).search_missing_revision_ids(
615
find_ghosts=find_ghosts, revision_ids=revision_ids,
616
if_present_ids=if_present_ids, limit=limit)
620
"""Open the repository rooted at base.
622
For instance, if the repository is at URL/.bzr/repository,
623
Repository.open(URL) -> a Repository instance.
625
control = controldir.ControlDir.open(base)
626
return control.open_repository()
628
def copy_content_into(self, destination, revision_id=None):
629
"""Make a complete copy of the content in self into destination.
631
This is a destructive operation! Do not use it on existing
634
return InterRepository.get(self, destination).copy_content(revision_id)
636
def commit_write_group(self):
637
"""Commit the contents accrued within the current write group.
639
:seealso: start_write_group.
641
:return: it may return an opaque hint that can be passed to 'pack'.
643
if self._write_group is not self.get_transaction():
644
# has an unlock or relock occured ?
645
raise errors.BzrError('mismatched lock context %r and '
647
(self.get_transaction(), self._write_group))
648
result = self._commit_write_group()
649
self._write_group = None
652
def _commit_write_group(self):
653
"""Template method for per-repository write group cleanup.
655
This is called before the write group is considered to be
656
finished and should ensure that all data handed to the repository
657
for writing during the write group is safely committed (to the
658
extent possible considering file system caching etc).
661
def suspend_write_group(self):
662
"""Suspend a write group.
664
:raise UnsuspendableWriteGroup: If the write group can not be
666
:return: List of tokens
668
raise errors.UnsuspendableWriteGroup(self)
670
def refresh_data(self):
671
"""Re-read any data needed to synchronise with disk.
673
This method is intended to be called after another repository instance
674
(such as one used by a smart server) has inserted data into the
675
repository. On all repositories this will work outside of write groups.
676
Some repository formats (pack and newer for breezy native formats)
677
support refresh_data inside write groups. If called inside a write
678
group on a repository that does not support refreshing in a write group
679
IsInWriteGroupError will be raised.
683
def resume_write_group(self, tokens):
684
if not self.is_write_locked():
685
raise errors.NotWriteLocked(self)
686
if self._write_group:
687
raise errors.BzrError('already in a write group')
688
self._resume_write_group(tokens)
689
# so we can detect unlock/relock - the write group is now entered.
690
self._write_group = self.get_transaction()
692
def _resume_write_group(self, tokens):
693
raise errors.UnsuspendableWriteGroup(self)
695
def fetch(self, source, revision_id=None, find_ghosts=False, lossy=False):
696
"""Fetch the content required to construct revision_id from source.
698
If revision_id is None, then all content is copied.
700
fetch() may not be used when the repository is in a write group -
701
either finish the current write group before using fetch, or use
702
fetch before starting the write group.
704
:param find_ghosts: Find and copy revisions in the source that are
705
ghosts in the target (and not reachable directly by walking out to
706
the first-present revision in target from revision_id).
707
:param revision_id: If specified, all the content needed for this
708
revision ID will be copied to the target. Fetch will determine for
709
itself which content needs to be copied.
710
:return: A FetchResult object
712
if self.is_in_write_group():
713
raise errors.InternalBzrError(
714
"May not fetch while in a write group.")
715
# fast path same-url fetch operations
716
# TODO: lift out to somewhere common with RemoteRepository
717
# <https://bugs.launchpad.net/bzr/+bug/401646>
718
if (self.has_same_location(source)
719
and self._has_same_fallbacks(source)):
720
# check that last_revision is in 'from' and then return a
722
if (revision_id is not None and
723
not _mod_revision.is_null(revision_id)):
724
self.get_revision(revision_id)
726
inter = InterRepository.get(source, self)
728
revision_id=revision_id, find_ghosts=find_ghosts, lossy=lossy)
730
def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
731
timezone=None, committer=None, revprops=None,
732
revision_id=None, lossy=False):
733
"""Obtain a CommitBuilder for this repository.
735
:param branch: Branch to commit to.
736
:param parents: Revision ids of the parents of the new revision.
737
:param config_stack: Configuration stack to use.
738
:param timestamp: Optional timestamp recorded for commit.
739
:param timezone: Optional timezone for timestamp.
740
:param committer: Optional committer to set for commit.
741
:param revprops: Optional dictionary of revision properties.
742
:param revision_id: Optional revision id.
743
:param lossy: Whether to discard data that can not be natively
744
represented, when pushing to a foreign VCS
746
raise NotImplementedError(self.get_commit_builder)
748
@only_raises(errors.LockNotHeld, errors.LockBroken)
750
if (self.control_files._lock_count == 1 and
751
self.control_files._lock_mode == 'w'):
752
if self._write_group is not None:
753
self.abort_write_group()
754
self.control_files.unlock()
755
raise errors.BzrError(
756
'Must end write groups before releasing write locks.')
757
self.control_files.unlock()
758
if self.control_files._lock_count == 0:
759
for repo in self._fallback_repositories:
762
def clone(self, controldir, revision_id=None):
763
"""Clone this repository into controldir using the current format.
765
Currently no check is made that the format of this repository and
766
the bzrdir format are compatible. FIXME RBC 20060201.
768
:return: The newly created destination repository.
770
with self.lock_read():
771
# TODO: deprecate after 0.16; cloning this with all its settings is
772
# probably not very useful -- mbp 20070423
773
dest_repo = self._create_sprouting_repo(
774
controldir, shared=self.is_shared())
775
self.copy_content_into(dest_repo, revision_id)
778
def start_write_group(self):
779
"""Start a write group in the repository.
781
Write groups are used by repositories which do not have a 1:1 mapping
782
between file ids and backend store to manage the insertion of data from
783
both fetch and commit operations.
785
A write lock is required around the
786
start_write_group/commit_write_group for the support of lock-requiring
789
One can only insert data into a repository inside a write group.
793
if not self.is_write_locked():
794
raise errors.NotWriteLocked(self)
795
if self._write_group:
796
raise errors.BzrError('already in a write group')
797
self._start_write_group()
798
# so we can detect unlock/relock - the write group is now entered.
799
self._write_group = self.get_transaction()
801
def _start_write_group(self):
802
"""Template method for per-repository write group startup.
804
This is called before the write group is considered to be
808
def sprout(self, to_bzrdir, revision_id=None):
809
"""Create a descendent repository for new development.
811
Unlike clone, this does not copy the settings of the repository.
813
with self.lock_read():
814
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
815
dest_repo.fetch(self, revision_id=revision_id)
818
def _create_sprouting_repo(self, a_controldir, shared):
820
a_controldir._format, self.controldir._format.__class__):
821
# use target default format.
822
dest_repo = a_controldir.create_repository()
824
# Most control formats need the repository to be specifically
825
# created, but on some old all-in-one formats it's not needed
827
dest_repo = self._format.initialize(
828
a_controldir, shared=shared)
829
except errors.UninitializableFormat:
830
dest_repo = a_controldir.open_repository()
833
def has_revision(self, revision_id):
834
"""True if this repository has a copy of the revision."""
835
with self.lock_read():
836
return revision_id in self.has_revisions((revision_id,))
838
def has_revisions(self, revision_ids):
839
"""Probe to find out the presence of multiple revisions.
841
:param revision_ids: An iterable of revision_ids.
842
:return: A set of the revision_ids that were present.
844
raise NotImplementedError(self.has_revisions)
846
def get_revision(self, revision_id):
847
"""Return the Revision object for a named revision."""
848
with self.lock_read():
849
return self.get_revisions([revision_id])[0]
851
def get_revision_reconcile(self, revision_id):
852
"""'reconcile' helper routine that allows access to a revision always.
854
This variant of get_revision does not cross check the weave graph
855
against the revision one as get_revision does: but it should only
856
be used by reconcile, or reconcile-alike commands that are correcting
857
or testing the revision graph.
859
raise NotImplementedError(self.get_revision_reconcile)
861
def get_revisions(self, revision_ids):
862
"""Get many revisions at once.
864
Repositories that need to check data on every revision read should
865
subclass this method.
868
for revid, rev in self.iter_revisions(revision_ids):
870
raise errors.NoSuchRevision(self, revid)
872
return [revs[revid] for revid in revision_ids]
874
def iter_revisions(self, revision_ids):
875
"""Iterate over revision objects.
877
:param revision_ids: An iterable of revisions to examine. None may be
878
passed to request all revisions known to the repository. Note that
879
not all repositories can find unreferenced revisions; for those
880
repositories only referenced ones will be returned.
881
:return: An iterator of (revid, revision) tuples. Absent revisions (
882
those asked for but not available) are returned as (revid, None).
883
N.B.: Revisions are not necessarily yielded in order.
885
raise NotImplementedError(self.iter_revisions)
887
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
888
"""Produce a generator of revision deltas.
890
Note that the input is a sequence of REVISIONS, not revision_ids.
891
Trees will be held in memory until the generator exits.
892
Each delta is relative to the revision's lefthand predecessor.
894
:param specific_fileids: if not None, the result is filtered
895
so that only those file-ids, their parents and their
896
children are included.
898
raise NotImplementedError(self.get_deltas_for_revisions)
900
def get_revision_delta(self, revision_id):
901
"""Return the delta for one revision.
903
The delta is relative to the left-hand predecessor of the
906
with self.lock_read():
907
r = self.get_revision(revision_id)
908
return list(self.get_deltas_for_revisions([r]))[0]
910
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
911
raise NotImplementedError(self.store_revision_signature)
913
def add_signature_text(self, revision_id, signature):
914
"""Store a signature text for a revision.
916
:param revision_id: Revision id of the revision
917
:param signature: Signature text.
919
raise NotImplementedError(self.add_signature_text)
921
def iter_files_bytes(self, desired_files):
922
"""Iterate through file versions.
924
Files will not necessarily be returned in the order they occur in
925
desired_files. No specific order is guaranteed.
927
Yields pairs of identifier, bytes_iterator. identifier is an opaque
928
value supplied by the caller as part of desired_files. It should
929
uniquely identify the file version in the caller's context. (Examples:
930
an index number or a TreeTransform trans_id.)
932
:param desired_files: a list of (file_id, revision_id, identifier)
935
raise NotImplementedError(self.iter_files_bytes)
937
def get_rev_id_for_revno(self, revno, known_pair):
938
"""Return the revision id of a revno, given a later (revno, revid)
939
pair in the same history.
941
:return: if found (True, revid). If the available history ran out
942
before reaching the revno, then this returns
943
(False, (closest_revno, closest_revid)).
945
known_revno, known_revid = known_pair
946
partial_history = [known_revid]
947
distance_from_known = known_revno - revno
948
if distance_from_known < 0:
949
raise errors.RevnoOutOfBounds(revno, (0, known_revno))
952
self, partial_history, stop_index=distance_from_known)
953
except errors.RevisionNotPresent as err:
954
if err.revision_id == known_revid:
955
# The start revision (known_revid) wasn't found.
956
raise errors.NoSuchRevision(self, known_revid)
957
# This is a stacked repository with no fallbacks, or a there's a
958
# left-hand ghost. Either way, even though the revision named in
959
# the error isn't in this repo, we know it's the next step in this
961
partial_history.append(err.revision_id)
962
if len(partial_history) <= distance_from_known:
963
# Didn't find enough history to get a revid for the revno.
964
earliest_revno = known_revno - len(partial_history) + 1
965
return (False, (earliest_revno, partial_history[-1]))
966
if len(partial_history) - 1 > distance_from_known:
967
raise AssertionError('_iter_for_revno returned too much history')
968
return (True, partial_history[-1])
971
"""Return True if this repository is flagged as a shared repository."""
972
raise NotImplementedError(self.is_shared)
974
def reconcile(self, other=None, thorough=False):
    """Reconcile this repository.

    Abstract here; concrete repository implementations provide it.
    """
    raise NotImplementedError(self.reconcile)
978
def _refresh_data(self):
    """Helper called from lock_* to ensure coherency with disk.

    The default implementation does nothing; it is however possible
    for repositories to maintain loaded indices across multiple locks
    by checking inside their implementation of this method to see
    whether their indices are still valid. This depends of course on
    the disk format being validatable in this manner. This method is
    also called by the refresh_data() public interface to cause a refresh
    to occur while in a write lock so that data inserted by a smart server
    push operation is visible on the client's instance of the physical
    repository.
    """
992
def revision_tree(self, revision_id):
    """Return Tree for a revision on this branch.

    `revision_id` may be NULL_REVISION for the empty tree revision.
    """
    raise NotImplementedError(self.revision_tree)
999
def revision_trees(self, revision_ids):
    """Return Trees for revisions in this repository.

    :param revision_ids: a sequence of revision-ids;
        a revision-id may not be None or b'null:'
    """
    raise NotImplementedError(self.revision_trees)
1007
def pack(self, hint=None, clean_obsolete_packs=False):
    """Compress the data within the repository.

    This operation only makes sense for some repository types. For other
    types it should be a no-op that just returns.

    This stub method does not require a lock, but subclasses should use
    self.write_lock as this is a long running call it's reasonable to
    implicitly lock for the user.

    :param hint: If not supplied, the whole repository is packed.
        If supplied, the repository may use the hint parameter as a
        hint for the parts of the repository to pack. A hint can be
        obtained from the result of commit_write_group(). Out of
        date hints are simply ignored, because concurrent operations
        can obsolete them rapidly.

    :param clean_obsolete_packs: Clean obsolete packs immediately after
        the pack operation.
    """
1028
def get_transaction(self):
    """Return the current transaction from this repository's control files."""
    control = self.control_files
    return control.get_transaction()
1031
def get_parent_map(self, revision_ids):
    """Map revision ids to their parents; see graph.StackedParentsProvider.get_parent_map."""
    raise NotImplementedError(self.get_parent_map)
1035
def _get_parent_map_no_fallbacks(self, revision_ids):
    """Same as Repository.get_parent_map except doesn't query fallbacks."""
    # revisions index works in keys; this just works in revisions
    # therefore wrap and unwrap
    query_keys = []
    result = {}
    for revision_id in revision_ids:
        if revision_id == _mod_revision.NULL_REVISION:
            result[revision_id] = ()
        elif revision_id is None:
            raise ValueError('get_parent_map(None) is not valid')
        else:
            query_keys.append((revision_id,))
    vf = self.revisions.without_fallbacks()
    for (revision_id,), parent_keys in (
            vf.get_parent_map(query_keys).items()):
        if parent_keys:
            result[revision_id] = tuple([parent_revid
                                         for (parent_revid,) in parent_keys])
        else:
            # No recorded parents: treat as a root revision.
            result[revision_id] = (_mod_revision.NULL_REVISION,)
    return result
1058
def _make_parents_provider(self):
    # Without external lookup support there can be no fallback
    # repositories to stack, so return the unstacked provider directly.
    if not self._format.supports_external_lookups:
        return self._make_parents_provider_unstacked()
    return graph.StackedParentsProvider(_LazyListJoin(
        [self._make_parents_provider_unstacked()],
        self._fallback_repositories))
1065
def _make_parents_provider_unstacked(self):
    """Adapt the no-fallback parent-map lookup into a parents provider."""
    lookup = self._get_parent_map_no_fallbacks
    return graph.CallableToParentsProviderAdapter(lookup)
1069
def get_known_graph_ancestry(self, revision_ids):
    """Return the known graph for a set of revision ids and their ancestors."""
    raise NotImplementedError(self.get_known_graph_ancestry)
1074
def get_file_graph(self):
    """Return the graph walker for files.

    Subclasses supply the concrete implementation.
    """
    raise NotImplementedError(self.get_file_graph)
1078
def get_graph(self, other_repository=None):
    """Return the graph walker for this repository format."""
    provider = self._make_parents_provider()
    needs_stacking = (
        other_repository is not None
        and not self.has_same_location(other_repository))
    if needs_stacking:
        # Consult the other repository's parents as well.
        provider = graph.StackedParentsProvider(
            [provider, other_repository._make_parents_provider()])
    return graph.Graph(provider)
1087
def set_make_working_trees(self, new_value):
    """Set the policy flag for making working trees when creating branches.

    This only applies to branches that use this repository.

    The default is 'True'.
    :param new_value: True to restore the default, False to disable making
        working trees.
    """
    raise NotImplementedError(self.set_make_working_trees)
1098
def make_working_trees(self):
    """Return the policy for making working trees on new branches."""
    raise NotImplementedError(self.make_working_trees)
1102
def sign_revision(self, revision_id, gpg_strategy):
    """Sign revision_id using gpg_strategy; abstract here, see subclasses."""
    raise NotImplementedError(self.sign_revision)
1105
def verify_revision_signature(self, revision_id, gpg_strategy):
    """Verify the signature on a revision.

    :param revision_id: the revision to verify
    :param gpg_strategy: the GPGStrategy object to use

    :return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
    """
    raise NotImplementedError(self.verify_revision_signature)
1115
def verify_revision_signatures(self, revision_ids, gpg_strategy):
    """Verify revision signatures for a number of revisions.

    :param revision_ids: the revisions to verify
    :param gpg_strategy: the GPGStrategy object to use
    :return: Iterator over tuples with revision id, result and keys
    """
    with self.lock_read():
        for revid in revision_ids:
            # Delegate per-revision verification; yield lazily so callers
            # can stop early.
            (result, key) = self.verify_revision_signature(revid, gpg_strategy)
            yield revid, result, key
1127
def has_signature_for_revision_id(self, revision_id):
    """Ask whether a signature is stored for revision_id in the repository."""
    raise NotImplementedError(self.has_signature_for_revision_id)
1131
def get_signature_text(self, revision_id):
    """Return the text for a signature; lookup is implemented by subclasses."""
    raise NotImplementedError(self.get_signature_text)
1135
def check(self, revision_ids=None, callback_refs=None, check_repo=True):
    """Check consistency of all history of given revision_ids.

    Different repository implementations should override _check().

    :param revision_ids: A non-empty list of revision_ids whose ancestry
        will be checked. Typically the last revision_id of a branch.
    :param callback_refs: A dict of check-refs to resolve and callback
        the check/_check method on the items listed as wanting the ref.
    :param check_repo: If False do not check the repository contents, just
        calculate the data callback_refs requires and call them back.
    """
    return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
                       check_repo=check_repo)
1151
def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
    # Abstract hook behind check(); the NotImplementedError names the
    # public check method so callers see the supported entry point.
    raise NotImplementedError(self.check)
1154
def _warn_if_deprecated(self, branch=None):
    """Emit a one-time deprecation warning if this format is deprecated."""
    if not self._format.is_deprecated():
        return
    global _deprecation_warning_done
    if _deprecation_warning_done:
        # Old formats display a warning, but only once per process.
        return
    try:
        if branch is None:
            conf = config.GlobalStack()
        else:
            conf = branch.get_config_stack()
        if 'format_deprecation' in conf.get('suppress_warnings'):
            return
        warning("Format %s for %s is deprecated -"
                " please use 'brz upgrade' to get better performance"
                % (self._format, self.controldir.transport.base))
    finally:
        _deprecation_warning_done = True
1173
def supports_rich_root(self):
    """Report whether this repository's format records rich root data."""
    fmt = self._format
    return fmt.rich_root_data
1176
def _check_ascii_revisionid(self, revision_id, method):
    """Private helper for ascii-only repositories."""
    # weave repositories refuse to store revisionids that are non-ascii.
    if revision_id is not None:
        # weaves require ascii revision ids.
        if isinstance(revision_id, str):
            try:
                revision_id.encode('ascii')
            except UnicodeEncodeError:
                raise errors.NonAsciiRevisionId(method, self)
        else:
            try:
                revision_id.decode('ascii')
            except UnicodeDecodeError:
                raise errors.NonAsciiRevisionId(method, self)
1193
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
    """Registry of repository formats."""

    def get_default(self):
        """Return the repository format of the current default control dir."""
        default_cd = controldir.format_registry.make_controldir('default')
        return default_cd.repository_format
1201
network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = RepositoryFormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
1218
#####################################################################
1219
# Repository Formats
1221
class RepositoryFormat(controldir.ControlComponentFormat):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * a optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingcontroldir - the controldir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # repository.
    pack_compresses = False
    # Does the repository storage understand references to trees?
    supports_tree_reference = None
    # Is the format experimental ?
    experimental = False
    # Does this repository format escape funky characters, or does it create
    # files with similar names as the versioned files in its contents on disk
    # and back on the fly?
    supports_funky_characters = None
    # Does this repository format support leaving locks?
    supports_leaving_lock = None
    # Does this format support the full VersionedFiles interface?
    supports_full_versioned_files = None
    # Does this format support signing revision signatures?
    supports_revision_signatures = True
    # Can the revision graph have incorrect parents?
    revision_graph_can_have_wrong_parents = None
    # Does this format support setting revision ids?
    supports_setting_revision_ids = True
    # Does this format support rich root data?
    rich_root_data = None
    # Does this format support explicitly versioned directories?
    supports_versioned_directories = None
    # Can other repositories be nested into one of this format?
    supports_nesting_repositories = None
    # Is it possible for revisions to be present without being referenced
    # somewhere?
    supports_unreferenced_revisions = None
    # Does this format store the current Branch.nick in a revision when
    # creating commits?
    supports_storing_branch_nick = True
    # Does the format support overriding the transport to use
    supports_overriding_transport = True
    # Does the format support setting custom revision properties?
    supports_custom_revision_properties = True
    # Does the format record per-file revision metadata?
    records_per_file_revision = True
1311
return "%s()" % self.__class__.__name__
1313
def __eq__(self, other):
    """Formats carry no state, so instances of one class compare equal."""
    # Equivalent to isinstance(other, self.__class__).
    return isinstance(other, type(self))
1317
def __ne__(self, other):
    """Negation of equality, routed through the == protocol."""
    return not (self == other)
1320
def get_format_description(self):
    """Return the short description for this format; abstract here."""
    raise NotImplementedError(self.get_format_description)
1324
def initialize(self, controldir, shared=False):
    """Initialize a repository of this format in controldir.

    :param controldir: The controldir to put the new repository in it.
    :param shared: The repository should be initialized as a sharable one.
    :returns: The new repository object.

    This may raise UninitializableFormat if shared repositories are not
    compatible with the controldir.
    """
    raise NotImplementedError(self.initialize)
1336
def is_supported(self):
    """Is this format supported?

    Supported formats must be initializable and openable.
    Unsupported formats may not support initialization or committing or
    some other features depending on the reason for not being supported.
    """
    return True
1345
def is_deprecated(self):
    """Is this format deprecated?

    Deprecated formats may trigger a user-visible warning recommending
    the user to upgrade. They are still fully supported.
    """
    return False
1353
def network_name(self):
    """A simple byte string uniquely identifying this format for RPC calls.

    MetaDir repository formats use their disk format string to identify the
    repository over the wire. All in one formats such as bzr < 0.8, and
    foreign formats like svn/git and hg should use some marker which is
    unique and immutable.
    """
    raise NotImplementedError(self.network_name)
1363
def check_conversion_target(self, target_format):
    """Raise BadConversionTarget if target_format cannot hold our data."""
    if self.rich_root_data and not target_format.rich_root_data:
        raise errors.BadConversionTarget(
            'Does not support rich root data.', target_format,
            from_format=self)
    if (self.supports_tree_reference
            and not getattr(target_format, 'supports_tree_reference', False)):
        raise errors.BadConversionTarget(
            'Does not support nested trees', target_format,
            from_format=self)
1374
def open(self, controldir, _found=False):
    """Return an instance of this format for a controldir.

    _found is a private parameter, do not use it.
    """
    raise NotImplementedError(self.open)
1381
def _run_post_repo_init_hooks(self, repository, controldir, shared):
    """Invoke every registered post_repo_init hook for a new repository."""
    from .controldir import ControlDir, RepoInitHookParams
    hooks = ControlDir.hooks['post_repo_init']
    if not hooks:
        # Avoid building the params object when nobody is listening.
        return
    params = RepoInitHookParams(repository, self, controldir, shared)
    for hook in hooks:
        hook(params)
1391
# formats which have no format string are not discoverable or independently
1392
# creatable on disk, so are not registered in format_registry. They're
1393
# all in breezy.bzr.knitrepo now. When an instance of one of these is
1394
# needed, it's constructed directly by the ControlDir. Non-native formats where
1395
# the repository is not separately opened are similar.
1397
format_registry.register_lazy(
    b'Bazaar-NG Knit Repository Format 1',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    b'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    b'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    b'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )
format_registry.register_lazy(
    b'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    (b"Bazaar development format 2 with subtree support "
     b"(needs bzr.dev from before 1.8)\n"),
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
format_registry.register_lazy(
    b'Bazaar development format 8\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
1479
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""
1494
def copy_content(self, revision_id=None):
    """Make a complete copy of the content in self into destination.

    This is a destructive operation! Do not use it on existing
    repositories.

    :param revision_id: Only copy the content needed to construct
        revision_id and its parents.
    """
    with self.lock_write():
        try:
            self.target.set_make_working_trees(
                self.source.make_working_trees())
        except NotImplementedError:
            # Some targets cannot set the working-tree policy; best effort.
            pass
        self.target.fetch(self.source, revision_id=revision_id)
1511
def fetch(self, revision_id=None, find_ghosts=False, lossy=False):
    """Fetch the content required to construct revision_id.

    The content is copied from self.source to self.target.

    :param revision_id: if None all content is copied, if NULL_REVISION no
        content is copied.
    :return: FetchResult
    """
    raise NotImplementedError(self.fetch)
1522
def search_missing_revision_ids(
        self, find_ghosts=True, revision_ids=None, if_present_ids=None,
        limit=None):
    """Return the revision ids that source has that target does not.

    :param revision_ids: return revision ids included by these
        revision_ids.  NoSuchRevision will be raised if any of these
        revisions are not present.
    :param if_present_ids: like revision_ids, but will not cause
        NoSuchRevision if any of these are absent, instead they will simply
        not be in the result.  This is useful for e.g. finding revisions
        to fetch for tags, which may reference absent revisions.
    :param find_ghosts: If True find missing revisions in deep history
        rather than just finding the surface difference.
    :param limit: Maximum number of revisions to return, topologically
        ordered
    :return: A breezy.graph.SearchResult.
    """
    raise NotImplementedError(self.search_missing_revision_ids)
1543
@staticmethod
def _same_model(source, target):
    """True if source and target have the same data representation.

    Note: this is always called on the base class; overriding it in a
    subclass will have no effect.
    """
    try:
        InterRepository._assert_same_model(source, target)
        return True
    except errors.IncompatibleRepositories:
        # The exception details are irrelevant here; incompatible means
        # "not the same model".
        return False
1556
@staticmethod
def _assert_same_model(source, target):
    """Raise an exception if two repositories do not use the same model."""
    if source.supports_rich_root() != target.supports_rich_root():
        raise errors.IncompatibleRepositories(source, target,
                                              "different rich-root support")
    if source._serializer != target._serializer:
        raise errors.IncompatibleRepositories(source, target,
                                              "different serializers")
1567
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        with ui.ui_factory.nested_progress_bar() as pb:
            # this is only useful with metadir layouts - separated repo content.
            # trigger an assertion if not such
            repo._format.get_format_string()
            self.repo_dir = repo.controldir
            pb.update(gettext('Moving repository to repository.backup'))
            self.repo_dir.transport.move('repository', 'repository.backup')
            backup_transport = self.repo_dir.transport.clone(
                'repository.backup')
            repo._format.check_conversion_target(self.target_format)
            self.source_repo = repo._format.open(
                self.repo_dir,
                # NOTE(review): _found/_override_transport reconstructed from
                # the upstream implementation of this converter — confirm.
                _found=True,
                _override_transport=backup_transport)
            pb.update(gettext('Creating new repository'))
            converted = self.target_format.initialize(self.repo_dir,
                                                      self.source_repo.is_shared())
            with converted.lock_write():
                pb.update(gettext('Copying content'))
                self.source_repo.copy_content_into(converted)
            pb.update(gettext('Deleting old repository content'))
            self.repo_dir.transport.delete_tree('repository.backup')
            ui.ui_factory.note(gettext('repository converted'))
1612
def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        # Drop parents that are not themselves present in the graph.
        revision_graph[key] = tuple(parent for parent in parents if parent
                                    in revision_graph)
    return revision_graph
1623
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    graph = repo.get_graph()
    iterator = graph.iter_lefthand_ancestry(start_revision,
                                            (_mod_revision.NULL_REVISION,))
    try:
        # skip the last revision in the list
        next(iterator)
        while True:
            if (stop_index is not None and
                    len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = next(iterator)
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history to walk; the cache holds everything available.
        return
1657
class _LazyListJoin(object):
1658
"""An iterable yielding the contents of many lists as one list.
1660
Each iterator made from this will reflect the current contents of the lists
1661
at the time the iterator is made.
1663
This is used by Repository's _make_parents_provider implementation so that
1666
pp = repo._make_parents_provider() # uses a list of fallback repos
1667
pp.add_fallback_repository(other_repo) # appends to that list
1668
result = pp.get_parent_map(...)
1669
# The result will include revs from other_repo
1672
def __init__(self, *list_parts):
1673
self.list_parts = list_parts
1677
for list_part in self.list_parts:
1678
full_list.extend(list_part)
1679
return iter(full_list)
1682
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,