# Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
from __future__ import absolute_import
19
from .lazy_import import lazy_import
20
lazy_import(globals(), """
29
revision as _mod_revision,
32
from breezy.i18n import gettext
40
from .decorators import only_raises
41
from .inter import InterObject
42
from .lock import _RelockDebugMixin, LogicalLockResult
44
log_exception_quietly, note, mutter, mutter_callsite, warning)
47
# Old formats display a warning, but only once
48
_deprecation_warning_done = False
51
class IsInWriteGroupError(errors.InternalBzrError):
    """Raised when refresh_data is called on a repository in a write group.

    Some repository formats cannot safely re-read on-disk data while a
    write group is active; this error signals that condition.
    """

    _fmt = "May not refresh_data of repo %(repo)s while in a write group."

    def __init__(self, repo):
        """:param repo: The repository that was in a write group."""
        errors.InternalBzrError.__init__(self, repo=repo)
59
class CannotSetRevisionId(errors.BzrError):
    """Raised when a caller supplies a revision id the format cannot store."""

    _fmt = "Repository format does not support setting revision ids."
64
class FetchResult(object):
    """Result of a fetch operation.

    :ivar revidmap: For lossy fetches, map from source revid to target revid.
    :ivar total_fetched: Number of revisions fetched
    """

    def __init__(self, total_fetched=None, revidmap=None):
        """:param total_fetched: Number of revisions fetched, or None if
            unknown.
        :param revidmap: Mapping of source revid to target revid for lossy
            fetches, or None.
        """
        self.total_fetched = total_fetched
        self.revidmap = revidmap
76
class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # whether this commit builder will automatically update the branch that is
    # being committed to
    updates_branch = False

    def __init__(self, repository, parents, config_stack, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that can not be natively
            represented, when pushing to a foreign VCS
        """
        self._config_stack = config_stack
        self._lossy = lossy

        if committer is None:
            self._committer = self._config_stack.get('email')
        elif not isinstance(committer, str):
            self._committer = committer.decode()  # throw if non-ascii
        else:
            self._committer = committer

        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed(revision_id)

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occured.
        """
        raise NotImplementedError(self.any_changes)

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        # TODO(jelmer): Make this repository-format specific
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        """Check that every revision property value is a valid unicode string.

        :raises ValueError: if a value is not a str or contains characters
            the serializer cannot round-trip.
        """
        for key, value in revprops.items():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, str):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            # TODO(jelmer): Make this repository-format specific
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        raise NotImplementedError(self.commit)

    def abort(self):
        """Abort the commit that is being built.
        """
        raise NotImplementedError(self.abort)

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a
        RevisionTree representing the newly committed tree. This is
        preferred to calling Repository.revision_tree() because that may
        require deserializing the inventory, while we already have a copy in
        memory.
        """
        raise NotImplementedError(self.revision_tree)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        raise NotImplementedError(self.finish_inventory)

    def _generate_revision_if_needed(self, revision_id):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if not self.repository._format.supports_setting_revision_ids:
            if revision_id is not None:
                raise CannotSetRevisionId()
            return
        if revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self._new_revision_id = revision_id
            self.random_revid = False

    def record_iter_changes(self, tree, basis_revision_id, iter_changes):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on beefore record_iter_changes sees the item.
        :return: A generator of (relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        raise NotImplementedError(self.record_iter_changes)
229
class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository.

    :ivar repository_token: The token obtained from the underlying lock, or
        None.
    :ivar unlock: A callable which will unlock the lock.
    """

    def __init__(self, unlock, repository_token):
        """:param unlock: Callable releasing the lock.
        :param repository_token: Token from the underlying lock, or None.
        """
        LogicalLockResult.__init__(self, unlock)
        self.repository_token = repository_token

    def __repr__(self):
        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
                                                      self.unlock)
246
class WriteGroup(object):
    """Context manager that manages a write group.

    Raising an exception will result in the write group being aborted.
    """

    def __init__(self, repository, suppress_errors=False):
        """:param repository: Repository in which to run the write group.
        :param suppress_errors: Passed to abort_write_group on failure.
        """
        self.repository = repository
        self._suppress_errors = suppress_errors

    def __enter__(self):
        self.repository.start_write_group()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit only on clean exit; abort (and let the exception
        # propagate by returning None) if anything was raised.
        if exc_type:
            self.repository.abort_write_group(self._suppress_errors)
        else:
            self.repository.commit_write_group()
268
######################################################################
# Repositories
class Repository(controldir.ControlComponent, _RelockDebugMixin):
273
"""Repository holding history for one or more branches.
275
The repository holds and retrieves historical information including
276
revisions and file history. It's normally accessed only by the Branch,
277
which views a particular line of development through that history.
279
See VersionedFileRepository in breezy.vf_repository for the
280
base class for most Bazaar repositories.
283
# Does this repository implementation support random access to
284
# items in the tree, or just bulk fetching/pushing of data?
285
supports_random_access = True
287
def abort_write_group(self, suppress_errors=False):
288
"""Commit the contents accrued within the current write group.
290
:param suppress_errors: if true, abort_write_group will catch and log
291
unexpected errors that happen during the abort, rather than
292
allowing them to propagate. Defaults to False.
294
:seealso: start_write_group.
296
if self._write_group is not self.get_transaction():
297
# has an unlock or relock occured ?
300
'(suppressed) mismatched lock context and write group. %r, %r',
301
self._write_group, self.get_transaction())
303
raise errors.BzrError(
304
'mismatched lock context and write group. %r, %r' %
305
(self._write_group, self.get_transaction()))
307
self._abort_write_group()
308
except Exception as exc:
309
self._write_group = None
310
if not suppress_errors:
312
mutter('abort_write_group failed')
313
log_exception_quietly()
314
note(gettext('brz: ERROR (ignored): %s'), exc)
315
self._write_group = None
317
def _abort_write_group(self):
318
"""Template method for per-repository write group cleanup.
320
This is called during abort before the write group is considered to be
321
finished and should cleanup any internal state accrued during the write
322
group. There is no requirement that data handed to the repository be
323
*not* made available - this is not a rollback - but neither should any
324
attempt be made to ensure that data added is fully commited. Abort is
325
invoked when an error has occured so futher disk or network operations
326
may not be possible or may error and if possible should not be
330
def add_fallback_repository(self, repository):
331
"""Add a repository to use for looking up data not held locally.
333
:param repository: A repository.
335
raise NotImplementedError(self.add_fallback_repository)
337
def _check_fallback_repository(self, repository):
338
"""Check that this repository can fallback to repository safely.
340
Raise an error if not.
342
:param repository: A repository to fallback to.
344
return InterRepository._assert_same_model(self, repository)
346
def all_revision_ids(self):
347
"""Returns a list of all the revision ids in the repository.
349
This is conceptually deprecated because code should generally work on
350
the graph reachable from a particular revision, and ignore any other
351
revisions that might be present. There is no direct replacement
354
if 'evil' in debug.debug_flags:
355
mutter_callsite(2, "all_revision_ids is linear with history.")
356
return self._all_revision_ids()
358
def _all_revision_ids(self):
359
"""Returns a list of all the revision ids in the repository.
361
These are in as much topological order as the underlying store can
364
raise NotImplementedError(self._all_revision_ids)
366
def break_lock(self):
367
"""Break a lock if one is present from another instance.
369
Uses the ui factory to ask for confirmation if the lock may be from
372
self.control_files.break_lock()
375
def create(controldir):
376
"""Construct the current default format repository in controldir."""
377
return RepositoryFormat.get_default_format().initialize(controldir)
379
def __init__(self, _format, controldir, control_files):
380
"""instantiate a Repository.
382
:param _format: The format of the repository on disk.
383
:param controldir: The ControlDir of the repository.
384
:param control_files: Control files to use for locking, etc.
386
# In the future we will have a single api for all stores for
387
# getting file texts, inventories and revisions, then
388
# this construct will accept instances of those things.
389
super(Repository, self).__init__()
390
self._format = _format
391
# the following are part of the public API for Repository:
392
self.controldir = controldir
393
self.control_files = control_files
395
self._write_group = None
396
# Additional places to query for data.
397
self._fallback_repositories = []
400
def user_transport(self):
401
return self.controldir.user_transport
404
def control_transport(self):
405
return self._transport
408
if self._fallback_repositories:
409
return '%s(%r, fallback_repositories=%r)' % (
410
self.__class__.__name__,
412
self._fallback_repositories)
414
return '%s(%r)' % (self.__class__.__name__,
417
def _has_same_fallbacks(self, other_repo):
418
"""Returns true if the repositories have the same fallbacks."""
419
my_fb = self._fallback_repositories
420
other_fb = other_repo._fallback_repositories
421
if len(my_fb) != len(other_fb):
423
for f, g in zip(my_fb, other_fb):
424
if not f.has_same_location(g):
428
def has_same_location(self, other):
429
"""Returns a boolean indicating if this repository is at the same
430
location as another repository.
432
This might return False even when two repository objects are accessing
433
the same physical repository via different URLs.
435
if self.__class__ is not other.__class__:
437
return (self.control_url == other.control_url)
439
def is_in_write_group(self):
440
"""Return True if there is an open write group.
442
:seealso: start_write_group.
444
return self._write_group is not None
447
return self.control_files.is_locked()
449
def is_write_locked(self):
450
"""Return True if this object is write locked."""
451
return self.is_locked() and self.control_files._lock_mode == 'w'
453
def lock_write(self, token=None):
454
"""Lock this repository for writing.
456
This causes caching within the repository obejct to start accumlating
457
data during reads, and allows a 'write_group' to be obtained. Write
458
groups must be used for actual data insertion.
460
A token should be passed in if you know that you have locked the object
461
some other way, and need to synchronise this object's state with that
464
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
466
:param token: if this is already locked, then lock_write will fail
467
unless the token matches the existing lock.
468
:returns: a token if this instance supports tokens, otherwise None.
469
:raises TokenLockingNotSupported: when a token is given but this
470
instance doesn't support using token locks.
471
:raises MismatchedToken: if the specified token doesn't match the token
472
of the existing lock.
473
:seealso: start_write_group.
474
:return: A RepositoryWriteLockResult.
476
locked = self.is_locked()
477
token = self.control_files.lock_write(token=token)
479
self._warn_if_deprecated()
481
for repo in self._fallback_repositories:
482
# Writes don't affect fallback repos
485
return RepositoryWriteLockResult(self.unlock, token)
488
"""Lock the repository for read operations.
490
:return: An object with an unlock method which will release the lock
493
locked = self.is_locked()
494
self.control_files.lock_read()
496
self._warn_if_deprecated()
498
for repo in self._fallback_repositories:
501
return LogicalLockResult(self.unlock)
503
def get_physical_lock_status(self):
504
return self.control_files.get_physical_lock_status()
506
def leave_lock_in_place(self):
507
"""Tell this repository not to release the physical lock when this
510
If lock_write doesn't return a token, then this method is not supported.
512
self.control_files.leave_in_place()
514
def dont_leave_lock_in_place(self):
515
"""Tell this repository to release the physical lock when this
516
object is unlocked, even if it didn't originally acquire it.
518
If lock_write doesn't return a token, then this method is not supported.
520
self.control_files.dont_leave_in_place()
522
def gather_stats(self, revid=None, committers=None):
523
"""Gather statistics from a revision id.
525
:param revid: The revision id to gather statistics from, if None, then
526
no revision specific statistics are gathered.
527
:param committers: Optional parameter controlling whether to grab
528
a count of committers from the revision specific statistics.
529
:return: A dictionary of statistics. Currently this contains:
530
committers: The number of committers if requested.
531
firstrev: A tuple with timestamp, timezone for the penultimate left
532
most ancestor of revid, if revid is not the NULL_REVISION.
533
latestrev: A tuple with timestamp, timezone for revid, if revid is
534
not the NULL_REVISION.
535
revisions: The total revision count in the repository.
536
size: An estimate disk size of the repository in bytes.
538
with self.lock_read():
540
if revid and committers:
541
result['committers'] = 0
542
if revid and revid != _mod_revision.NULL_REVISION:
543
graph = self.get_graph()
545
all_committers = set()
546
revisions = [r for (r, p) in graph.iter_ancestry([revid])
547
if r != _mod_revision.NULL_REVISION]
550
# ignore the revisions in the middle - just grab first and last
551
revisions = revisions[0], revisions[-1]
552
for revision in self.get_revisions(revisions):
553
if not last_revision:
554
last_revision = revision
556
all_committers.add(revision.committer)
557
first_revision = revision
559
result['committers'] = len(all_committers)
560
result['firstrev'] = (first_revision.timestamp,
561
first_revision.timezone)
562
result['latestrev'] = (last_revision.timestamp,
563
last_revision.timezone)
566
def find_branches(self, using=False):
567
"""Find branches underneath this repository.
569
This will include branches inside other branches.
571
:param using: If True, list only branches using this repository.
573
if using and not self.is_shared():
574
for branch in self.controldir.list_branches():
578
class Evaluator(object):
581
self.first_call = True
583
def __call__(self, controldir):
584
# On the first call, the parameter is always the controldir
585
# containing the current repo.
586
if not self.first_call:
588
repository = controldir.open_repository()
589
except errors.NoRepositoryPresent:
592
return False, ([], repository)
593
self.first_call = False
594
value = (controldir.list_branches(), None)
597
for branches, repository in controldir.ControlDir.find_controldirs(
598
self.user_transport, evaluate=Evaluator()):
599
if branches is not None:
600
for branch in branches:
602
if not using and repository is not None:
603
for branch in repository.find_branches():
606
def search_missing_revision_ids(self, other,
607
find_ghosts=True, revision_ids=None, if_present_ids=None,
609
"""Return the revision ids that other has that this does not.
611
These are returned in topological order.
613
revision_ids: only return revision ids included by revision_id.
615
with self.lock_read():
616
return InterRepository.get(other, self).search_missing_revision_ids(
617
find_ghosts=find_ghosts, revision_ids=revision_ids,
618
if_present_ids=if_present_ids, limit=limit)
622
"""Open the repository rooted at base.
624
For instance, if the repository is at URL/.bzr/repository,
625
Repository.open(URL) -> a Repository instance.
627
control = controldir.ControlDir.open(base)
628
return control.open_repository()
630
def copy_content_into(self, destination, revision_id=None):
631
"""Make a complete copy of the content in self into destination.
633
This is a destructive operation! Do not use it on existing
636
return InterRepository.get(self, destination).copy_content(revision_id)
638
def commit_write_group(self):
639
"""Commit the contents accrued within the current write group.
641
:seealso: start_write_group.
643
:return: it may return an opaque hint that can be passed to 'pack'.
645
if self._write_group is not self.get_transaction():
646
# has an unlock or relock occured ?
647
raise errors.BzrError('mismatched lock context %r and '
649
(self.get_transaction(), self._write_group))
650
result = self._commit_write_group()
651
self._write_group = None
654
def _commit_write_group(self):
655
"""Template method for per-repository write group cleanup.
657
This is called before the write group is considered to be
658
finished and should ensure that all data handed to the repository
659
for writing during the write group is safely committed (to the
660
extent possible considering file system caching etc).
663
def suspend_write_group(self):
664
"""Suspend a write group.
666
:raise UnsuspendableWriteGroup: If the write group can not be
668
:return: List of tokens
670
raise errors.UnsuspendableWriteGroup(self)
672
def refresh_data(self):
673
"""Re-read any data needed to synchronise with disk.
675
This method is intended to be called after another repository instance
676
(such as one used by a smart server) has inserted data into the
677
repository. On all repositories this will work outside of write groups.
678
Some repository formats (pack and newer for breezy native formats)
679
support refresh_data inside write groups. If called inside a write
680
group on a repository that does not support refreshing in a write group
681
IsInWriteGroupError will be raised.
685
def resume_write_group(self, tokens):
686
if not self.is_write_locked():
687
raise errors.NotWriteLocked(self)
688
if self._write_group:
689
raise errors.BzrError('already in a write group')
690
self._resume_write_group(tokens)
691
# so we can detect unlock/relock - the write group is now entered.
692
self._write_group = self.get_transaction()
694
def _resume_write_group(self, tokens):
695
raise errors.UnsuspendableWriteGroup(self)
697
def fetch(self, source, revision_id=None, find_ghosts=False, lossy=False):
698
"""Fetch the content required to construct revision_id from source.
700
If revision_id is None, then all content is copied.
702
fetch() may not be used when the repository is in a write group -
703
either finish the current write group before using fetch, or use
704
fetch before starting the write group.
706
:param find_ghosts: Find and copy revisions in the source that are
707
ghosts in the target (and not reachable directly by walking out to
708
the first-present revision in target from revision_id).
709
:param revision_id: If specified, all the content needed for this
710
revision ID will be copied to the target. Fetch will determine for
711
itself which content needs to be copied.
712
:return: A FetchResult object
714
if self.is_in_write_group():
715
raise errors.InternalBzrError(
716
"May not fetch while in a write group.")
717
# fast path same-url fetch operations
718
# TODO: lift out to somewhere common with RemoteRepository
719
# <https://bugs.launchpad.net/bzr/+bug/401646>
720
if (self.has_same_location(source)
721
and self._has_same_fallbacks(source)):
722
# check that last_revision is in 'from' and then return a
724
if (revision_id is not None and
725
not _mod_revision.is_null(revision_id)):
726
self.get_revision(revision_id)
728
inter = InterRepository.get(source, self)
730
revision_id=revision_id, find_ghosts=find_ghosts, lossy=lossy)
732
def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
733
timezone=None, committer=None, revprops=None,
734
revision_id=None, lossy=False):
735
"""Obtain a CommitBuilder for this repository.
737
:param branch: Branch to commit to.
738
:param parents: Revision ids of the parents of the new revision.
739
:param config_stack: Configuration stack to use.
740
:param timestamp: Optional timestamp recorded for commit.
741
:param timezone: Optional timezone for timestamp.
742
:param committer: Optional committer to set for commit.
743
:param revprops: Optional dictionary of revision properties.
744
:param revision_id: Optional revision id.
745
:param lossy: Whether to discard data that can not be natively
746
represented, when pushing to a foreign VCS
748
raise NotImplementedError(self.get_commit_builder)
750
@only_raises(errors.LockNotHeld, errors.LockBroken)
752
if (self.control_files._lock_count == 1 and
753
self.control_files._lock_mode == 'w'):
754
if self._write_group is not None:
755
self.abort_write_group()
756
self.control_files.unlock()
757
raise errors.BzrError(
758
'Must end write groups before releasing write locks.')
759
self.control_files.unlock()
760
if self.control_files._lock_count == 0:
761
for repo in self._fallback_repositories:
764
def clone(self, controldir, revision_id=None):
765
"""Clone this repository into controldir using the current format.
767
Currently no check is made that the format of this repository and
768
the bzrdir format are compatible. FIXME RBC 20060201.
770
:return: The newly created destination repository.
772
with self.lock_read():
773
# TODO: deprecate after 0.16; cloning this with all its settings is
774
# probably not very useful -- mbp 20070423
775
dest_repo = self._create_sprouting_repo(
776
controldir, shared=self.is_shared())
777
self.copy_content_into(dest_repo, revision_id)
780
def start_write_group(self):
781
"""Start a write group in the repository.
783
Write groups are used by repositories which do not have a 1:1 mapping
784
between file ids and backend store to manage the insertion of data from
785
both fetch and commit operations.
787
A write lock is required around the
788
start_write_group/commit_write_group for the support of lock-requiring
791
One can only insert data into a repository inside a write group.
795
if not self.is_write_locked():
796
raise errors.NotWriteLocked(self)
797
if self._write_group:
798
raise errors.BzrError('already in a write group')
799
self._start_write_group()
800
# so we can detect unlock/relock - the write group is now entered.
801
self._write_group = self.get_transaction()
803
def _start_write_group(self):
804
"""Template method for per-repository write group startup.
806
This is called before the write group is considered to be
810
def sprout(self, to_bzrdir, revision_id=None):
811
"""Create a descendent repository for new development.
813
Unlike clone, this does not copy the settings of the repository.
815
with self.lock_read():
816
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
817
dest_repo.fetch(self, revision_id=revision_id)
820
def _create_sprouting_repo(self, a_controldir, shared):
822
a_controldir._format, self.controldir._format.__class__):
823
# use target default format.
824
dest_repo = a_controldir.create_repository()
826
# Most control formats need the repository to be specifically
827
# created, but on some old all-in-one formats it's not needed
829
dest_repo = self._format.initialize(
830
a_controldir, shared=shared)
831
except errors.UninitializableFormat:
832
dest_repo = a_controldir.open_repository()
835
def has_revision(self, revision_id):
836
"""True if this repository has a copy of the revision."""
837
with self.lock_read():
838
return revision_id in self.has_revisions((revision_id,))
840
def has_revisions(self, revision_ids):
841
"""Probe to find out the presence of multiple revisions.
843
:param revision_ids: An iterable of revision_ids.
844
:return: A set of the revision_ids that were present.
846
raise NotImplementedError(self.has_revisions)
848
def get_revision(self, revision_id):
849
"""Return the Revision object for a named revision."""
850
with self.lock_read():
851
return self.get_revisions([revision_id])[0]
853
def get_revision_reconcile(self, revision_id):
854
"""'reconcile' helper routine that allows access to a revision always.
856
This variant of get_revision does not cross check the weave graph
857
against the revision one as get_revision does: but it should only
858
be used by reconcile, or reconcile-alike commands that are correcting
859
or testing the revision graph.
861
raise NotImplementedError(self.get_revision_reconcile)
863
def get_revisions(self, revision_ids):
864
"""Get many revisions at once.
866
Repositories that need to check data on every revision read should
867
subclass this method.
870
for revid, rev in self.iter_revisions(revision_ids):
872
raise errors.NoSuchRevision(self, revid)
874
return [revs[revid] for revid in revision_ids]
876
def iter_revisions(self, revision_ids):
877
"""Iterate over revision objects.
879
:param revision_ids: An iterable of revisions to examine. None may be
880
passed to request all revisions known to the repository. Note that
881
not all repositories can find unreferenced revisions; for those
882
repositories only referenced ones will be returned.
883
:return: An iterator of (revid, revision) tuples. Absent revisions (
884
those asked for but not available) are returned as (revid, None).
885
N.B.: Revisions are not necessarily yielded in order.
887
raise NotImplementedError(self.iter_revisions)
889
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
890
"""Produce a generator of revision deltas.
892
Note that the input is a sequence of REVISIONS, not revision_ids.
893
Trees will be held in memory until the generator exits.
894
Each delta is relative to the revision's lefthand predecessor.
896
:param specific_fileids: if not None, the result is filtered
897
so that only those file-ids, their parents and their
898
children are included.
900
raise NotImplementedError(self.get_deltas_for_revisions)
902
def get_revision_delta(self, revision_id):
903
"""Return the delta for one revision.
905
The delta is relative to the left-hand predecessor of the
908
with self.lock_read():
909
r = self.get_revision(revision_id)
910
return list(self.get_deltas_for_revisions([r]))[0]
912
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
913
raise NotImplementedError(self.store_revision_signature)
915
def add_signature_text(self, revision_id, signature):
916
"""Store a signature text for a revision.
918
:param revision_id: Revision id of the revision
919
:param signature: Signature text.
921
raise NotImplementedError(self.add_signature_text)
923
def iter_files_bytes(self, desired_files):
924
"""Iterate through file versions.
926
Files will not necessarily be returned in the order they occur in
927
desired_files. No specific order is guaranteed.
929
Yields pairs of identifier, bytes_iterator. identifier is an opaque
930
value supplied by the caller as part of desired_files. It should
931
uniquely identify the file version in the caller's context. (Examples:
932
an index number or a TreeTransform trans_id.)
934
:param desired_files: a list of (file_id, revision_id, identifier)
937
raise NotImplementedError(self.iter_files_bytes)
939
def get_rev_id_for_revno(self, revno, known_pair):
940
"""Return the revision id of a revno, given a later (revno, revid)
941
pair in the same history.
943
:return: if found (True, revid). If the available history ran out
944
before reaching the revno, then this returns
945
(False, (closest_revno, closest_revid)).
947
known_revno, known_revid = known_pair
948
partial_history = [known_revid]
949
distance_from_known = known_revno - revno
950
if distance_from_known < 0:
951
raise errors.RevnoOutOfBounds(revno, (0, known_revno))
954
self, partial_history, stop_index=distance_from_known)
955
except errors.RevisionNotPresent as err:
956
if err.revision_id == known_revid:
957
# The start revision (known_revid) wasn't found.
958
raise errors.NoSuchRevision(self, known_revid)
959
# This is a stacked repository with no fallbacks, or a there's a
960
# left-hand ghost. Either way, even though the revision named in
961
# the error isn't in this repo, we know it's the next step in this
963
partial_history.append(err.revision_id)
964
if len(partial_history) <= distance_from_known:
965
# Didn't find enough history to get a revid for the revno.
966
earliest_revno = known_revno - len(partial_history) + 1
967
return (False, (earliest_revno, partial_history[-1]))
968
if len(partial_history) - 1 > distance_from_known:
969
raise AssertionError('_iter_for_revno returned too much history')
970
return (True, partial_history[-1])
973
"""Return True if this repository is flagged as a shared repository."""
974
raise NotImplementedError(self.is_shared)
976
def reconcile(self, other=None, thorough=False):
977
"""Reconcile this repository."""
978
raise NotImplementedError(self.reconcile)
980
def _refresh_data(self):
981
"""Helper called from lock_* to ensure coherency with disk.
983
The default implementation does nothing; it is however possible
984
for repositories to maintain loaded indices across multiple locks
985
by checking inside their implementation of this method to see
986
whether their indices are still valid. This depends of course on
987
the disk format being validatable in this manner. This method is
988
also called by the refresh_data() public interface to cause a refresh
989
to occur while in a write lock so that data inserted by a smart server
990
push operation is visible on the client's instance of the physical
994
def revision_tree(self, revision_id):
995
"""Return Tree for a revision on this branch.
997
`revision_id` may be NULL_REVISION for the empty tree revision.
999
raise NotImplementedError(self.revision_tree)
1001
def revision_trees(self, revision_ids):
1002
"""Return Trees for revisions in this repository.
1004
:param revision_ids: a sequence of revision-ids;
1005
a revision-id may not be None or b'null:'
1007
raise NotImplementedError(self.revision_trees)
1009
def pack(self, hint=None, clean_obsolete_packs=False):
1010
"""Compress the data within the repository.
1012
This operation only makes sense for some repository types. For other
1013
types it should be a no-op that just returns.
1015
This stub method does not require a lock, but subclasses should use
1016
self.write_lock as this is a long running call it's reasonable to
1017
implicitly lock for the user.
1019
:param hint: If not supplied, the whole repository is packed.
1020
If supplied, the repository may use the hint parameter as a
1021
hint for the parts of the repository to pack. A hint can be
1022
obtained from the result of commit_write_group(). Out of
1023
date hints are simply ignored, because concurrent operations
1024
can obsolete them rapidly.
1026
:param clean_obsolete_packs: Clean obsolete packs immediately after
1030
def get_transaction(self):
1031
return self.control_files.get_transaction()
1033
def get_parent_map(self, revision_ids):
1034
"""See graph.StackedParentsProvider.get_parent_map"""
1035
raise NotImplementedError(self.get_parent_map)
1037
def _get_parent_map_no_fallbacks(self, revision_ids):
1038
"""Same as Repository.get_parent_map except doesn't query fallbacks."""
1039
# revisions index works in keys; this just works in revisions
1040
# therefore wrap and unwrap
1043
for revision_id in revision_ids:
1044
if revision_id == _mod_revision.NULL_REVISION:
1045
result[revision_id] = ()
1046
elif revision_id is None:
1047
raise ValueError('get_parent_map(None) is not valid')
1049
query_keys.append((revision_id,))
1050
vf = self.revisions.without_fallbacks()
1051
for (revision_id,), parent_keys in (
1052
vf.get_parent_map(query_keys).items()):
1054
result[revision_id] = tuple([parent_revid
1055
for (parent_revid,) in parent_keys])
1057
result[revision_id] = (_mod_revision.NULL_REVISION,)
1060
def _make_parents_provider(self):
1061
if not self._format.supports_external_lookups:
1063
return graph.StackedParentsProvider(_LazyListJoin(
1064
[self._make_parents_provider_unstacked()],
1065
self._fallback_repositories))
1067
def _make_parents_provider_unstacked(self):
1068
return graph.CallableToParentsProviderAdapter(
1069
self._get_parent_map_no_fallbacks)
1071
def get_known_graph_ancestry(self, revision_ids):
1072
"""Return the known graph for a set of revision ids and their ancestors.
1074
raise NotImplementedError(self.get_known_graph_ancestry)
1076
def get_file_graph(self):
1077
"""Return the graph walker for files."""
1078
raise NotImplementedError(self.get_file_graph)
1080
def get_graph(self, other_repository=None):
1081
"""Return the graph walker for this repository format"""
1082
parents_provider = self._make_parents_provider()
1083
if (other_repository is not None and
1084
not self.has_same_location(other_repository)):
1085
parents_provider = graph.StackedParentsProvider(
1086
[parents_provider, other_repository._make_parents_provider()])
1087
return graph.Graph(parents_provider)
1089
def set_make_working_trees(self, new_value):
1090
"""Set the policy flag for making working trees when creating branches.
1092
This only applies to branches that use this repository.
1094
The default is 'True'.
1095
:param new_value: True to restore the default, False to disable making
1098
raise NotImplementedError(self.set_make_working_trees)
1100
def make_working_trees(self):
1101
"""Returns the policy for making working trees on new branches."""
1102
raise NotImplementedError(self.make_working_trees)
1104
def sign_revision(self, revision_id, gpg_strategy):
1105
raise NotImplementedError(self.sign_revision)
1107
def verify_revision_signature(self, revision_id, gpg_strategy):
1108
"""Verify the signature on a revision.
1110
:param revision_id: the revision to verify
1111
:gpg_strategy: the GPGStrategy object to used
1113
:return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
1115
raise NotImplementedError(self.verify_revision_signature)
1117
def verify_revision_signatures(self, revision_ids, gpg_strategy):
1118
"""Verify revision signatures for a number of revisions.
1120
:param revision_id: the revision to verify
1121
:gpg_strategy: the GPGStrategy object to used
1122
:return: Iterator over tuples with revision id, result and keys
1124
with self.lock_read():
1125
for revid in revision_ids:
1126
(result, key) = self.verify_revision_signature(revid, gpg_strategy)
1127
yield revid, result, key
1129
def has_signature_for_revision_id(self, revision_id):
1130
"""Query for a revision signature for revision_id in the repository."""
1131
raise NotImplementedError(self.has_signature_for_revision_id)
1133
def get_signature_text(self, revision_id):
1134
"""Return the text for a signature."""
1135
raise NotImplementedError(self.get_signature_text)
1137
def check(self, revision_ids=None, callback_refs=None, check_repo=True):
1138
"""Check consistency of all history of given revision_ids.
1140
Different repository implementations should override _check().
1142
:param revision_ids: A non-empty list of revision_ids whose ancestry
1143
will be checked. Typically the last revision_id of a branch.
1144
:param callback_refs: A dict of check-refs to resolve and callback
1145
the check/_check method on the items listed as wanting the ref.
1147
:param check_repo: If False do not check the repository contents, just
1148
calculate the data callback_refs requires and call them back.
1150
return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
1151
check_repo=check_repo)
1153
def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
1154
raise NotImplementedError(self.check)
1156
def _warn_if_deprecated(self, branch=None):
1157
if not self._format.is_deprecated():
1159
global _deprecation_warning_done
1160
if _deprecation_warning_done:
1164
conf = config.GlobalStack()
1166
conf = branch.get_config_stack()
1167
if 'format_deprecation' in conf.get('suppress_warnings'):
1169
warning("Format %s for %s is deprecated -"
1170
" please use 'brz upgrade' to get better performance"
1171
% (self._format, self.controldir.transport.base))
1173
_deprecation_warning_done = True
1175
def supports_rich_root(self):
1176
return self._format.rich_root_data
1178
def _check_ascii_revisionid(self, revision_id, method):
1179
"""Private helper for ascii-only repositories."""
1180
# weave repositories refuse to store revisionids that are non-ascii.
1181
if revision_id is not None:
1182
# weaves require ascii revision ids.
1183
if isinstance(revision_id, str):
1185
revision_id.encode('ascii')
1186
except UnicodeEncodeError:
1187
raise errors.NonAsciiRevisionId(method, self)
1190
revision_id.decode('ascii')
1191
except UnicodeDecodeError:
1192
raise errors.NonAsciiRevisionId(method, self)
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
    """Repository format registry."""

    def get_default(self):
        """Return the current default format."""
        return controldir.format_registry.make_controldir('default').repository_format
network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = RepositoryFormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
#####################################################################
# Repository Formats

class RepositoryFormat(controldir.ControlComponentFormat):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * a optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingcontroldir - the controldir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    # NOTE(review): this attribute line was lost to corruption; restored from
    # upstream — confirm against the canonical source.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # serializer.
    pack_compresses = False
    # Does the repository storage understand references to trees?
    supports_tree_reference = None
    # Is the format experimental ?
    experimental = False
    # Does this repository format escape funky characters, or does it create
    # files with similar names as the versioned files in its contents on disk
    # and back again?
    supports_funky_characters = None
    # Does this repository format support leaving locks?
    supports_leaving_lock = None
    # Does this format support the full VersionedFiles interface?
    supports_full_versioned_files = None
    # Does this format support signing revision signatures?
    supports_revision_signatures = True
    # Can the revision graph have incorrect parents?
    revision_graph_can_have_wrong_parents = None
    # Does this format support setting revision ids?
    supports_setting_revision_ids = True
    # Does this format support rich root data?
    rich_root_data = None
    # Does this format support explicitly versioned directories?
    supports_versioned_directories = None
    # Can other repositories be nested into one of this format?
    supports_nesting_repositories = None
    # Is it possible for revisions to be present without being referenced
    # somewhere?
    supports_unreferenced_revisions = None
    # Does this format store the current Branch.nick in a revision when
    # creating commits?
    supports_storing_branch_nick = True
    # Does the format support overriding the transport to use
    supports_overriding_transport = True
    # Does the format support setting custom revision properties?
    supports_custom_revision_properties = True
    # Does the format record per-file revision metadata?
    records_per_file_revision = True
1313
return "%s()" % self.__class__.__name__
1315
def __eq__(self, other):
1316
# format objects are generally stateless
1317
return isinstance(other, self.__class__)
1319
def __ne__(self, other):
1320
return not self == other
1322
def get_format_description(self):
1323
"""Return the short description for this format."""
1324
raise NotImplementedError(self.get_format_description)
1326
def initialize(self, controldir, shared=False):
1327
"""Initialize a repository of this format in controldir.
1329
:param controldir: The controldir to put the new repository in it.
1330
:param shared: The repository should be initialized as a sharable one.
1331
:returns: The new repository object.
1333
This may raise UninitializableFormat if shared repository are not
1334
compatible the controldir.
1336
raise NotImplementedError(self.initialize)
1338
def is_supported(self):
1339
"""Is this format supported?
1341
Supported formats must be initializable and openable.
1342
Unsupported formats may not support initialization or committing or
1343
some other features depending on the reason for not being supported.
1347
def is_deprecated(self):
1348
"""Is this format deprecated?
1350
Deprecated formats may trigger a user-visible warning recommending
1351
the user to upgrade. They are still fully supported.
1355
def network_name(self):
1356
"""A simple byte string uniquely identifying this format for RPC calls.
1358
MetaDir repository formats use their disk format string to identify the
1359
repository over the wire. All in one formats such as bzr < 0.8, and
1360
foreign formats like svn/git and hg should use some marker which is
1361
unique and immutable.
1363
raise NotImplementedError(self.network_name)
1365
def check_conversion_target(self, target_format):
1366
if self.rich_root_data and not target_format.rich_root_data:
1367
raise errors.BadConversionTarget(
1368
'Does not support rich root data.', target_format,
1370
if (self.supports_tree_reference
1371
and not getattr(target_format, 'supports_tree_reference', False)):
1372
raise errors.BadConversionTarget(
1373
'Does not support nested trees', target_format,
1376
def open(self, controldir, _found=False):
1377
"""Return an instance of this format for a controldir.
1379
_found is a private parameter, do not use it.
1381
raise NotImplementedError(self.open)
1383
def _run_post_repo_init_hooks(self, repository, controldir, shared):
1384
from .controldir import ControlDir, RepoInitHookParams
1385
hooks = ControlDir.hooks['post_repo_init']
1388
params = RepoInitHookParams(repository, self, controldir, shared)
# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in breezy.bzr.knitrepo now.  When an instance of one of these is
# needed, it's constructed directly by the ControlDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    b'Bazaar-NG Knit Repository Format 1',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    b'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    b'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    b'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )
format_registry.register_lazy(
    b'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    (b"Bazaar development format 2 with subtree support "
     b"(needs bzr.dev from before 1.8)\n"),
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
format_registry.register_lazy(
    b'Bazaar development format 8\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    # NOTE(review): this attribute line was lost to corruption; restored as
    # the InterObject optimiser-list convention — confirm against upstream.
    _optimisers = []
    """The available optimised InterRepository types."""
1496
def copy_content(self, revision_id=None):
1497
"""Make a complete copy of the content in self into destination.
1499
This is a destructive operation! Do not use it on existing
1502
:param revision_id: Only copy the content needed to construct
1503
revision_id and its parents.
1505
with self.lock_write():
1507
self.target.set_make_working_trees(
1508
self.source.make_working_trees())
1509
except NotImplementedError:
1511
self.target.fetch(self.source, revision_id=revision_id)
1513
def fetch(self, revision_id=None, find_ghosts=False, lossy=False):
1514
"""Fetch the content required to construct revision_id.
1516
The content is copied from self.source to self.target.
1518
:param revision_id: if None all content is copied, if NULL_REVISION no
1520
:return: FetchResult
1522
raise NotImplementedError(self.fetch)
1524
def search_missing_revision_ids(
1525
self, find_ghosts=True, revision_ids=None, if_present_ids=None,
1527
"""Return the revision ids that source has that target does not.
1529
:param revision_ids: return revision ids included by these
1530
revision_ids. NoSuchRevision will be raised if any of these
1531
revisions are not present.
1532
:param if_present_ids: like revision_ids, but will not cause
1533
NoSuchRevision if any of these are absent, instead they will simply
1534
not be in the result. This is useful for e.g. finding revisions
1535
to fetch for tags, which may reference absent revisions.
1536
:param find_ghosts: If True find missing revisions in deep history
1537
rather than just finding the surface difference.
1538
:param limit: Maximum number of revisions to return, topologically
1540
:return: A breezy.graph.SearchResult.
1542
raise NotImplementedError(self.search_missing_revision_ids)
1545
def _same_model(source, target):
1546
"""True if source and target have the same data representation.
1548
Note: this is always called on the base class; overriding it in a
1549
subclass will have no effect.
1552
InterRepository._assert_same_model(source, target)
1554
except errors.IncompatibleRepositories as e:
1558
def _assert_same_model(source, target):
1559
"""Raise an exception if two repositories do not use the same model.
1561
if source.supports_rich_root() != target.supports_rich_root():
1562
raise errors.IncompatibleRepositories(source, target,
1563
"different rich-root support")
1564
if source._serializer != target._serializer:
1565
raise errors.IncompatibleRepositories(source, target,
1566
"different serializers")
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format
1582
def convert(self, repo, pb):
1583
"""Perform the conversion of to_convert, giving feedback via pb.
1585
:param to_convert: The disk object to convert.
1586
:param pb: a progress bar to use for progress information.
1588
with ui.ui_factory.nested_progress_bar() as pb:
1591
# this is only useful with metadir layouts - separated repo content.
1592
# trigger an assertion if not such
1593
repo._format.get_format_string()
1594
self.repo_dir = repo.controldir
1595
pb.update(gettext('Moving repository to repository.backup'))
1596
self.repo_dir.transport.move('repository', 'repository.backup')
1597
backup_transport = self.repo_dir.transport.clone(
1598
'repository.backup')
1599
repo._format.check_conversion_target(self.target_format)
1600
self.source_repo = repo._format.open(self.repo_dir,
1602
_override_transport=backup_transport)
1603
pb.update(gettext('Creating new repository'))
1604
converted = self.target_format.initialize(self.repo_dir,
1605
self.source_repo.is_shared())
1606
with converted.lock_write():
1607
pb.update(gettext('Copying content'))
1608
self.source_repo.copy_content_into(converted)
1609
pb.update(gettext('Deleting old repository content'))
1610
self.repo_dir.transport.delete_tree('repository.backup')
1611
ui.ui_factory.note(gettext('repository converted'))
def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        # Keep only parents that are themselves present in the graph; any
        # others are ghosts and are dropped.
        revision_graph[key] = tuple(parent for parent in parents
                                    if parent in revision_graph)
    return revision_graph
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    graph = repo.get_graph()
    iterator = graph.iter_lefthand_ancestry(start_revision,
                                            (_mod_revision.NULL_REVISION,))
    # NOTE(review): the try/while scaffolding was lost to corruption;
    # restored from upstream — confirm against the canonical source.
    try:
        # skip the last revision in the list
        next(iterator)
        while True:
            if (stop_index is not None and
                    len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = next(iterator)
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history to walk; the cache holds everything reachable.
        return
class _LazyListJoin(object):
    """An iterable yielding the contents of many lists as one list.

    Each iterator made from this will reflect the current contents of the lists
    at the time the iterator is made.

    This is used by Repository's _make_parents_provider implementation so that
    it is safe to do::

      pp = repo._make_parents_provider() # uses a list of fallback repos
      pp.add_fallback_repository(other_repo) # appends to that list
      result = pp.get_parent_map(...)
      # The result will include revs from other_repo
    """
1674
def __init__(self, *list_parts):
1675
self.list_parts = list_parts
1679
for list_part in self.list_parts:
1680
full_list.extend(list_part)
1681
return iter(full_list)
1684
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,