# Copyright (C) 2005-2011 Canonical Ltd
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from __future__ import absolute_import

from .lazy_import import lazy_import
lazy_import(globals(), """
    revision as _mod_revision,
    testament as _mod_testament,
from breezy.bundle import serializer
from breezy.i18n import gettext

from .decorators import only_raises
from .inter import InterObject
from .lock import _RelockDebugMixin, LogicalLockResult
    log_exception_quietly, note, mutter, mutter_callsite, warning)

# Old formats display a warning, but only once
_deprecation_warning_done = False
class IsInWriteGroupError(errors.InternalBzrError):

    _fmt = "May not refresh_data of repo %(repo)s while in a write group."

    def __init__(self, repo):
        errors.InternalBzrError.__init__(self, repo=repo)


class CannotSetRevisionId(errors.BzrError):

    _fmt = "Repository format does not support setting revision ids."


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.

    # all clients should supply tree roots.
    record_root_entry = True
    # whether this commit builder will automatically update the branch that is
    updates_branch = False

    def __init__(self, repository, parents, config_stack, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that can not be natively
            represented, when pushing to a foreign VCS
        self._config_stack = config_stack
        if committer is None:
            self._committer = self._config_stack.get('email')
        elif not isinstance(committer, text_type):
            self._committer = committer.decode()  # throw if non-ascii
            self._committer = committer
        self.parents = parents
        self.repository = repository
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)
        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)
            self._timezone = osutils.local_time_offset()
            self._timezone = int(timezone)
        self._generate_revision_if_needed(revision_id)
    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        :return: True if any changes have occurred.
        raise NotImplementedError(self.any_changes)

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        # TODO(jelmer): Make this repository-format specific
        raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in viewitems(revprops):
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, (text_type, str)):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            # TODO(jelmer): Make this repository-format specific
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        raise NotImplementedError(self.commit)

        """Abort the commit that is being built.
        raise NotImplementedError(self.abort)

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a
        RevisionTree representing the newly committed tree. This is
        preferred to calling Repository.revision_tree() because that may
        require deserializing the inventory, while we already have a copy in
        raise NotImplementedError(self.revision_tree)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        raise NotImplementedError(self.finish_inventory)

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._committer, self._timestamp)

    def _generate_revision_if_needed(self, revision_id):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        if not self.repository._format.supports_setting_revision_ids:
            if revision_id is not None:
                raise CannotSetRevisionId()
        if revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
            self._new_revision_id = revision_id
            self.random_revid = False

    def record_iter_changes(self, tree, basis_revision_id, iter_changes):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
        raise NotImplementedError(self.record_iter_changes)
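    # Added illustrative sketch (not part of the original source): the rough
    # calling sequence expected of a concrete CommitBuilder subclass; names
    # such as `builder`, `tree` and `changes` are hypothetical placeholders.
    #
    #     builder = repository.get_commit_builder(branch, parents, config_stack)
    #     list(builder.record_iter_changes(tree, basis_revision_id, changes))
    #     builder.finish_inventory()
    #     new_revid = builder.commit("commit message")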
class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository.

    :ivar repository_token: The token obtained from the underlying lock, or
    :ivar unlock: A callable which will unlock the lock.

    def __init__(self, unlock, repository_token):
        LogicalLockResult.__init__(self, unlock)
        self.repository_token = repository_token

        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,


######################################################################


class Repository(controldir.ControlComponent, _RelockDebugMixin):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    See VersionedFileRepository in breezy.vf_repository for the
    base class for most Bazaar repositories.

    def abort_write_group(self, suppress_errors=False):
        """Abort the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate. Defaults to False.

        :seealso: start_write_group.
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
                '(suppressed) mismatched lock context and write group. %r, %r',
                self._write_group, self.get_transaction())
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
            self._abort_write_group()
        except Exception as exc:
            self._write_group = None
            if not suppress_errors:
            mutter('abort_write_group failed')
            log_exception_quietly()
            note(gettext('brz: ERROR (ignored): %s'), exc)
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        raise NotImplementedError(self.add_fallback_repository)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        return InterRepository._assert_same_model(self, repository)

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        self.control_files.break_lock()

    def create(controldir):
        """Construct the current default format repository in controldir."""
        return RepositoryFormat.get_default_format().initialize(controldir)

    def __init__(self, _format, controldir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param controldir: The ControlDir of the repository.
        :param control_files: Control files to use for locking, etc.
        # In the future we will have a single api for all stores for
        # getting file texts, inventories and revisions, then
        # this construct will accept instances of those things.
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.controldir = controldir
        self.control_files = control_files
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []

    def user_transport(self):
        return self.controldir.user_transport

    def control_transport(self):
        return self._transport

        if self._fallback_repositories:
            return '%s(%r, fallback_repositories=%r)' % (
                self.__class__.__name__,
                self._fallback_repositories)
        return '%s(%r)' % (self.__class__.__name__,
    def _has_same_fallbacks(self, other_repo):
        """Returns true if the repositories have the same fallbacks."""
        my_fb = self._fallback_repositories
        other_fb = other_repo._fallback_repositories
        if len(my_fb) != len(other_fb):
        for f, g in zip(my_fb, other_fb):
            if not f.has_same_location(g):

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        if self.__class__ is not other.__class__:
        return (self.control_url == other.control_url)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        return self._write_group is not None

        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.
        :return: A RepositoryWriteLockResult.
        locked = self.is_locked()
        token = self.control_files.lock_write(token=token)
            self._warn_if_deprecated()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
        return RepositoryWriteLockResult(self.unlock, token)
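    # Added usage sketch (comment only, not original code): a write lock is
    # taken around a write group before inserting data; `repo` is a
    # hypothetical Repository instance. lock_write() results are used as
    # context managers elsewhere in this module.
    #
    #     with repo.lock_write():
    #         repo.start_write_group()
    #         try:
    #             ...  # insert data
    #             repo.commit_write_group()
    #         except Exception:
    #             repo.abort_write_group()
    #             raise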
        """Lock the repository for read operations.

        :return: An object with an unlock method which will release the lock
        locked = self.is_locked()
        self.control_files.lock_read()
            self._warn_if_deprecated()
            for repo in self._fallback_repositories:
        return LogicalLockResult(self.unlock)

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this

        If lock_write doesn't return a token, then this method is not supported.
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        self.control_files.dont_leave_in_place()

    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimate of the disk size of the repository in bytes.
        with self.lock_read():
            if revid and committers:
                result['committers'] = 0
            if revid and revid != _mod_revision.NULL_REVISION:
                graph = self.get_graph()
                all_committers = set()
                revisions = [r for (r, p) in graph.iter_ancestry([revid])
                             if r != _mod_revision.NULL_REVISION]
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
                for revision in self.get_revisions(revisions):
                    if not last_revision:
                        last_revision = revision
                        all_committers.add(revision.committer)
                    first_revision = revision
                result['committers'] = len(all_committers)
                result['firstrev'] = (first_revision.timestamp,
                                      first_revision.timezone)
                result['latestrev'] = (last_revision.timestamp,
                                       last_revision.timezone)
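    # Added example (comment only) of the shape of the returned statistics,
    # based on the docstring above; the values shown are illustrative only.
    #
    #     stats = repo.gather_stats(revid, committers=True)
    #     # stats -> {'committers': 3,
    #     #           'firstrev': (1130746521.0, 0),
    #     #           'latestrev': (1303862972.0, 3600),
    #     #           ...}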
    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        if using and not self.is_shared():
            return self.controldir.list_branches()

        class Evaluator(object):

                self.first_call = True

            def __call__(self, controldir):
                # On the first call, the parameter is always the controldir
                # containing the current repo.
                if not self.first_call:
                        repository = controldir.open_repository()
                    except errors.NoRepositoryPresent:
                        return False, ([], repository)
                self.first_call = False
                value = (controldir.list_branches(), None)

        for branches, repository in controldir.ControlDir.find_controldirs(
                self.user_transport, evaluate=Evaluator()):
            if branches is not None:
            if not using and repository is not None:
                ret.extend(repository.find_branches())

    def search_missing_revision_ids(self, other,
                                    find_ghosts=True, revision_ids=None, if_present_ids=None,
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_ids: only return revision ids included by revision_id.
        with self.lock_read():
            return InterRepository.get(other, self).search_missing_revision_ids(
                find_ghosts=find_ghosts, revision_ids=revision_ids,
                if_present_ids=if_present_ids, limit=limit)
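    # Added hedged sketch (not original code): finding revisions present in
    # another repository but absent here; `other_repo` is hypothetical, and
    # get_keys() is assumed to be available on the returned search result.
    #
    #     with repo.lock_read():
    #         search = repo.search_missing_revision_ids(other_repo)
    #         missing = search.get_keys()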
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        control = controldir.ControlDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.

        :return: it may return an opaque hint that can be passed to 'pack'.
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                                  (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).

    def suspend_write_group(self):
        """Suspend a write group.

        :raise UnsuspendableWriteGroup: If the write group can not be
        :return: List of tokens
        raise errors.UnsuspendableWriteGroup(self)

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. On all repositories this will work outside of write groups.
        Some repository formats (pack and newer for breezy native formats)
        support refresh_data inside write groups. If called inside a write
        group on a repository that does not support refreshing in a write group
        IsInWriteGroupError will be raised.

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, find_ghosts=False):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None, then all content is copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
                and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            if (revision_id is not None and
                    not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, find_ghosts=find_ghosts)
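    # Added minimal usage sketch (comment only): copying one revision and its
    # ancestry from another repository; `source_repo` and `revid` are
    # hypothetical placeholders.
    #
    #     with repo.lock_write():
    #         repo.fetch(source_repo, revision_id=revid)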
    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None, lossy=False):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config_stack: Configuration stack to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that can not be natively
            represented, when pushing to a foreign VCS
        raise NotImplementedError(self.get_commit_builder)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
        if (self.control_files._lock_count == 1 and
                self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            for repo in self._fallback_repositories:

    def clone(self, controldir, revision_id=None):
        """Clone this repository into controldir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        with self.lock_read():
            # TODO: deprecate after 0.16; cloning this with all its settings is
            # probably not very useful -- mbp 20070423
            dest_repo = self._create_sprouting_repo(
                controldir, shared=self.is_shared())
            self.copy_content_into(dest_repo, revision_id)

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the
        start_write_group/commit_write_group for the support of lock-requiring
        One can only insert data into a repository inside a write group.
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be

    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendant repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        with self.lock_read():
            dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
            dest_repo.fetch(self, revision_id=revision_id)

    def _create_sprouting_repo(self, a_controldir, shared):
                a_controldir._format, self.controldir._format.__class__):
            # use target default format.
            dest_repo = a_controldir.create_repository()
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
                dest_repo = self._format.initialize(
                    a_controldir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_controldir.open_repository()

    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        with self.lock_read():
            return revision_id in self.has_revisions((revision_id,))

    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        raise NotImplementedError(self.has_revisions)
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        with self.lock_read():
            return self.get_revisions([revision_id])[0]

    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        raise NotImplementedError(self.get_revision_reconcile)

    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        for revid, rev in self.iter_revisions(revision_ids):
                raise errors.NoSuchRevision(self, revid)
        return [revs[revid] for revid in revision_ids]

    def iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
            N.B.: Revisions are not necessarily yielded in order.
        raise NotImplementedError(self.iter_revisions)

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        raise NotImplementedError(self.get_deltas_for_revisions)

    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        with self.lock_read():
            r = self.get_revision(revision_id)
            return list(self.get_deltas_for_revisions(
                [r], specific_fileids=specific_fileids))[0]

    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        with self.lock_write():
            signature = gpg_strategy.sign(plaintext, gpg.MODE_CLEAR)
            self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        """Store a signature text for a revision.

        :param revision_id: Revision id of the revision
        :param signature: Signature text.
        raise NotImplementedError(self.add_signature_text)

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        :param desired_files: a list of (file_id, revision_id, identifier)
        raise NotImplementedError(self.iter_files_bytes)

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid). If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent as err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost. Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
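    # Added example (comment only) of interpreting the documented return
    # value; the revnos and `tip_revid` are hypothetical.
    #
    #     found, result = repo.get_rev_id_for_revno(10, (15, tip_revid))
    #     if found:
    #         revid = result
    #     else:
    #         closest_revno, closest_revid = result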
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from .reconcile import RepoReconciler
        with self.lock_write():
            reconciler = RepoReconciler(self, thorough=thorough)
            reconciler.reconcile()

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical

    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        raise NotImplementedError(self.revision_tree)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
            a revision-id may not be None or b'null:'
        raise NotImplementedError(self.revision_trees)

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        self.write_lock; as this is a long running call it's reasonable to
        implicitly lock for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack. A hint can be
            obtained from the result of commit_write_group(). Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.

        :param clean_obsolete_packs: Clean obsolete packs immediately after

    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        raise NotImplementedError(self.get_parent_map)

    def _get_parent_map_no_fallbacks(self, revision_ids):
        """Same as Repository.get_parent_map except doesn't query fallbacks."""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
                query_keys.append((revision_id,))
        vf = self.revisions.without_fallbacks()
        for (revision_id,), parent_keys in viewitems(
                vf.get_parent_map(query_keys)):
                result[revision_id] = tuple([parent_revid
                                             for (parent_revid,) in parent_keys])
                result[revision_id] = (_mod_revision.NULL_REVISION,)

    def _make_parents_provider(self):
        if not self._format.supports_external_lookups:
            return graph.StackedParentsProvider(_LazyListJoin(
                [self._make_parents_provider_unstacked()],
                self._fallback_repositories))

    def _make_parents_provider_unstacked(self):
        return graph.CallableToParentsProviderAdapter(
            self._get_parent_map_no_fallbacks)

    def get_known_graph_ancestry(self, revision_ids):
        """Return the known graph for a set of revision ids and their ancestors.
        raise NotImplementedError(self.get_known_graph_ancestry)

    def get_file_graph(self):
        """Return the graph walker for files."""
        raise NotImplementedError(self.get_file_graph)

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
                not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
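    # Added illustrative sketch (comment only): walking ancestry through the
    # graph object returned here, as gather_stats does above; `revid` is a
    # hypothetical revision id.
    #
    #     graph = repo.get_graph()
    #     for rev, parents in graph.iter_ancestry([revid]):
    #         ...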
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    def sign_revision(self, revision_id, gpg_strategy):
        with self.lock_write():
            testament = _mod_testament.Testament.from_revision(
            plaintext = testament.as_short_text()
            self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    def verify_revision_signature(self, revision_id, gpg_strategy):
        """Verify the signature on a revision.

        :param revision_id: the revision to verify
        :gpg_strategy: the GPGStrategy object to use

        :return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
        with self.lock_read():
            if not self.has_signature_for_revision_id(revision_id):
                return gpg.SIGNATURE_NOT_SIGNED, None
            signature = self.get_signature_text(revision_id)

            testament = _mod_testament.Testament.from_revision(

            (status, key, signed_plaintext) = gpg_strategy.verify(signature)
            if testament.as_short_text() != signed_plaintext:
                return gpg.SIGNATURE_NOT_VALID, None
            return (status, key)

    def verify_revision_signatures(self, revision_ids, gpg_strategy):
        """Verify revision signatures for a number of revisions.

        :param revision_id: the revision to verify
        :gpg_strategy: the GPGStrategy object to use

        :return: Iterator over tuples with revision id, result and keys
        with self.lock_read():
            for revid in revision_ids:
                (result, key) = self.verify_revision_signature(revid, gpg_strategy)
                yield revid, result, key
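    # Added usage sketch (comment only): `gpg_strategy` would be a breezy.gpg
    # GPGStrategy instance; the revision ids are hypothetical.
    #
    #     for revid, result, key in repo.verify_revision_signatures(
    #             [revid_a, revid_b], gpg_strategy):
    #         ...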
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        raise NotImplementedError(self.has_signature_for_revision_id)

    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        raise NotImplementedError(self.get_signature_text)

    def check(self, revision_ids=None, callback_refs=None, check_repo=True):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked. Typically the last revision_id of a branch.
        :param callback_refs: A dict of check-refs to resolve and callback
            the check/_check method on the items listed as wanting the ref.
        :param check_repo: If False do not check the repository contents, just
            calculate the data callback_refs requires and call them back.
        return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
                           check_repo=check_repo)

    def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
        raise NotImplementedError(self.check)

    def _warn_if_deprecated(self, branch=None):
        if not self._format.is_deprecated():
        global _deprecation_warning_done
        if _deprecation_warning_done:
                conf = config.GlobalStack()
                conf = branch.get_config_stack()
            if 'format_deprecation' in conf.get('suppress_warnings'):
            warning("Format %s for %s is deprecated -"
                    " please use 'brz upgrade' to get better performance"
                    % (self._format, self.controldir.transport.base))
            _deprecation_warning_done = True

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, text_type):
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
    """Repository format registry."""

    def get_default(self):
        """Return the current default format."""
        return controldir.format_registry.make_controldir('default').repository_format


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.


format_registry = RepositoryFormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.


#####################################################################
# Repository Formats


class RepositoryFormat(controldir.ControlComponentFormat):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been

    Common instance attributes:
    _matchingcontroldir - the controldir format that the repository format was
        originally written to work with. This can be used if manually
        constructing a bzrdir and repository, or more commonly for test suite

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    supports_chks = None
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    pack_compresses = False
    # Does the repository storage understand references to trees?
    supports_tree_reference = None
    # Is the format experimental?
    experimental = False
    # Does this repository format escape funky characters, or does it create
    # files with similar names as the versioned files in its contents on disk
    supports_funky_characters = None
    # Does this repository format support leaving locks?
    supports_leaving_lock = None
    # Does this format support the full VersionedFiles interface?
    supports_full_versioned_files = None
    # Does this format support signing revision signatures?
    supports_revision_signatures = True
    # Can the revision graph have incorrect parents?
    revision_graph_can_have_wrong_parents = None
    # Does this format support setting revision ids?
    supports_setting_revision_ids = True
    # Does this format support rich root data?
    rich_root_data = None
    # Does this format support explicitly versioned directories?
    supports_versioned_directories = None
    # Can other repositories be nested into one of this format?
    supports_nesting_repositories = None
    # Is it possible for revisions to be present without being referenced
    supports_unreferenced_revisions = None
    # Does this format store the current Branch.nick in a revision when
    supports_storing_branch_nick = True
    # Does the format support overriding the transport to use
    supports_overriding_transport = True
    # Does the format support setting custom revision properties?
    supports_custom_revision_properties = True
    # Does the format record per-file revision metadata?
    records_per_file_revision = True
        return "%s()" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def initialize(self, controldir, shared=False):
        """Initialize a repository of this format in controldir.

        :param controldir: The controldir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the controldir.
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.

    def is_deprecated(self):
        """Is this format deprecated?

        Deprecated formats may trigger a user-visible warning recommending
        the user to upgrade. They are still fully supported.

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        if self.rich_root_data and not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format,
        if (self.supports_tree_reference
                and not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format,

    def open(self, controldir, _found=False):
        """Return an instance of this format for a controldir.

        _found is a private parameter, do not use it.
        raise NotImplementedError(self.open)

    def _run_post_repo_init_hooks(self, repository, controldir, shared):
        from .controldir import ControlDir, RepoInitHookParams
        hooks = ControlDir.hooks['post_repo_init']
        params = RepoInitHookParams(repository, self, controldir, shared)


# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry. They're
# all in breezy.bzr.knitrepo now. When an instance of one of these is
# needed, it's constructed directly by the ControlDir. Non-native formats where
# the repository is not separately opened are similar.
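# Added hedged sketch (comment only, not one of the original registrations):
# a third-party format would typically be registered lazily in the same way,
# with a hypothetical module and class name:
#
#     format_registry.register_lazy(
#         b'Example repository format 1\n',
#         'example.repo_format',
#         'ExampleRepositoryFormat1',
#         )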
format_registry.register_lazy(
    b'Bazaar-NG Knit Repository Format 1',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit1',
format_registry.register_lazy(
    b'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit3',
format_registry.register_lazy(
    b'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'breezy.bzr.knitrepo',
    'RepositoryFormatKnit4',

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    b'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack1',
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack3',
format_registry.register_lazy(
    b'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack4',
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5',
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6',
format_registry.register_lazy(
    b'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
format_registry.register_lazy(
    b'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2a',

# Development formats.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    (b"Bazaar development format 2 with subtree support "
     b"(needs bzr.dev from before 1.8)\n"),
    'breezy.bzr.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
format_registry.register_lazy(
    b'Bazaar development format 8\n',
    'breezy.bzr.groupcompress_repo',
    'RepositoryFormat2aSubtree',
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).

    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing

        :param revision_id: Only copy the content needed to construct
            revision_id and its parents.
        with self.lock_write():
                self.target.set_make_working_trees(
                    self.source.make_working_trees())
            except NotImplementedError:
            self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
        raise NotImplementedError(self.fetch)

    def search_missing_revision_ids(
            self, find_ghosts=True, revision_ids=None, if_present_ids=None,
        """Return the revision ids that source has that target does not.

        :param revision_ids: return revision ids included by these
            revision_ids. NoSuchRevision will be raised if any of these
            revisions are not present.
        :param if_present_ids: like revision_ids, but will not cause
            NoSuchRevision if any of these are absent, instead they will simply
            not be in the result. This is useful for e.g. finding revisions
            to fetch for tags, which may reference absent revisions.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :param limit: Maximum number of revisions to return, topologically
        :return: A breezy.graph.SearchResult.
        raise NotImplementedError(self.search_missing_revision_ids)

    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
            InterRepository._assert_same_model(source, target)
        except errors.IncompatibleRepositories as e:

    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                                                  "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                                                  "different serializers")
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        with ui.ui_factory.nested_progress_bar() as pb:
            # this is only useful with metadir layouts - separated repo content.
            # trigger an assertion if not such
            repo._format.get_format_string()
            self.repo_dir = repo.controldir
            pb.update(gettext('Moving repository to repository.backup'))
            self.repo_dir.transport.move('repository', 'repository.backup')
            backup_transport = self.repo_dir.transport.clone(
                'repository.backup')
            repo._format.check_conversion_target(self.target_format)
            self.source_repo = repo._format.open(self.repo_dir,
                                                 _override_transport=backup_transport)
            pb.update(gettext('Creating new repository'))
            converted = self.target_format.initialize(self.repo_dir,
                                                      self.source_repo.is_shared())
            converted.lock_write()
            pb.update(gettext('Copying content'))
            self.source_repo.copy_content_into(converted)
            pb.update(gettext('Deleting old repository content'))
            self.repo_dir.transport.delete_tree('repository.backup')
            ui.ui_factory.note(gettext('repository converted'))
def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in viewitems(revision_graph):
        revision_graph[key] = tuple(parent for parent in parents if parent
    return revision_graph


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    start_revision = partial_history_cache[-1]
    graph = repo.get_graph()
    iterator = graph.iter_lefthand_ancestry(start_revision,
                                            (_mod_revision.NULL_REVISION,))
        # skip the last revision in the list
        if (stop_index is not None and
                len(partial_history_cache) > stop_index):
        if partial_history_cache[-1] == stop_revision:
            revision_id = next(iterator)
            partial_history_cache.append(revision_id)
    except StopIteration:


class _LazyListJoin(object):
    """An iterable yielding the contents of many lists as one list.

    Each iterator made from this will reflect the current contents of the lists
    at the time the iterator is made.

    This is used by Repository's _make_parents_provider implementation so that

      pp = repo._make_parents_provider() # uses a list of fallback repos
      pp.add_fallback_repository(other_repo) # appends to that list
      result = pp.get_parent_map(...)
      # The result will include revs from other_repo

    def __init__(self, *list_parts):
        self.list_parts = list_parts

        for list_part in self.list_parts:
            full_list.extend(list_part)
        return iter(full_list)

        return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,