1
# Copyright (C) 2005-2011 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
from __future__ import absolute_import
19
from .lazy_import import lazy_import
20
lazy_import(globals(), """
from breezy import (
    revision as _mod_revision,
    testament as _mod_testament,
    )
from breezy.bundle import serializer
from breezy.i18n import gettext
""")

from .decorators import only_raises
from .inter import InterObject
from .lock import _RelockDebugMixin, LogicalLockResult
from .trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)
54
# Old formats display a warning, but only once
55
_deprecation_warning_done = False
58
class IsInWriteGroupError(errors.InternalBzrError):
60
_fmt = "May not refresh_data of repo %(repo)s while in a write group."
62
def __init__(self, repo):
63
errors.InternalBzrError.__init__(self, repo=repo)
66
class CannotSetRevisionId(errors.BzrError):
68
_fmt = "Repository format does not support setting revision ids."
71
class CommitBuilder(object):
72
"""Provides an interface to build up a commit.
74
This allows describing a tree to be committed without needing to
75
know the internals of the format of the repository.
78
# all clients should supply tree roots.
79
record_root_entry = True
80
# whether this commit builder will automatically update the branch that is
82
updates_branch = False
84
def __init__(self, repository, parents, config_stack, timestamp=None,
85
timezone=None, committer=None, revprops=None,
86
revision_id=None, lossy=False):
87
"""Initiate a CommitBuilder.
89
:param repository: Repository to commit to.
90
:param parents: Revision ids of the parents of the new revision.
91
:param timestamp: Optional timestamp recorded for commit.
92
:param timezone: Optional timezone for timestamp.
93
:param committer: Optional committer to set for commit.
94
:param revprops: Optional dictionary of revision properties.
95
:param revision_id: Optional revision id.
96
:param lossy: Whether to discard data that can not be natively
97
represented, when pushing to a foreign VCS
99
self._config_stack = config_stack
102
if committer is None:
103
self._committer = self._config_stack.get('email')
104
elif not isinstance(committer, text_type):
105
self._committer = committer.decode() # throw if non-ascii
107
self._committer = committer
109
self.parents = parents
110
self.repository = repository
113
if revprops is not None:
114
self._validate_revprops(revprops)
115
self._revprops.update(revprops)
117
if timestamp is None:
118
timestamp = time.time()
119
# Restrict resolution to 1ms
120
self._timestamp = round(timestamp, 3)
123
self._timezone = osutils.local_time_offset()
125
self._timezone = int(timezone)
127
self._generate_revision_if_needed(revision_id)
129
def any_changes(self):
130
"""Return True if any entries were changed.
132
        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        raise NotImplementedError(self.any_changes)
139
def _validate_unicode_text(self, text, context):
140
"""Verify things like commit messages don't have bogus characters."""
141
# TODO(jelmer): Make this repository-format specific
143
raise ValueError('Invalid value for %s: %r' % (context, text))
145
def _validate_revprops(self, revprops):
146
for key, value in viewitems(revprops):
147
# We know that the XML serializers do not round trip '\r'
148
# correctly, so refuse to accept them
149
if not isinstance(value, (text_type, str)):
150
raise ValueError('revision property (%s) is not a valid'
151
' (unicode) string: %r' % (key, value))
152
# TODO(jelmer): Make this repository-format specific
153
self._validate_unicode_text(value,
154
'revision property (%s)' % (key,))
156
def commit(self, message):
157
"""Make the actual commit.
159
        :return: The revision id of the recorded revision.
        """
        raise NotImplementedError(self.commit)

    def abort(self):
        """Abort the commit that is being built.
        """
        raise NotImplementedError(self.abort)
168
def revision_tree(self):
169
"""Return the tree that was just committed.
171
After calling commit() this can be called to get a
172
RevisionTree representing the newly committed tree. This is
173
preferred to calling Repository.revision_tree() because that may
174
require deserializing the inventory, while we already have a copy in
177
raise NotImplementedError(self.revision_tree)
179
def finish_inventory(self):
180
"""Tell the builder that the inventory is finished.
182
:return: The inventory id in the repository, which can be used with
183
repository.get_inventory.
185
raise NotImplementedError(self.finish_inventory)
187
def _gen_revision_id(self):
188
"""Return new revision-id."""
189
return generate_ids.gen_revision_id(self._committer, self._timestamp)
191
def _generate_revision_if_needed(self, revision_id):
192
"""Create a revision id if None was supplied.
194
If the repository can not support user-specified revision ids
195
they should override this function and raise CannotSetRevisionId
196
if _new_revision_id is not None.
198
:raises: CannotSetRevisionId
200
if not self.repository._format.supports_setting_revision_ids:
201
if revision_id is not None:
202
raise CannotSetRevisionId()
204
if revision_id is None:
205
self._new_revision_id = self._gen_revision_id()
206
self.random_revid = True
208
self._new_revision_id = revision_id
209
self.random_revid = False
211
def record_iter_changes(self, tree, basis_revision_id, iter_changes):
212
"""Record a new tree via iter_changes.
214
:param tree: The tree to obtain text contents from for changed objects.
215
:param basis_revision_id: The revision id of the tree the iter_changes
216
has been generated against. Currently assumed to be the same
217
as self.parents[0] - if it is not, errors may occur.
218
:param iter_changes: An iter_changes iterator with the changes to apply
219
to basis_revision_id. The iterator must not include any items with
220
a current kind of None - missing items must be either filtered out
221
            or errored-on before record_iter_changes sees the item.
222
:return: A generator of (relpath, fs_hash) tuples for use with
225
raise NotImplementedError(self.record_iter_changes)
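

# Illustrative sketch (not part of the original module): one plausible way a
# caller could drive a CommitBuilder obtained from
# Repository.get_commit_builder(). The helper name and the exact ordering of
# calls are assumptions based on the docstrings above, not a copy of an
# existing Breezy code path.
def _example_commit_builder_flow(repository, branch, config_stack, tree,
                                 basis_revision_id, iter_changes, message):
    with repository.lock_write():
        builder = repository.get_commit_builder(
            branch, [basis_revision_id], config_stack)
        try:
            # Stream the changes made relative to the basis revision.
            list(builder.record_iter_changes(
                tree, basis_revision_id, iter_changes))
            builder.finish_inventory()
            return builder.commit(message)
        except Exception:
            builder.abort()
            raise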
228
class RepositoryWriteLockResult(LogicalLockResult):
229
"""The result of write locking a repository.
231
:ivar repository_token: The token obtained from the underlying lock, or
233
:ivar unlock: A callable which will unlock the lock.
236
def __init__(self, unlock, repository_token):
237
LogicalLockResult.__init__(self, unlock)
238
self.repository_token = repository_token
241
return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
245
######################################################################
249
class Repository(controldir.ControlComponent, _RelockDebugMixin):
250
"""Repository holding history for one or more branches.
252
The repository holds and retrieves historical information including
253
revisions and file history. It's normally accessed only by the Branch,
254
which views a particular line of development through that history.
256
See VersionedFileRepository in breezy.vf_repository for the
257
base class for most Bazaar repositories.
260
def abort_write_group(self, suppress_errors=False):
261
"""Commit the contents accrued within the current write group.
263
:param suppress_errors: if true, abort_write_group will catch and log
264
unexpected errors that happen during the abort, rather than
265
allowing them to propagate. Defaults to False.
267
:seealso: start_write_group.
269
if self._write_group is not self.get_transaction():
270
            # has an unlock or relock occurred?
            if suppress_errors:
                mutter(
                    '(suppressed) mismatched lock context and write group. %r, %r',
                    self._write_group, self.get_transaction())
                return
276
raise errors.BzrError(
277
'mismatched lock context and write group. %r, %r' %
278
(self._write_group, self.get_transaction()))
280
        try:
            self._abort_write_group()
        except Exception as exc:
282
self._write_group = None
283
if not suppress_errors:
285
mutter('abort_write_group failed')
286
log_exception_quietly()
287
note(gettext('brz: ERROR (ignored): %s'), exc)
288
self._write_group = None
290
def _abort_write_group(self):
291
"""Template method for per-repository write group cleanup.
293
This is called during abort before the write group is considered to be
294
finished and should cleanup any internal state accrued during the write
295
group. There is no requirement that data handed to the repository be
296
*not* made available - this is not a rollback - but neither should any
297
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """
303
def add_fallback_repository(self, repository):
304
"""Add a repository to use for looking up data not held locally.
306
:param repository: A repository.
308
raise NotImplementedError(self.add_fallback_repository)
310
def _check_fallback_repository(self, repository):
311
"""Check that this repository can fallback to repository safely.
313
Raise an error if not.
315
:param repository: A repository to fallback to.
317
return InterRepository._assert_same_model(self, repository)
319
def all_revision_ids(self):
320
"""Returns a list of all the revision ids in the repository.
322
This is conceptually deprecated because code should generally work on
323
the graph reachable from a particular revision, and ignore any other
324
revisions that might be present. There is no direct replacement
327
if 'evil' in debug.debug_flags:
328
mutter_callsite(2, "all_revision_ids is linear with history.")
329
return self._all_revision_ids()
331
def _all_revision_ids(self):
332
"""Returns a list of all the revision ids in the repository.
334
These are in as much topological order as the underlying store can
337
raise NotImplementedError(self._all_revision_ids)
339
def break_lock(self):
340
"""Break a lock if one is present from another instance.
342
Uses the ui factory to ask for confirmation if the lock may be from
345
self.control_files.break_lock()
348
def create(controldir):
349
"""Construct the current default format repository in controldir."""
350
return RepositoryFormat.get_default_format().initialize(controldir)
352
def __init__(self, _format, controldir, control_files):
353
"""instantiate a Repository.
355
:param _format: The format of the repository on disk.
356
:param controldir: The ControlDir of the repository.
357
:param control_files: Control files to use for locking, etc.
359
# In the future we will have a single api for all stores for
360
# getting file texts, inventories and revisions, then
361
# this construct will accept instances of those things.
362
super(Repository, self).__init__()
363
self._format = _format
364
# the following are part of the public API for Repository:
365
self.controldir = controldir
366
self.control_files = control_files
368
self._write_group = None
369
# Additional places to query for data.
370
self._fallback_repositories = []
373
def user_transport(self):
374
return self.controldir.user_transport
377
def control_transport(self):
378
return self._transport
381
if self._fallback_repositories:
382
return '%s(%r, fallback_repositories=%r)' % (
383
self.__class__.__name__,
385
self._fallback_repositories)
387
return '%s(%r)' % (self.__class__.__name__,
390
def _has_same_fallbacks(self, other_repo):
391
"""Returns true if the repositories have the same fallbacks."""
392
my_fb = self._fallback_repositories
393
other_fb = other_repo._fallback_repositories
394
        if len(my_fb) != len(other_fb):
            return False
        for f, g in zip(my_fb, other_fb):
            if not f.has_same_location(g):
                return False
        return True
401
def has_same_location(self, other):
402
"""Returns a boolean indicating if this repository is at the same
403
location as another repository.
405
This might return False even when two repository objects are accessing
406
the same physical repository via different URLs.
408
        if self.__class__ is not other.__class__:
            return False
        return (self.control_url == other.control_url)
412
def is_in_write_group(self):
413
"""Return True if there is an open write group.
415
:seealso: start_write_group.
417
return self._write_group is not None
420
    def is_locked(self):
        return self.control_files.is_locked()
422
def is_write_locked(self):
423
"""Return True if this object is write locked."""
424
return self.is_locked() and self.control_files._lock_mode == 'w'
426
def lock_write(self, token=None):
427
"""Lock this repository for writing.
429
        This causes caching within the repository object to start accumulating
430
data during reads, and allows a 'write_group' to be obtained. Write
431
groups must be used for actual data insertion.
433
A token should be passed in if you know that you have locked the object
434
some other way, and need to synchronise this object's state with that
437
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
439
:param token: if this is already locked, then lock_write will fail
440
unless the token matches the existing lock.
441
:returns: a token if this instance supports tokens, otherwise None.
442
:raises TokenLockingNotSupported: when a token is given but this
443
instance doesn't support using token locks.
444
:raises MismatchedToken: if the specified token doesn't match the token
445
of the existing lock.
446
:seealso: start_write_group.
447
:return: A RepositoryWriteLockResult.
449
locked = self.is_locked()
450
token = self.control_files.lock_write(token=token)
452
self._warn_if_deprecated()
454
for repo in self._fallback_repositories:
455
# Writes don't affect fallback repos
458
return RepositoryWriteLockResult(self.unlock, token)
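
    # Illustrative sketch (assumption, not original code): a token returned by
    # lock_write() can be used to relock the same repository from another
    # instance, e.g.:
    #
    #     result = repo.lock_write()
    #     try:
    #         other_instance.lock_write(token=result.repository_token)
    #         ...
    #     finally:
    #         repo.unlock()
    #
    # A mismatched token raises MismatchedToken, as documented above.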
    def lock_read(self):
        """Lock the repository for read operations.
463
:return: An object with an unlock method which will release the lock
466
locked = self.is_locked()
467
self.control_files.lock_read()
469
self._warn_if_deprecated()
471
for repo in self._fallback_repositories:
474
return LogicalLockResult(self.unlock)
476
def get_physical_lock_status(self):
477
return self.control_files.get_physical_lock_status()
479
def leave_lock_in_place(self):
480
"""Tell this repository not to release the physical lock when this
483
If lock_write doesn't return a token, then this method is not supported.
485
self.control_files.leave_in_place()
487
def dont_leave_lock_in_place(self):
488
"""Tell this repository to release the physical lock when this
489
object is unlocked, even if it didn't originally acquire it.
491
If lock_write doesn't return a token, then this method is not supported.
493
self.control_files.dont_leave_in_place()
495
def gather_stats(self, revid=None, committers=None):
496
"""Gather statistics from a revision id.
498
:param revid: The revision id to gather statistics from, if None, then
499
no revision specific statistics are gathered.
500
:param committers: Optional parameter controlling whether to grab
501
a count of committers from the revision specific statistics.
502
:return: A dictionary of statistics. Currently this contains:
503
committers: The number of committers if requested.
504
firstrev: A tuple with timestamp, timezone for the penultimate left
505
most ancestor of revid, if revid is not the NULL_REVISION.
506
latestrev: A tuple with timestamp, timezone for revid, if revid is
507
not the NULL_REVISION.
508
revisions: The total revision count in the repository.
509
size: An estimate disk size of the repository in bytes.
511
with self.lock_read():
513
if revid and committers:
514
result['committers'] = 0
515
if revid and revid != _mod_revision.NULL_REVISION:
516
graph = self.get_graph()
518
all_committers = set()
519
revisions = [r for (r, p) in graph.iter_ancestry([revid])
520
if r != _mod_revision.NULL_REVISION]
523
# ignore the revisions in the middle - just grab first and last
524
revisions = revisions[0], revisions[-1]
525
for revision in self.get_revisions(revisions):
526
if not last_revision:
527
last_revision = revision
529
all_committers.add(revision.committer)
530
first_revision = revision
532
result['committers'] = len(all_committers)
533
result['firstrev'] = (first_revision.timestamp,
534
first_revision.timezone)
535
result['latestrev'] = (last_revision.timestamp,
536
last_revision.timezone)
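
    # Illustrative shape of the gather_stats() result (assumption; the keys
    # actually present depend on the arguments and repository capabilities):
    #
    #     stats = repo.gather_stats(revid=head_revid, committers=True)
    #     stats['committers']  # distinct committers in the ancestry of head_revid
    #     stats['firstrev']    # (timestamp, timezone) of the earliest ancestor found
    #     stats['latestrev']   # (timestamp, timezone) of head_revid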
539
def find_branches(self, using=False):
540
"""Find branches underneath this repository.
542
This will include branches inside other branches.
544
:param using: If True, list only branches using this repository.
546
if using and not self.is_shared():
547
return self.controldir.list_branches()
549
class Evaluator(object):
552
self.first_call = True
554
def __call__(self, controldir):
555
# On the first call, the parameter is always the controldir
556
# containing the current repo.
557
if not self.first_call:
559
repository = controldir.open_repository()
560
except errors.NoRepositoryPresent:
563
return False, ([], repository)
564
self.first_call = False
565
value = (controldir.list_branches(), None)
569
for branches, repository in controldir.ControlDir.find_controldirs(
570
self.user_transport, evaluate=Evaluator()):
571
if branches is not None:
573
if not using and repository is not None:
574
ret.extend(repository.find_branches())
577
def search_missing_revision_ids(self, other,
578
find_ghosts=True, revision_ids=None, if_present_ids=None,
580
"""Return the revision ids that other has that this does not.
582
These are returned in topological order.
584
revision_ids: only return revision ids included by revision_id.
586
with self.lock_read():
587
return InterRepository.get(other, self).search_missing_revision_ids(
588
find_ghosts=find_ghosts, revision_ids=revision_ids,
589
if_present_ids=if_present_ids, limit=limit)
593
"""Open the repository rooted at base.
595
For instance, if the repository is at URL/.bzr/repository,
596
Repository.open(URL) -> a Repository instance.
598
control = controldir.ControlDir.open(base)
599
return control.open_repository()
601
def copy_content_into(self, destination, revision_id=None):
602
"""Make a complete copy of the content in self into destination.
604
        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)
609
def commit_write_group(self):
610
"""Commit the contents accrued within the current write group.
612
:seealso: start_write_group.
614
:return: it may return an opaque hint that can be passed to 'pack'.
616
if self._write_group is not self.get_transaction():
617
            # has an unlock or relock occurred?
618
raise errors.BzrError('mismatched lock context %r and '
620
(self.get_transaction(), self._write_group))
621
result = self._commit_write_group()
622
self._write_group = None
625
def _commit_write_group(self):
626
"""Template method for per-repository write group cleanup.
628
This is called before the write group is considered to be
629
finished and should ensure that all data handed to the repository
630
for writing during the write group is safely committed (to the
631
extent possible considering file system caching etc).
634
def suspend_write_group(self):
635
"""Suspend a write group.
637
:raise UnsuspendableWriteGroup: If the write group can not be
639
:return: List of tokens
641
raise errors.UnsuspendableWriteGroup(self)
643
def refresh_data(self):
644
"""Re-read any data needed to synchronise with disk.
646
This method is intended to be called after another repository instance
647
(such as one used by a smart server) has inserted data into the
648
repository. On all repositories this will work outside of write groups.
649
Some repository formats (pack and newer for breezy native formats)
650
support refresh_data inside write groups. If called inside a write
651
group on a repository that does not support refreshing in a write group
652
IsInWriteGroupError will be raised.
656
def resume_write_group(self, tokens):
657
if not self.is_write_locked():
658
raise errors.NotWriteLocked(self)
659
if self._write_group:
660
raise errors.BzrError('already in a write group')
661
self._resume_write_group(tokens)
662
# so we can detect unlock/relock - the write group is now entered.
663
self._write_group = self.get_transaction()
665
def _resume_write_group(self, tokens):
666
raise errors.UnsuspendableWriteGroup(self)
668
def fetch(self, source, revision_id=None, find_ghosts=False):
669
"""Fetch the content required to construct revision_id from source.
671
If revision_id is None, then all content is copied.
673
fetch() may not be used when the repository is in a write group -
674
either finish the current write group before using fetch, or use
675
fetch before starting the write group.
677
:param find_ghosts: Find and copy revisions in the source that are
678
ghosts in the target (and not reachable directly by walking out to
679
the first-present revision in target from revision_id).
680
:param revision_id: If specified, all the content needed for this
681
revision ID will be copied to the target. Fetch will determine for
682
itself which content needs to be copied.
684
if self.is_in_write_group():
685
raise errors.InternalBzrError(
686
"May not fetch while in a write group.")
687
# fast path same-url fetch operations
688
# TODO: lift out to somewhere common with RemoteRepository
689
# <https://bugs.launchpad.net/bzr/+bug/401646>
690
if (self.has_same_location(source)
691
and self._has_same_fallbacks(source)):
692
# check that last_revision is in 'from' and then return a
694
if (revision_id is not None and
695
not _mod_revision.is_null(revision_id)):
696
self.get_revision(revision_id)
698
inter = InterRepository.get(source, self)
699
return inter.fetch(revision_id=revision_id, find_ghosts=find_ghosts)
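
    # Illustrative usage (assumption): copy everything needed for one revision
    # from another repository object:
    #
    #     target_repo.fetch(source_repo, revision_id=revid)
    #
    # fetch() must not be called while target_repo is in a write group; the
    # actual copying is delegated to InterRepository.get(source, target).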
701
def create_bundle(self, target, base, fileobj, format=None):
702
return serializer.write_bundle(self, target, base, fileobj, format)
704
def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
705
timezone=None, committer=None, revprops=None,
706
revision_id=None, lossy=False):
707
"""Obtain a CommitBuilder for this repository.
709
:param branch: Branch to commit to.
710
:param parents: Revision ids of the parents of the new revision.
711
:param config_stack: Configuration stack to use.
712
:param timestamp: Optional timestamp recorded for commit.
713
:param timezone: Optional timezone for timestamp.
714
:param committer: Optional committer to set for commit.
715
:param revprops: Optional dictionary of revision properties.
716
:param revision_id: Optional revision id.
717
:param lossy: Whether to discard data that can not be natively
718
represented, when pushing to a foreign VCS
720
raise NotImplementedError(self.get_commit_builder)
722
    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if (self.control_files._lock_count == 1 and
725
self.control_files._lock_mode == 'w'):
726
if self._write_group is not None:
727
self.abort_write_group()
728
self.control_files.unlock()
729
raise errors.BzrError(
730
'Must end write groups before releasing write locks.')
731
self.control_files.unlock()
732
if self.control_files._lock_count == 0:
733
for repo in self._fallback_repositories:
736
def clone(self, controldir, revision_id=None):
737
"""Clone this repository into controldir using the current format.
739
Currently no check is made that the format of this repository and
740
the bzrdir format are compatible. FIXME RBC 20060201.
742
:return: The newly created destination repository.
744
with self.lock_read():
745
# TODO: deprecate after 0.16; cloning this with all its settings is
746
# probably not very useful -- mbp 20070423
747
dest_repo = self._create_sprouting_repo(
748
controldir, shared=self.is_shared())
749
self.copy_content_into(dest_repo, revision_id)
752
def start_write_group(self):
753
"""Start a write group in the repository.
755
Write groups are used by repositories which do not have a 1:1 mapping
756
between file ids and backend store to manage the insertion of data from
757
both fetch and commit operations.
759
A write lock is required around the
760
start_write_group/commit_write_group for the support of lock-requiring
763
One can only insert data into a repository inside a write group.
767
if not self.is_write_locked():
768
raise errors.NotWriteLocked(self)
769
if self._write_group:
770
raise errors.BzrError('already in a write group')
771
self._start_write_group()
772
# so we can detect unlock/relock - the write group is now entered.
773
self._write_group = self.get_transaction()
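
    # The write group protocol used throughout this module (sketch only):
    #
    #     with repo.lock_write():
    #         repo.start_write_group()
    #         try:
    #             ...  # insert data
    #             hint = repo.commit_write_group()
    #         except Exception:
    #             repo.abort_write_group()
    #             raise
    #
    # The hint returned by commit_write_group() may later be passed to pack().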
775
def _start_write_group(self):
776
"""Template method for per-repository write group startup.
778
This is called before the write group is considered to be
782
def sprout(self, to_bzrdir, revision_id=None):
783
"""Create a descendent repository for new development.
785
Unlike clone, this does not copy the settings of the repository.
787
with self.lock_read():
788
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
789
dest_repo.fetch(self, revision_id=revision_id)
792
def _create_sprouting_repo(self, a_controldir, shared):
794
a_controldir._format, self.controldir._format.__class__):
795
# use target default format.
796
dest_repo = a_controldir.create_repository()
798
# Most control formats need the repository to be specifically
799
# created, but on some old all-in-one formats it's not needed
801
dest_repo = self._format.initialize(
802
a_controldir, shared=shared)
803
except errors.UninitializableFormat:
804
dest_repo = a_controldir.open_repository()
807
def has_revision(self, revision_id):
808
"""True if this repository has a copy of the revision."""
809
with self.lock_read():
810
return revision_id in self.has_revisions((revision_id,))
812
def has_revisions(self, revision_ids):
813
"""Probe to find out the presence of multiple revisions.
815
:param revision_ids: An iterable of revision_ids.
816
:return: A set of the revision_ids that were present.
818
raise NotImplementedError(self.has_revisions)
820
def get_revision(self, revision_id):
821
"""Return the Revision object for a named revision."""
822
with self.lock_read():
823
return self.get_revisions([revision_id])[0]
825
def get_revision_reconcile(self, revision_id):
826
"""'reconcile' helper routine that allows access to a revision always.
828
This variant of get_revision does not cross check the weave graph
829
against the revision one as get_revision does: but it should only
830
be used by reconcile, or reconcile-alike commands that are correcting
831
or testing the revision graph.
833
raise NotImplementedError(self.get_revision_reconcile)
835
def get_revisions(self, revision_ids):
836
"""Get many revisions at once.
838
Repositories that need to check data on every revision read should
839
subclass this method.
842
        revs = {}
        for revid, rev in self.iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]
848
def iter_revisions(self, revision_ids):
849
"""Iterate over revision objects.
851
:param revision_ids: An iterable of revisions to examine. None may be
852
passed to request all revisions known to the repository. Note that
853
not all repositories can find unreferenced revisions; for those
854
repositories only referenced ones will be returned.
855
:return: An iterator of (revid, revision) tuples. Absent revisions (
856
those asked for but not available) are returned as (revid, None).
857
N.B.: Revisions are not necessarily yielded in order.
859
raise NotImplementedError(self.iter_revisions)
861
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
862
"""Produce a generator of revision deltas.
864
Note that the input is a sequence of REVISIONS, not revision_ids.
865
Trees will be held in memory until the generator exits.
866
Each delta is relative to the revision's lefthand predecessor.
868
:param specific_fileids: if not None, the result is filtered
869
so that only those file-ids, their parents and their
870
children are included.
872
raise NotImplementedError(self.get_deltas_for_revisions)
874
def get_revision_delta(self, revision_id, specific_fileids=None):
875
"""Return the delta for one revision.
877
The delta is relative to the left-hand predecessor of the
880
:param specific_fileids: if not None, the result is filtered
881
so that only those file-ids, their parents and their
882
children are included.
884
with self.lock_read():
885
r = self.get_revision(revision_id)
886
return list(self.get_deltas_for_revisions(
887
[r], specific_fileids=specific_fileids))[0]
889
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
890
with self.lock_write():
891
signature = gpg_strategy.sign(plaintext, gpg.MODE_CLEAR)
892
self.add_signature_text(revision_id, signature)
894
def add_signature_text(self, revision_id, signature):
895
"""Store a signature text for a revision.
897
:param revision_id: Revision id of the revision
898
:param signature: Signature text.
900
raise NotImplementedError(self.add_signature_text)
902
def iter_files_bytes(self, desired_files):
903
"""Iterate through file versions.
905
Files will not necessarily be returned in the order they occur in
906
desired_files. No specific order is guaranteed.
908
Yields pairs of identifier, bytes_iterator. identifier is an opaque
909
value supplied by the caller as part of desired_files. It should
910
uniquely identify the file version in the caller's context. (Examples:
911
an index number or a TreeTransform trans_id.)
913
:param desired_files: a list of (file_id, revision_id, identifier)
916
raise NotImplementedError(self.iter_files_bytes)
918
def get_rev_id_for_revno(self, revno, known_pair):
919
"""Return the revision id of a revno, given a later (revno, revid)
920
pair in the same history.
922
:return: if found (True, revid). If the available history ran out
923
before reaching the revno, then this returns
924
(False, (closest_revno, closest_revid)).
926
known_revno, known_revid = known_pair
927
partial_history = [known_revid]
928
distance_from_known = known_revno - revno
929
if distance_from_known < 0:
931
'requested revno (%d) is later than given known revno (%d)'
932
% (revno, known_revno))
935
self, partial_history, stop_index=distance_from_known)
936
except errors.RevisionNotPresent as err:
937
if err.revision_id == known_revid:
938
# The start revision (known_revid) wasn't found.
940
            # This is a stacked repository with no fallbacks, or there's a
941
# left-hand ghost. Either way, even though the revision named in
942
# the error isn't in this repo, we know it's the next step in this
944
partial_history.append(err.revision_id)
945
if len(partial_history) <= distance_from_known:
946
# Didn't find enough history to get a revid for the revno.
947
earliest_revno = known_revno - len(partial_history) + 1
948
return (False, (earliest_revno, partial_history[-1]))
949
if len(partial_history) - 1 > distance_from_known:
950
raise AssertionError('_iter_for_revno returned too much history')
951
return (True, partial_history[-1])
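
    # Worked example (illustrative): with known_pair == (10, revid_10) and
    # revno == 7, distance_from_known is 3, so the left-hand history is walked
    # back three steps from revid_10 and partial_history[3] is the revid for
    # revno 7. If history runs out after only two steps, the method returns
    # (False, (8, <revid of revno 8>)) instead.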
954
"""Return True if this repository is flagged as a shared repository."""
955
raise NotImplementedError(self.is_shared)
957
def reconcile(self, other=None, thorough=False):
958
"""Reconcile this repository."""
959
raise NotImplementedError(self.reconcile)
961
def _refresh_data(self):
962
"""Helper called from lock_* to ensure coherency with disk.
964
The default implementation does nothing; it is however possible
965
for repositories to maintain loaded indices across multiple locks
966
by checking inside their implementation of this method to see
967
whether their indices are still valid. This depends of course on
968
the disk format being validatable in this manner. This method is
969
also called by the refresh_data() public interface to cause a refresh
970
to occur while in a write lock so that data inserted by a smart server
971
push operation is visible on the client's instance of the physical
975
def revision_tree(self, revision_id):
976
"""Return Tree for a revision on this branch.
978
`revision_id` may be NULL_REVISION for the empty tree revision.
980
raise NotImplementedError(self.revision_tree)
982
def revision_trees(self, revision_ids):
983
"""Return Trees for revisions in this repository.
985
:param revision_ids: a sequence of revision-ids;
986
a revision-id may not be None or b'null:'
988
raise NotImplementedError(self.revision_trees)
990
def pack(self, hint=None, clean_obsolete_packs=False):
991
"""Compress the data within the repository.
993
This operation only makes sense for some repository types. For other
994
types it should be a no-op that just returns.
996
This stub method does not require a lock, but subclasses should use
997
        self.write_lock, as this is a long-running call and it's reasonable to
998
implicitly lock for the user.
1000
:param hint: If not supplied, the whole repository is packed.
1001
If supplied, the repository may use the hint parameter as a
1002
hint for the parts of the repository to pack. A hint can be
1003
obtained from the result of commit_write_group(). Out of
1004
date hints are simply ignored, because concurrent operations
1005
can obsolete them rapidly.
1007
:param clean_obsolete_packs: Clean obsolete packs immediately after
1011
def get_transaction(self):
1012
return self.control_files.get_transaction()
1014
def get_parent_map(self, revision_ids):
1015
"""See graph.StackedParentsProvider.get_parent_map"""
1016
raise NotImplementedError(self.get_parent_map)
1018
def _get_parent_map_no_fallbacks(self, revision_ids):
1019
"""Same as Repository.get_parent_map except doesn't query fallbacks."""
1020
# revisions index works in keys; this just works in revisions
1021
# therefore wrap and unwrap
1024
for revision_id in revision_ids:
1025
if revision_id == _mod_revision.NULL_REVISION:
1026
result[revision_id] = ()
1027
elif revision_id is None:
1028
raise ValueError('get_parent_map(None) is not valid')
1030
query_keys.append((revision_id,))
1031
vf = self.revisions.without_fallbacks()
1032
for (revision_id,), parent_keys in viewitems(
1033
vf.get_parent_map(query_keys)):
1035
result[revision_id] = tuple([parent_revid
1036
for (parent_revid,) in parent_keys])
1038
result[revision_id] = (_mod_revision.NULL_REVISION,)
1041
def _make_parents_provider(self):
1042
if not self._format.supports_external_lookups:
1044
return graph.StackedParentsProvider(_LazyListJoin(
1045
[self._make_parents_provider_unstacked()],
1046
self._fallback_repositories))
1048
def _make_parents_provider_unstacked(self):
1049
return graph.CallableToParentsProviderAdapter(
1050
self._get_parent_map_no_fallbacks)
1052
def get_known_graph_ancestry(self, revision_ids):
1053
"""Return the known graph for a set of revision ids and their ancestors.
1055
raise NotImplementedError(self.get_known_graph_ancestry)
1057
def get_file_graph(self):
1058
"""Return the graph walker for files."""
1059
raise NotImplementedError(self.get_file_graph)
1061
def get_graph(self, other_repository=None):
1062
"""Return the graph walker for this repository format"""
1063
parents_provider = self._make_parents_provider()
1064
if (other_repository is not None and
1065
not self.has_same_location(other_repository)):
1066
parents_provider = graph.StackedParentsProvider(
1067
[parents_provider, other_repository._make_parents_provider()])
1068
return graph.Graph(parents_provider)
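
    # Illustrative usage (assumption): walking ancestry through the graph
    # object, as gather_stats() does above:
    #
    #     graph = repo.get_graph()
    #     for revid, parents in graph.iter_ancestry([head_revid]):
    #         ...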
1070
def set_make_working_trees(self, new_value):
1071
"""Set the policy flag for making working trees when creating branches.
1073
This only applies to branches that use this repository.
1075
The default is 'True'.
1076
:param new_value: True to restore the default, False to disable making
1079
raise NotImplementedError(self.set_make_working_trees)
1081
def make_working_trees(self):
1082
"""Returns the policy for making working trees on new branches."""
1083
raise NotImplementedError(self.make_working_trees)
1085
def sign_revision(self, revision_id, gpg_strategy):
1086
with self.lock_write():
1087
            testament = _mod_testament.Testament.from_revision(
                self, revision_id)
1089
plaintext = testament.as_short_text()
1090
self.store_revision_signature(gpg_strategy, plaintext, revision_id)
1092
def verify_revision_signature(self, revision_id, gpg_strategy):
1093
"""Verify the signature on a revision.
1095
:param revision_id: the revision to verify
1096
        :gpg_strategy: the GPGStrategy object to use
1098
:return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
1100
with self.lock_read():
1101
if not self.has_signature_for_revision_id(revision_id):
1102
return gpg.SIGNATURE_NOT_SIGNED, None
1103
signature = self.get_signature_text(revision_id)
1105
            testament = _mod_testament.Testament.from_revision(
                self, revision_id)
1108
(status, key, signed_plaintext) = gpg_strategy.verify(signature)
1109
if testament.as_short_text() != signed_plaintext:
1110
return gpg.SIGNATURE_NOT_VALID, None
1111
return (status, key)
1113
def verify_revision_signatures(self, revision_ids, gpg_strategy):
1114
"""Verify revision signatures for a number of revisions.
1116
:param revision_id: the revision to verify
1117
        :gpg_strategy: the GPGStrategy object to use
1118
:return: Iterator over tuples with revision id, result and keys
1120
with self.lock_read():
1121
for revid in revision_ids:
1122
(result, key) = self.verify_revision_signature(revid, gpg_strategy)
1123
yield revid, result, key
1125
def has_signature_for_revision_id(self, revision_id):
1126
"""Query for a revision signature for revision_id in the repository."""
1127
raise NotImplementedError(self.has_signature_for_revision_id)
1129
def get_signature_text(self, revision_id):
1130
"""Return the text for a signature."""
1131
raise NotImplementedError(self.get_signature_text)
1133
def check(self, revision_ids=None, callback_refs=None, check_repo=True):
1134
"""Check consistency of all history of given revision_ids.
1136
Different repository implementations should override _check().
1138
:param revision_ids: A non-empty list of revision_ids whose ancestry
1139
will be checked. Typically the last revision_id of a branch.
1140
:param callback_refs: A dict of check-refs to resolve and callback
1141
the check/_check method on the items listed as wanting the ref.
1143
:param check_repo: If False do not check the repository contents, just
1144
calculate the data callback_refs requires and call them back.
1146
return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
1147
check_repo=check_repo)
1149
def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
1150
raise NotImplementedError(self.check)
1152
def _warn_if_deprecated(self, branch=None):
1153
if not self._format.is_deprecated():
1155
global _deprecation_warning_done
1156
if _deprecation_warning_done:
1160
conf = config.GlobalStack()
1162
conf = branch.get_config_stack()
1163
if 'format_deprecation' in conf.get('suppress_warnings'):
1165
warning("Format %s for %s is deprecated -"
1166
" please use 'brz upgrade' to get better performance"
1167
% (self._format, self.controldir.transport.base))
1169
_deprecation_warning_done = True
1171
def supports_rich_root(self):
1172
return self._format.rich_root_data
1174
def _check_ascii_revisionid(self, revision_id, method):
1175
"""Private helper for ascii-only repositories."""
1176
# weave repositories refuse to store revisionids that are non-ascii.
1177
if revision_id is not None:
1178
# weaves require ascii revision ids.
1179
if isinstance(revision_id, text_type):
1181
revision_id.encode('ascii')
1182
except UnicodeEncodeError:
1183
raise errors.NonAsciiRevisionId(method, self)
1186
revision_id.decode('ascii')
1187
except UnicodeDecodeError:
1188
raise errors.NonAsciiRevisionId(method, self)
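

# Illustrative sketch (not part of the original module): opening a repository
# and reading one revision under a read lock, using only methods defined on
# Repository above. 'base' and 'revision_id' are caller-supplied placeholders.
def _example_read_revision(base, revision_id):
    repo = Repository.open(base)
    with repo.lock_read():
        if not repo.has_revision(revision_id):
            return None
        rev = repo.get_revision(revision_id)
        tree = repo.revision_tree(revision_id)
        return rev, tree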
1191
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
1192
"""Repository format registry."""
1194
def get_default(self):
1195
"""Return the current default format."""
1196
return controldir.format_registry.make_controldir('default').repository_format
1199
network_format_registry = registry.FormatRegistry()
1200
"""Registry of formats indexed by their network name.
1202
The network name for a repository format is an identifier that can be used when
1203
referring to formats with smart server operations. See
1204
RepositoryFormat.network_name() for more detail.
1208
format_registry = RepositoryFormatRegistry(network_format_registry)
1209
"""Registry of formats, indexed by their BzrDirMetaFormat format string.
1211
This can contain either format instances themselves, or classes/factories that
1212
can be called to obtain one.
1216
#####################################################################
1217
# Repository Formats
1219
class RepositoryFormat(controldir.ControlComponentFormat):
1220
"""A repository format.
1222
Formats provide four things:
1223
* An initialization routine to construct repository data on disk.
1224
    * an optional format string which is used when the BzrDir supports
1226
* an open routine which returns a Repository instance.
1227
* A network name for referring to the format in smart server RPC
1230
There is one and only one Format subclass for each on-disk format. But
1231
there can be one Repository subclass that is used for several different
1232
formats. The _format attribute on a Repository instance can be used to
1233
determine the disk format.
1235
Formats are placed in a registry by their format string for reference
1236
during opening. These should be subclasses of RepositoryFormat for
1239
Once a format is deprecated, just deprecate the initialize and open
1240
methods on the format class. Do not deprecate the object, as the
1241
object may be created even when a repository instance hasn't been
1244
Common instance attributes:
1245
_matchingcontroldir - the controldir format that the repository format was
1246
originally written to work with. This can be used if manually
1247
constructing a bzrdir and repository, or more commonly for test suite
1251
# Set to True or False in derived classes. True indicates that the format
1252
# supports ghosts gracefully.
1253
supports_ghosts = None
1254
# Can this repository be given external locations to lookup additional
1255
# data. Set to True or False in derived classes.
1256
supports_external_lookups = None
1257
# Does this format support CHK bytestring lookups. Set to True or False in
1259
supports_chks = None
1260
# Should fetch trigger a reconcile after the fetch? Only needed for
1261
# some repository formats that can suffer internal inconsistencies.
1262
_fetch_reconcile = False
1263
# Does this format have < O(tree_size) delta generation. Used to hint what
1264
# code path for commit, amongst other things.
1266
# Does doing a pack operation compress data? Useful for the pack UI command
1267
# (so if there is one pack, the operation can still proceed because it may
1268
# help), and for fetching when data won't have come from the same
1270
pack_compresses = False
1271
# Does the repository storage understand references to trees?
1272
supports_tree_reference = None
1273
# Is the format experimental ?
1274
experimental = False
1275
# Does this repository format escape funky characters, or does it create
1276
# files with similar names as the versioned files in its contents on disk
1278
supports_funky_characters = None
1279
# Does this repository format support leaving locks?
1280
supports_leaving_lock = None
1281
# Does this format support the full VersionedFiles interface?
1282
supports_full_versioned_files = None
1283
# Does this format support signing revision signatures?
1284
supports_revision_signatures = True
1285
# Can the revision graph have incorrect parents?
1286
revision_graph_can_have_wrong_parents = None
1287
# Does this format support setting revision ids?
1288
supports_setting_revision_ids = True
1289
# Does this format support rich root data?
1290
rich_root_data = None
1291
# Does this format support explicitly versioned directories?
1292
supports_versioned_directories = None
1293
# Can other repositories be nested into one of this format?
1294
supports_nesting_repositories = None
1295
# Is it possible for revisions to be present without being referenced
1297
supports_unreferenced_revisions = None
1298
# Does this format store the current Branch.nick in a revision when
1300
supports_storing_branch_nick = True
1301
# Does the format support overriding the transport to use
1302
supports_overriding_transport = True
1303
# Does the format support setting custom revision properties?
1304
supports_custom_revision_properties = True
1305
# Does the format record per-file revision metadata?
1306
records_per_file_revision = True
1309
return "%s()" % self.__class__.__name__
1311
def __eq__(self, other):
1312
# format objects are generally stateless
1313
return isinstance(other, self.__class__)
1315
def __ne__(self, other):
1316
return not self == other
1318
def get_format_description(self):
1319
"""Return the short description for this format."""
1320
raise NotImplementedError(self.get_format_description)
1322
def initialize(self, controldir, shared=False):
1323
"""Initialize a repository of this format in controldir.
1325
:param controldir: The controldir to put the new repository in it.
1326
:param shared: The repository should be initialized as a sharable one.
1327
:returns: The new repository object.
1329
        This may raise UninitializableFormat if shared repositories are not
        compatible with the controldir.
1332
raise NotImplementedError(self.initialize)
1334
def is_supported(self):
1335
"""Is this format supported?
1337
Supported formats must be initializable and openable.
1338
Unsupported formats may not support initialization or committing or
1339
some other features depending on the reason for not being supported.
1343
def is_deprecated(self):
1344
"""Is this format deprecated?
1346
Deprecated formats may trigger a user-visible warning recommending
1347
the user to upgrade. They are still fully supported.
1351
def network_name(self):
1352
"""A simple byte string uniquely identifying this format for RPC calls.
1354
MetaDir repository formats use their disk format string to identify the
1355
repository over the wire. All in one formats such as bzr < 0.8, and
1356
foreign formats like svn/git and hg should use some marker which is
1357
unique and immutable.
1359
raise NotImplementedError(self.network_name)
1361
def check_conversion_target(self, target_format):
1362
if self.rich_root_data and not target_format.rich_root_data:
1363
raise errors.BadConversionTarget(
1364
'Does not support rich root data.', target_format,
1366
if (self.supports_tree_reference
1367
and not getattr(target_format, 'supports_tree_reference', False)):
1368
raise errors.BadConversionTarget(
1369
'Does not support nested trees', target_format,
1372
def open(self, controldir, _found=False):
1373
"""Return an instance of this format for a controldir.
1375
_found is a private parameter, do not use it.
1377
raise NotImplementedError(self.open)
1379
def _run_post_repo_init_hooks(self, repository, controldir, shared):
1380
from .controldir import ControlDir, RepoInitHookParams
1381
hooks = ControlDir.hooks['post_repo_init']
1384
params = RepoInitHookParams(repository, self, controldir, shared)
1389
# formats which have no format string are not discoverable or independently
1390
# creatable on disk, so are not registered in format_registry. They're
1391
# all in breezy.bzr.knitrepo now. When an instance of one of these is
1392
# needed, it's constructed directly by the ControlDir. Non-native formats where
1393
# the repository is not separately opened are similar.
1395
format_registry.register_lazy(
1396
b'Bazaar-NG Knit Repository Format 1',
1397
'breezy.bzr.knitrepo',
1398
'RepositoryFormatKnit1',
1401
format_registry.register_lazy(
1402
b'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
1403
'breezy.bzr.knitrepo',
1404
'RepositoryFormatKnit3',
1407
format_registry.register_lazy(
1408
b'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
1409
'breezy.bzr.knitrepo',
1410
'RepositoryFormatKnit4',
1413
# Pack-based formats. There is one format for pre-subtrees, and one for
1414
# post-subtrees to allow ease of testing.
1415
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
1416
format_registry.register_lazy(
1417
b'Bazaar pack repository format 1 (needs bzr 0.92)\n',
1418
'breezy.bzr.knitpack_repo',
1419
'RepositoryFormatKnitPack1',
1421
format_registry.register_lazy(
1422
b'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
1423
'breezy.bzr.knitpack_repo',
1424
'RepositoryFormatKnitPack3',
1426
format_registry.register_lazy(
1427
b'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
1428
'breezy.bzr.knitpack_repo',
1429
'RepositoryFormatKnitPack4',
1431
format_registry.register_lazy(
1432
b'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
1433
'breezy.bzr.knitpack_repo',
1434
'RepositoryFormatKnitPack5',
1436
format_registry.register_lazy(
1437
b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
1438
'breezy.bzr.knitpack_repo',
1439
'RepositoryFormatKnitPack5RichRoot',
1441
format_registry.register_lazy(
1442
b'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
1443
'breezy.bzr.knitpack_repo',
1444
'RepositoryFormatKnitPack5RichRootBroken',
1446
format_registry.register_lazy(
1447
b'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
1448
'breezy.bzr.knitpack_repo',
1449
'RepositoryFormatKnitPack6',
1451
format_registry.register_lazy(
1452
b'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
1453
'breezy.bzr.knitpack_repo',
1454
'RepositoryFormatKnitPack6RichRoot',
1456
format_registry.register_lazy(
1457
b'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
1458
'breezy.bzr.groupcompress_repo',
1459
'RepositoryFormat2a',
1462
# Development formats.
1463
# Check their docstrings to see if/when they are obsolete.
1464
format_registry.register_lazy(
1465
(b"Bazaar development format 2 with subtree support "
1466
b"(needs bzr.dev from before 1.8)\n"),
1467
'breezy.bzr.knitpack_repo',
1468
'RepositoryFormatPackDevelopment2Subtree',
1470
format_registry.register_lazy(
1471
b'Bazaar development format 8\n',
1472
'breezy.bzr.groupcompress_repo',
1473
'RepositoryFormat2aSubtree',
1477
class InterRepository(InterObject):
1478
"""This class represents operations taking place between two repositories.
1480
Its instances have methods like copy_content and fetch, and contain
1481
references to the source and target repositories these operations can be
1484
Often we will provide convenience methods on 'repository' which carry out
1485
operations with another repository - they will always forward to
1486
InterRepository.get(other).method_name(parameters).
1490
"""The available optimised InterRepository types."""
1492
def copy_content(self, revision_id=None):
1493
"""Make a complete copy of the content in self into destination.
1495
        This is a destructive operation! Do not use it on existing
        repositories.
1498
:param revision_id: Only copy the content needed to construct
1499
revision_id and its parents.
1501
with self.lock_write():
1503
self.target.set_make_working_trees(
1504
self.source.make_working_trees())
1505
except NotImplementedError:
1507
self.target.fetch(self.source, revision_id=revision_id)
1509
def fetch(self, revision_id=None, find_ghosts=False):
1510
"""Fetch the content required to construct revision_id.
1512
The content is copied from self.source to self.target.
1514
:param revision_id: if None all content is copied, if NULL_REVISION no
1518
raise NotImplementedError(self.fetch)
1520
def search_missing_revision_ids(
1521
self, find_ghosts=True, revision_ids=None, if_present_ids=None,
1523
"""Return the revision ids that source has that target does not.
1525
:param revision_ids: return revision ids included by these
1526
revision_ids. NoSuchRevision will be raised if any of these
1527
revisions are not present.
1528
:param if_present_ids: like revision_ids, but will not cause
1529
NoSuchRevision if any of these are absent, instead they will simply
1530
not be in the result. This is useful for e.g. finding revisions
1531
to fetch for tags, which may reference absent revisions.
1532
:param find_ghosts: If True find missing revisions in deep history
1533
rather than just finding the surface difference.
1534
:param limit: Maximum number of revisions to return, topologically
1536
:return: A breezy.graph.SearchResult.
1538
raise NotImplementedError(self.search_missing_revision_ids)
1541
def _same_model(source, target):
1542
"""True if source and target have the same data representation.
1544
Note: this is always called on the base class; overriding it in a
1545
subclass will have no effect.
1548
InterRepository._assert_same_model(source, target)
1550
except errors.IncompatibleRepositories as e:
1554
def _assert_same_model(source, target):
1555
"""Raise an exception if two repositories do not use the same model.
1557
if source.supports_rich_root() != target.supports_rich_root():
1558
raise errors.IncompatibleRepositories(source, target,
1559
"different rich-root support")
1560
if source._serializer != target._serializer:
1561
raise errors.IncompatibleRepositories(source, target,
1562
"different serializers")
1565
class CopyConverter(object):
1566
"""A repository conversion tool which just performs a copy of the content.
1568
This is slow but quite reliable.
1571
def __init__(self, target_format):
1572
"""Create a CopyConverter.
1574
:param target_format: The format the resulting repository should be.
1576
self.target_format = target_format
1578
def convert(self, repo, pb):
1579
"""Perform the conversion of to_convert, giving feedback via pb.
1581
:param to_convert: The disk object to convert.
1582
:param pb: a progress bar to use for progress information.
1584
with ui.ui_factory.nested_progress_bar() as pb:
1587
# this is only useful with metadir layouts - separated repo content.
1588
# trigger an assertion if not such
1589
repo._format.get_format_string()
1590
self.repo_dir = repo.controldir
1591
pb.update(gettext('Moving repository to repository.backup'))
1592
self.repo_dir.transport.move('repository', 'repository.backup')
1593
backup_transport = self.repo_dir.transport.clone(
1594
'repository.backup')
1595
repo._format.check_conversion_target(self.target_format)
1596
self.source_repo = repo._format.open(self.repo_dir,
1598
_override_transport=backup_transport)
1599
pb.update(gettext('Creating new repository'))
1600
converted = self.target_format.initialize(self.repo_dir,
1601
self.source_repo.is_shared())
1602
converted.lock_write()
1604
pb.update(gettext('Copying content'))
1605
self.source_repo.copy_content_into(converted)
1608
pb.update(gettext('Deleting old repository content'))
1609
self.repo_dir.transport.delete_tree('repository.backup')
1610
ui.ui_factory.note(gettext('repository converted'))
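

# Illustrative sketch (assumption): driving a CopyConverter by hand. Normally
# the upgrade machinery constructs the converter and supplies the progress bar;
# 'repo' must live in a metadir layout for convert() to succeed.
def _example_copy_convert(repo, target_format, pb=None):
    converter = CopyConverter(target_format)
    converter.convert(repo, pb)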
1613
def _strip_NULL_ghosts(revision_graph):
1614
"""Also don't use this. more compatibility code for unmigrated clients."""
1615
# Filter ghosts, and null:
1616
if _mod_revision.NULL_REVISION in revision_graph:
1617
del revision_graph[_mod_revision.NULL_REVISION]
1618
for key, parents in viewitems(revision_graph):
1619
revision_graph[key] = tuple(parent for parent in parents if parent
1621
return revision_graph
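

# Behaviour sketch (based on the comments above): the b'null:' key is removed
# and any parent that is a ghost or b'null:' is dropped from the remaining
# parent tuples, e.g. {b'rev2': (b'rev1', b'null:'), b'rev1': ()} becomes
# {b'rev2': (b'rev1',), b'rev1': ()}.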
1624
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
1625
stop_revision=None):
1626
"""Extend the partial history to include a given index
1628
If a stop_index is supplied, stop when that index has been reached.
1629
If a stop_revision is supplied, stop when that revision is
1630
encountered. Otherwise, stop when the beginning of history is
1633
:param stop_index: The index which should be present. When it is
1634
present, history extension will stop.
1635
:param stop_revision: The revision id which should be present. When
1636
it is encountered, history extension will stop.
1638
start_revision = partial_history_cache[-1]
1639
graph = repo.get_graph()
1640
iterator = graph.iter_lefthand_ancestry(start_revision,
1641
(_mod_revision.NULL_REVISION,))
1643
# skip the last revision in the list
1646
if (stop_index is not None and
1647
len(partial_history_cache) > stop_index):
1649
if partial_history_cache[-1] == stop_revision:
1651
revision_id = next(iterator)
1652
partial_history_cache.append(revision_id)
1653
except StopIteration:
1658
class _LazyListJoin(object):
1659
"""An iterable yielding the contents of many lists as one list.
1661
Each iterator made from this will reflect the current contents of the lists
1662
at the time the iterator is made.
1664
This is used by Repository's _make_parents_provider implementation so that
1667
pp = repo._make_parents_provider() # uses a list of fallback repos
1668
pp.add_fallback_repository(other_repo) # appends to that list
1669
result = pp.get_parent_map(...)
1670
# The result will include revs from other_repo
1673
def __init__(self, *list_parts):
1674
self.list_parts = list_parts
1678
    def __iter__(self):
        full_list = []
        for list_part in self.list_parts:
1679
full_list.extend(list_part)
1680
return iter(full_list)
1683
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,