        self._validate_unicode_text(value,
                                    'revision property (%s)' % (key,))
    def _ensure_fallback_inventories(self):
        """Ensure that appropriate inventories are available.

        This only applies to repositories that are stacked, and is about
        ensuring the stacking invariants. Namely, that for any revision that is
        present, we either have all of the file content, or we have the parent
        inventory and the delta file content.
        """
        if not self.repository._fallback_repositories:
            return
        if not self.repository._format.supports_chks:
            raise errors.BzrError("Cannot commit directly to a stacked branch"
                " in pre-2a formats. See "
                "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
        # This is a stacked repo; we need to make sure we have the parent
        # inventories for the parents.
        parent_keys = [(p,) for p in self.parents]
        parent_map = self.repository.inventories._index.get_parent_map(parent_keys)
        missing_parent_keys = set([pk for pk in parent_keys
                                   if pk not in parent_map])
        fallback_repos = list(reversed(self.repository._fallback_repositories))
        missing_keys = [('inventories', pk[0])
                        for pk in missing_parent_keys]
        while missing_keys and fallback_repos:
            fallback_repo = fallback_repos.pop()
            source = fallback_repo._get_source(self.repository._format)
            sink = self.repository._get_sink()
            stream = source.get_stream_for_missing_keys(missing_keys)
            missing_keys = sink.insert_stream_without_locking(stream,
                self.repository._format)
        if missing_keys:
            raise errors.BzrError('Unable to fill in parent inventories for a'
                                  ' stacked branch')
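
    # Illustrative sketch (hypothetical revision ids, hedged): the loop above
    # narrows ``missing_keys`` on every pass through the fallback list, e.g.
    #
    #   missing_keys = [('inventories', 'rev-a'), ('inventories', 'rev-b')]
    #   # after streaming from a fallback that holds rev-a's inventory:
    #   missing_keys == set([('inventories', 'rev-b')])
    #
    # Only if keys are still missing once every fallback has been consulted
    # does the BzrError above get raised.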

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()

# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directory's
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)
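
# Illustrative sketch (hedged; assumes the standard Registry.get() lookup):
# because these pre-0.8 formats reuse the control directory's disk format
# string as their network_name, the wire string can be mapped straight back to
# a repository format, e.g.
#
#   fmt = network_format_registry.get("Bazaar-NG branch, format 5\n")
#   # the weaverepo module is only imported when the entry is resolved,
#   # thanks to register_lazy().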

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
3332
3349
("Bazaar development format 2 with subtree support "
3333
3350
"(needs bzr.dev from before 1.8)\n"),
3334
3351
'bzrlib.repofmt.pack_repo',
3335
3352
'RepositoryFormatPackDevelopment2Subtree',

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )

format_registry.register_lazy(
    'Bazaar development format 8\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
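
# Illustrative sketch (hedged; assumes the standard Registry.get() behaviour of
# resolving lazy entries on first lookup): the keys above are the byte strings
# stored in a repository's .bzr/repository/format file, so opening code can map
# the on-disk bytes straight to a format, e.g.
#
#   entry = format_registry.get(
#       'Bazaar repository format 2a (needs bzr 1.16 or later)\n')
#   # bzrlib.repofmt.groupcompress_repo is imported only at this point, which
#   # keeps plain 'bzr' startup cheap.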
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False
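
    # Illustrative sketch (hypothetical variables, hedged): is_compatible() is
    # what lets the InterRepository optimiser machinery pick this class, e.g.
    #
    #   inter = InterRepository.get(weave_repo_a, weave_repo_b)
    #   # inter is an InterWeaveRepo only when both formats are Weave formats
    #   # (5, 6 or 7); otherwise a more generic optimiser is returned.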

    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('Copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'topological', False))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all the revisions needed to satisfy revision_id in source,
        # but we don't want to stat every file here and there.
        # That is, we want all the revisions the other side needs in order to
        # satisfy revision_id, checked for presence, but not those that we
        # already have locally.
        # So the first step is to get the subset of revisions that satisfy
        # revision_id in source, and then eliminate those that we do already
        # have.
        # This is slow on high latency connections to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # Now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision, as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # We used get_ancestry to determine source_ids, so we are assured
            # that all referenced revisions are present: they are installed in
            # topological order, and the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # If we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
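
# Illustrative sketch (a hypothetical standalone helper, not used by bzrlib):
# the heart of search_missing_revision_ids() above is plain set algebra over
# revision ids, roughly as follows.
def _example_required_revisions(source_ids, target_ids, present_in_target):
    """Return the revision ids the target still needs.

    ``present_in_target`` stands in for the ids that
    _eliminate_revisions_not_present() confirmed really exist in the target.
    """
    source_ids_set = set(source_ids)
    # Existence only has to be checked for ids both sides might share...
    possibly_present = set(target_ids).intersection(source_ids_set)
    # ...and everything not confirmed present in the target must be pulled.
    return source_ids_set.difference(set(present_in_target) & possibly_present)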


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # Now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision, as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # We used get_ancestry to determine source_ids, so we are assured
            # that all referenced revisions are present: they are installed in
            # topological order, and the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # If we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):
                is_resume = False
            try:
                # insert_stream_without_locking performs a commit|suspend.
                missing_keys = self.insert_stream_without_locking(stream,
                                    src_format, is_resume)
                if missing_keys:
                    # Suspend the write group and tell the caller what is
                    # missing. We know we can suspend, or else we would not
                    # have entered this code path. (All repositories that can
                    # handle missing keys can handle suspending a write group.)
                    write_group_tokens = self.target_repo.suspend_write_group()
                    return write_group_tokens, missing_keys
                hint = self.target_repo.commit_write_group()
                to_serializer = self.target_repo._format._serializer
                src_serializer = src_format._serializer
                if (to_serializer != src_serializer and
                    self.target_repo._format.pack_compresses):
                    self.target_repo.pack(hint=hint)
                return [], set()
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
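
    # Illustrative calling pattern (hedged; 'source' and the variable names are
    # hypothetical): a caller that gets missing keys back from insert_stream()
    # is expected to fetch those keys and resume with the returned tokens,
    # roughly:
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       extra = source.get_stream_for_missing_keys(missing)
    #       sink.insert_stream(extra, src_format, tokens)
    #
    # _ensure_fallback_inventories() above calls the unlocked variant directly,
    # since it presumably already holds the repository write lock and an active
    # write group.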

    def insert_stream_without_locking(self, stream, src_format,
                                      is_resume=False):
        """Insert a stream's content into the target repository.

        This assumes that you already have a locked repository and an active
        write group.

        :param src_format: a bzr repository format.
        :param is_resume: Passed down to get_missing_parent_inventories to
            indicate if we should be checking for missing texts at the same
            time.

        :return: A set of keys that are missing.
        """
        if not self.target_repo.is_write_locked():
            raise errors.ObjectNotLocked(self)
        if not self.target_repo.is_in_write_group():
            raise errors.BzrError('you must already be in a write group')
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None