--- bzrlib/repository.py
+++ bzrlib/repository.py
@@ -1 +1 @@
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
+# Copyright (C) 2005-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -51 +52 @@
 from bzrlib.testament import Testament
@@ -54 +60 @@
 from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
 from bzrlib.inter import InterObject
 from bzrlib.inventory import (
@@ -62 +68 @@
-from bzrlib.lock import _RelockDebugMixin
-from bzrlib import registry
+from bzrlib.recordcounter import RecordCounter
+from bzrlib.lock import _RelockDebugMixin, LogicalLockResult
 from bzrlib.trace import (
     log_exception_quietly, note, mutter, mutter_callsite, warning)
@@ -69 +75 @@
 _deprecation_warning_done = False
 
 
+class IsInWriteGroupError(errors.InternalBzrError):
+
+    _fmt = "May not refresh_data of repo %(repo)s while in a write group."
+
+    def __init__(self, repo):
+        errors.InternalBzrError.__init__(self, repo=repo)
+
+
 class CommitBuilder(object):
     """Provides an interface to build up a commit.
@@ -230 +244 @@
     def _gen_revision_id(self):
         """Return new revision-id."""
-        return generate_ids.gen_revision_id(self._config.username(),
-                                            self._timestamp)
+        return generate_ids.gen_revision_id(self._committer, self._timestamp)
 
     def _generate_revision_if_needed(self):
         """Create a revision id if None was supplied.
@@ -277 +290 @@
         :param tree: The tree which is being committed.
         """
-        # NB: if there are no parents then this method is not called, so no
-        # need to guard on parents having length.
+        if len(self.parents) == 0:
+            raise errors.RootMissing()
         entry = entry_factory['directory'](tree.path2id(''), '',
             None)
         entry.revision = self._new_revision_id
@@ -422 +435 @@
                 # we don't need to commit this, because the caller already
                 # determined that an existing revision of this file is
-                # appropriate. If its not being considered for committing then
+                # appropriate. If it's not being considered for committing then
                 # it and all its parents to the root must be unaltered so
                 # no-change against the basis.
                 if ie.revision == self._new_revision_id:
@@ -743 +756 @@
             # after iter_changes examines and decides it has changed,
             # we will unconditionally record a new version even if some
             # other process reverts it while commit is running (with
-            # the revert happening after iter_changes did it's
+            # the revert happening after iter_changes did its
@@ -749 +762 @@
                         entry.executable = True
@@ -858 +871 @@
         # versioned roots do not change unless the tree found a change.
@@ +874 @@
+class RepositoryWriteLockResult(LogicalLockResult):
+    """The result of write locking a repository.
+
+    :ivar repository_token: The token obtained from the underlying lock, or
+        None.
+    :ivar unlock: A callable which will unlock the lock.
+    """
+
+    def __init__(self, unlock, repository_token):
+        LogicalLockResult.__init__(self, unlock)
+        self.repository_token = repository_token
+
+    def __repr__(self):
+        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
+            self.unlock)
@@ -861 +891 @@
 ######################################################################
 # Repositories
 
 
-class Repository(_RelockDebugMixin):
+class Repository(_RelockDebugMixin, controldir.ControlComponent):
     """Repository holding history for one or more branches.
 
     The repository holds and retrieves historical information including
@@ -915 +945 @@
         pointing to .bzr/repository.
     """
 
-    # What class to use for a CommitBuilder. Often its simpler to change this
+    # What class to use for a CommitBuilder. Often it's simpler to change this
     # in a Repository class subclass rather than to override
     # get_commit_builder.
     _commit_builder_class = CommitBuilder
@@ -1016 +1046 @@
                 " id and insertion revid (%r, %r)"
                 % (inv.revision_id, revision_id))
         if inv.root is None:
-            raise AssertionError()
+            raise errors.RootMissing()
         return self._add_inventory_checked(revision_id, inv, parents)
 
     def _add_inventory_checked(self, revision_id, inv, parents):
         """Add inv to the repository after checking the inputs.
 
         This function can be overridden to allow different inventory styles.
 
         :seealso: add_inventory, for the contract.
         """
-        inv_lines = self._serialise_inventory_to_lines(inv)
+        inv_lines = self._serializer.write_inventory_to_lines(inv)
         return self._inventory_add_lines(revision_id, parents,
             inv_lines, check_content=False)
@@ -1239 +1269 @@
         """Check a single text from this repository."""
         if kind == 'inventories':
             rev_id = record.key[0]
-            inv = self.deserialise_inventory(rev_id,
+            inv = self._deserialise_inventory(rev_id,
                 record.get_bytes_as('fulltext'))
             if last_object is not None:
                 delta = inv._make_delta(last_object)
@@ -1290 +1320 @@
         :param _format: The format of the repository on disk.
         :param a_bzrdir: The BzrDir of the repository.
-
-        In the future we will have a single api for all stores for
-        getting file texts, inventories and revisions, then
-        this construct will accept instances of those things.
         """
+        # In the future we will have a single api for all stores for
+        # getting file texts, inventories and revisions, then
+        # this construct will accept instances of those things.
         super(Repository, self).__init__()
         self._format = _format
         # the following are part of the public API for Repository:
@@ -1314 +1343 @@
         # rather copying them?
         self._safe_to_return_from_cache = False
@@ +1346 @@
+    @property
+    def user_transport(self):
+        return self.bzrdir.user_transport
+
+    @property
+    def control_transport(self):
+        return self._transport
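
These properties implement the controldir.ControlComponent interface that Repository now inherits (see the class-statement hunk above): user_transport is rooted at the directory the user sees, control_transport at the repository's own control files. A sketch of the distinction (path and output are illustrative):

    from bzrlib.branch import Branch

    repo = Branch.open('.').repository
    print repo.user_transport.base     # e.g. .../project/
    print repo.control_transport.base  # e.g. .../project/.bzr/repository/
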
@@ -1317 +1354 @@
     def __repr__(self):
         if self._fallback_repositories:
             return '%s(%r, fallback_repositories=%r)' % (
@@ -1367 +1404 @@
         data during reads, and allows a 'write_group' to be obtained. Write
         groups must be used for actual data insertion.
 
+        A token should be passed in if you know that you have locked the object
+        some other way, and need to synchronise this object's state with that
+        fact.
+
+        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+
         :param token: if this is already locked, then lock_write will fail
             unless the token matches the existing lock.
         :returns: a token if this instance supports tokens, otherwise None.
         :raises TokenLockingNotSupported: when a token is given but this
             instance doesn't support using token locks.
         :raises MismatchedToken: if the specified token doesn't match the token
             of the existing lock.
         :seealso: start_write_group.
-
-        A token should be passed in if you know that you have locked the object
-        some other way, and need to synchronise this object's state with that
-        fact.
-
-        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+        :return: A RepositoryWriteLockResult.
         """
         locked = self.is_locked()
-        result = self.control_files.lock_write(token=token)
+        token = self.control_files.lock_write(token=token)
         if not locked:
             self._warn_if_deprecated()
             self._note_lock('w')
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
-        return result
+        return RepositoryWriteLockResult(self.unlock, token)
@@ -1396 +1434 @@
     def lock_read(self):
+        """Lock the repository for read operations.
+
+        :return: An object with an unlock method which will release the lock
+            obtained.
+        """
         locked = self.is_locked()
         self.control_files.lock_read()
@@ -1468 +1512 @@
         # now gather global repository information
         # XXX: This is available for many repos regardless of listability.
-        if self.bzrdir.root_transport.listable():
+        if self.user_transport.listable():
             # XXX: do we want to __define len__() ?
             # Maybe the versionedfiles object should provide a different
             # method to get the number of keys.
@@ -1483 +1527 @@
         :param using: If True, list only branches using this repository.
         """
         if using and not self.is_shared():
-            try:
-                return [self.bzrdir.open_branch()]
-            except errors.NotBranchError:
-                return []
+            return self.bzrdir.list_branches()
         class Evaluator(object):
 
             def __init__(self):
@@ -1501 +1542 @@
                     except errors.NoRepositoryPresent:
                         pass
                     else:
-                        return False, (None, repository)
+                        return False, ([], repository)
                 self.first_call = False
-                try:
-                    value = (bzrdir.open_branch(), None)
-                except errors.NotBranchError:
-                    value = (None, None)
+                value = (bzrdir.list_branches(), None)
                 return True, value
 
-        branches = []
-        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
-                self.bzrdir.root_transport, evaluate=Evaluator()):
-            if branch is not None:
-                branches.append(branch)
+        ret = []
+        for branches, repository in bzrdir.BzrDir.find_bzrdirs(
+                self.user_transport, evaluate=Evaluator()):
+            if branches is not None:
+                ret.extend(branches)
             if not using and repository is not None:
-                branches.extend(repository.find_branches())
-        return branches
+                ret.extend(repository.find_branches())
+        return ret
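
Because bzrdir.list_branches() returns every colocated branch rather than at most one, the evaluator now passes lists around and the accumulator extends instead of appends. The public contract is unchanged; a sketch of a caller:

    from bzrlib.branch import Branch

    repo = Branch.open('.').repository  # illustrative
    for branch in repo.find_branches(using=True):
        print branch.base
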
@@ -1521 +1559 @@
     @needs_read_lock
     def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
@@ -1631 +1669 @@
         return missing_keys
 
     def refresh_data(self):
-        """Re-read any data needed to to synchronise with disk.
+        """Re-read any data needed to synchronise with disk.
 
         This method is intended to be called after another repository instance
         (such as one used by a smart server) has inserted data into the
-        repository. It may not be called during a write group, but may be
-        called at any other time.
+        repository. On all repositories this will work outside of write groups.
+        Some repository formats (pack and newer for bzrlib native formats)
+        support refresh_data inside write groups. If called inside a write
+        group on a repository that does not support refreshing in a write group
+        IsInWriteGroupError will be raised.
         """
-        if self.is_in_write_group():
-            raise errors.InternalBzrError(
-                "May not refresh_data while in a write group.")
         self._refresh_data()
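
The unconditional error is gone from the base class: formats that cannot refresh inside a write group are now expected to raise the new IsInWriteGroupError from their _refresh_data implementation. A defensive caller might look like this (a sketch; assumes the repository is already locked):

    from bzrlib import repository

    def refresh_if_possible(repo):
        try:
            repo.refresh_data()
        except repository.IsInWriteGroupError:
            # This format cannot refresh mid-group; finish the write
            # group first, then refresh.
            pass
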
@@ -1646 +1684 @@
     def resume_write_group(self, tokens):
@@ -1685 +1723 @@
                 "May not fetch while in a write group.")
         # fast path same-url fetch operations
         # TODO: lift out to somewhere common with RemoteRepository
-        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
+        # <https://bugs.launchpad.net/bzr/+bug/401646>
         if (self.has_same_location(source)
             and fetch_spec is None
             and self._has_same_fallbacks(source)):
@@ -1900 +1938 @@
             rev = self._serializer.read_revision_from_string(text)
             yield (revid, rev)
 
-    @needs_read_lock
-    def get_revision_xml(self, revision_id):
-        # TODO: jam 20070210 This shouldn't be necessary since get_revision
-        #       would have already do it.
-        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
-        # TODO: this can't just be replaced by:
-        # return self._serializer.write_revision_to_string(
-        #     self.get_revision(revision_id))
-        # as cStringIO preservers the encoding unlike write_revision_to_string
-        # or some other call down the path.
-        rev = self.get_revision(revision_id)
-        rev_tmp = cStringIO.StringIO()
-        # the current serializer..
-        self._serializer.write_revision(rev, rev_tmp)
-        rev_tmp.seek(0)
-        return rev_tmp.getvalue()
-
     def get_deltas_for_revisions(self, revisions, specific_fileids=None):
         """Produce a generator of revision deltas.
@@ -2165 +2186 @@
         selected_keys = set((revid,) for revid in revision_ids)
         w = _inv_weave or self.inventories
-        pb = ui.ui_factory.nested_progress_bar()
-        try:
-            return self._find_file_ids_from_xml_inventory_lines(
-                w.iter_lines_added_or_present_in_keys(
-                    selected_keys, pb=pb),
-                selected_keys)
-        finally:
-            pb.finished()
+        return self._find_file_ids_from_xml_inventory_lines(
+            w.iter_lines_added_or_present_in_keys(
+                selected_keys, pb=None),
+            selected_keys)
 
     def iter_files_bytes(self, desired_files):
         """Iterate through file versions.
@@ -2387 +2404 @@
         """single-document based inventory iteration."""
         inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
         for text, revision_id in inv_xmls:
-            yield self.deserialise_inventory(revision_id, text)
+            yield self._deserialise_inventory(revision_id, text)
 
     def _iter_inventory_xmls(self, revision_ids, ordering):
         if ordering is None:
@@ -2425 +2442 @@
                 next_key = None
@@ -2428 +2445 @@
-    def deserialise_inventory(self, revision_id, xml):
+    def _deserialise_inventory(self, revision_id, xml):
         """Transform the xml into an inventory object.
 
         :param revision_id: The expected revision id of the inventory.
@@ -2439 +2456 @@
                 result.revision_id, revision_id))
         return result
 
-    def serialise_inventory(self, inv):
-        return self._serializer.write_inventory_to_string(inv)
-
-    def _serialise_inventory_to_lines(self, inv):
-        return self._serializer.write_inventory_to_lines(inv)
-
     def get_serializer_format(self):
         return self._serializer.format_num
 
     @needs_read_lock
-    def get_inventory_xml(self, revision_id):
-        """Get inventory XML as a file object."""
+    def _get_inventory_xml(self, revision_id):
+        """Get serialized inventory as a string."""
         texts = self._iter_inventory_xmls([revision_id], 'unordered')
         try:
             text, revision_id = texts.next()
         except StopIteration:
             raise errors.HistoryMissing(self, 'inventory', revision_id)
         return text
 
-    @needs_read_lock
-    def get_inventory_sha1(self, revision_id):
-        """Return the sha1 hash of the inventory entry
-        """
-        return self.get_revision(revision_id).inventory_sha1
-
     def get_rev_id_for_revno(self, revno, known_pair):
         """Return the revision id of a revno, given a later (revno, revid)
         pair in the same history.
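
With the XML helpers going private (_deserialise_inventory, _get_inventory_xml) and get_inventory_sha1 dropped, external code should use the object-level API rather than round-tripping XML. A sketch (the revision id is illustrative):

    repo.lock_read()
    try:
        sha1 = repo.get_revision('some-revid').inventory_sha1
        inv = repo.get_inventory('some-revid')    # Inventory object
        tree = repo.revision_tree('some-revid')   # full RevisionTree
    finally:
        repo.unlock()
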
@@ -2506 +2511 @@
         ancestors will be traversed.
         """
         graph = self.get_graph()
-        next_id = revision_id
-        while True:
-            if next_id in (None, _mod_revision.NULL_REVISION):
-                return
-            try:
-                parents = graph.get_parent_map([next_id])[next_id]
-            except KeyError:
-                raise errors.RevisionNotPresent(next_id, self)
-            yield next_id
-            if len(parents) == 0:
-                return
-            else:
-                next_id = parents[0]
-
-    @needs_read_lock
-    def get_revision_inventory(self, revision_id):
-        """Return inventory of a past revision."""
-        # TODO: Unify this with get_inventory()
-        # bzr 0.0.6 and later imposes the constraint that the inventory_id
-        # must be the same as its revision, so this is trivial.
-        if revision_id is None:
-            # This does not make sense: if there is no revision,
-            # then it is the current tree inventory surely ?!
-            # and thus get_root_id() is something that looks at the last
-            # commit on the branch, and the get_root_id is an inventory check.
-            raise NotImplementedError
-            # return Inventory(self.get_root_id())
-        else:
-            return self.get_inventory(revision_id)
+        stop_revisions = (None, _mod_revision.NULL_REVISION)
+        return graph.iter_lefthand_ancestry(revision_id, stop_revisions)
 
     def is_shared(self):
         """Return True if this repository is flagged as a shared repository."""
@@ -2575 +2553 @@
             return RevisionTree(self, Inventory(root_id=None),
                                 _mod_revision.NULL_REVISION)
         else:
-            inv = self.get_revision_inventory(revision_id)
+            inv = self.get_inventory(revision_id)
             return RevisionTree(self, inv, revision_id)
 
     def revision_trees(self, revision_ids):
@@ -2634 +2612 @@
         keys = tsort.topo_sort(parent_map)
         return [None] + list(keys)
 
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
 
         This operation only makes sense for some repository types. For other
         types it should be a no-op that just returns.
 
         This stub method does not require a lock, but subclasses should use
-        @needs_write_lock as this is a long running call its reasonable to
+        @needs_write_lock as this is a long running call it's reasonable to
         implicitly lock for the user.
 
         :param hint: If not supplied, the whole repository is packed.
             If supplied, the repository may use the hint parameter as a
             hint for the parts of a repository to pack. A hint can be
             obtained from the result of commit_write_group(). Out of
             date hints are simply ignored, because concurrent operations
             can obsolete them rapidly.
+
+        :param clean_obsolete_packs: Clean obsolete packs immediately after
def get_transaction(self):
2680
2661
def _make_parents_provider(self):
2665
def get_known_graph_ancestry(self, revision_ids):
2666
"""Return the known graph for a set of revision ids and their ancestors.
2668
st = static_tuple.StaticTuple
2669
revision_keys = [st(r_id).intern() for r_id in revision_ids]
2670
known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
2671
return graph.GraphThunkIdsToKeys(known_graph)
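
The method interns the revision ids as StaticTuple keys and wraps the resulting KnownGraph so callers keep working in plain revision ids. A sketch of the typical heads() use, assuming 'rev-a' and 'rev-b' are revisions in the repository:

    repo.lock_read()
    try:
        kg = repo.get_known_graph_ancestry(['rev-a', 'rev-b'])
        print kg.heads(['rev-a', 'rev-b'])
    finally:
        repo.unlock()
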
@@ -2683 +2673 @@
     def get_graph(self, other_repository=None):
         """Return the graph walker for this repository format"""
         parents_provider = self._make_parents_provider()
@@ -3084 +3074 @@
     pack_compresses = False
     # Does the repository inventory storage understand references to trees?
     supports_tree_reference = None
+    # Is the format experimental ?
+    experimental = False
 
     def __repr__(self):
-        return "<%s>" % self.__class__.__name__
+        return "%s()" % self.__class__.__name__
 
     def __eq__(self, other):
         # format objects are generally stateless
@@ -3211 +3203 @@
         raise NotImplementedError(self.open)
 
+    def _run_post_repo_init_hooks(self, repository, a_bzrdir, shared):
+        from bzrlib.bzrdir import BzrDir, RepoInitHookParams
+        hooks = BzrDir.hooks['post_repo_init']
+        if not hooks:
+            return
+        params = RepoInitHookParams(repository, self, a_bzrdir, shared)
+        for hook in hooks:
+            hook(params)
@@ -3214 +3215 @@
 class MetaDirRepositoryFormat(RepositoryFormat):
     """Common base class for the new repositories using the metadir layout."""
@@ -3377 +3378 @@
     'bzrlib.repofmt.groupcompress_repo',
     'RepositoryFormat2a',
     )
+format_registry.register_lazy(
+    'Bazaar development format 8\n',
+    'bzrlib.repofmt.groupcompress_repo',
+    'RepositoryFormat2aSubtree',
+    )
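
register_lazy records only the module path and class name, so bzrlib.repofmt.groupcompress_repo is not imported until the format string is actually looked up. Sketch of a lookup, keyed by the on-disk format string:

    from bzrlib.repository import format_registry

    # Triggers the lazy import of the groupcompress_repo module.
    fmt = format_registry.get('Bazaar development format 8\n')
    print fmt.get_format_description()
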
@@ -3382 +3388 @@
 class InterRepository(InterObject):
@@ -3421 +3427 @@
         :param revision_id: if None all content is copied, if NULL_REVISION no
             content is copied.
-        :param pb: optional progress bar to use for progress reports. If not
-            provided a default one will be created.
         :return: None.
         """
-        f = _mod_fetch.RepoFetcher(to_repository=self.target,
+        ui.ui_factory.warn_experimental_format_fetch(self)
+        from bzrlib.fetch import RepoFetcher
+        # See <https://launchpad.net/bugs/456077> asking for a warning here
+        if self.source._format.network_name() != self.target._format.network_name():
+            ui.ui_factory.show_user_warning('cross_format_fetch',
+                from_format=self.source._format,
+                to_format=self.target._format)
+        f = RepoFetcher(to_repository=self.target,
             from_repository=self.source,
             last_revision=revision_id,
             fetch_spec=fetch_spec,
-            pb=pb, find_ghosts=find_ghosts)
+            find_ghosts=find_ghosts)
 
     def _walk_to_common_revisions(self, revision_ids):
         """Walk out from revision_ids in source to revisions target has.
@@ -3830 +3842 @@
                 basis_id, delta, current_revision_id, parents_parents)
             cache[current_revision_id] = parent_tree
 
-    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
+    def _fetch_batch(self, revision_ids, basis_id, cache):
         """Fetch across a few revisions.
 
         :param revision_ids: The revisions to copy
         :param basis_id: The revision_id of a tree that must be in cache, used
             as a basis for delta when no other base is available
         :param cache: A cache of RevisionTrees that we can use.
-        :param a_graph: A Graph object to determine the heads() of the
-            rich-root data stream.
         :return: The revision_id of the last converted tree. The RevisionTree
             for it will be in cache
@@ -3912 +3922 @@
         if root_keys_to_create:
             root_stream = _mod_fetch._new_root_data_stream(
                 root_keys_to_create, self._revision_id_to_root_id, parent_map,
-                self.source, graph=a_graph)
+                self.source)
             to_texts.insert_record_stream(root_stream)
         to_texts.insert_record_stream(from_texts.get_record_stream(
             text_keys, self.target._format._fetch_order,
@@ -3975 +3985 @@
         cache[basis_id] = basis_tree
         del basis_tree # We don't want to hang on to it here
@@ -3978 @@
-        if self._converting_to_rich_root and len(revision_ids) > 100:
-            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
-                                                            revision_ids)
-        else:
-            a_graph = None
@@ -3984 +3990 @@
         for offset in range(0, len(revision_ids), batch_size):
             self.target.start_write_group()
             try:
                 pb.update('Transferring revisions', offset,
                           len(revision_ids))
                 batch = revision_ids[offset:offset+batch_size]
-                basis_id = self._fetch_batch(batch, basis_id, cache,
-                                             a_graph=a_graph)
+                basis_id = self._fetch_batch(batch, basis_id, cache)
             except:
                 self.source._safe_to_return_from_cache = False
                 self.target.abort_write_group()
@@ -4008 +4013 @@
         """See InterRepository.fetch()."""
         if fetch_spec is not None:
             raise AssertionError("Not implemented yet...")
+        ui.ui_factory.warn_experimental_format_fetch(self)
         if (not self.source.supports_rich_root()
             and self.target.supports_rich_root()):
             self._converting_to_rich_root = True
             self._revision_id_to_root_id = {}
         else:
             self._converting_to_rich_root = False
+        # See <https://launchpad.net/bugs/456077> asking for a warning here
+        if self.source._format.network_name() != self.target._format.network_name():
+            ui.ui_factory.show_user_warning('cross_format_fetch',
+                from_format=self.source._format,
+                to_format=self.target._format)
         revision_ids = self.target.search_missing_revision_ids(self.source,
             revision_id, find_ghosts=find_ghosts).get_keys()
         if not revision_ids:
@@ -4054 +4065 @@
             basis_id = first_rev.parent_ids[0]
             # only valid as a basis if the target has it
             self.target.get_revision(basis_id)
-            # Try to get a basis tree - if its a ghost it will hit the
+            # Try to get a basis tree - if it's a ghost it will hit the
             # NoSuchRevision case.
             basis_tree = self.source.revision_tree(basis_id)
         except (IndexError, errors.NoSuchRevision):
@@ -4088 +4099 @@
         :param to_convert: The disk object to convert.
         :param pb: a progress bar to use for progress information.
         """
-        self.pb = pb
+        pb = ui.ui_factory.nested_progress_bar()
         self.count = 0
         self.total = 4
         # this is only useful with metadir layouts - separated repo content.
         # trigger an assertion if not such
         repo._format.get_format_string()
         self.repo_dir = repo.bzrdir
-        self.step('Moving repository to repository.backup')
+        pb.update('Moving repository to repository.backup')
         self.repo_dir.transport.move('repository', 'repository.backup')
         backup_transport = self.repo_dir.transport.clone('repository.backup')
         repo._format.check_conversion_target(self.target_format)
         self.source_repo = repo._format.open(self.repo_dir,
             _found=True,
             _override_transport=backup_transport)
-        self.step('Creating new repository')
+        pb.update('Creating new repository')
         converted = self.target_format.initialize(self.repo_dir,
                                                   self.source_repo.is_shared())
         converted.lock_write()
         try:
-            self.step('Copying content')
+            pb.update('Copying content')
             self.source_repo.copy_content_into(converted)
         finally:
             converted.unlock()
-        self.step('Deleting old repository content')
+        pb.update('Deleting old repository content')
         self.repo_dir.transport.delete_tree('repository.backup')
         ui.ui_factory.note('repository converted')
+        pb.finished()
 
-    def step(self, message):
-        """Update the pb by a step."""
-        self.count +=1
-        self.pb.update(message, self.count, self.total)
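
With the step() helper gone, the converter drives a nested progress bar from ui_factory directly. The same pattern suits any long-running maintenance code (a sketch):

    from bzrlib import ui

    pb = ui.ui_factory.nested_progress_bar()
    try:
        pb.update('Moving repository to repository.backup')
        # ... do the work ...
    finally:
        pb.finished()
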
@@ -4124 +4131 @@
 _unescape_map = {
@@ -4264 +4271 @@
             is_resume = False
         try:
             # locked_insert_stream performs a commit|suspend.
-            return self._locked_insert_stream(stream, src_format, is_resume)
+            return self._locked_insert_stream(stream, src_format,
+                is_resume)
         except:
             self.target_repo.abort_write_group(suppress_errors=True)
@@ -4317 +4325 @@
         # required if the serializers are different only in terms of
         # the inventory.
         if src_serializer == to_serializer:
-            self.target_repo.revisions.insert_record_stream(
-                substream)
+            self.target_repo.revisions.insert_record_stream(substream)
         else:
             self._extract_and_insert_revisions(substream,
                 src_serializer)
@@ -4432 +4439 @@
         """Create a StreamSource streaming from from_repository."""
         self.from_repository = from_repository
         self.to_format = to_format
+        self._record_counter = RecordCounter()
 
     def delta_on_metadata(self):
         """Return True if delta's are permitted on metadata streams.
@@ -4617 +4625 @@
     def _get_convertable_inventory_stream(self, revision_ids,
                                           delta_versus_null=False):
-        # The source is using CHKs, but the target either doesn't or it has a
-        # different serializer. The StreamSink code expects to be able to
+        # The two formats are sufficiently different that there is no fast
+        # path, so we need to send just inventorydeltas, which any
+        # sufficiently modern client can insert into any repository.
+        # The StreamSink code expects to be able to
         # convert on the target, so we need to put bytes-on-the-wire that can
         # be converted. That means inventory deltas (if the remote is <1.19,
         # RemoteStreamSink will fallback to VFS to insert the deltas).