        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
            or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object,
                                current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
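
# Illustrative sketch (not bzrlib code): the loop above is a worklist
# algorithm - checking one layer of keys may enqueue keys for deeper
# layers (inventories reference chk pages and texts), and the loop drains
# the queue until nothing new appears. A minimal standalone model of that
# drain-and-refill pattern; all names here are hypothetical:

def drain_worklist(pending, check_one):
    """Repeatedly drain `pending` (a set of keys); `check_one(key)` returns
    any newly discovered keys, which are queued for a later pass."""
    checked = set()
    while pending:
        current, pending = pending, set()
        for key in current:
            if key in checked:
                continue
            checked.add(key)
            pending.update(check_one(key))
    return checked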

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
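
# Illustrative sketch (standard library only, not bzrlib): hashing a
# chunked record is equivalent to hashing the concatenated fulltext, so
# the two branches above must produce the same digest. `sha_of_chunks`
# is a hypothetical stand-in for osutils.sha_strings:

import hashlib

def sha_of_chunks(chunks):
    """Feed each chunk to one hasher; same digest as hashing the join."""
    s = hashlib.sha1()
    for chunk in chunks:
        s.update(chunk)
    return s.hexdigest()

assert (sha_of_chunks([b'abc', b'def'])
        == hashlib.sha1(b'abcdef').hexdigest())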

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)
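
# Illustrative sketch (not bzrlib code): _get_revisions above restores the
# caller's ordering from the unordered stream by keying results on revid.
# The same pattern in miniature, with a hypothetical `iter_unordered`
# callable standing in for _iter_revisions:

def ordered_lookup(ids, iter_unordered):
    """`iter_unordered(ids)` yields (id, value) pairs in any order; return
    the values in the order of `ids` (KeyError for any id never yielded)."""
    found = dict(iter_unordered(ids))
    return [found[i] for i in ids]

# e.g. ordered_lookup(['b', 'a'], lambda ids: [('a', 1), ('b', 2)])
# returns [2, 1].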

    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas."""

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
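
# Illustrative sketch (not bzrlib code): the order_as_requested branch
# above buffers out-of-order results and releases them as soon as the
# next requested key becomes available. A standalone model of that
# pattern, with hypothetical names:

def reorder(requested, unordered_pairs):
    """Yield (key, value) in `requested` order, given pairs that arrive
    in arbitrary order. Buffers only the out-of-order results."""
    pending = {}
    want = iter(requested)
    next_key = next(want, None)
    for key, value in unordered_pairs:
        pending[key] = value
        while next_key in pending:
            yield next_key, pending.pop(next_key)
            next_key = next(want, None)

# list(reorder(['a', 'b', 'c'], [('b', 2), ('c', 3), ('a', 1)]))
# -> [('a', 1), ('b', 2), ('c', 3)]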

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text
        return self.source.revision_ids_to_search_result(result_set)

class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True
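
# Illustrative sketch (not bzrlib code): optimisers like the class above
# are registered and then probed via is_compatible(source, target), with
# the first match winning and a generic implementation as the fallback.
# A minimal model of that dispatch; `registry` and `fallback` are
# hypothetical parameters:

def pick_optimiser(registry, source, target, fallback):
    """Return the first registered class claiming compatibility."""
    for inter_cls in registry:
        if inter_cls.is_compatible(source, target):
            return inter_cls
    return fallback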

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
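
# Illustrative sketch (not bzrlib code): picking the basis that yields
# the shortest delta is a min-by-length over the candidates; the sort of
# (len, basis_id, delta) tuples above does exactly that. The same idea
# with a hypothetical `make_delta` callable:

def shortest_delta(candidates, make_delta):
    """candidates: iterable of (basis_id, basis); return the
    (basis_id, delta) pair whose delta has the fewest entries."""
    scored = []
    for basis_id, basis in candidates:
        delta = make_delta(basis)
        scored.append((len(delta), basis_id, delta))
    scored.sort()
    _, basis_id, delta = scored[0]
    return basis_id, delta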

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch too.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id
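
# Illustrative sketch (not bzrlib code): iterating over the result of
# get_parent_map rather than the requested ids, as the comment above
# notes, silently skips ghosts (ids with no entry). A minimal model with
# a hypothetical `graph` dict:

graph = {'rev-a': ('rev-ghost',), 'rev-b': ('rev-a',)}

def get_parent_map(revision_ids):
    """Like Repository.get_parent_map: omit ids that are not present."""
    return dict((r, graph[r]) for r in revision_ids if r in graph)

requested = ['rev-a', 'rev-ghost']
present = get_parent_map(requested)   # {'rev-a': ('rev-ghost',)}
# Looping over `present` (not `requested`) never touches 'rev-ghost'.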

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                                                            revision_ids)
        else:
            a_graph = None
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                                             a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
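
# Illustrative sketch (not bzrlib code): the loop above wraps each batch
# in a write group so a failure aborts only the group in flight while
# earlier batches stay committed. The same commit/abort discipline in a
# generic form; `store` and `apply_batch` are hypothetical:

def apply_in_batches(store, items, apply_batch, batch_size=100):
    """Apply `apply_batch` to slices of `items`, one write group each."""
    for offset in range(0, len(items), batch_size):
        store.start_write_group()
        try:
            apply_batch(items[offset:offset + batch_size])
        except:
            store.abort_write_group()
            raise
        else:
            store.commit_write_group()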

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree
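
# Illustrative sketch (not bzrlib code): the basis selection above is a
# try-the-leftmost-parent-else-empty fallback, where IndexError covers
# "no parents" and the revision probe covers "target lacks it". Modeled
# with hypothetical has_revision/tree_for callables:

def pick_basis(parent_ids, has_revision, tree_for, null_id):
    """Return (basis_id, tree) for the first parent the target has,
    falling back to the empty (null) revision."""
    try:
        basis_id = parent_ids[0]       # IndexError if there are no parents
        if not has_revision(basis_id):
            basis_id = null_id
    except IndexError:
        basis_id = null_id
    return basis_id, tree_for(basis_id)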

InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
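
# Illustrative sketch (not bzrlib code): a delta basis is only usable if
# the receiver already has it, so the sender tracks what it has sent and
# otherwise falls back to a full delta against the empty inventory. In
# miniature, with hypothetical make_delta/empty arguments:

def choose_basis(parents, sent_so_far, make_delta, empty_id, empty):
    """Return (basis_id, delta): the smallest delta against any
    already-sent parent, else a full delta against `empty`."""
    best = None
    for parent_id, parent_inv in parents:
        if parent_id not in sent_so_far:
            continue  # receiver may not have it; unusable as a basis
        candidate = make_delta(parent_inv)
        if best is None or len(candidate) < len(best[1]):
            best = (parent_id, candidate)
    if best is None:
        best = (empty_id, make_delta(empty))
    return best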

def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):