        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes': set(), 'inventories': set(), 'texts': set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object,
                                current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
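
        # Illustrative sketch (hypothetical record): a 'chunked' record
        # whose chunks are ['he', 'llo'] produces the same sha1 and length
        # as a 'fulltext' record returning 'hello', because
        # osutils.sha_strings() hashes the concatenated chunks:
        #   osutils.sha_strings(['he', 'llo']) == osutils.sha_string('hello')
        # item_data[1] is the sha1 the referencing inventory recorded, so
        # any corruption shows up as the mismatch report above.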

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(
                        revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)
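
        # Usage sketch (hypothetical revids): callers can distinguish
        # present and absent revisions without catching an exception:
        #   for revid, rev in repo._iter_revisions(['rev-a', 'ghost-b']):
        #       if rev is None:
        #           pass  # 'ghost-b' was asked for but is absent
        # _get_revisions() above turns the None case into NoSuchRevision.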

    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
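
        # Ordering sketch: with ordering=None the records are requested
        # 'unordered' but buffered in text_chunks and yielded strictly in
        # the requested order, so asking for ['rev-a', 'rev-b']
        # (hypothetical ids) yields rev-a's xml first even if the stream
        # delivers rev-b's record earlier. An explicit ordering such as
        # 'topological' is passed straight to get_record_stream() and the
        # records are yielded as they arrive, with no buffering.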

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid). If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost. Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
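
        # Worked example (hypothetical revids): for known_pair=(10, 'rev-10')
        # and revno=7, distance_from_known is 3, so _iter_for_revno extends
        # the left-hand history three steps and this returns
        #   (True, partial_history[-1])    # the revid of revno 7
        # If history runs out after one step, partial_history holds two
        # entries and 2 <= 3, so the closest match is returned instead:
        #   (False, (9, partial_history[-1]))    # 10 - 2 + 1 == 9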

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack6RichRoot()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.

        InterPackRepo does not support CHK based repositories.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        from bzrlib.repofmt.groupcompress_repo import RepositoryFormatCHK1
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
            not_packs = (isinstance(source._format, RepositoryFormatCHK1) or
                isinstance(target._format, RepositoryFormatCHK1))
        except AttributeError:
            return False
        if not_packs or not are_packs:
            return False
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                    pb, find_ghosts, fetch_spec=fetch_spec)
        if fetch_spec is not None:
            if len(list(fetch_spec.heads)) != 1:
                raise AssertionError(
                    "InterPackRepo.fetch doesn't support "
                    "fetching multiple heads yet.")
            revision_id = list(fetch_spec.heads)[0]
            fetch_spec = None
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target.get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            revision_ids = self.search_missing_revision_ids(revision_id,
                find_ghosts=find_ghosts).get_keys()
            if len(revision_ids) == 0:
                return (0, [])
        return self._pack(self.source, self.target, revision_ids)

    def _pack(self, source, target, revision_ids):
        from bzrlib.repofmt.pack_repo import Packer
        packs = source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            copied_revs = pack.get_revision_count()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (copied_revs, [])
        else:
            return (0, [])

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target.get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target.get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
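
        # Behaviour sketch (hypothetical id): with revision_id='tip' and
        # find_ghosts=True, the breadth-first searcher walks the whole
        # ancestry of 'tip' in the target (consulting the source for graph
        # data), accumulating both found revisions and ghosts; subtracting
        # the ids the target already has parents for leaves the revisions
        # the target is missing.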


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
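
        # Selection sketch: deltas holds (len(delta), basis_id, delta)
        # tuples, so deltas.sort() orders candidates by size and
        # deltas[0][1:] is the (basis_id, delta) pair with the fewest
        # changed entries - e.g. a parent tree differing in 2 entries is
        # preferred over one differing in 40.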

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = None
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id:
                # With stacking we _might_ want to refer to a non-local
                # revision, but this code path only applies when we have the
                # full content available, so ghosts really are ghosts, not just
                # the edge of local data.
                parent_keys.append((parent_id,))
            else:
                # root_id may be in the parent anyway.
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # ghost, can't refer to it.
                    pass
                else:
                    try:
                        parent_keys.append((tree.inventory[root_id].revision,))
                    except errors.NoSuchId:
                        # not in the tree
                        pass
        g = graph.Graph(self.source.revisions)
        heads = g.heads(parent_keys)
        selected_keys = []
        for key in parent_keys:
            if key in heads and key not in selected_keys:
                selected_keys.append(key)
        return tuple([(root_id,) + key for key in selected_keys])

    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
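
        # Flow sketch (hypothetical batch): for revision_ids=['r1', 'r2'],
        # each tree is deltaed against its best available parent tree (or
        # the previous batch's last converted tree when all parents are
        # ghosts), texts new in each revision accumulate in text_keys, and
        # synthesized root entries needed for a rich-root conversion are
        # inserted via _mod_fetch._new_root_data_stream before the
        # ordinary file texts.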

            # missing keys can handle suspending a write group).
            write_group_tokens = self.target_repo.suspend_write_group()
            return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
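
        # Dispatch sketch: identical network_name() or identical serializer
        # means inventory records can be streamed essentially as-is via
        # _get_simple_inventory_stream; any change of serialization falls
        # through to _get_convertable_inventory_stream, whose
        # inventory-deltas a sufficiently modern sink can apply whatever
        # its on-disk format.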

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
            self._stream_invs_as_deltas(revision_ids,
                delta_versus_null=delta_versus_null))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        #      ATM, this will always insert the records as fulltexts, and
        #      requires that you can hang on to records once you have gone
        #      on to the next one. Further, it causes the target to
        #      recompress the data. Testing shows it to be faster than
        #      requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
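
        # Streaming sketch (hypothetical ids): for revision_ids=['r1', 'r2']
        # where r2's parent is r1, r1 is deltaed against NULL_REVISION
        # (nothing has been sent yet); r1 then enters invs_sent_so_far and
        # inventory_cache, so r2 can be sent as the smaller delta against
        # r1. With delta_versus_null=True every inventory is instead sent
        # complete, as a delta against NULL_REVISION.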


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
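
# Worked example (hypothetical ids): starting from
# partial_history_cache=['rev-10'] with stop_index=3, the loop appends
# 'rev-9', 'rev-8' and 'rev-7', then stops because len(cache) == 4 > 3;
# cache[3] is the revid three steps back along the left-hand history.
# A left-hand ghost surfaces as RevisionNotPresent from the iterator,
# which get_rev_id_for_revno() catches and records.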