        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes': set(), 'inventories': set(), 'texts': set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object,
                                current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
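
    # Key layout sketch (grounded in the loops above; the value format is
    # inferred from _check_text below): checker.pending_keys maps prefixed
    # keys to item data whose [1] element is the expected sha1 and whose [2]
    # element names what referenced it, e.g.
    #
    #   checker.pending_keys = {
    #       ('inventories', 'rev-1'): (..., expected_sha1, referencing_key),
    #       ('texts', 'file-id', 'rev-1'): (..., expected_sha1, referencing_key),
    #   }
    #
    # Stripping the kind prefix gives the keys[...] sets checked above; the
    # revision ids shown are placeholders.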

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
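
    # A minimal sketch (assuming osutils here is bzrlib.osutils): both
    # storage kinds above hash identically when the chunks join to the
    # fulltext, which is what lets either form be compared to item_data[1]:
    #
    #   osutils.sha_strings(['foo', 'bar']) == osutils.sha_string('foobar')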

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)
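
    # Usage sketch (hypothetical revision ids): absent revisions come back
    # from _iter_revisions as (revid, None), which _get_revisions turns into
    # NoSuchRevision:
    #
    #   for revid, rev in repo._iter_revisions(['rev-1', 'ghost-id']):
    #       if rev is None:
    #           print 'absent:', revid
    #       else:
    #           print revid, rev.committer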

    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.
        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)
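
    # Ordering sketch: with ordering=None the results are buffered so they
    # come back in the order revision_ids was given; any explicit ordering
    # (e.g. 'topological') is delegated to the record stream instead:
    #
    #   invs = list(repo.iter_inventories(['rev-2', 'rev-1']))  # as requested
    #   invs = list(repo.iter_inventories(ids, 'topological'))  # stream order
    #
    # (revision ids above are placeholders)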

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
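
    # The order-preserving branch above is a reorder buffer: records arrive
    # in stream order, are parked in text_chunks, and are released only once
    # they are next in the requested sequence; when the request list is
    # exhausted, next_key is set to None so the stream is still fully
    # consumed. The same pattern in isolation:
    #
    #   pending, wanted = {}, iter(keys)
    #   cur = wanted.next()
    #   for key, value in unordered_results:
    #       pending[key] = value
    #       while cur is not None and cur in pending:
    #           yield pending.pop(cur)
    #           try:
    #               cur = wanted.next()
    #           except StopIteration:
    #               cur = None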

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid). If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost. Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
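
    # Worked example (hypothetical ids): with known_pair == (10, 'rev-ten')
    # and revno == 7, distance_from_known is 3, so the lefthand history is
    # extended three steps back from 'rev-ten' and (True, partial_history[-1])
    # names the revno-7 revision. If history ran out after two steps,
    # len(partial_history) would be 3 and the result would be
    # (False, (8, partial_history[-1])).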

    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack6RichRoot()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.

        InterPackRepo does not support CHK based repositories.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        from bzrlib.repofmt.groupcompress_repo import RepositoryFormatCHK1
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
            not_packs = (isinstance(source._format, RepositoryFormatCHK1) or
                isinstance(target._format, RepositoryFormatCHK1))
        except AttributeError:
            return False
        if not_packs or not are_packs:
            return False
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                    pb, find_ghosts, fetch_spec=fetch_spec)
        if fetch_spec is not None:
            if len(list(fetch_spec.heads)) != 1:
                raise AssertionError(
                    "InterPackRepo.fetch doesn't support "
                    "fetching multiple heads yet.")
            revision_id = list(fetch_spec.heads)[0]
            fetch_spec = None
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target.get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.)
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            revision_ids = self.search_missing_revision_ids(revision_id,
                find_ghosts=find_ghosts).get_keys()
            if len(revision_ids) == 0:
                return (0, [])
        return self._pack(self.source, self.target, revision_ids)

    def _pack(self, source, target, revision_ids):
        from bzrlib.repofmt.pack_repo import Packer
        packs = source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            copied_revs = pack.get_revision_count()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (copied_revs, [])
        else:
            return (0, [])

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target.get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target.get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
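
    # Usage sketch: callers obtain an optimised inter-repository object and
    # read the missing ids from the returned search result, e.g.
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   result = inter.search_missing_revision_ids('rev-5', find_ghosts=True)
    #   revision_ids = result.get_keys()
    #
    # (source_repo, target_repo and 'rev-5' are placeholders.)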


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # This is the root, so it must be root_id
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
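
    # Example: if the tree has parents P1 and P2 and the delta against P2
    # touches 3 entries while the delta against P1 touches 40, deltas holds
    # [(3, P2, ...), (40, P1, ...)] after sorting, so P2 is returned as the
    # basis. Only the first tuple element (delta length) normally decides;
    # ties fall through to comparing basis ids.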

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = None
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id:
                # With stacking we _might_ want to refer to a non-local
                # revision, but this code path only applies when we have the
                # full content available, so ghosts really are ghosts, not just
                # the edge of local data.
                parent_keys.append((parent_id,))
            else:
                # root_id may be in the parent anyway.
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # ghost, can't refer to it.
                    pass
                else:
                    try:
                        parent_keys.append((tree.inventory[root_id].revision,))
                    except errors.NoSuchId:
                        # not in the tree
                        pass
        g = graph.Graph(self.source.revisions)
        heads = g.heads(parent_keys)
        selected_keys = []
        for key in parent_keys:
            if key in heads and key not in selected_keys:
                selected_keys.append(key)
        return tuple([(root_id,) + key for key in selected_keys])

    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find a inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
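
    # Calling-pattern sketch (the batch size and basis seeding are
    # assumptions; the real driver lives in the surrounding fetch code):
    #
    #   cache = lru_cache.LRUCache(100)
    #   cache[basis_id] = basis_tree
    #   for offset in range(0, len(revision_ids), batch_size):
    #       batch = revision_ids[offset:offset + batch_size]
    #       basis_id = self._fetch_batch(batch, basis_id, cache)
    #
    # Each batch returns the id of its last converted tree, which seeds the
    # delta basis for the next batch.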
            # missing keys can handle suspending a write group).
            write_group_tokens = self.target_repo.suspend_write_group()
            return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv
4129
def _extract_and_insert_revisions(self, substream, serializer):
4400
4130
for record in substream:
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
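
    # Dispatch summary for the method above:
    #
    #   both chk, same network name     -> error (GroupCHKStreamSource's job)
    #   'forceinvdeltas' debug flag     -> inventory-deltas stream
    #   same network name               -> simple as-is stream
    #   both non-chk, same serializer   -> simple as-is stream
    #   any serialization change        -> inventory-deltas stream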

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
            self._stream_invs_as_deltas(revision_ids,
                delta_versus_null=delta_versus_null))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
                            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        #      ATM, this will always insert the records as fulltexts, and
        #      requires that you can hang on to records once you have gone
        #      on to the next one. Further, it causes the target to
        #      recompress the data. Testing shows it to be faster than
        #      requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)

    def _stream_invs_as_fulltexts(self, revision_ids):
        from_repo = self.from_repository
        from_serializer = from_repo._format._serializer
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        for inv in self.from_repository.iter_inventories(revision_ids):
            # XXX: This is a bit hackish, but it works. Basically,
            #      CHKSerializer 'accidentally' supports
            #      read/write_inventory_to_string, even though that is never
            #      the format that is stored on disk. It *does* give us a
            #      single string representation for an inventory, so live with
            #      it for now.
            # This would be far better if we had a 'serialized inventory
            # delta' form. Then we could use 'inventory._make_delta', and
            # transmit that. This would both be faster to generate, and
            # result in fewer bytes-on-the-wire.
            as_bytes = from_serializer.write_inventory_to_string(inv)
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, as_bytes)


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
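
# Usage sketch (hypothetical ids): extend a cached lefthand history until it
# holds six entries, i.e. five steps back from the starting revision:
#
#   partial_history = ['rev-ten']
#   _iter_for_revno(repo, partial_history, stop_index=5)
#   # partial_history is now ['rev-ten', 'rev-nine', ..., 'rev-five']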