            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
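        # Illustrative usage sketch (not from the original source; the
        # basis_tree name and the caching call site are assumptions):
        #
        #     builder.will_record_deletes()
        #     for file_id, relpath, fs_hash in builder.record_iter_changes(
        #             tree, basis_revision_id,
        #             tree.iter_changes(basis_tree)):
        #         tree._observed_sha1(file_id, relpath, fs_hash)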
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]}
        parent_entries = {}
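        # Illustration (hypothetical ids, not from the original source):
        # with parents P0 (the basis) and P1, a file changed only in P1
        # ends up recorded as
        #   merged_ids = {'file-id': ['rev-in-P0', 'rev-in-P1']}
        #   parent_entries = {'file-id': {'rev-in-P1': <InventoryEntry>}}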
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                basis_entry.revision,
                                change[3].revision]
                            parent_entries[change[2]] = {
                                basis_entry.revision: basis_entry,
                                change[3].revision: change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {
                                change[3].revision: change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
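        # For reference: the change[N] indexing used below follows the
        # iter_changes tuple layout documented later in this method:
        #   (file_id,                           # change[0]
        #    (old_path, new_path),              # change[1]
        #    changed_content,                   # change[2]
        #    (old_versioned, new_versioned),    # change[3]
        #    (old_parent_id, new_parent_id),    # change[4]
        #    (old_name, new_name),              # change[5]
        #    (old_kind, new_kind),              # change[6]
        #    (old_executable, new_executable))  # change[7]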
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
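        # Illustration (hypothetical values): a rename appends
        # ('old/name.txt', 'new/name.txt', 'file-id', <InventoryFile>) to the
        # delta, while a deletion appends ('gone.txt', None, 'gone-id', None).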
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
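                # Illustration: if two parents modified this file
                # independently, both per-file tips survive in head_set, so
                # len(heads) == 2 and new content must be recorded below;
                # exactly one head leaves carry-over as a possibility.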
704
# Could be a carry-over situation:
705
parent_entry_revs = parent_entries.get(file_id, None)
706
if parent_entry_revs:
707
parent_entry = parent_entry_revs.get(heads[0], None)
710
if parent_entry is None:
711
# The parent iter_changes was called against is the one
712
# that is the per-file head, so any change is relevant
713
# iter_changes is valid.
714
carry_over_possible = False
716
# could be a carry over situation
717
# A change against the basis may just indicate a merge,
718
# we need to check the content against the source of the
719
# merge to determine if it was changed after the merge
721
if (parent_entry.kind != entry.kind or
722
parent_entry.parent_id != entry.parent_id or
723
parent_entry.name != entry.name):
724
# Metadata common to all entries has changed
725
# against per-file parent
726
carry_over_possible = False
728
carry_over_possible = True
729
# per-type checks for changes against the parent_entry
732
# Cannot be a carry-over situation
733
carry_over_possible = False
734
# Populate the entry in the delta
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
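                    # Note (editorial): nostore_sha acts as the carry-over
                    # guard - if the new content hashes to nostore_sha,
                    # _add_text_to_weave raises errors.ExistingContent rather
                    # than storing a duplicate text, and the except clause
                    # below turns that into a carry-over.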
                    file_obj, stat_value = tree.get_file_with_stat(
                        file_id, change[1][1])
                    try:
                        lines = file_obj.readlines()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, lines, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], [], heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], [], heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    entry.reference_revision = \
                        tree.get_reference_revision(change[0])
                    if (carry_over_possible and
                        parent_entry.reference_revision ==
                            entry.reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], [], heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree

        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        inv = self.get_revision_inventory(revision_id)
        return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)
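    # Illustrative usage (not from the original source): since this is a
    # generator, callers typically pair the results with the ids they asked
    # for, e.g.:
    #
    #     for rev_id, tree in zip(revision_ids,
    #                             repo.revision_trees(revision_ids)):
    #         ...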

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Trees for revisions on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)
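    # Note (editorial): per the docstring above, inv.filter keeps the named
    # file-ids plus their parents and children, so even a single-file filter
    # yields a tree that is still rooted and traversable.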

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision. This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
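        # Illustration (hypothetical ids): for a history rev-1 -> rev-2,
        # get_ancestry('rev-2') returns [None, 'rev-1', 'rev-2'].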
        if _mod_revision.is_null(revision_id):

        return self.source.revision_ids_to_search_result(result_set)


class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        try:
            from bzrlib.repofmt.knitrepo import (
                RepositoryFormatKnit1,
                RepositoryFormatKnit3,
                )
            from bzrlib.repofmt.pack_repo import (
                RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3,
                RepositoryFormatKnitPack4,
                RepositoryFormatKnitPack5,
                RepositoryFormatKnitPack5RichRoot,
                RepositoryFormatKnitPack6,
                RepositoryFormatKnitPack6RichRoot,
                RepositoryFormatPackDevelopment2,
                RepositoryFormatPackDevelopment2Subtree,
                )
            norichroot = (
                RepositoryFormatKnit1,            # no rr, no subtree
                RepositoryFormatKnitPack1,        # no rr, no subtree
                RepositoryFormatPackDevelopment2, # no rr, no subtree
                RepositoryFormatKnitPack5,        # no rr, no subtree
                RepositoryFormatKnitPack6,        # no rr, no subtree
                )
            richroot = (
                RepositoryFormatKnit3,            # rr, subtree
                RepositoryFormatKnitPack3,        # rr, subtree
                RepositoryFormatKnitPack4,        # rr, no subtree
                RepositoryFormatKnitPack5RichRoot,# rr, no subtree
                RepositoryFormatKnitPack6RichRoot,# rr, no subtree
                RepositoryFormatPackDevelopment2Subtree, # rr, subtree
                )
            for format in norichroot:
                if format.rich_root_data:
                    raise AssertionError('Format %s is a rich-root format'
                        ' but is included in the non-rich-root list'
                        % (format,))
            for format in richroot:
                if not format.rich_root_data:
                    raise AssertionError('Format %s is not a rich-root format'
                        ' but is included in the rich-root list'
                        % (format,))
            # TODO: One alternative is to just check format.rich_root_data,
            #       instead of keeping membership lists. However, the formats
            #       *also* have to use the same 'Knit' style of storage
            #       (line-deltas, fulltexts, etc.)
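            # Sketch of that alternative (illustrative only; it still needs
            # a check that both formats share the 'Knit' storage style):
            #
            #     return (not source._format.rich_root_data and
            #             target._format.rich_root_data)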
            return (isinstance(source._format, norichroot) and
                isinstance(target._format, richroot))
        except AttributeError:
            return False

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterDifferingSerializer(InterKnitRepo):

        return basis_id, basis_tree


class InterOtherToRemote(InterRepository):
    """An InterRepository that simply delegates to the 'real' InterRepository
    calculated for (source, target._real_repository).
    """

    _walk_to_common_revisions_batch_size = 50

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)
            # Make _real_inter use the RemoteRepository for get_parent_map
            self._real_inter.target_get_graph = self.target.get_graph
            self._real_inter.target_get_parent_map = self.target.get_parent_map

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        if isinstance(real_source, remote.RemoteRepository):
            raise NotImplementedError(
                "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterPackToRemotePack(InterPackRepo):
    """A specialisation of InterPackRepo for a target that is a
    RemoteRepository.

    This will use the get_parent_map RPC rather than plain readvs, and also
    uses an RPC for autopacking.
    """

    _walk_to_common_revisions_batch_size = 50

    @staticmethod
    def is_compatible(source, target):
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        if isinstance(source._format, RepositoryFormatPack):
            if isinstance(target, remote.RemoteRepository):
                target._ensure_real()
                if isinstance(target._real_repository._format,
                              RepositoryFormatPack):
                    if InterRepository._same_model(source, target):
                        return True
        return False

    def _autopack(self):
        self.target.autopack()

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        # Always fetch using the generic streaming fetch code, to allow
        # streaming fetching into remote servers.
        from bzrlib.fetch import RepoFetcher
        fetcher = RepoFetcher(self.target, self.source, revision_id,
                              pb, find_ghosts)
        self.target.autopack()
        return fetcher.count_copied, fetcher.failed_revisions

    def _get_target_pack_collection(self):
        return self.target._real_repository._pack_collection

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterPackToRemotePack)
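# Note (editorial): InterRepository.get(source, target) tries each registered
# optimiser's is_compatible(source, target) and uses the first match, falling
# back to the generic InterRepository when none applies.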


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """


# Editor's note: the methods below (insert_stream and friends) belong to the
# repository stream sink object; the class header here is an assumption.
class StreamSink(object):

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.
        :return: a list of resume tokens, and an iterable of the keys of
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
            else:
                self.target_repo.start_write_group()
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
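    # Illustrative protocol note (not from the original source): a caller
    # starts with resume_tokens=[]; if insertion is suspended (for example
    # because keys are missing), the returned tokens let a later
    # insert_stream call resume the suspended write group instead of
    # starting a fresh one.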

    def _locked_insert_stream(self, stream, src_format):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)

                self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # NB: This currently reopens the inventory weave in source;
                # using a single stream interface instead would avoid this.
                from_weave = self.from_repository.inventories
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                yield ('inventories', from_weave.get_record_stream(
                    [(rev_id,) for rev_id in revs],
                    self.inventory_fetch_order(),
                    not self.delta_on_metadata()))
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
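        # Illustration (hypothetical key): a missing key such as
        # ('texts', 'file-id', 'rev-id') is stored in keys['texts'] as
        # ('file-id', 'rev-id'); revision keys are rejected just below.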
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            stream = vf.get_record_stream(keys,
                self.to_format._fetch_order, True)
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)