# unchanged, carry over.
ie.reference_revision = parent_entry.reference_revision
ie.revision = parent_entry.revision
return self._get_delta(ie, basis_inv, path), False, None
return self._get_delta(ie, basis_inv, path), False
ie.reference_revision = content_summary[3]
if ie.reference_revision is None:
raise AssertionError("invalid content_summary for nested tree: %r"
% (content_summary,))
self._add_text_to_weave(ie.file_id, '', heads, None)
self._add_text_to_weave(ie.file_id, lines, heads, None)
raise NotImplementedError('unknown kind')
ie.revision = self._new_revision_id
self._any_changes = True
return self._get_delta(ie, basis_inv, path), True, fingerprint

def record_iter_changes(self, tree, basis_revision_id, iter_changes,
_entry_factory=entry_factory):
"""Record a new tree via iter_changes.

:param tree: The tree to obtain text contents from for changed objects.
:param basis_revision_id: The revision id of the tree the iter_changes
has been generated against. Currently assumed to be the same
as self.parents[0] - if it is not, errors may occur.
:param iter_changes: An iter_changes iterator with the changes to apply
to basis_revision_id. The iterator must not include any items with
a current kind of None - missing items must be either filtered out
or errored-on before record_iter_changes sees the item.
:param _entry_factory: Private method to bind entry_factory locally for
:return: A generator of (file_id, relpath, fs_hash) tuples for use with
# Create an inventory delta based on deltas between all the parents and
# deltas between all the parent inventories. We use inventory deltas
# between the inventory objects because iter_changes masks
# last-changed-field only changes.
# file_id -> change map, change is fileid, paths, changed, versioneds,
# parents, names, kinds, executables
# {file_id -> revision_id -> inventory entry, for entries in parent
# trees that are not parents[0]
revtrees = list(self.repository.revision_trees(self.parents))
except errors.NoSuchRevision:
# one or more ghosts, slow path.
for revision_id in self.parents:
revtrees.append(self.repository.revision_tree(revision_id))
except errors.NoSuchRevision:
basis_revision_id = _mod_revision.NULL_REVISION
revtrees.append(self.repository.revision_tree(
_mod_revision.NULL_REVISION))
# The basis inventory from a repository
basis_inv = revtrees[0].inventory
basis_inv = self.repository.revision_tree(
_mod_revision.NULL_REVISION).inventory
if len(self.parents) > 0:
if basis_revision_id != self.parents[0] and not ghost_basis:
"arbitrary basis parents not yet supported with merges")
for revtree in revtrees[1:]:
for change in revtree.inventory._make_delta(basis_inv):
if change[1] is None:
# Not present in this parent.
if change[2] not in merged_ids:
if change[0] is not None:
basis_entry = basis_inv[change[2]]
merged_ids[change[2]] = [
basis_entry.revision,
parent_entries[change[2]] = {
basis_entry.revision:basis_entry,
change[3].revision:change[3],
merged_ids[change[2]] = [change[3].revision]
parent_entries[change[2]] = {change[3].revision:change[3]}
merged_ids[change[2]].append(change[3].revision)
parent_entries[change[2]][change[3].revision] = change[3]
# Setup the changes from the tree:
# changes maps file_id -> (change, [parent revision_ids])
for change in iter_changes:
# This probably looks up in basis_inv way too much.
if change[1][0] is not None:
head_candidate = [basis_inv[change[0]].revision]
changes[change[0]] = change, merged_ids.get(change[0],
unchanged_merged = set(merged_ids) - set(changes)
# Extend the changes dict with synthetic changes to record merges of
for file_id in unchanged_merged:
# Record a merged version of these items that did not change vs the
# basis. This can be either identical parallel changes, or a revert
# of a specific file after a merge. The recorded content will be
# that of the current tree (which is the same as the basis), but
# the per-file graph will reflect a merge.
# NB:XXX: We are reconstructing path information we had, this
# should be preserved instead.
# inv delta change: (file_id, (path_in_source, path_in_target),
# changed_content, versioned, parent, name, kind,
basis_entry = basis_inv[file_id]
except errors.NoSuchId:
# a change from basis->some_parents but file_id isn't in basis
# so was new in the merge, which means it must have changed
# from basis -> current, and as it hasn't the add was reverted
# by the user. So we discard this change.
(basis_inv.id2path(file_id), tree.id2path(file_id)),
(basis_entry.parent_id, basis_entry.parent_id),
(basis_entry.name, basis_entry.name),
(basis_entry.kind, basis_entry.kind),
(basis_entry.executable, basis_entry.executable))
changes[file_id] = (change, merged_ids[file_id])
# changes contains tuples with the change and a set of inventory
# candidates for the file.
# old_path, new_path, file_id, new_inventory_entry
seen_root = False # Is the root in the basis delta?
inv_delta = self._basis_delta
modified_rev = self._new_revision_id
for change, head_candidates in changes.values():
if change[3][1]: # versioned in target.
# Several things may be happening here:
# We may have a fork in the per-file graph
# - record a change with the content from tree
# We may have a change against < all trees
# - carry over the tree that hasn't changed
# We may have a change against all trees
# - record the change with the content from tree
entry = _entry_factory[kind](file_id, change[5][1],
head_set = self._heads(change[0], set(head_candidates))
for head_candidate in head_candidates:
if head_candidate in head_set:
heads.append(head_candidate)
head_set.remove(head_candidate)
# Could be a carry-over situation:
parent_entry_revs = parent_entries.get(file_id, None)
if parent_entry_revs:
parent_entry = parent_entry_revs.get(heads[0], None)
if parent_entry is None:
# The parent iter_changes was called against is the one
# that is the per-file head, so any change is relevant
# iter_changes is valid.
carry_over_possible = False
# could be a carry over situation
# A change against the basis may just indicate a merge,
# we need to check the content against the source of the
# merge to determine if it was changed after the merge
if (parent_entry.kind != entry.kind or
parent_entry.parent_id != entry.parent_id or
parent_entry.name != entry.name):
# Metadata common to all entries has changed
# against per-file parent
carry_over_possible = False
carry_over_possible = True
# per-type checks for changes against the parent_entry
# Cannot be a carry-over situation
carry_over_possible = False
# Populate the entry in the delta
# XXX: There is still a small race here: If someone reverts the content of a file
# after iter_changes examines and decides it has changed,
# we will unconditionally record a new version even if some
# other process reverts it while commit is running (with
# the revert happening after iter_changes did its
entry.executable = True
entry.executable = False
if (carry_over_possible and
parent_entry.executable == entry.executable):
# Check the file length, content hash after reading
nostore_sha = parent_entry.text_sha1
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
text = file_obj.read()
entry.text_sha1, entry.text_size = self._add_text_to_weave(
file_id, text, heads, nostore_sha)
yield file_id, change[1][1], (entry.text_sha1, stat_value)
except errors.ExistingContent:
# No content change against a carry_over parent
# Perhaps this should also yield a fs hash update?
entry.text_size = parent_entry.text_size
entry.text_sha1 = parent_entry.text_sha1
elif kind == 'symlink':
entry.symlink_target = tree.get_symlink_target(file_id)
if (carry_over_possible and
parent_entry.symlink_target == entry.symlink_target):
self._add_text_to_weave(change[0], '', heads, None)
elif kind == 'directory':
if carry_over_possible:
# Nothing to set on the entry.
# XXX: split into the Root and nonRoot versions.
if change[1][1] != '' or self.repository.supports_rich_root():
self._add_text_to_weave(change[0], '', heads, None)
elif kind == 'tree-reference':
if not self.repository._format.supports_tree_reference:
# This isn't quite sane as an error, but we shouldn't
# ever see this code path in practice: trees don't
# permit references when the repo doesn't support tree
raise errors.UnsupportedOperation(tree.add_reference,
reference_revision = tree.get_reference_revision(change[0])
entry.reference_revision = reference_revision
if (carry_over_possible and
parent_entry.reference_revision == reference_revision):
self._add_text_to_weave(change[0], '', heads, None)
raise AssertionError('unknown kind %r' % kind)
entry.revision = modified_rev
entry.revision = parent_entry.revision
new_path = change[1][1]
inv_delta.append((change[1][0], new_path, change[0], entry))
self.new_inventory = None
# This should perhaps be guarded by a check that the basis we
# commit against is the basis for the commit and if not do a delta
self._any_changes = True
# housekeeping root entry changes do not affect no-change commits.
self._require_root_change(tree)
self.basis_delta_revision = basis_revision_id
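# --- A minimal usage sketch (not from the original module): how a caller
# might drive record_iter_changes. `repo`, `branch` and `config` are
# assumptions; the basis is taken to be the tree's first parent, as the
# docstring above requires.
def _example_record_iter_changes(repo, branch, config, tree):
    basis_id = tree.get_parent_ids()[0]
    builder = repo.get_commit_builder(branch, [basis_id], config)
    # iter_changes must not yield items whose current kind is None.
    changes = tree.iter_changes(tree.basis_tree())
    for file_id, relpath, fs_hash in builder.record_iter_changes(
            tree, basis_id, changes):
        pass  # fs_hash could be used to refresh the tree's stat/hash cache.
    builder.finish_inventory()
    return builder.commit('example message')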
def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
parent_keys = tuple([(file_id, parent) for parent in parents])
return self.repository.texts._add_text(
(file_id, self._new_revision_id), parent_keys, new_text,
nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
return self._get_delta(ie, basis_inv, path), True

def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
# Note: as we read the content directly from the tree, we know it's not
# been turned into unicode or badly split - but a broken tree
# implementation could give us bad output from readlines() so this is
# not a guarantee of safety. What would be better is always checking
# the content during test suite execution. RBC 20070912
parent_keys = tuple((file_id, parent) for parent in parents)
return self.repository.texts.add_lines(
(file_id, self._new_revision_id), parent_keys, new_lines,
nostore_sha=nostore_sha, random_id=self.random_revid,
check_content=False)[0:2]
class RootCommitBuilder(CommitBuilder):
"""This commitbuilder actually records the root id"""

# the root entry gets versioned properly by this builder.
_versioned_root = True

# The old API returned a list, should this actually be a set?
return parent_map.keys()
def _check_inventories(self, checker):
"""Check the inventories found from the revision scan.

This is responsible for verifying the sha1 of inventories and
creating a pending_keys set that covers data referenced by inventories.

bar = ui.ui_factory.nested_progress_bar()
self._do_check_inventories(checker, bar)

def _do_check_inventories(self, checker, bar):
"""Helper for _check_inventories."""
keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
kinds = ['chk_bytes', 'texts']
count = len(checker.pending_keys)
bar.update("inventories", 0, 2)
current_keys = checker.pending_keys
checker.pending_keys = {}
# Accumulate current checks.
for key in current_keys:
if key[0] != 'inventories' and key[0] not in kinds:
checker._report_items.append('unknown key type %r' % (key,))
keys[key[0]].add(key[1:])
if keys['inventories']:
# NB: output order *should* be roughly sorted - topo or
# inverse topo depending on repository - either way decent
# to just delta against. However, pre-CHK formats didn't
# try to optimise inventory layout on disk. As such the
# pre-CHK code path does not use inventory deltas.
for record in self.inventories.check(keys=keys['inventories']):
if record.storage_kind == 'absent':
checker._report_items.append(
'Missing inventory {%s}' % (record.key,))
last_object = self._check_record('inventories', record,
checker, last_object,
current_keys[('inventories',) + record.key])
del keys['inventories']
bar.update("texts", 1)
while (checker.pending_keys or keys['chk_bytes']
# Something to check.
current_keys = checker.pending_keys
checker.pending_keys = {}
# Accumulate current checks.
for key in current_keys:
if key[0] not in kinds:
checker._report_items.append('unknown key type %r' % (key,))
keys[key[0]].add(key[1:])
# Check the outermost kind only - inventories || chk_bytes || texts
for record in getattr(self, kind).check(keys=keys[kind]):
if record.storage_kind == 'absent':
checker._report_items.append(
'Missing %s {%s}' % (kind, record.key,))
last_object = self._check_record(kind, record,
checker, last_object, current_keys[(kind,) + record.key])
def _check_record(self, kind, record, checker, last_object, item_data):
"""Check a single text from this repository."""
if kind == 'inventories':
rev_id = record.key[0]
inv = self._deserialise_inventory(rev_id,
record.get_bytes_as('fulltext'))
if last_object is not None:
delta = inv._make_delta(last_object)
for old_path, path, file_id, ie in delta:
ie.check(checker, rev_id, inv)
for path, ie in inv.iter_entries():
ie.check(checker, rev_id, inv)
if self._format.fast_deltas:
elif kind == 'chk_bytes':
# No code written to check chk_bytes for this repo format.
checker._report_items.append(
'unsupported key type chk_bytes for %s' % (record.key,))
elif kind == 'texts':
self._check_text(record, checker, item_data)
checker._report_items.append(
'unknown key type %s for %s' % (kind, record.key))
def _check_text(self, record, checker, item_data):
"""Check a single text."""
# Check it is extractable.
# TODO: check length.
if record.storage_kind == 'chunked':
chunks = record.get_bytes_as(record.storage_kind)
sha1 = osutils.sha_strings(chunks)
length = sum(map(len, chunks))
content = record.get_bytes_as('fulltext')
sha1 = osutils.sha_string(content)
length = len(content)
if item_data and sha1 != item_data[1]:
checker._report_items.append(
'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
(record.key, sha1, item_data[1], item_data[2]))
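# --- An illustrative sketch (not from the original module): the chunked
# and fulltext branches above agree because SHA-1 over a stream of chunks
# equals SHA-1 over the concatenation, shown here with only the stdlib.
import hashlib

def _sha1_of_chunks(chunks):
    s = hashlib.sha1()
    for chunk in chunks:
        s.update(chunk)
    return s.hexdigest()

assert _sha1_of_chunks(['he', 'llo']) == hashlib.sha1('hello').hexdigest()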
def create(a_bzrdir):
"""Construct the current default format repository in a_bzrdir."""

"""Commit the contents accrued within the current write group.

:seealso: start_write_group.
:return: it may return an opaque hint that can be passed to 'pack'.

if self._write_group is not self.get_transaction():
# has an unlock or relock occurred?
raise errors.BzrError('mismatched lock context %r and '
'write group %r.' %
(self.get_transaction(), self._write_group))
result = self._commit_write_group()
self._commit_write_group()
self._write_group = None
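# --- An illustrative sketch (not from the original module): the write
# group protocol around commit_write_group, assuming `repo` is already
# write-locked and `insert_data` is a hypothetical callable that adds
# texts, inventories and revisions.
def _example_write_group(repo, insert_data):
    repo.start_write_group()
    try:
        insert_data(repo)
    except:
        repo.abort_write_group()
        raise
    else:
        hint = repo.commit_write_group()
        return hint  # opaque; may later be passed to repo.pack(hint=hint)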
def _commit_write_group(self):
"""Template method for per-repository write group cleanup.

This is called before the write group is considered to be
finished and should ensure that all data handed to the repository
for writing during the write group is safely committed (to the
extent possible considering file system caching etc).

def suspend_write_group(self):
raise errors.UnsuspendableWriteGroup(self)
def get_missing_parent_inventories(self, check_for_missing_texts=True):
"""Return the keys of missing inventory parents for revisions added in

A revision is not complete if the inventory delta for that revision
cannot be calculated. Therefore if the parent inventories of a
revision are not present, the revision is incomplete, and e.g. cannot
be streamed by a smart server. This method finds missing inventory
parents for revisions added in this write group.

if not self._format.supports_external_lookups:
# This is only an issue for stacked repositories
if not self.is_in_write_group():
raise AssertionError('not in a write group')
# XXX: We assume that every added revision already has its
# corresponding inventory, so we only check for parent inventories that
# might be missing, rather than all inventories.
parents = set(self.revisions._index.get_missing_parents())
parents.discard(_mod_revision.NULL_REVISION)
unstacked_inventories = self.inventories._index
present_inventories = unstacked_inventories.get_parent_map(
key[-1:] for key in parents)
parents.difference_update(present_inventories)
if len(parents) == 0:
# No missing parent inventories.
if not check_for_missing_texts:
return set(('inventories', rev_id) for (rev_id,) in parents)
# Ok, now we have a list of missing inventories. But these only matter
# if the inventories that reference them are missing some texts they
# appear to introduce.
# XXX: Texts referenced by all added inventories need to be present,
# but at the moment we're only checking for texts referenced by
# inventories at the graph's edge.
key_deps = self.revisions._index._key_dependencies
key_deps.satisfy_refs_for_keys(present_inventories)
referrers = frozenset(r[0] for r in key_deps.get_referrers())
file_ids = self.fileids_altered_by_revision_ids(referrers)
missing_texts = set()
for file_id, version_ids in file_ids.iteritems():
missing_texts.update(
(file_id, version_id) for version_id in version_ids)
present_texts = self.texts.get_parent_map(missing_texts)
missing_texts.difference_update(present_texts)
if not missing_texts:
# No texts are missing, so all revisions and their deltas are
# Alternatively the text versions could be returned as the missing
# keys, but this is likely to be less data.
missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
def refresh_data(self):
"""Re-read any data needed to synchronise with disk.

This method is intended to be called after another repository instance
(such as one used by a smart server) has inserted data into the
repository. It may not be called during a write group, but may be
called at any other time.

if self.is_in_write_group():
raise errors.InternalBzrError(
"May not refresh_data while in a write group.")
self._refresh_data()

def resume_write_group(self, tokens):
if not self.is_write_locked():
raise errors.NotWriteLocked(self)
if self._write_group:
raise errors.BzrError('already in a write group')
self._resume_write_group(tokens)
# so we can detect unlock/relock - the write group is now entered.
self._write_group = self.get_transaction()

def _resume_write_group(self, tokens):
raise errors.UnsuspendableWriteGroup(self)
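# --- An illustrative sketch (not from the original module): suspending a
# write group and resuming it later via the opaque tokens. Formats that
# cannot do this raise UnsuspendableWriteGroup, as above.
# `insert_some_data` is a hypothetical callable.
def _example_suspend_resume(repo, insert_some_data):
    repo.lock_write()
    try:
        repo.start_write_group()
        insert_some_data(repo)
        tokens = repo.suspend_write_group()
        # ... later, possibly from another instance of the same repository:
        repo.resume_write_group(tokens)
        repo.commit_write_group()
    finally:
        repo.unlock()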
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
"""Fetch the content required to construct revision_id from source.

If revision_id is None and fetch_spec is None, then all content is
fetch() may not be used when the repository is in a write group -
either finish the current write group before using fetch, or use
fetch before starting the write group.
If revision_id is None all content is copied.
:param find_ghosts: Find and copy revisions in the source that are
ghosts in the target (and not reachable directly by walking out to
the first-present revision in target from revision_id).
:param revision_id: If specified, all the content needed for this
revision ID will be copied to the target. Fetch will determine for
itself which content needs to be copied.
:param fetch_spec: If specified, a SearchResult or
PendingAncestryResult that describes which revisions to copy. This
allows copying multiple heads at once. Mutually exclusive with

if fetch_spec is not None and revision_id is not None:
raise AssertionError(
"fetch_spec and revision_id are mutually exclusive.")
if self.is_in_write_group():
raise errors.InternalBzrError(
"May not fetch while in a write group.")
# fast path same-url fetch operations
# TODO: lift out to somewhere common with RemoteRepository
# <https://bugs.edge.launchpad.net/bzr/+bug/401646>
if (self.has_same_location(source)
and fetch_spec is None
and self._has_same_fallbacks(source)):
if self.has_same_location(source):
# check that last_revision is in 'from' and then return a
if (revision_id is not None and
not _mod_revision.is_null(revision_id)):
self.get_revision(revision_id)
# if there is no specific appropriate InterRepository, this will get
# the InterRepository base class, which raises an
# IncompatibleRepositories when asked to fetch.
inter = InterRepository.get(source, self)
return inter.fetch(revision_id=revision_id, pb=pb,
find_ghosts=find_ghosts, fetch_spec=fetch_spec)
return inter.fetch(revision_id=revision_id, pb=pb, find_ghosts=find_ghosts)
except NotImplementedError:
raise errors.IncompatibleRepositories(source, self)

def create_bundle(self, target, base, fileobj, format=None):
return serializer.write_bundle(self, target, base, fileobj, format)
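# --- An illustrative sketch (not from the original module): a plain fetch
# between two repositories, honouring the "not in a write group" rule
# documented above.
def _example_fetch(target_repo, source_repo, stop_revision):
    if target_repo.is_in_write_group():
        raise AssertionError('finish the write group before fetching')
    target_repo.fetch(source_repo, revision_id=stop_revision)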
@needs_read_lock
def get_revisions(self, revision_ids):
"""Get many revisions at once.

Repositories that need to check data on every revision read should
subclass this method.
"""Get many revisions at once."""
return self._get_revisions(revision_ids)

@needs_read_lock
def _get_revisions(self, revision_ids):
"""Core work logic to get many revisions without sanity checks."""
for rev_id in revision_ids:
if not rev_id or not isinstance(rev_id, basestring):
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
keys = [(key,) for key in revision_ids]
stream = self.revisions.get_record_stream(keys, 'unordered', True)
for revid, rev in self._iter_revisions(revision_ids):
raise errors.NoSuchRevision(self, revid)
for record in stream:
if record.storage_kind == 'absent':
raise errors.NoSuchRevision(self, record.key[0])
text = record.get_bytes_as('fulltext')
rev = self._serializer.read_revision_from_string(text)
revs[record.key[0]] = rev
return [revs[revid] for revid in revision_ids]
def _iter_revisions(self, revision_ids):
"""Iterate over revision objects.

:param revision_ids: An iterable of revisions to examine. None may be
passed to request all revisions known to the repository. Note that
not all repositories can find unreferenced revisions; for those
repositories only referenced ones will be returned.
:return: An iterator of (revid, revision) tuples. Absent revisions (
those asked for but not available) are returned as (revid, None).

if revision_ids is None:
revision_ids = self.all_revision_ids()
for rev_id in revision_ids:
if not rev_id or not isinstance(rev_id, basestring):
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
keys = [(key,) for key in revision_ids]
stream = self.revisions.get_record_stream(keys, 'unordered', True)
for record in stream:
revid = record.key[0]
if record.storage_kind == 'absent':
text = record.get_bytes_as('fulltext')
rev = self._serializer.read_revision_from_string(text)
def get_deltas_for_revisions(self, revisions, specific_fileids=None):

def get_revision_xml(self, revision_id):
# TODO: jam 20070210 This shouldn't be necessary since get_revision
# would have already done it.
# TODO: jam 20070210 Just use _serializer.write_revision_to_string()
rev = self.get_revision(revision_id)
rev_tmp = StringIO()
# the current serializer..
self._serializer.write_revision(rev, rev_tmp)
return rev_tmp.getvalue()

def get_deltas_for_revisions(self, revisions):
"""Produce a generator of revision deltas.

Note that the input is a sequence of REVISIONS, not revision_ids.
Trees will be held in memory until the generator exits.
Each delta is relative to the revision's lefthand predecessor.

:param specific_fileids: if not None, the result is filtered
so that only those file-ids, their parents and their
children are included.

# Get the revision-ids of interest
required_trees = set()
for revision in revisions:
required_trees.add(revision.revision_id)
required_trees.update(revision.parent_ids[:1])
# Get the matching filtered trees. Note that it's more
# efficient to pass filtered trees to changes_from() rather
# than doing the filtering afterwards. changes_from() could
# arguably do the filtering itself but it's path-based, not
# file-id based, so filtering before or afterwards is
if specific_fileids is None:
trees = dict((t.get_revision_id(), t) for
t in self.revision_trees(required_trees))
trees = dict((t.get_revision_id(), t) for
t in self._filtered_revision_trees(required_trees,
# Calculate the deltas
trees = dict((t.get_revision_id(), t) for
t in self.revision_trees(required_trees))
for revision in revisions:
if not revision.parent_ids:
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
old_tree = self.revision_tree(None)
old_tree = trees[revision.parent_ids[0]]
yield trees[revision.revision_id].changes_from(old_tree)
@needs_read_lock
def get_revision_delta(self, revision_id, specific_fileids=None):
def get_revision_delta(self, revision_id):
"""Return the delta for one revision.

The delta is relative to the left-hand predecessor of the
:param specific_fileids: if not None, the result is filtered
so that only those file-ids, their parents and their
children are included.

r = self.get_revision(revision_id)
return list(self.get_deltas_for_revisions([r],
specific_fileids=specific_fileids))[0]
return list(self.get_deltas_for_revisions([r]))[0]
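# --- An illustrative sketch (not from the original module): printing what
# a single revision changed, optionally limited to some file ids. The
# TreeDelta attributes used here (added, modified) are the standard ones.
def _example_show_delta(repo, revision_id, file_ids=None):
    delta = repo.get_revision_delta(revision_id, specific_fileids=file_ids)
    for path, file_id, kind in delta.added:
        print 'added %s' % path
    for path, file_id, kind, text_mod, meta_mod in delta.modified:
        print 'modified %s' % path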
@needs_write_lock
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
"""Get Inventory object by revision id."""
return self.iter_inventories([revision_id]).next()

def iter_inventories(self, revision_ids, ordering=None):
def iter_inventories(self, revision_ids):
"""Get many inventories by revision_ids.

This will buffer some or all of the texts used in constructing the
inventories in memory, but will only parse a single inventory at a

:param revision_ids: The expected revision ids of the inventories.
:param ordering: optional ordering, e.g. 'topological'. If not
specified, the order of revision_ids will be preserved (by
buffering if necessary).
:return: An iterator of inventories.

if ((None in revision_ids)
or (_mod_revision.NULL_REVISION in revision_ids)):
raise ValueError('cannot get null revision inventory')
return self._iter_inventories(revision_ids, ordering)
return self._iter_inventories(revision_ids)
def _iter_inventories(self, revision_ids, ordering):
def _iter_inventories(self, revision_ids):
"""single-document based inventory iteration."""
inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
for text, revision_id in inv_xmls:
yield self._deserialise_inventory(revision_id, text)
for text, revision_id in self._iter_inventory_xmls(revision_ids):
yield self.deserialise_inventory(revision_id, text)

def _iter_inventory_xmls(self, revision_ids, ordering):
if ordering is None:
order_as_requested = True
ordering = 'unordered'
order_as_requested = False
def _iter_inventory_xmls(self, revision_ids):
keys = [(revision_id,) for revision_id in revision_ids]
if order_as_requested:
key_iter = iter(keys)
next_key = key_iter.next()
stream = self.inventories.get_record_stream(keys, ordering, True)
stream = self.inventories.get_record_stream(keys, 'unordered', True)
for record in stream:
if record.storage_kind != 'absent':
chunks = record.get_bytes_as('chunked')
if order_as_requested:
text_chunks[record.key] = chunks
yield ''.join(chunks), record.key[-1]
texts[record.key] = record.get_bytes_as('fulltext')
raise errors.NoSuchRevision(self, record.key)
if order_as_requested:
# Yield as many results as we can while preserving order.
while next_key in text_chunks:
chunks = text_chunks.pop(next_key)
yield ''.join(chunks), next_key[-1]
next_key = key_iter.next()
except StopIteration:
# We still want to fully consume the get_record_stream,
# just in case it is not actually finished at this point
yield texts[key], key[-1]
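# --- An illustrative sketch (not from the original module): the
# order-preserving buffering used above, reduced to plain Python. Results
# arriving in arbitrary order are held back until every earlier requested
# key has been yielded.
def _yield_in_requested_order(requested, arriving):
    pending = {}
    key_iter = iter(requested)
    next_key = key_iter.next()
    for key, value in arriving:
        pending[key] = value
        while next_key in pending:
            yield next_key, pending.pop(next_key)
            try:
                next_key = key_iter.next()
            except StopIteration:
                return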
def _deserialise_inventory(self, revision_id, xml):
"""Transform the xml into an inventory object.
def deserialise_inventory(self, revision_id, xml):
"""Transform the xml into an inventory object.

:param revision_id: The expected revision id of the inventory.
:param xml: A serialised inventory.

result = self._serializer.read_inventory_from_string(xml, revision_id,
entry_cache=self._inventory_entry_cache,
return_from_cache=self._safe_to_return_from_cache)
result = self._serializer.read_inventory_from_string(xml, revision_id)
if result.revision_id != revision_id:
raise AssertionError('revision id mismatch %s != %s' % (
result.revision_id, revision_id))

def serialise_inventory(self, inv):
return self._serializer.write_inventory_to_string(inv)

def _serialise_inventory_to_lines(self, inv):
return self._serializer.write_inventory_to_lines(inv)

def get_serializer_format(self):
return self._serializer.format_num
@needs_read_lock
def _get_inventory_xml(self, revision_id):
"""Get serialized inventory as a string."""
texts = self._iter_inventory_xmls([revision_id], 'unordered')
def get_inventory_xml(self, revision_id):
"""Get inventory XML as a file object."""
texts = self._iter_inventory_xmls([revision_id])
text, revision_id = texts.next()
except StopIteration:
raise errors.HistoryMissing(self, 'inventory', revision_id)
def get_rev_id_for_revno(self, revno, known_pair):
"""Return the revision id of a revno, given a later (revno, revid)
pair in the same history.

:return: if found (True, revid). If the available history ran out
before reaching the revno, then this returns
(False, (closest_revno, closest_revid)).

def get_inventory_sha1(self, revision_id):
"""Return the sha1 hash of the inventory entry

known_revno, known_revid = known_pair
partial_history = [known_revid]
distance_from_known = known_revno - revno
if distance_from_known < 0:
'requested revno (%d) is later than given known revno (%d)'
% (revno, known_revno))
self, partial_history, stop_index=distance_from_known)
except errors.RevisionNotPresent, err:
if err.revision_id == known_revid:
# The start revision (known_revid) wasn't found.
# This is a stacked repository with no fallbacks, or there's a
# left-hand ghost. Either way, even though the revision named in
# the error isn't in this repo, we know it's the next step in this
# left-hand history.
partial_history.append(err.revision_id)
if len(partial_history) <= distance_from_known:
# Didn't find enough history to get a revid for the revno.
earliest_revno = known_revno - len(partial_history) + 1
return (False, (earliest_revno, partial_history[-1]))
if len(partial_history) - 1 > distance_from_known:
raise AssertionError('_iter_for_revno returned too much history')
return (True, partial_history[-1])
return self.get_revision(revision_id).inventory_sha1
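# --- A worked example (not from the original module) of the arithmetic in
# get_rev_id_for_revno: asking for revno 97 with the known pair
# (100, 'r100') walks 100 - 97 = 3 steps left from 'r100', so
# partial_history becomes ['r100', 'r99', 'r98', 'r97'] and
# partial_history[-1] ('r97') is returned as (True, 'r97'). If history
# runs out after ['r100', 'r99'], the result is (False, (99, 'r99')).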
def iter_reverse_revision_history(self, revision_id):
"""Iterate backwards through revision ids in the lefthand history

keys = tsort.topo_sort(parent_map)
return [None] + list(keys)

def pack(self, hint=None, clean_obsolete_packs=False):
"""Compress the data within the repository.

This operation only makes sense for some repository types. For other
types it should be a no-op that just returns.

This stub method does not require a lock, but subclasses should use
@needs_write_lock as this is a long running call it's reasonable to
implicitly lock for the user.

:param hint: If not supplied, the whole repository is packed.
If supplied, the repository may use the hint parameter as a
hint for the parts of the repository to pack. A hint can be
obtained from the result of commit_write_group(). Out of
date hints are simply ignored, because concurrent operations
can obsolete them rapidly.
:param clean_obsolete_packs: Clean obsolete packs immediately after
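# --- An illustrative sketch (not from the original module): feeding the
# hint from commit_write_group back into pack, as the docstring above
# suggests. pack_compresses is the format attribute also consulted by
# InterDifferingSerializer._fetch_all_revisions below.
def _example_commit_then_pack(repo):
    hint = repo.commit_write_group()
    if hint is not None and repo._format.pack_compresses:
        repo.pack(hint=hint)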
@deprecated_method(one_six)
def print_file(self, file, revision_id):
"""Print `file` to stdout.

FIXME RBC 20060125 as John Meinel points out this is a bad api
- it writes to stdout, it assumes that that is valid etc. Fix
by creating a new more flexible convenience function.

tree = self.revision_tree(revision_id)
# use inventory as it was in that revision
file_id = tree.inventory.path2id(file)
# TODO: jam 20060427 Write a test for this code path
# it had a bug in it, and was raising the wrong
raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
tree.print_file(file_id)

def get_transaction(self):
return self.control_files.get_transaction()
def get_parent_map(self, revision_ids):
"""See graph.StackedParentsProvider.get_parent_map"""
# revisions index works in keys; this just works in revisions
# therefore wrap and unwrap
for revision_id in revision_ids:

@deprecated_method(one_one)
def get_parents(self, revision_ids):
"""See StackedParentsProvider.get_parents"""
parent_map = self.get_parent_map(revision_ids)
return [parent_map.get(r, None) for r in revision_ids]

def get_parent_map(self, keys):
"""See graph._StackedParentsProvider.get_parent_map"""
for revision_id in keys:
if revision_id is None:
raise ValueError('get_parent_map(None) is not valid')
if revision_id == _mod_revision.NULL_REVISION:
result[revision_id] = ()
elif revision_id is None:
raise ValueError('get_parent_map(None) is not valid')
query_keys.append((revision_id,))
for ((revision_id,), parent_keys) in \
self.revisions.get_parent_map(query_keys).iteritems():
result[revision_id] = tuple([parent_revid
for (parent_revid,) in parent_keys])
result[revision_id] = (_mod_revision.NULL_REVISION,)
parent_map[revision_id] = ()
parent_id_list = self.get_revision(revision_id).parent_ids
except errors.NoSuchRevision:
if len(parent_id_list) == 0:
parent_ids = (_mod_revision.NULL_REVISION,)
parent_ids = tuple(parent_id_list)
parent_map[revision_id] = parent_ids
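# --- An illustrative sketch (not from the original module): the
# get_parent_map contract as used above. Ghosts are simply absent from the
# result, and a revision with no parents maps to (NULL_REVISION,).
def _example_find_ghosts(repo, revision_ids):
    parent_map = repo.get_parent_map(revision_ids)
    return [r for r in revision_ids if r not in parent_map]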
def _make_parents_provider(self):

def get_known_graph_ancestry(self, revision_ids):
"""Return the known graph for a set of revision ids and their ancestors.

st = static_tuple.StaticTuple
revision_keys = [st(r_id).intern() for r_id in revision_ids]
known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
return graph.GraphThunkIdsToKeys(known_graph)

def get_graph(self, other_repository=None):
"""Return the graph walker for this repository format"""
parents_provider = self._make_parents_provider()
if (other_repository is not None and
not self.has_same_location(other_repository)):
parents_provider = graph.StackedParentsProvider(
parents_provider = graph._StackedParentsProvider(
[parents_provider, other_repository._make_parents_provider()])
return graph.Graph(parents_provider)
def _get_versioned_file_checker(self, text_key_references=None,
"""Return an object suitable for checking versioned files.

:param text_key_references: if non-None, an already built
dictionary mapping text keys ((fileid, revision_id) tuples)
to whether they were referred to by the inventory of the
revision_id that they contain. If None, this will be
:param ancestors: Optional result from
self.get_graph().get_parent_map(self.all_revision_ids()) if already

return _VersionedFileChecker(self,
text_key_references=text_key_references, ancestors=ancestors)

def _get_versioned_file_checker(self):
"""Return an object suitable for checking versioned files."""
return _VersionedFileChecker(self)

def revision_ids_to_search_result(self, result_set):
"""Convert a set of revision ids to a graph SearchResult."""
return self.source.revision_ids_to_search_result(result_set)
class InterDifferingSerializer(InterRepository):

class InterPackRepo(InterSameDataRepository):
"""Optimised code paths between Pack based repositories."""

def _get_repo_format_to_test(self):
from bzrlib.repofmt import pack_repo
return pack_repo.RepositoryFormatKnitPack1()

def is_compatible(source, target):
"""Be compatible with known Pack formats.

We don't test for the stores being of specific types because that
could lead to confusing results, and there is no need to be

from bzrlib.repofmt.pack_repo import RepositoryFormatPack
are_packs = (isinstance(source._format, RepositoryFormatPack) and
isinstance(target._format, RepositoryFormatPack))
except AttributeError:
return are_packs and InterRepository._same_model(source, target)
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
from bzrlib.repofmt.pack_repo import Packer
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
self.source, self.source._format, self.target, self.target._format)
self.count_copied = 0
if revision_id is None:
# everything to do - use pack logic
# to fetch from all packs to one without
# inventory parsing etc, IFF nothing to be copied is in the target.
revision_ids = self.source.all_revision_ids()
revision_keys = [(revid,) for revid in revision_ids]
index = self.target._pack_collection.revision_index.combined_index
present_revision_ids = set(item[1][0] for item in
index.iter_entries(revision_keys))
revision_ids = set(revision_ids) - present_revision_ids
# implementing the TODO will involve:
# - detecting when all of a pack is selected
# - avoiding as much as possible pre-selection, so the
# more-core routines such as create_pack_from_packs can filter in
# a just-in-time fashion. (though having a HEADS list on a
# repository might make this a lot easier, because we could
# sensibly detect 'new revisions' without doing a full index scan.
elif _mod_revision.is_null(revision_id):
revision_ids = self.search_missing_revision_ids(revision_id,
find_ghosts=find_ghosts).get_keys()
except errors.NoSuchRevision:
raise errors.InstallFailed([revision_id])
if len(revision_ids) == 0:
packs = self.source._pack_collection.all_packs()
pack = Packer(self.target._pack_collection, packs, '.fetch',
revision_ids).pack()
if pack is not None:
self.target._pack_collection._save_pack_names()
# Trigger an autopack. This may duplicate effort as we've just done
# a pack creation, but for now it is simpler to think about as
# 'upload data, then repack if needed'.
self.target._pack_collection.autopack()
return (pack.get_revision_count(), [])
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
"""See InterRepository.missing_revision_ids().

:param find_ghosts: Find ghosts throughout the ancestry of

if not find_ghosts and revision_id is not None:
return self._walk_to_common_revisions([revision_id])
elif revision_id is not None:
source_ids = self.source.get_ancestry(revision_id)
if source_ids[0] is not None:
raise AssertionError()
source_ids = self.source.all_revision_ids()
# source_ids is the worst possible case we may need to pull.
# now we want to filter source_ids against what we actually
# have in target, but don't try to check for existence where we know
# we do not have a revision as that would be pointless.
target_ids = set(self.target.all_revision_ids())
result_set = set(source_ids).difference(target_ids)
return self.source.revision_ids_to_search_result(result_set)
class InterModel1and2(InterRepository):

def _get_repo_format_to_test(self):

def is_compatible(source, target):
if not source.supports_rich_root() and target.supports_rich_root():

def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
from bzrlib.fetch import Model1toKnit2Fetcher
f = Model1toKnit2Fetcher(to_repository=self.target,
from_repository=self.source,
last_revision=revision_id,
pb=pb, find_ghosts=find_ghosts)
return f.count_copied, f.failed_revisions

def copy_content(self, revision_id=None):
"""Make a complete copy of the content in self into destination.

This is a destructive operation! Do not use it on existing

:param revision_id: Only copy the content needed to construct
revision_id and its parents.

self.target.set_make_working_trees(self.source.make_working_trees())
except NotImplementedError:
# but don't bother fetching if we have the needed data now.
if (revision_id not in (None, _mod_revision.NULL_REVISION) and
self.target.has_revision(revision_id)):
self.target.fetch(self.source, revision_id=revision_id)
class InterKnit1and2(InterKnitRepo):
2786
def _get_repo_format_to_test(self):
2790
def is_compatible(source, target):
2791
"""Be compatible with Knit1 source and Knit3 target"""
2792
from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
2794
from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
2795
RepositoryFormatKnit3)
2796
from bzrlib.repofmt.pack_repo import (
2797
RepositoryFormatKnitPack1,
2798
RepositoryFormatKnitPack3,
2799
RepositoryFormatPackDevelopment0,
2800
RepositoryFormatPackDevelopment0Subtree,
2803
RepositoryFormatKnit1,
2804
RepositoryFormatKnitPack1,
2805
RepositoryFormatPackDevelopment0,
2808
RepositoryFormatKnit3,
2809
RepositoryFormatKnitPack3,
2810
RepositoryFormatPackDevelopment0Subtree,
2812
return (isinstance(source._format, nosubtrees) and
2813
isinstance(target._format, subtrees))
2814
except AttributeError:
2818
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
2819
"""See InterRepository.fetch()."""
2820
from bzrlib.fetch import Knit1to2Fetcher
2821
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
2822
self.source, self.source._format, self.target,
2823
self.target._format)
2824
f = Knit1to2Fetcher(to_repository=self.target,
2825
from_repository=self.source,
2826
last_revision=revision_id,
2827
pb=pb, find_ghosts=find_ghosts)
2828
return f.count_copied, f.failed_revisions
2831
class InterDifferingSerializer(InterKnitRepo):
3712
2834
def _get_repo_format_to_test(self):
3716
2838
def is_compatible(source, target):
3717
2839
"""Be compatible with Knit2 source and Knit3 target"""
3718
# This is redundant with format.check_conversion_target(), however that
3719
# raises an exception, and we just want to say "False" as in we won't
3720
# support converting between these formats.
3721
if 'IDS_never' in debug.debug_flags:
3723
if source.supports_rich_root() and not target.supports_rich_root():
3725
if (source._format.supports_tree_reference
3726
and not target._format.supports_tree_reference):
3728
if target._fallback_repositories and target._format.supports_chks:
3729
# IDS doesn't know how to copy CHKs for the parent inventories it
3730
# adds to stacked repos.
3732
if 'IDS_always' in debug.debug_flags:
3734
# Only use this code path for local source and target. IDS does far
3735
# too much IO (both bandwidth and roundtrips) over a network.
3736
if not source.bzrdir.transport.base.startswith('file:///'):
3738
if not target.bzrdir.transport.base.startswith('file:///'):
2840
if source.supports_rich_root() != target.supports_rich_root():
2842
# Ideally, we'd support fetching if the source had no tree references
2843
# even if it supported them...
2844
if (getattr(source, '_format.supports_tree_reference', False) and
2845
not getattr(target, '_format.supports_tree_reference', False)):
def _get_trees(self, revision_ids, cache):
for rev_id in revision_ids:
possible_trees.append((rev_id, cache[rev_id]))
# Not cached, but inventory might be present anyway.
tree = self.source.revision_tree(rev_id)
except errors.NoSuchRevision:
# Nope, parent is ghost.
cache[rev_id] = tree
possible_trees.append((rev_id, tree))
return possible_trees

def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
"""Get the best delta and base for this revision.

:return: (basis_id, delta)

# Generate deltas against each tree, to find the shortest.
texts_possibly_new_in_tree = set()
for basis_id, basis_tree in possible_trees:
delta = tree.inventory._make_delta(basis_tree.inventory)
for old_path, new_path, file_id, new_entry in delta:
if new_path is None:
# This file_id isn't present in the new rev, so we don't
# Rich roots are handled elsewhere...
kind = new_entry.kind
if kind != 'directory' and kind != 'file':
# No text record associated with this inventory entry.
# This is a directory or file that has changed somehow.
texts_possibly_new_in_tree.add((file_id, new_entry.revision))
deltas.append((len(delta), basis_id, delta))
return deltas[0][1:]
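# --- An illustrative sketch (not from the original module):
# _get_delta_for_revision accumulates (len(delta), basis_id, delta)
# triples, so a plain sort brings the smallest delta, and therefore the
# cheapest basis, to the front before returning deltas[0][1:].
def _smallest_delta(deltas):
    deltas.sort()
    return deltas[0][1:]  # (basis_id, delta)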
def _fetch_parent_invs_for_stacking(self, parent_map, cache):
"""Find all parent revisions that are absent, but for which the
inventory is present, and copy those inventories.

This is necessary to preserve correctness when the source is stacked
without fallbacks configured. (Note that in cases like upgrade the
source may not have _fallback_repositories even though it is

for parents in parent_map.values():
parent_revs.update(parents)
present_parents = self.source.get_parent_map(parent_revs)
absent_parents = set(parent_revs).difference(present_parents)
parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
(rev_id,) for rev_id in absent_parents)
parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
for parent_tree in self.source.revision_trees(parent_inv_ids):
current_revision_id = parent_tree.get_revision_id()
parents_parents_keys = parent_invs_keys_for_stacking[
(current_revision_id,)]
parents_parents = [key[-1] for key in parents_parents_keys]
basis_id = _mod_revision.NULL_REVISION
basis_tree = self.source.revision_tree(basis_id)
delta = parent_tree.inventory._make_delta(basis_tree.inventory)
self.target.add_inventory_by_delta(
basis_id, delta, current_revision_id, parents_parents)
cache[current_revision_id] = parent_tree
def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
"""Fetch across a few revisions.

:param revision_ids: The revisions to copy
:param basis_id: The revision_id of a tree that must be in cache, used
as a basis for delta when no other base is available
:param cache: A cache of RevisionTrees that we can use.
:param a_graph: A Graph object to determine the heads() of the
rich-root data stream.
:return: The revision_id of the last converted tree. The RevisionTree
for it will be in cache

# Walk through all revisions; get inventory deltas, copy referenced
# texts that delta references, insert the delta, revision and
root_keys_to_create = set()
pending_revisions = []
parent_map = self.source.get_parent_map(revision_ids)
self._fetch_parent_invs_for_stacking(parent_map, cache)
self.source._safe_to_return_from_cache = True
for tree in self.source.revision_trees(revision_ids):
# Find an inventory delta for this revision.
# Find text entries that need to be copied, too.
current_revision_id = tree.get_revision_id()
parent_ids = parent_map.get(current_revision_id, ())
parent_trees = self._get_trees(parent_ids, cache)
possible_trees = list(parent_trees)
if len(possible_trees) == 0:
# There either aren't any parents, or the parents are ghosts,
# so just use the last converted tree.
possible_trees.append((basis_id, cache[basis_id]))
basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
revision = self.source.get_revision(current_revision_id)
pending_deltas.append((basis_id, delta,
current_revision_id, revision.parent_ids))
if self._converting_to_rich_root:
self._revision_id_to_root_id[current_revision_id] = \
# Determine which texts are present in this revision but not in
# any of the available parents.
texts_possibly_new_in_tree = set()
for old_path, new_path, file_id, entry in delta:
if new_path is None:
# This file_id isn't present in the new rev
if not self.target.supports_rich_root():
# The target doesn't support rich root, so we don't
if self._converting_to_rich_root:
# This can't be copied normally, we have to insert
root_keys_to_create.add((file_id, entry.revision))
texts_possibly_new_in_tree.add((file_id, entry.revision))
for basis_id, basis_tree in possible_trees:
basis_inv = basis_tree.inventory
for file_key in list(texts_possibly_new_in_tree):
file_id, file_revision = file_key
entry = basis_inv[file_id]
except errors.NoSuchId:
if entry.revision == file_revision:
texts_possibly_new_in_tree.remove(file_key)
text_keys.update(texts_possibly_new_in_tree)
pending_revisions.append(revision)
cache[current_revision_id] = tree
basis_id = current_revision_id
self.source._safe_to_return_from_cache = False
from_texts = self.source.texts
to_texts = self.target.texts
if root_keys_to_create:
root_stream = _mod_fetch._new_root_data_stream(
root_keys_to_create, self._revision_id_to_root_id, parent_map,
self.source, graph=a_graph)
to_texts.insert_record_stream(root_stream)
to_texts.insert_record_stream(from_texts.get_record_stream(
text_keys, self.target._format._fetch_order,
not self.target._format._fetch_uses_deltas))
# insert inventory deltas
for delta in pending_deltas:
self.target.add_inventory_by_delta(*delta)
if self.target._fallback_repositories:
# Make sure this stacked repository has all the parent inventories
# for the new revisions that we are about to insert. We do this
# before adding the revisions so that no revision is added until
# all the inventories it may depend on are added.
# Note that this is overzealous, as we may have fetched these in an
revision_ids = set()
for revision in pending_revisions:
revision_ids.add(revision.revision_id)
parent_ids.update(revision.parent_ids)
parent_ids.difference_update(revision_ids)
parent_ids.discard(_mod_revision.NULL_REVISION)
parent_map = self.source.get_parent_map(parent_ids)
# we iterate over parent_map and not parent_ids because we don't
# want to try copying any revision which is a ghost
for parent_tree in self.source.revision_trees(parent_map):
current_revision_id = parent_tree.get_revision_id()
parents_parents = parent_map[current_revision_id]
possible_trees = self._get_trees(parents_parents, cache)
if len(possible_trees) == 0:
# There either aren't any parents, or the parents are
# ghosts, so just use the last converted tree.
possible_trees.append((basis_id, cache[basis_id]))
basis_id, delta = self._get_delta_for_revision(parent_tree,
parents_parents, possible_trees)
self.target.add_inventory_by_delta(
basis_id, delta, current_revision_id, parents_parents)
# insert signatures and revisions
for revision in pending_revisions:
signature = self.source.get_signature_text(
revision.revision_id)
self.target.add_signature_text(revision.revision_id,
except errors.NoSuchRevision:
self.target.add_revision(revision.revision_id, revision)
def _fetch_all_revisions(self, revision_ids, pb):
"""Fetch everything for the list of revisions.

:param revision_ids: The list of revisions to fetch. Must be in
:param pb: A ProgressTask

basis_id, basis_tree = self._get_basis(revision_ids[0])
cache = lru_cache.LRUCache(100)
cache[basis_id] = basis_tree
del basis_tree # We don't want to hang on to it here
if self._converting_to_rich_root and len(revision_ids) > 100:
a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
for offset in range(0, len(revision_ids), batch_size):
self.target.start_write_group()
pb.update('Transferring revisions', offset,
batch = revision_ids[offset:offset+batch_size]
basis_id = self._fetch_batch(batch, basis_id, cache,
self.source._safe_to_return_from_cache = False
self.target.abort_write_group()
hint = self.target.commit_write_group()
if hints and self.target._format.pack_compresses:
self.target.pack(hint=hints)
pb.update('Transferring revisions', len(revision_ids),
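# --- An illustrative sketch (not from the original module): the batching
# pattern used by _fetch_all_revisions, with one write group per batch so
# a failure only aborts the batch in flight. `fetch_batch` stands in for
# self._fetch_batch.
def _example_batched_transfer(target, revision_ids, fetch_batch,
                              batch_size=100):
    hints = []
    for offset in range(0, len(revision_ids), batch_size):
        batch = revision_ids[offset:offset + batch_size]
        target.start_write_group()
        try:
            fetch_batch(batch)
        except:
            target.abort_write_group()
            raise
        else:
            hint = target.commit_write_group()
            if hint:
                hints.extend(hint)
    return hints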
@needs_write_lock
def fetch(self, revision_id=None, pb=None, find_ghosts=False,
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
if fetch_spec is not None:
raise AssertionError("Not implemented yet...")
ui.ui_factory.warn_experimental_format_fetch(self)
if (not self.source.supports_rich_root()
and self.target.supports_rich_root()):
self._converting_to_rich_root = True
self._revision_id_to_root_id = {}
self._converting_to_rich_root = False
# See <https://launchpad.net/bugs/456077> asking for a warning here
if self.source._format.network_name() != self.target._format.network_name():
ui.ui_factory.show_user_warning('cross_format_fetch',
from_format=self.source._format,
to_format=self.target._format)
revision_ids = self.target.search_missing_revision_ids(self.source,
revision_id, find_ghosts=find_ghosts).get_keys()
if not revision_ids:
revision_ids = tsort.topo_sort(
self.source.get_graph().get_parent_map(revision_ids))
if not revision_ids:
# Walk through all revisions; get inventory deltas, copy referenced
# texts that delta references, insert the delta, revision and
def revisions_iterator():
for current_revision_id in revision_ids:
revision = self.source.get_revision(current_revision_id)
tree = self.source.revision_tree(current_revision_id)
signature = self.source.get_signature_text(
current_revision_id)
except errors.NoSuchRevision:
yield revision, tree, signature
my_pb = ui.ui_factory.nested_progress_bar()
symbol_versioning.warn(
symbol_versioning.deprecated_in((1, 14, 0))
% "pb parameter to fetch()")
self._fetch_all_revisions(revision_ids, pb)
install_revisions(self.target, revisions_iterator(),
len(revision_ids), pb)
if my_pb is not None:
my_pb.finished()
return len(revision_ids), 0
4032
    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        if isinstance(real_source, remote.RemoteRepository):
            raise NotImplementedError(
                "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
InterRepository.register_optimiser(InterRemoteToOther)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


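# Illustrative example (hypothetical data): parents that are absent from the
# graph are treated as ghosts and stripped.
#
#     graph = {'rev-2': ('rev-1', 'a-ghost'), 'rev-1': ()}
#     _strip_NULL_ghosts(graph)
#     # -> {'rev-2': ('rev-1',), 'rev-1': ()}

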
class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys for
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

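    # Illustrative sketch of the resume protocol (hedged: `sink` is a
    # StreamSink, `source` a StreamSource, and `stream`/`src_format` are
    # placeholders):
    #
    #     tokens, missing = sink.insert_stream(stream, src_format, [])
    #     if missing:
    #         # The write group was suspended; send the missing items and
    #         # resume with the returned tokens.
    #         extra = source.get_stream_for_missing_keys(missing)
    #         tokens, missing = sink.insert_stream(extra, src_format, tokens)
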
    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set
            # the write cache size on the new pack. This avoids poor
            # performance on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should
            # read back from the target repository while a stream with
            # matching serialization is being inserted.
            # The exception is that a delta record from the source that
            # should be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care
            # to explicitly flush any buffered writes first in that rare
            # case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError(
                    'Unknown substream type %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new
        # pack (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                    ('texts', self.target_repo.texts),
                    ('inventories', self.target_repo.inventories),
                    ('revisions', self.target_repo.revisions),
                    ('signatures', self.target_repo.signatures),
                    ('chk_bytes', self.target_repo.chk_bytes),
                    ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object.
                #     I don't believe we can define a method by which
                #     (prefix,) + StaticTuple will work, though we could
                #     define a StaticTuple.sq_concat that would allow you to
                #     pass in either a tuple or a StaticTuple as the second
                #     object, so instead we could have:
                #     StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # Suspend the write group and tell the caller what is
                # missing. We know we can suspend, or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group.)
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

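    # Illustrative sketch of the stream shape consumed above (hedged: the
    # record variables are placeholders). A stream is an iterable of
    # (substream_type, substream) pairs, where each substream yields record
    # factories such as versionedfile.FulltextContentFactory:
    #
    #     stream = iter([
    #         ('texts', text_records),
    #         ('inventories', inventory_records),
    #         ('signatures', signature_records),
    #         ('revisions', revision_records),
    #     ])
    #     resume_tokens, missing = sink.insert_stream(stream, src_format, [])
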
    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest
            # of the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'Mismatched revision id: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is, on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # the same underlying bug.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

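    # Illustrative sketch (hedged: `source` is a StreamSource and `search` a
    # search result exposing get_keys()). Substreams arrive in dependency
    # order - file texts first, then inventories, then signatures and
    # revisions - which is the order StreamSink.insert_stream expects:
    #
    #     for substream_type, substream in source.get_stream(search):
    #         print substream_type
    #     # -> texts, inventories (or inventory-deltas), signatures, revisions
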
    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink.
                # (Think e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round
            # trips after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

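    # Illustrative example (hypothetical keys): missing keys are prefixed
    # with their substream kind and regrouped before streaming, e.g.
    #
    #     missing_keys = set([('texts', 'a-file-id', 'rev-1'),
    #                         ('inventories', 'rev-2')])
    #
    # is split into {'texts': set([('a-file-id', 'rev-1')]),
    # 'inventories': set([('rev-2',)])}, and one filtered substream is
    # yielded per non-empty kind.
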
    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventory deltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to convert on the target,
        # so we need to put bytes-on-the-wire that can be converted. That
        # means inventory deltas (if the remote is <1.19, RemoteStreamSink
        # will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
               delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for tasks such as
            filling in missing parents.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method.
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision,
        # not per-repo (e.g. streaming a non-rich-root revision out of a
        # rich-root repo back into a non-rich-root repo ought to be allowed).
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta
                # against each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis,
                        # so we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)

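    # Illustrative sketch of the basis-selection rule above (hedged:
    # `parent_invs` is a placeholder list of (parent_id, inventory) pairs
    # already sent to the other side): pick the basis giving the shortest
    # delta, else fall back to a complete inventory versus NULL_REVISION.
    #
    #     candidates = [(len(inv._make_delta(pinv)), pid)
    #                   for pid, pinv in parent_invs]
    #     size, basis_id = min(candidates)

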
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # Skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
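
# Illustrative usage sketch (hedged: `repo` and `tip` are placeholders):
#
#     history = [tip]
#     _iter_for_revno(repo, history, stop_index=10)
#     # `history` now extends from the tip backwards to at most 11 entries,
#     # stopping early if the start of history is reached.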