ie.revision = parent_entry.revision
return self._get_delta(ie, basis_inv, path), False, None
ie.reference_revision = content_summary[3]
if ie.reference_revision is None:
raise AssertionError("invalid content_summary for nested tree: %r"
% (content_summary,))
self._add_text_to_weave(ie.file_id, '', heads, None)
self._add_text_to_weave(ie.file_id, lines, heads, None)
raise NotImplementedError('unknown kind')
ie.revision = self._new_revision_id
self._any_changes = True
return self._get_delta(ie, basis_inv, path), True, fingerprint
def record_iter_changes(self, tree, basis_revision_id, iter_changes,
_entry_factory=entry_factory):
"""Record a new tree via iter_changes.
:param tree: The tree to obtain text contents from for changed objects.
:param basis_revision_id: The revision id of the tree the iter_changes
has been generated against. Currently assumed to be the same
as self.parents[0] - if it is not, errors may occur.
:param iter_changes: An iter_changes iterator with the changes to apply
to basis_revision_id. The iterator must not include any items with
a current kind of None - missing items must be either filtered out
or errored-on before record_iter_changes sees the item.
:param _entry_factory: Private method to bind entry_factory locally for
:return: A generator of (file_id, relpath, fs_hash) tuples for use with
# Create an inventory delta based on deltas between all the parents and
# deltas between all the parent inventories. We use inventory deltas
# between the inventory objects because iter_changes masks
# last-changed-field only changes.
# file_id -> change map, change is fileid, paths, changed, versioneds,
# parents, names, kinds, executables
# {file_id -> revision_id -> inventory entry, for entries in parent
# trees that are not parents[0]
revtrees = list(self.repository.revision_trees(self.parents))
except errors.NoSuchRevision:
# one or more ghosts, slow path.
for revision_id in self.parents:
revtrees.append(self.repository.revision_tree(revision_id))
except errors.NoSuchRevision:
basis_revision_id = _mod_revision.NULL_REVISION
revtrees.append(self.repository.revision_tree(
_mod_revision.NULL_REVISION))
# The basis inventory from a repository
basis_inv = revtrees[0].inventory
basis_inv = self.repository.revision_tree(
_mod_revision.NULL_REVISION).inventory
if len(self.parents) > 0:
if basis_revision_id != self.parents[0] and not ghost_basis:
"arbitrary basis parents not yet supported with merges")
for revtree in revtrees[1:]:
for change in revtree.inventory._make_delta(basis_inv):
if change[1] is None:
# Not present in this parent.
if change[2] not in merged_ids:
if change[0] is not None:
basis_entry = basis_inv[change[2]]
merged_ids[change[2]] = [
basis_entry.revision,
parent_entries[change[2]] = {
basis_entry.revision:basis_entry,
change[3].revision:change[3],
merged_ids[change[2]] = [change[3].revision]
parent_entries[change[2]] = {change[3].revision:change[3]}
merged_ids[change[2]].append(change[3].revision)
parent_entries[change[2]][change[3].revision] = change[3]
# Setup the changes from the tree:
# changes maps file_id -> (change, [parent revision_ids])
for change in iter_changes:
# This probably looks up in basis_inv way too much.
if change[1][0] is not None:
head_candidate = [basis_inv[change[0]].revision]
changes[change[0]] = change, merged_ids.get(change[0],
unchanged_merged = set(merged_ids) - set(changes)
# Extend the changes dict with synthetic changes to record merges of
for file_id in unchanged_merged:
# Record a merged version of these items that did not change vs the
# basis. This can be either identical parallel changes, or a revert
# of a specific file after a merge. The recorded content will be
# that of the current tree (which is the same as the basis), but
# the per-file graph will reflect a merge.
# NB:XXX: We are reconstructing path information we had, this
# should be preserved instead.
# inv delta change: (file_id, (path_in_source, path_in_target),
# changed_content, versioned, parent, name, kind,
basis_entry = basis_inv[file_id]
except errors.NoSuchId:
# a change from basis->some_parents but file_id isn't in basis
# so was new in the merge, which means it must have changed
# from basis -> current, and as it hasn't the add was reverted
# by the user. So we discard this change.
(basis_inv.id2path(file_id), tree.id2path(file_id)),
(basis_entry.parent_id, basis_entry.parent_id),
(basis_entry.name, basis_entry.name),
(basis_entry.kind, basis_entry.kind),
(basis_entry.executable, basis_entry.executable))
changes[file_id] = (change, merged_ids[file_id])
# changes contains tuples with the change and a set of inventory
# candidates for the file.
# old_path, new_path, file_id, new_inventory_entry
seen_root = False # Is the root in the basis delta?
inv_delta = self._basis_delta
modified_rev = self._new_revision_id
for change, head_candidates in changes.values():
if change[3][1]: # versioned in target.
# Several things may be happening here:
# We may have a fork in the per-file graph
# - record a change with the content from tree
# We may have a change against < all trees
# - carry over the tree that hasn't changed
# We may have a change against all trees
# - record the change with the content from tree
entry = _entry_factory[kind](file_id, change[5][1],
head_set = self._heads(change[0], set(head_candidates))
for head_candidate in head_candidates:
if head_candidate in head_set:
heads.append(head_candidate)
head_set.remove(head_candidate)
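
# Illustrative standalone sketch (not bzrlib itself): the loop above filters
# head_set down to a list that preserves the order of head_candidates, which
# keeps head resolution deterministic. The same step in isolation:
def ordered_heads(head_candidates, head_set):
    """Return the members of head_set in head_candidates order."""
    heads = []
    remaining = set(head_set)
    for candidate in head_candidates:
        if candidate in remaining:
            heads.append(candidate)
            remaining.remove(candidate)
    return heads

# e.g. ordered_heads(['r1', 'r2', 'r3'], {'r3', 'r1'}) == ['r1', 'r3']
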
# Could be a carry-over situation:
parent_entry_revs = parent_entries.get(file_id, None)
if parent_entry_revs:
parent_entry = parent_entry_revs.get(heads[0], None)
if parent_entry is None:
# The parent iter_changes was called against is the one
# that is the per-file head, so any change is relevant
# iter_changes is valid.
carry_over_possible = False
# could be a carry over situation
# A change against the basis may just indicate a merge,
# we need to check the content against the source of the
# merge to determine if it was changed after the merge
if (parent_entry.kind != entry.kind or
parent_entry.parent_id != entry.parent_id or
parent_entry.name != entry.name):
# Metadata common to all entries has changed
# against per-file parent
carry_over_possible = False
carry_over_possible = True
# per-type checks for changes against the parent_entry
# Cannot be a carry-over situation
carry_over_possible = False
# Populate the entry in the delta
# XXX: There is still a small race here: If someone reverts the content of a file
# after iter_changes examines and decides it has changed,
# we will unconditionally record a new version even if some
# other process reverts it while commit is running (with
# the revert happening after iter_changes did its
entry.executable = True
entry.executable = False
if (carry_over_possible and
parent_entry.executable == entry.executable):
# Check the file length, content hash after reading
nostore_sha = parent_entry.text_sha1
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
text = file_obj.read()
entry.text_sha1, entry.text_size = self._add_text_to_weave(
file_id, text, heads, nostore_sha)
yield file_id, change[1][1], (entry.text_sha1, stat_value)
except errors.ExistingContent:
# No content change against a carry_over parent
# Perhaps this should also yield a fs hash update?
entry.text_size = parent_entry.text_size
entry.text_sha1 = parent_entry.text_sha1
elif kind == 'symlink':
entry.symlink_target = tree.get_symlink_target(file_id)
if (carry_over_possible and
parent_entry.symlink_target == entry.symlink_target):
self._add_text_to_weave(change[0], '', heads, None)
elif kind == 'directory':
if carry_over_possible:
# Nothing to set on the entry.
# XXX: split into the Root and nonRoot versions.
if change[1][1] != '' or self.repository.supports_rich_root():
self._add_text_to_weave(change[0], '', heads, None)
elif kind == 'tree-reference':
if not self.repository._format.supports_tree_reference:
# This isn't quite sane as an error, but we shouldn't
# ever see this code path in practice: trees don't
# permit references when the repo doesn't support tree
raise errors.UnsupportedOperation(tree.add_reference,
reference_revision = tree.get_reference_revision(change[0])
entry.reference_revision = reference_revision
if (carry_over_possible and
parent_entry.reference_revision == reference_revision):
self._add_text_to_weave(change[0], '', heads, None)
raise AssertionError('unknown kind %r' % kind)
entry.revision = modified_rev
entry.revision = parent_entry.revision
new_path = change[1][1]
inv_delta.append((change[1][0], new_path, change[0], entry))
self.new_inventory = None
# This should perhaps be guarded by a check that the basis we
# commit against is the basis for the commit and if not do a delta
self._any_changes = True
# housekeeping root entry changes do not affect no-change commits.
self._require_root_change(tree)
self.basis_delta_revision = basis_revision_id
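
# Hedged usage sketch (assumed caller, inferred from the docstring above):
# record_iter_changes is a generator, so a committer must exhaust it for all
# changes to be recorded; the yielded (file_id, relpath, fs_hash) tuples can
# then refresh the working tree's stat/sha cache. 'builder' is hypothetical.
#
#     for file_id, relpath, fs_hash in builder.record_iter_changes(
#             tree, basis_revision_id, iter_changes):
#         tree._observed_sha1(file_id, relpath, fs_hash)
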
def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
parent_keys = tuple([(file_id, parent) for parent in parents])
return self.repository.texts._add_text(
(file_id, self._new_revision_id), parent_keys, new_text,
nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
# Note: as we read the content directly from the tree, we know it's not
# been turned into unicode or badly split - but a broken tree
# implementation could give us bad output from readlines() so this is
# not a guarantee of safety. What would be better is always checking
# the content during test suite execution. RBC 20070912
parent_keys = tuple((file_id, parent) for parent in parents)
return self.repository.texts.add_lines(
(file_id, self._new_revision_id), parent_keys, new_lines,
nostore_sha=nostore_sha, random_id=self.random_revid,
check_content=False)[0:2]
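
# Standalone sketch of the key convention used by both versions above
# (assumption: text-store keys are (file_id, revision_id) tuples): the new
# text is stored under (file_id, new_revision_id), with one parent key per
# per-file parent revision.
def text_parent_keys(file_id, parent_revision_ids):
    """Build the parent keys for a new text of file_id."""
    return tuple((file_id, parent) for parent in parent_revision_ids)

# e.g. text_parent_keys('f-id', ['rev-1', 'rev-2'])
# == (('f-id', 'rev-1'), ('f-id', 'rev-2'))
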
class RootCommitBuilder(CommitBuilder):
"""This commitbuilder actually records the root id"""
# the root entry gets versioned properly by this builder.
_versioned_root = True
# The old API returned a list, should this actually be a set?
return parent_map.keys()
def _check_inventories(self, checker):
"""Check the inventories found from the revision scan.
This is responsible for verifying the sha1 of inventories and
creating a pending_keys set that covers data referenced by inventories.
bar = ui.ui_factory.nested_progress_bar()
self._do_check_inventories(checker, bar)
def _do_check_inventories(self, checker, bar):
"""Helper for _check_inventories."""
keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
kinds = ['chk_bytes', 'texts']
count = len(checker.pending_keys)
bar.update("inventories", 0, 2)
current_keys = checker.pending_keys
checker.pending_keys = {}
# Accumulate current checks.
for key in current_keys:
if key[0] != 'inventories' and key[0] not in kinds:
checker._report_items.append('unknown key type %r' % (key,))
keys[key[0]].add(key[1:])
if keys['inventories']:
# NB: output order *should* be roughly sorted - topo or
# inverse topo depending on repository - either way decent
# to just delta against. However, pre-CHK formats didn't
# try to optimise inventory layout on disk. As such the
# pre-CHK code path does not use inventory deltas.
for record in self.inventories.check(keys=keys['inventories']):
if record.storage_kind == 'absent':
checker._report_items.append(
'Missing inventory {%s}' % (record.key,))
last_object = self._check_record('inventories', record,
checker, last_object,
current_keys[('inventories',) + record.key])
del keys['inventories']
bar.update("texts", 1)
while (checker.pending_keys or keys['chk_bytes']
# Something to check.
current_keys = checker.pending_keys
checker.pending_keys = {}
# Accumulate current checks.
for key in current_keys:
if key[0] not in kinds:
checker._report_items.append('unknown key type %r' % (key,))
keys[key[0]].add(key[1:])
# Check the outermost kind only - inventories || chk_bytes || texts
for record in getattr(self, kind).check(keys=keys[kind]):
if record.storage_kind == 'absent':
checker._report_items.append(
'Missing %s {%s}' % (kind, record.key,))
last_object = self._check_record(kind, record,
checker, last_object, current_keys[(kind,) + record.key])
def _check_record(self, kind, record, checker, last_object, item_data):
"""Check a single text from this repository."""
if kind == 'inventories':
rev_id = record.key[0]
inv = self._deserialise_inventory(rev_id,
record.get_bytes_as('fulltext'))
if last_object is not None:
delta = inv._make_delta(last_object)
for old_path, path, file_id, ie in delta:
ie.check(checker, rev_id, inv)
for path, ie in inv.iter_entries():
ie.check(checker, rev_id, inv)
if self._format.fast_deltas:
elif kind == 'chk_bytes':
# No code written to check chk_bytes for this repo format.
checker._report_items.append(
'unsupported key type chk_bytes for %s' % (record.key,))
elif kind == 'texts':
self._check_text(record, checker, item_data)
checker._report_items.append(
'unknown key type %s for %s' % (kind, record.key))
def _check_text(self, record, checker, item_data):
"""Check a single text."""
# Check it is extractable.
# TODO: check length.
if record.storage_kind == 'chunked':
chunks = record.get_bytes_as(record.storage_kind)
sha1 = osutils.sha_strings(chunks)
length = sum(map(len, chunks))
content = record.get_bytes_as('fulltext')
sha1 = osutils.sha_string(content)
length = len(content)
if item_data and sha1 != item_data[1]:
checker._report_items.append(
'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
(record.key, sha1, item_data[1], item_data[2]))
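
# Minimal standalone illustration (not the bzrlib osutils helpers) of the
# sha1 check above, assuming content arrives either as an iterable of byte
# chunks or as a single fulltext byte string:
import hashlib

def content_sha1(content, chunked):
    """Return the hex sha1 of chunked or fulltext content."""
    if chunked:
        s = hashlib.sha1()
        for chunk in content:
            s.update(chunk)
        return s.hexdigest()
    return hashlib.sha1(content).hexdigest()

# A mismatch against the sha1 recorded in the referencing inventory is what
# produces the 'sha1 mismatch' report above.
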
def create(a_bzrdir):
"""Construct the current default format repository in a_bzrdir."""
"""Commit the contents accrued within the current write group.
:seealso: start_write_group.
:return: it may return an opaque hint that can be passed to 'pack'.
if self._write_group is not self.get_transaction():
# has an unlock or relock occurred ?
raise errors.BzrError('mismatched lock context %r and '
'write group %r.' %
(self.get_transaction(), self._write_group))
result = self._commit_write_group()
self._commit_write_group()
self._write_group = None
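
# Hedged sketch of the write-group protocol the methods above implement,
# using only Repository methods shown in this file; a helper like this is an
# assumption for illustration, not bzrlib API:
def with_write_group(repo, insert):
    """Run insert(repo) inside a write group, committing on success."""
    repo.lock_write()
    try:
        repo.start_write_group()
        try:
            insert(repo)
        except:
            repo.abort_write_group()
            raise
        # May return an opaque hint suitable for repo.pack(hint=...).
        return repo.commit_write_group()
    finally:
        repo.unlock()
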
def _commit_write_group(self):
"""Template method for per-repository write group cleanup.
This is called before the write group is considered to be
finished and should ensure that all data handed to the repository
for writing during the write group is safely committed (to the
extent possible considering file system caching etc).
def suspend_write_group(self):
raise errors.UnsuspendableWriteGroup(self)
def get_missing_parent_inventories(self, check_for_missing_texts=True):
"""Return the keys of missing inventory parents for revisions added in
A revision is not complete if the inventory delta for that revision
cannot be calculated. Therefore if the parent inventories of a
revision are not present, the revision is incomplete, and e.g. cannot
be streamed by a smart server. This method finds missing inventory
parents for revisions added in this write group.
if not self._format.supports_external_lookups:
# This is only an issue for stacked repositories
if not self.is_in_write_group():
raise AssertionError('not in a write group')
# XXX: We assume that every added revision already has its
# corresponding inventory, so we only check for parent inventories that
# might be missing, rather than all inventories.
parents = set(self.revisions._index.get_missing_parents())
parents.discard(_mod_revision.NULL_REVISION)
unstacked_inventories = self.inventories._index
present_inventories = unstacked_inventories.get_parent_map(
key[-1:] for key in parents)
parents.difference_update(present_inventories)
if len(parents) == 0:
# No missing parent inventories.
if not check_for_missing_texts:
return set(('inventories', rev_id) for (rev_id,) in parents)
# Ok, now we have a list of missing inventories. But these only matter
# if the inventories that reference them are missing some texts they
# appear to introduce.
# XXX: Texts referenced by all added inventories need to be present,
# but at the moment we're only checking for texts referenced by
# inventories at the graph's edge.
key_deps = self.revisions._index._key_dependencies
key_deps.satisfy_refs_for_keys(present_inventories)
referrers = frozenset(r[0] for r in key_deps.get_referrers())
file_ids = self.fileids_altered_by_revision_ids(referrers)
missing_texts = set()
for file_id, version_ids in file_ids.iteritems():
missing_texts.update(
(file_id, version_id) for version_id in version_ids)
present_texts = self.texts.get_parent_map(missing_texts)
missing_texts.difference_update(present_texts)
if not missing_texts:
# No texts are missing, so all revisions and their deltas are
# Alternatively the text versions could be returned as the missing
# keys, but this is likely to be less data.
missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
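
# The filtering above is plain set arithmetic; a standalone sketch of the
# same shape (illustrative, assuming revision ids as strings):
def missing_parent_inventory_keys(candidate_parents, present_inventories):
    """Return ('inventories', rev_id) keys for parents with no inventory."""
    absent = set(candidate_parents) - set(present_inventories)
    return set(('inventories', rev_id) for rev_id in absent)

# e.g. missing_parent_inventory_keys({'p1', 'p2'}, {'p2'})
# == {('inventories', 'p1')}
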
def refresh_data(self):
"""Re-read any data needed to synchronise with disk.
This method is intended to be called after another repository instance
(such as one used by a smart server) has inserted data into the
repository. It may not be called during a write group, but may be
called at any other time.
if self.is_in_write_group():
raise errors.InternalBzrError(
"May not refresh_data while in a write group.")
self._refresh_data()
def resume_write_group(self, tokens):
if not self.is_write_locked():
raise errors.NotWriteLocked(self)
if self._write_group:
raise errors.BzrError('already in a write group')
self._resume_write_group(tokens)
# so we can detect unlock/relock - the write group is now entered.
self._write_group = self.get_transaction()
def _resume_write_group(self, tokens):
raise errors.UnsuspendableWriteGroup(self)
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
"""Fetch the content required to construct revision_id from source.
If revision_id is None and fetch_spec is None, then all content is
fetch() may not be used when the repository is in a write group -
either finish the current write group before using fetch, or use
fetch before starting the write group.
If revision_id is None all content is copied.
:param find_ghosts: Find and copy revisions in the source that are
ghosts in the target (and not reachable directly by walking out to
the first-present revision in target from revision_id).
:param revision_id: If specified, all the content needed for this
revision ID will be copied to the target. Fetch will determine for
itself which content needs to be copied.
:param fetch_spec: If specified, a SearchResult or
PendingAncestryResult that describes which revisions to copy. This
allows copying multiple heads at once. Mutually exclusive with
if fetch_spec is not None and revision_id is not None:
raise AssertionError(
"fetch_spec and revision_id are mutually exclusive.")
if self.is_in_write_group():
raise errors.InternalBzrError(
"May not fetch while in a write group.")
# fast path same-url fetch operations
# TODO: lift out to somewhere common with RemoteRepository
# <https://bugs.edge.launchpad.net/bzr/+bug/401646>
if (self.has_same_location(source)
and fetch_spec is None
and self._has_same_fallbacks(source)):
if self.has_same_location(source):
# check that last_revision is in 'from' and then return a
# no-operation.
if (revision_id is not None and
@needs_read_lock
def get_revisions(self, revision_ids):
"""Get many revisions at once.
Repositories that need to check data on every revision read should
subclass this method.
"""Get many revisions at once."""
return self._get_revisions(revision_ids)
@needs_read_lock
def _get_revisions(self, revision_ids):
"""Core work logic to get many revisions without sanity checks."""
for rev_id in revision_ids:
if not rev_id or not isinstance(rev_id, basestring):
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
keys = [(key,) for key in revision_ids]
stream = self.revisions.get_record_stream(keys, 'unordered', True)
for revid, rev in self._iter_revisions(revision_ids):
raise errors.NoSuchRevision(self, revid)
for record in stream:
if record.storage_kind == 'absent':
raise errors.NoSuchRevision(self, record.key[0])
text = record.get_bytes_as('fulltext')
rev = self._serializer.read_revision_from_string(text)
revs[record.key[0]] = rev
return [revs[revid] for revid in revision_ids]
def _iter_revisions(self, revision_ids):
"""Iterate over revision objects.
:param revision_ids: An iterable of revisions to examine. None may be
passed to request all revisions known to the repository. Note that
not all repositories can find unreferenced revisions; for those
repositories only referenced ones will be returned.
:return: An iterator of (revid, revision) tuples. Absent revisions (
those asked for but not available) are returned as (revid, None).
if revision_ids is None:
revision_ids = self.all_revision_ids()
for rev_id in revision_ids:
if not rev_id or not isinstance(rev_id, basestring):
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
keys = [(key,) for key in revision_ids]
stream = self.revisions.get_record_stream(keys, 'unordered', True)
for record in stream:
revid = record.key[0]
if record.storage_kind == 'absent':
text = record.get_bytes_as('fulltext')
rev = self._serializer.read_revision_from_string(text)
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
def get_revision_xml(self, revision_id):
# TODO: jam 20070210 This shouldn't be necessary since get_revision
# would have already done it.
# TODO: jam 20070210 Just use _serializer.write_revision_to_string()
rev = self.get_revision(revision_id)
rev_tmp = cStringIO.StringIO()
# the current serializer..
self._serializer.write_revision(rev, rev_tmp)
return rev_tmp.getvalue()
def get_deltas_for_revisions(self, revisions):
"""Produce a generator of revision deltas.
Note that the input is a sequence of REVISIONS, not revision_ids.
Trees will be held in memory until the generator exits.
Each delta is relative to the revision's lefthand predecessor.
:param specific_fileids: if not None, the result is filtered
so that only those file-ids, their parents and their
children are included.
# Get the revision-ids of interest
required_trees = set()
for revision in revisions:
required_trees.add(revision.revision_id)
required_trees.update(revision.parent_ids[:1])
# Get the matching filtered trees. Note that it's more
# efficient to pass filtered trees to changes_from() rather
# than doing the filtering afterwards. changes_from() could
# arguably do the filtering itself but it's path-based, not
# file-id based, so filtering before or afterwards is
if specific_fileids is None:
trees = dict((t.get_revision_id(), t) for
t in self.revision_trees(required_trees))
trees = dict((t.get_revision_id(), t) for
t in self._filtered_revision_trees(required_trees,
# Calculate the deltas
trees = dict((t.get_revision_id(), t) for
t in self.revision_trees(required_trees))
for revision in revisions:
if not revision.parent_ids:
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
"""Get Inventory object by revision id."""
return self.iter_inventories([revision_id]).next()
def iter_inventories(self, revision_ids, ordering=None):
def iter_inventories(self, revision_ids):
"""Get many inventories by revision_ids.
This will buffer some or all of the texts used in constructing the
inventories in memory, but will only parse a single inventory at a
:param revision_ids: The expected revision ids of the inventories.
:param ordering: optional ordering, e.g. 'topological'. If not
specified, the order of revision_ids will be preserved (by
buffering if necessary).
:return: An iterator of inventories.
if ((None in revision_ids)
or (_mod_revision.NULL_REVISION in revision_ids)):
raise ValueError('cannot get null revision inventory')
return self._iter_inventories(revision_ids, ordering)
return self._iter_inventories(revision_ids)
def _iter_inventories(self, revision_ids, ordering):
def _iter_inventories(self, revision_ids):
"""single-document based inventory iteration."""
inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
for text, revision_id in inv_xmls:
yield self._deserialise_inventory(revision_id, text)
for text, revision_id in self._iter_inventory_xmls(revision_ids):
yield self.deserialise_inventory(revision_id, text)
def _iter_inventory_xmls(self, revision_ids, ordering):
if ordering is None:
order_as_requested = True
ordering = 'unordered'
order_as_requested = False
def _iter_inventory_xmls(self, revision_ids):
keys = [(revision_id,) for revision_id in revision_ids]
if order_as_requested:
key_iter = iter(keys)
next_key = key_iter.next()
stream = self.inventories.get_record_stream(keys, ordering, True)
stream = self.inventories.get_record_stream(keys, 'unordered', True)
for record in stream:
if record.storage_kind != 'absent':
chunks = record.get_bytes_as('chunked')
if order_as_requested:
text_chunks[record.key] = chunks
yield ''.join(chunks), record.key[-1]
texts[record.key] = record.get_bytes_as('fulltext')
raise errors.NoSuchRevision(self, record.key)
if order_as_requested:
# Yield as many results as we can while preserving order.
while next_key in text_chunks:
chunks = text_chunks.pop(next_key)
yield ''.join(chunks), next_key[-1]
next_key = key_iter.next()
except StopIteration:
# We still want to fully consume the get_record_stream,
# just in case it is not actually finished at this point
yield texts[key], key[-1]
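
# Standalone sketch of the order-preserving buffering used above: records
# arrive in arbitrary order from the stream, but are yielded in the requested
# key order, buffering only results that arrived early.
def in_order(requested_keys, unordered_results):
    """unordered_results: iterable of (key, value) in arbitrary order."""
    pending = {}
    want = iter(requested_keys)
    next_key = next(want, None)
    for key, value in unordered_results:
        pending[key] = value
        while next_key in pending:
            yield next_key, pending.pop(next_key)
            next_key = next(want, None)

# e.g. list(in_order(['a', 'b'], [('b', 2), ('a', 1)]))
# == [('a', 1), ('b', 2)]
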
def _deserialise_inventory(self, revision_id, xml):
"""Transform the xml into an inventory object.
def deserialise_inventory(self, revision_id, xml):
"""Transform the xml into an inventory object.
:param revision_id: The expected revision id of the inventory.
:param xml: A serialised inventory.
result = self._serializer.read_inventory_from_string(xml, revision_id,
entry_cache=self._inventory_entry_cache,
return_from_cache=self._safe_to_return_from_cache)
result = self._serializer.read_inventory_from_string(xml, revision_id)
if result.revision_id != revision_id:
raise AssertionError('revision id mismatch %s != %s' % (
result.revision_id, revision_id))
def serialise_inventory(self, inv):
return self._serializer.write_inventory_to_string(inv)
def _serialise_inventory_to_lines(self, inv):
return self._serializer.write_inventory_to_lines(inv)
def get_serializer_format(self):
return self._serializer.format_num
@needs_read_lock
def _get_inventory_xml(self, revision_id):
"""Get serialized inventory as a string."""
texts = self._iter_inventory_xmls([revision_id], 'unordered')
def get_inventory_xml(self, revision_id):
"""Get inventory XML as a file object."""
texts = self._iter_inventory_xmls([revision_id])
text, revision_id = texts.next()
except StopIteration:
raise errors.HistoryMissing(self, 'inventory', revision_id)
def get_rev_id_for_revno(self, revno, known_pair):
"""Return the revision id of a revno, given a later (revno, revid)
pair in the same history.
:return: if found (True, revid). If the available history ran out
before reaching the revno, then this returns
(False, (closest_revno, closest_revid)).
def get_inventory_sha1(self, revision_id):
"""Return the sha1 hash of the inventory entry
known_revno, known_revid = known_pair
partial_history = [known_revid]
distance_from_known = known_revno - revno
if distance_from_known < 0:
'requested revno (%d) is later than given known revno (%d)'
% (revno, known_revno))
self, partial_history, stop_index=distance_from_known)
except errors.RevisionNotPresent, err:
if err.revision_id == known_revid:
# The start revision (known_revid) wasn't found.
# This is a stacked repository with no fallbacks, or there's a
# left-hand ghost. Either way, even though the revision named in
# the error isn't in this repo, we know it's the next step in this
# left-hand history.
partial_history.append(err.revision_id)
if len(partial_history) <= distance_from_known:
# Didn't find enough history to get a revid for the revno.
earliest_revno = known_revno - len(partial_history) + 1
return (False, (earliest_revno, partial_history[-1]))
if len(partial_history) - 1 > distance_from_known:
raise AssertionError('_iter_for_revno returned too much history')
return (True, partial_history[-1])
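
# The revno arithmetic above, as a standalone sketch: walk
# distance_from_known steps back along the left-hand history collected in
# partial_history, which starts at (known_revno, known_revid).
def rev_for_revno(revno, known_revno, partial_history):
    """Return (True, revid) or (False, (closest_revno, closest_revid))."""
    distance = known_revno - revno
    if len(partial_history) <= distance:
        # Ran out of history before reaching the requested revno.
        earliest_revno = known_revno - len(partial_history) + 1
        return (False, (earliest_revno, partial_history[-1]))
    return (True, partial_history[distance])

# e.g. rev_for_revno(8, 10, ['r10', 'r9', 'r8']) == (True, 'r8')
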
return self.get_revision(revision_id).inventory_sha1
def iter_reverse_revision_history(self, revision_id):
"""Iterate backwards through revision ids in the lefthand history
return self.source.revision_ids_to_search_result(result_set)
class InterDifferingSerializer(InterRepository):
class InterPackRepo(InterSameDataRepository):
"""Optimised code paths between Pack based repositories."""
def _get_repo_format_to_test(self):
from bzrlib.repofmt import pack_repo
return pack_repo.RepositoryFormatKnitPack1()
def is_compatible(source, target):
"""Be compatible with known Pack formats.
We don't test for the stores being of specific types because that
could lead to confusing results, and there is no need to be
from bzrlib.repofmt.pack_repo import RepositoryFormatPack
are_packs = (isinstance(source._format, RepositoryFormatPack) and
isinstance(target._format, RepositoryFormatPack))
except AttributeError:
return are_packs and InterRepository._same_model(source, target)
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
if (len(self.source._fallback_repositories) > 0 or
len(self.target._fallback_repositories) > 0):
# The pack layer is not aware of fallback repositories, so when
# fetching from a stacked repository or into a stacked repository
# we use the generic fetch logic which uses the VersionedFiles
# attributes on repository.
from bzrlib.fetch import RepoFetcher
# Make sure the generic fetcher sets the write cache size on the
# new pack (just like Packer.pack does) to avoid doing many tiny
# writes (which can be slow over a network connection).
# XXX: ideally the transport layer would do this automatically.
pack_coll = self._get_target_pack_collection()
lambda: pack_coll._new_pack.set_write_cache_size(1024*1024))
fetcher = RepoFetcher(self.target, self.source, revision_id,
pb, find_ghosts, set_cache_size)
return fetcher.count_copied, fetcher.failed_revisions
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
self.source, self.source._format, self.target, self.target._format)
self.count_copied = 0
if revision_id is None:
# everything to do - use pack logic
# to fetch from all packs to one without
# inventory parsing etc, IFF nothing to be copied is in the target.
source_revision_ids = frozenset(self.source.all_revision_ids())
revision_ids = source_revision_ids - \
frozenset(self.target_get_parent_map(source_revision_ids))
revision_keys = [(revid,) for revid in revision_ids]
target_pack_collection = self._get_target_pack_collection()
index = target_pack_collection.revision_index.combined_index
present_revision_ids = set(item[1][0] for item in
index.iter_entries(revision_keys))
revision_ids = set(revision_ids) - present_revision_ids
# implementing the TODO will involve:
# - detecting when all of a pack is selected
# - avoiding as much as possible pre-selection, so the
# more-core routines such as create_pack_from_packs can filter in
# a just-in-time fashion. (though having a HEADS list on a
# repository might make this a lot easier, because we could
# sensibly detect 'new revisions' without doing a full index scan.
elif _mod_revision.is_null(revision_id):
revision_ids = self.search_missing_revision_ids(revision_id,
find_ghosts=find_ghosts).get_keys()
except errors.NoSuchRevision:
raise errors.InstallFailed([revision_id])
if len(revision_ids) == 0:
return self._pack(self.source, self.target, revision_ids)
def _pack(self, source, target, revision_ids):
from bzrlib.repofmt.pack_repo import Packer
target_pack_collection = self._get_target_pack_collection()
packs = source._pack_collection.all_packs()
pack = Packer(target_pack_collection, packs, '.fetch',
revision_ids).pack()
if pack is not None:
target_pack_collection._save_pack_names()
copied_revs = pack.get_revision_count()
# Trigger an autopack. This may duplicate effort as we've just done
# a pack creation, but for now it is simpler to think about as
# 'upload data, then repack if needed'.
return (copied_revs, [])
def _autopack(self):
self.target._pack_collection.autopack()
def _get_target_pack_collection(self):
return self.target._pack_collection
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
"""See InterRepository.missing_revision_ids().
:param find_ghosts: Find ghosts throughout the ancestry of
if not find_ghosts and revision_id is not None:
return self._walk_to_common_revisions([revision_id])
elif revision_id is not None:
# Find ghosts: search for revisions pointing from one repository to
# the other, and vice versa, anywhere in the history of revision_id.
graph = self.target_get_graph(other_repository=self.source)
searcher = graph._make_breadth_first_searcher([revision_id])
next_revs, ghosts = searcher.next_with_ghosts()
except StopIteration:
if revision_id in ghosts:
raise errors.NoSuchRevision(self.source, revision_id)
found_ids.update(next_revs)
found_ids.update(ghosts)
found_ids = frozenset(found_ids)
# Double query here: should be able to avoid this by changing the
# graph api further.
result_set = found_ids - frozenset(
self.target_get_parent_map(found_ids))
source_ids = self.source.all_revision_ids()
# source_ids is the worst possible case we may need to pull.
# now we want to filter source_ids against what we actually
# have in target, but don't try to check for existence where we know
# we do not have a revision as that would be pointless.
target_ids = set(self.target.all_revision_ids())
result_set = set(source_ids).difference(target_ids)
return self.source.revision_ids_to_search_result(result_set)
class InterModel1and2(InterRepository):
def _get_repo_format_to_test(self):
def is_compatible(source, target):
if not source.supports_rich_root() and target.supports_rich_root():
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
from bzrlib.fetch import Model1toKnit2Fetcher
f = Model1toKnit2Fetcher(to_repository=self.target,
from_repository=self.source,
last_revision=revision_id,
pb=pb, find_ghosts=find_ghosts)
return f.count_copied, f.failed_revisions
def copy_content(self, revision_id=None):
"""Make a complete copy of the content in self into destination.
This is a destructive operation! Do not use it on existing
:param revision_id: Only copy the content needed to construct
revision_id and its parents.
self.target.set_make_working_trees(self.source.make_working_trees())
except NotImplementedError:
# but don't bother fetching if we have the needed data now.
if (revision_id not in (None, _mod_revision.NULL_REVISION) and
self.target.has_revision(revision_id)):
self.target.fetch(self.source, revision_id=revision_id)
class InterKnit1and2(InterKnitRepo):
def _get_repo_format_to_test(self):
def is_compatible(source, target):
"""Be compatible with Knit1 source and Knit3 target"""
from bzrlib.repofmt.knitrepo import (
RepositoryFormatKnit1,
RepositoryFormatKnit3,
from bzrlib.repofmt.pack_repo import (
RepositoryFormatKnitPack1,
RepositoryFormatKnitPack3,
RepositoryFormatKnitPack4,
RepositoryFormatKnitPack5,
RepositoryFormatKnitPack5RichRoot,
RepositoryFormatKnitPack6,
RepositoryFormatKnitPack6RichRoot,
RepositoryFormatPackDevelopment2,
RepositoryFormatPackDevelopment2Subtree,
RepositoryFormatKnit1, # no rr, no subtree
RepositoryFormatKnitPack1, # no rr, no subtree
RepositoryFormatPackDevelopment2, # no rr, no subtree
RepositoryFormatKnitPack5, # no rr, no subtree
RepositoryFormatKnitPack6, # no rr, no subtree
RepositoryFormatKnit3, # rr, subtree
RepositoryFormatKnitPack3, # rr, subtree
RepositoryFormatKnitPack4, # rr, no subtree
RepositoryFormatKnitPack5RichRoot, # rr, no subtree
RepositoryFormatKnitPack6RichRoot, # rr, no subtree
RepositoryFormatPackDevelopment2Subtree, # rr, subtree
for format in norichroot:
if format.rich_root_data:
raise AssertionError('Format %s is a rich-root format'
' but is included in the non-rich-root list'
for format in richroot:
if not format.rich_root_data:
raise AssertionError('Format %s is not a rich-root format'
' but is included in the rich-root list'
# TODO: One alternative is to just check format.rich_root_data,
# instead of keeping membership lists. However, the formats
# *also* have to use the same 'Knit' style of storage
# (line-deltas, fulltexts, etc.)
return (isinstance(source._format, norichroot) and
isinstance(target._format, richroot))
except AttributeError:
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
from bzrlib.fetch import Knit1to2Fetcher
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
self.source, self.source._format, self.target,
self.target._format)
f = Knit1to2Fetcher(to_repository=self.target,
from_repository=self.source,
last_revision=revision_id,
pb=pb, find_ghosts=find_ghosts)
return f.count_copied, f.failed_revisions
class InterDifferingSerializer(InterKnitRepo):
def _get_repo_format_to_test(self):
def is_compatible(source, target):
"""Be compatible with Knit2 source and Knit3 target"""
# This is redundant with format.check_conversion_target(), however that
# raises an exception, and we just want to say "False" as in we won't
# support converting between these formats.
if 'IDS_never' in debug.debug_flags:
if source.supports_rich_root() and not target.supports_rich_root():
if (source._format.supports_tree_reference
and not target._format.supports_tree_reference):
if target._fallback_repositories and target._format.supports_chks:
# IDS doesn't know how to copy CHKs for the parent inventories it
# adds to stacked repos.
if 'IDS_always' in debug.debug_flags:
# Only use this code path for local source and target. IDS does far
# too much IO (both bandwidth and roundtrips) over a network.
if not source.bzrdir.transport.base.startswith('file:///'):
if not target.bzrdir.transport.base.startswith('file:///'):
if source.supports_rich_root() != target.supports_rich_root():
# Ideally, we'd support fetching if the source had no tree references
# even if it supported them...
if (getattr(source, '_format.supports_tree_reference', False) and
not getattr(target, '_format.supports_tree_reference', False)):
def _get_trees(self, revision_ids, cache):
for rev_id in revision_ids:
possible_trees.append((rev_id, cache[rev_id]))
# Not cached, but inventory might be present anyway.
tree = self.source.revision_tree(rev_id)
except errors.NoSuchRevision:
# Nope, parent is ghost.
cache[rev_id] = tree
possible_trees.append((rev_id, tree))
return possible_trees
def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
"""Get the best delta and base for this revision.
:return: (basis_id, delta)
# Generate deltas against each tree, to find the shortest.
texts_possibly_new_in_tree = set()
for basis_id, basis_tree in possible_trees:
delta = tree.inventory._make_delta(basis_tree.inventory)
for old_path, new_path, file_id, new_entry in delta:
if new_path is None:
# This file_id isn't present in the new rev, so we don't
# Rich roots are handled elsewhere...
kind = new_entry.kind
if kind != 'directory' and kind != 'file':
# No text record associated with this inventory entry.
# This is a directory or file that has changed somehow.
texts_possibly_new_in_tree.add((file_id, new_entry.revision))
deltas.append((len(delta), basis_id, delta))
return deltas[0][1:]
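
# The basis selection above reduces to "pick the candidate that yields the
# shortest delta, breaking ties on basis_id". A standalone sketch, assuming
# make_delta(tree, basis_tree) returns a list of delta entries:
def shortest_delta(tree, possible_trees, make_delta):
    """Return (basis_id, delta) for the shortest delta over the candidates."""
    scored = []
    for basis_id, basis_tree in possible_trees:
        delta = make_delta(tree, basis_tree)
        scored.append((len(delta), basis_id, delta))
    # Sort on (length, basis_id) only; delta objects need not be comparable.
    scored.sort(key=lambda entry: entry[:2])
    return scored[0][1], scored[0][2]
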
def _fetch_parent_invs_for_stacking(self, parent_map, cache):
"""Find all parent revisions that are absent, but for which the
inventory is present, and copy those inventories.
This is necessary to preserve correctness when the source is stacked
without fallbacks configured. (Note that in cases like upgrade the
source may not have _fallback_repositories even though it is
for parents in parent_map.values():
parent_revs.update(parents)
present_parents = self.source.get_parent_map(parent_revs)
absent_parents = set(parent_revs).difference(present_parents)
parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
(rev_id,) for rev_id in absent_parents)
parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
for parent_tree in self.source.revision_trees(parent_inv_ids):
current_revision_id = parent_tree.get_revision_id()
parents_parents_keys = parent_invs_keys_for_stacking[
(current_revision_id,)]
parents_parents = [key[-1] for key in parents_parents_keys]
basis_id = _mod_revision.NULL_REVISION
basis_tree = self.source.revision_tree(basis_id)
delta = parent_tree.inventory._make_delta(basis_tree.inventory)
self.target.add_inventory_by_delta(
basis_id, delta, current_revision_id, parents_parents)
cache[current_revision_id] = parent_tree
def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
"""Fetch across a few revisions.
:param revision_ids: The revisions to copy
:param basis_id: The revision_id of a tree that must be in cache, used
as a basis for delta when no other base is available
:param cache: A cache of RevisionTrees that we can use.
:param a_graph: A Graph object to determine the heads() of the
rich-root data stream.
:return: The revision_id of the last converted tree. The RevisionTree
for it will be in cache
# Walk through all revisions; get inventory deltas, copy referenced
# texts that delta references, insert the delta, revision and
root_keys_to_create = set()
pending_revisions = []
parent_map = self.source.get_parent_map(revision_ids)
self._fetch_parent_invs_for_stacking(parent_map, cache)
self.source._safe_to_return_from_cache = True
for tree in self.source.revision_trees(revision_ids):
# Find an inventory delta for this revision.
# Find text entries that need to be copied, too.
current_revision_id = tree.get_revision_id()
parent_ids = parent_map.get(current_revision_id, ())
parent_trees = self._get_trees(parent_ids, cache)
possible_trees = list(parent_trees)
if len(possible_trees) == 0:
# There either aren't any parents, or the parents are ghosts,
# so just use the last converted tree.
possible_trees.append((basis_id, cache[basis_id]))
basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
revision = self.source.get_revision(current_revision_id)
pending_deltas.append((basis_id, delta,
current_revision_id, revision.parent_ids))
if self._converting_to_rich_root:
self._revision_id_to_root_id[current_revision_id] = \
# Determine which texts are present in this revision but not in
# any of the available parents.
texts_possibly_new_in_tree = set()
for old_path, new_path, file_id, entry in delta:
if new_path is None:
# This file_id isn't present in the new rev
if not self.target.supports_rich_root():
# The target doesn't support rich root, so we don't
if self._converting_to_rich_root:
# This can't be copied normally, we have to insert
root_keys_to_create.add((file_id, entry.revision))
texts_possibly_new_in_tree.add((file_id, entry.revision))
for basis_id, basis_tree in possible_trees:
basis_inv = basis_tree.inventory
for file_key in list(texts_possibly_new_in_tree):
file_id, file_revision = file_key
entry = basis_inv[file_id]
except errors.NoSuchId:
if entry.revision == file_revision:
texts_possibly_new_in_tree.remove(file_key)
text_keys.update(texts_possibly_new_in_tree)
pending_revisions.append(revision)
cache[current_revision_id] = tree
basis_id = current_revision_id
self.source._safe_to_return_from_cache = False
from_texts = self.source.texts
to_texts = self.target.texts
if root_keys_to_create:
root_stream = _mod_fetch._new_root_data_stream(
root_keys_to_create, self._revision_id_to_root_id, parent_map,
self.source, graph=a_graph)
to_texts.insert_record_stream(root_stream)
to_texts.insert_record_stream(from_texts.get_record_stream(
text_keys, self.target._format._fetch_order,
not self.target._format._fetch_uses_deltas))
# insert inventory deltas
for delta in pending_deltas:
self.target.add_inventory_by_delta(*delta)
if self.target._fallback_repositories:
# Make sure this stacked repository has all the parent inventories
# for the new revisions that we are about to insert. We do this
# before adding the revisions so that no revision is added until
# all the inventories it may depend on are added.
# Note that this is overzealous, as we may have fetched these in an
revision_ids = set()
for revision in pending_revisions:
revision_ids.add(revision.revision_id)
parent_ids.update(revision.parent_ids)
parent_ids.difference_update(revision_ids)
parent_ids.discard(_mod_revision.NULL_REVISION)
parent_map = self.source.get_parent_map(parent_ids)
# we iterate over parent_map and not parent_ids because we don't
# want to try copying any revision which is a ghost
for parent_tree in self.source.revision_trees(parent_map):
current_revision_id = parent_tree.get_revision_id()
parents_parents = parent_map[current_revision_id]
possible_trees = self._get_trees(parents_parents, cache)
if len(possible_trees) == 0:
# There either aren't any parents, or the parents are
# ghosts, so just use the last converted tree.
possible_trees.append((basis_id, cache[basis_id]))
basis_id, delta = self._get_delta_for_revision(parent_tree,
parents_parents, possible_trees)
self.target.add_inventory_by_delta(
basis_id, delta, current_revision_id, parents_parents)
# insert signatures and revisions
for revision in pending_revisions:
signature = self.source.get_signature_text(
revision.revision_id)
self.target.add_signature_text(revision.revision_id,
except errors.NoSuchRevision:
self.target.add_revision(revision.revision_id, revision)
def _fetch_all_revisions(self, revision_ids, pb):
"""Fetch everything for the list of revisions.
:param revision_ids: The list of revisions to fetch. Must be in
:param pb: A ProgressTask
basis_id, basis_tree = self._get_basis(revision_ids[0])
cache = lru_cache.LRUCache(100)
cache[basis_id] = basis_tree
del basis_tree # We don't want to hang on to it here
if self._converting_to_rich_root and len(revision_ids) > 100:
a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
for offset in range(0, len(revision_ids), batch_size):
self.target.start_write_group()
pb.update('Transferring revisions', offset,
batch = revision_ids[offset:offset+batch_size]
basis_id = self._fetch_batch(batch, basis_id, cache,
self.source._safe_to_return_from_cache = False
self.target.abort_write_group()
hint = self.target.commit_write_group()
if hints and self.target._format.pack_compresses:
self.target.pack(hint=hints)
pb.update('Transferring revisions', len(revision_ids),
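
# Standalone sketch of the batching loop above: convert revisions in
# fixed-size batches, each inside its own write group so a failure aborts
# only the in-flight batch. The callables stand in for the repository
# methods used above (start/commit/abort_write_group, _fetch_batch).
def fetch_in_batches(revision_ids, batch_size, start, commit, abort, convert):
    hints = []
    for offset in range(0, len(revision_ids), batch_size):
        batch = revision_ids[offset:offset + batch_size]
        start()
        try:
            convert(batch)
        except:
            abort()
            raise
        hint = commit()
        if hint:
            hints.extend(hint)
    return hints
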
@needs_write_lock
def fetch(self, revision_id=None, pb=None, find_ghosts=False,
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
"""See InterRepository.fetch()."""
if fetch_spec is not None:
raise AssertionError("Not implemented yet...")
ui.ui_factory.warn_experimental_format_fetch(self)
if (not self.source.supports_rich_root()
and self.target.supports_rich_root()):
self._converting_to_rich_root = True
self._revision_id_to_root_id = {}
self._converting_to_rich_root = False
# See <https://launchpad.net/bugs/456077> asking for a warning here
if self.source._format.network_name() != self.target._format.network_name():
ui.ui_factory.show_user_warning('cross_format_fetch',
from_format=self.source._format,
to_format=self.target._format)
revision_ids = self.target.search_missing_revision_ids(self.source,
revision_id, find_ghosts=find_ghosts).get_keys()
if not revision_ids:
revision_ids = tsort.topo_sort(
self.source.get_graph().get_parent_map(revision_ids))
if not revision_ids:
# Walk through all revisions; get inventory deltas, copy referenced
# texts that delta references, insert the delta, revision and
def revisions_iterator():
rev_ids = list(revision_ids)
for offset in xrange(0, len(rev_ids), 100):
current_revids = rev_ids[offset:offset+100]
revisions = self.source.get_revisions(current_revids)
trees = self.source.revision_trees(current_revids)
keys = [(r,) for r in current_revids]
sig_stream = self.source.signatures.get_record_stream(
keys, 'unordered', True)
for record in versionedfile.filter_absent(sig_stream):
sigs[record.key[0]] = record.get_bytes_as('fulltext')
for rev, tree in zip(revisions, trees):
yield rev, tree, sigs.get(rev.revision_id, None)
my_pb = ui.ui_factory.nested_progress_bar()
symbol_versioning.warn(
symbol_versioning.deprecated_in((1, 14, 0))
% "pb parameter to fetch()")
self._fetch_all_revisions(revision_ids, pb)
install_revisions(self.target, revisions_iterator(),
len(revision_ids), pb)
if my_pb is not None:
my_pb.finished()
return len(revision_ids), 0
def _get_basis(self, first_revision_id):
"""Get a revision and tree which exists in the target.
This assumes that first_revision_id is selected for transmission
because all other ancestors are already present. If we can't find an
ancestor we fall back to NULL_REVISION since we know that is safe.
:return: (basis_id, basis_tree)
first_rev = self.source.get_revision(first_revision_id)
basis_id = first_rev.parent_ids[0]
# only valid as a basis if the target has it
self.target.get_revision(basis_id)
# Try to get a basis tree - if it's a ghost it will hit the
# NoSuchRevision case.
basis_tree = self.source.revision_tree(basis_id)
except (IndexError, errors.NoSuchRevision):
basis_id = _mod_revision.NULL_REVISION
basis_tree = self.source.revision_tree(basis_id)
return basis_id, basis_tree
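
# The fallback rule above, as a standalone sketch: prefer the first parent
# when the target already has it, else fall back to the null revision, which
# is always a safe (empty) basis. 'null:' mirrors bzrlib's NULL_REVISION but
# is an assumption here.
def choose_basis(first_parents, target_has, null_revision='null:'):
    """Pick a delta basis the receiver is known to have."""
    if first_parents and target_has(first_parents[0]):
        return first_parents[0]
    return null_revision
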
3126
class InterOtherToRemote(InterRepository):
3127
"""An InterRepository that simply delegates to the 'real' InterRepository
3128
calculated for (source, target._real_repository).
3131
_walk_to_common_revisions_batch_size = 50
3133
def __init__(self, source, target):
3134
InterRepository.__init__(self, source, target)
3135
self._real_inter = None
3138
def is_compatible(source, target):
3139
if isinstance(target, remote.RemoteRepository):
3143
def _ensure_real_inter(self):
3144
if self._real_inter is None:
3145
self.target._ensure_real()
3146
real_target = self.target._real_repository
3147
self._real_inter = InterRepository.get(self.source, real_target)
3148
# Make _real_inter use the RemoteRepository for get_parent_map
3149
self._real_inter.target_get_graph = self.target.get_graph
3150
self._real_inter.target_get_parent_map = self.target.get_parent_map
3152
def copy_content(self, revision_id=None):
3153
self._ensure_real_inter()
3154
self._real_inter.copy_content(revision_id=revision_id)
3156
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
3157
self._ensure_real_inter()
3158
return self._real_inter.fetch(revision_id=revision_id, pb=pb,
3159
find_ghosts=find_ghosts)
3162
def _get_repo_format_to_test(self):


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        if isinstance(real_source, remote.RemoteRepository):
            raise NotImplementedError(
                "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterPackToRemotePack(InterPackRepo):
    """A specialisation of InterPackRepo for a target that is a
    RemoteRepository.

    This will use the get_parent_map RPC rather than plain readvs, and also
    uses an RPC for autopacking.
    """

    _walk_to_common_revisions_batch_size = 50

    @staticmethod
    def is_compatible(source, target):
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        if isinstance(source._format, RepositoryFormatPack):
            if isinstance(target, remote.RemoteRepository):
                target._ensure_real()
                if isinstance(target._real_repository._format,
                        RepositoryFormatPack):
                    if InterRepository._same_model(source, target):
                        return True
        return False

    def _autopack(self):
        self.target.autopack()

    def _get_target_pack_collection(self):
        return self.target._real_repository._pack_collection

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterPackToRemotePack)
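
# Editorial sketch, not part of the original module: the registrations above
# are consumed through InterRepository.get(), which picks a registered
# optimiser whose is_compatible() accepts the (source, target) pair and
# falls back to the generic implementation otherwise.  `source_repo` and
# `target_repo` are assumed to be open Repository instances.
def _example_optimised_fetch(source_repo, target_repo, revision_id=None):
    # Returns a (copied_count, failures) pair in this vintage of the API.
    inter = InterRepository.get(source_repo, target_repo)
    return inter.fetch(revision_id=revision_id)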


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """


def _strip_NULL_ghosts(revision_graph):
    """Filter ghosts and NULL_REVISION out of a revision graph's parents."""
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys for
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
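
    # Editorial note, not in the original source: insert_stream implements a
    # resumable protocol.  A caller receiving a non-empty missing_keys set is
    # expected to obtain the missing records and call insert_stream again,
    # passing the returned tokens back in as resume_tokens.  A sketch,
    # assuming `sink`, `source`, `stream` and `src_format` already exist:
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       sink.insert_stream(
    #           source.get_stream_for_missing_keys(missing),
    #           src_format, tokens)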

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('unknown substream type %s'
                    % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
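
    # Editorial note, not in the original source: the stream consumed above
    # is an iterable of (substream_type, substream) pairs, where
    # substream_type is one of 'texts', 'inventories', 'inventory-deltas',
    # 'chk_bytes', 'revisions' or 'signatures', and each substream yields
    # ContentFactory records (see bzrlib.versionedfile).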

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'inconsistent revision id: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is, on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)
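
    # Editorial note, not in the original source: metadata deltas are only
    # reusable when the bytes can be copied verbatim; a delta produced under
    # one serializer cannot be applied to fulltexts produced under another,
    # hence both conditions in the return expression above.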

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # the same issue.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
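
    # Editorial note, not in the original source: missing_keys, as produced
    # by StreamSink, is a set of tuples whose first element names a
    # versioned file and whose tail is the key within it, e.g.
    # ('inventories', 'rev-id') or ('texts', 'file-id', 'rev-id'), which is
    # exactly the shape the bucketing loop above relies on.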

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
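
    # Editorial note, not in the original source: a rich-root upgrade forces
    # 'topological' ordering in inventory_fetch_order() above because root
    # texts are synthesised during the fetch, and a parent inventory must be
    # available before the inventories that build on it.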

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
            delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventory deltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
            self._stream_invs_as_deltas(revision_ids,
                delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
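
    # Editorial note, not in the original source: the smallest-delta search
    # above compares len(delta), i.e. the number of changed entries, rather
    # than serialized size - a cheap proxy that avoids serializing one
    # candidate delta per parent.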


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
        stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
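
# Editorial sketch, not part of the original module: _iter_for_revno is
# driven with a cache seeded with a known revision (typically the branch
# tip).  Assuming `repo` is a Repository and `tip` its head revision id,
# this extends the cached mainline until index 10 is present or history
# runs out:
def _example_extend_history(repo, tip):
    partial_history = [tip]
    _iter_for_revno(repo, partial_history, stop_index=10)
    return partial_history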