                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's
        # not been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this
        # is not a guarantee of safety. What would be better is always
        # checking the content during test suite execution. RBC 20070912
        parent_keys = tuple((file_id, parent) for parent in parents)
        return self.repository.texts.add_lines(
            (file_id, self._new_revision_id), parent_keys, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]
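
    # Illustrative sketch (not part of bzrlib): texts are keyed by
    # (file_id, revision_id), and a text's parents are the same file_id
    # paired with each per-file parent revision - exactly what the
    # parent_keys expression above builds. Standalone version (names here
    # are hypothetical):
    def _example_parent_keys(file_id, parent_revision_ids):
        # ('file-a', ['rev-1', 'rev-2'])
        # -> (('file-a', 'rev-1'), ('file-a', 'rev-2'))
        return tuple((file_id, parent) for parent in parent_revision_ids)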

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]}
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                basis_entry.revision,
                                change[3].revision]
                            parent_entries[change[2]] = {
                                basis_entry.revision: basis_entry,
                                change[3].revision: change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision: change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Set up the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent that iter_changes was called against is
                        # the per-file head, so any change it reports is
                        # relevant - iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
744
# XXX: There is still a small race here: If someone reverts the content of a file
745
# after iter_changes examines and decides it has changed,
746
# we will unconditionally record a new version even if some
747
# other process reverts it while commit is running (with
748
# the revert happening after iter_changes did it's
751
entry.executable = True
753
entry.executable = False
754
if (carry_over_possible and
755
parent_entry.executable == entry.executable):
756
# Check the file length, content hash after reading
758
nostore_sha = parent_entry.text_sha1
761
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
763
text = file_obj.read()
767
entry.text_sha1, entry.text_size = self._add_text_to_weave(
768
file_id, text, heads, nostore_sha)
769
yield file_id, change[1][1], (entry.text_sha1, stat_value)
770
except errors.ExistingContent:
771
# No content change against a carry_over parent
772
# Perhaps this should also yield a fs hash update?
774
entry.text_size = parent_entry.text_size
775
entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support
                        # tree references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and, if not, do a
            # delta against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
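
    # Illustrative sketch (not part of bzrlib): the heads loop in
    # record_iter_changes above filters head_candidates through the
    # unordered head_set while preserving candidate order. The same idea,
    # standalone (names are hypothetical):
    def _example_ordered_heads(head_candidates, head_set):
        head_set = set(head_set)  # copy so the caller's set is untouched
        heads = []
        for candidate in head_candidates:
            if candidate in head_set:
                heads.append(candidate)
                head_set.remove(candidate)  # also de-duplicates candidates
        return heads
    # _example_ordered_heads(['r3', 'r1', 'r2'], set(['r1', 'r3']))
    # -> ['r3', 'r1']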

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
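
    # Illustrative sketch of the assumed nostore_sha contract (this is not
    # the bzrlib implementation): a store asked to add a text whose sha1
    # equals nostore_sha raises an ExistingContent-style error instead of
    # storing it, which is how the carry-over path in record_iter_changes
    # avoids writing unchanged file content. All names are hypothetical.
    def _example_add_text(store, key, text, nostore_sha=None):
        import hashlib
        class ExistingContent(Exception):
            pass
        sha1 = hashlib.sha1(text).hexdigest()
        if nostore_sha is not None and sha1 == nostore_sha:
            # Unchanged content: signal the caller rather than re-store.
            raise ExistingContent()
        store[key] = text
        return sha1, len(text)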


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes': set(), 'inventories': set(), 'texts': set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object,
                                current_keys[(kind,) + record.key])

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
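
    # Illustrative sketch (not part of bzrlib): sha1 over chunked content
    # must equal sha1 over the joined fulltext, which is why the two
    # branches above can feed one comparison. osutils.sha_strings is
    # assumed to behave like this update-per-chunk loop:
    def _example_sha_strings(chunks):
        import hashlib
        s = hashlib.sha1()
        for chunk in chunks:
            s.update(chunk)
        return s.hexdigest()
    # _example_sha_strings([b'ab', b'c']) == hashlib.sha1(b'abc').hexdigest()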

    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.

        :return: it may return an opaque hint that can be passed to 'pack'.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.satisfy_refs_for_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys
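
    # Illustrative sketch (not part of bzrlib): the core of the check above
    # is plain set arithmetic - parent revisions referenced by the new
    # revisions, minus NULL, minus those whose inventories are present.
    # Names are hypothetical:
    def _example_missing_parent_inventories(referenced_parents, present,
            null_id):
        parents = set(referenced_parents)
        parents.discard(null_id)
        parents.difference_update(present)
        return set(('inventories', rev_id) for rev_id in parents)
    # _example_missing_parent_inventories(['r1', 'r2', 'null:'], ['r1'],
    #     'null:') -> set([('inventories', 'r2')])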

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)

    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
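
    # Illustrative sketch (not part of bzrlib): the order_as_requested path
    # above buffers out-of-order records and drains the buffer whenever the
    # next wanted key becomes available. The same idea, standalone:
    def _example_reorder(wanted_keys, unordered_pairs):
        buffered = {}
        key_iter = iter(wanted_keys)
        next_key = next(key_iter, None)
        for key, value in unordered_pairs:
            buffered[key] = value
            while next_key in buffered:
                yield next_key, buffered.pop(next_key)
                next_key = next(key_iter, None)
    # list(_example_reorder(['a', 'b'], [('b', 2), ('a', 1)]))
    # -> [('a', 1), ('b', 2)]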

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid). If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost. Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
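
    # Illustrative sketch (not part of bzrlib): given a later (revno, revid)
    # pair, the revid for an earlier revno is found by stepping back through
    # left-hand (first) parents exactly known_revno - revno times. Names and
    # the parent_map shape are hypothetical:
    def _example_rev_id_for_revno(revno, known_pair, parent_map):
        known_revno, known_revid = known_pair
        distance = known_revno - revno
        if distance < 0:
            raise ValueError('requested revno is later than the known revno')
        revid = known_revid
        for _ in range(distance):
            parents = parent_map.get(revid)
            if not parents:
                # History ran out before reaching revno.
                return (False, (known_revno, revid))
            revid = parents[0]  # left-hand parent
            known_revno -= 1
        return (True, revid)
    # parent_map = {'r3': ('r2',), 'r2': ('r1',), 'r1': ()}
    # _example_rev_id_for_revno(1, (3, 'r3'), parent_map) -> (True, 'r1')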

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history
        of revision_id.
        """
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                pb, find_ghosts)
            return fetcher.count_copied, fetcher.failed_revisions
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target.get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            #   more-core routines such as create_pack_from_packs can filter in
            #   a just-in-time fashion. (though having a HEADS list on a
            #   repository might make this a lot easier, because we could
            #   sensibly detect 'new revisions' without doing a full index scan.)
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.search_missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts).get_keys()
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
            if len(revision_ids) == 0:
                return (0, [])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (pack.get_revision_count(), [])

    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target.get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target.get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
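
    # Illustrative sketch (not part of bzrlib): with no revision_id the
    # search above degenerates to plain set difference - everything the
    # source has that the target lacks:
    def _example_missing_revision_ids(source_ids, target_ids):
        return set(source_ids).difference(target_ids)
    # _example_missing_revision_ids(['r1', 'r2'], ['r1']) -> set(['r2'])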


class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        return False

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        try:
            from bzrlib.repofmt.knitrepo import (
                RepositoryFormatKnit1,
                RepositoryFormatKnit3,
                )
            from bzrlib.repofmt.pack_repo import (
                RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3,
                RepositoryFormatKnitPack4,
                RepositoryFormatKnitPack5,
                RepositoryFormatKnitPack5RichRoot,
                RepositoryFormatPackDevelopment2,
                RepositoryFormatPackDevelopment2Subtree,
                )
            norichroot = (
                RepositoryFormatKnit1,            # no rr, no subtree
                RepositoryFormatKnitPack1,        # no rr, no subtree
                RepositoryFormatPackDevelopment2, # no rr, no subtree
                RepositoryFormatKnitPack5,        # no rr, no subtree
                )
            richroot = (
                RepositoryFormatKnit3,            # rr, subtree
                RepositoryFormatKnitPack3,        # rr, subtree
                RepositoryFormatKnitPack4,        # rr, no subtree
                RepositoryFormatKnitPack5RichRoot, # rr, no subtree
                RepositoryFormatPackDevelopment2Subtree, # rr, subtree
                )
            for format in norichroot:
                if format.rich_root_data:
                    raise AssertionError('Format %s is a rich-root format'
                        ' but is included in the non-rich-root list'
                        % (format,))
            for format in richroot:
                if not format.rich_root_data:
                    raise AssertionError('Format %s is not a rich-root format'
                        ' but is included in the rich-root list'
                        % (format,))
            # TODO: One alternative is to just check format.rich_root_data,
            #       instead of keeping membership lists. However, the formats
            #       *also* have to use the same 'Knit' style of storage
            #       (line-deltas, fulltexts, etc.)
            return (isinstance(source._format, norichroot) and
                isinstance(target._format, richroot))
        except AttributeError:
            return False

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
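
    # Illustrative sketch (not part of bzrlib): deltas are ranked by length
    # with the basis id as tiebreaker, which is what sorting
    # (len(delta), basis_id, delta) tuples and taking the first achieves:
    def _example_best_delta(deltas_by_basis):
        # deltas_by_basis: iterable of (basis_id, delta) pairs
        ranked = sorted((len(delta), basis_id, delta)
                        for basis_id, delta in deltas_by_basis)
        shortest_len, basis_id, delta = ranked[0]
        return basis_id, delta
    # _example_best_delta([('r1', [1, 2, 3]), ('r2', [1])]) -> ('r2', [1])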

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                                                            revision_ids)
        else:
            a_graph = None
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                                             a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
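
    # Illustrative sketch (not part of bzrlib): the batching pattern above -
    # commit each successful batch's write group, abort on any error - keeps
    # partial progress without ever leaving a write group open. Names are
    # hypothetical:
    def _example_fetch_in_batches(revision_ids, fetch_batch, target,
            batch_size=100):
        hints = []
        for offset in range(0, len(revision_ids), batch_size):
            batch = revision_ids[offset:offset + batch_size]
            target.start_write_group()
            try:
                fetch_batch(batch)
            except:
                target.abort_write_group()
                raise
            else:
                hint = target.commit_write_group()
                if hint:
                    hints.extend(hint)
        return hints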

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        if isinstance(real_source, remote.RemoteRepository):
            raise NotImplementedError(
                "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
InterRepository.register_optimiser(InterRemoteToOther)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
            revision_graph[key] = tuple(parent for parent in parents if parent
                in revision_graph)
        return revision_graph
4203
class StreamSink(object):
4204
"""An object that can insert a stream into a repository.
4206
This interface handles the complexity of reserialising inventories and
4207
revisions from different formats, and allows unidirectional insertion into
4208
stacked repositories without looking for the missing basis parents
4212
def __init__(self, target_repo):
4213
self.target_repo = target_repo
4215

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys for
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
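
    # Sketch of the caller's side of the resume protocol (hypothetical
    # `source`/`sink` objects; the shape follows the docstring above rather
    # than being a verbatim copy of bzrlib's fetch code):
    #
    #   resume_tokens, missing_keys = sink.insert_stream(
    #       source.get_stream(search), src_format, [])
    #   if missing_keys:
    #       # The sink suspended its write group; stream the missing records
    #       # and resume with the tokens it handed back.
    #       resume_tokens, missing_keys = sink.insert_stream(
    #           source.get_stream_for_missing_keys(missing_keys),
    #           src_format, resume_tokens)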

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set
            # the write cache size on the new pack. This avoids poor
            # performance on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fall back to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('unknown substream type: %s'
                    % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new
        # pack (if this is a pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                    ('texts', self.target_repo.texts),
                    ('inventories', self.target_repo.inventories),
                    ('revisions', self.target_repo.revisions),
                    ('signatures', self.target_repo.signatures),
                    ('chk_bytes', self.target_repo.chk_bytes),
                    ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # Suspend the write group and tell the caller what we are
                # missing. We know we can suspend, or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group.)
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
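
    # Return contract (summarising the code above): ([], set()) once the
    # write group commits, or (write_group_tokens, missing_keys) when the
    # group was suspended because compression parents are still absent.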

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('revision id mismatch: %s != %s'
                    % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is, on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)
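
    # For example (assuming typical format attributes): streaming between two
    # pack-0.92 repositories keeps deltas on revisions and signatures (same
    # serializer, and the format fetches with deltas), while pack-0.92 -> 2a
    # forces fulltexts because the serializers differ.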

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)
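
    # Shape note (a sketch, not formal API docs): get_stream yields
    # (substream_type, record_stream) pairs, which is exactly what
    # StreamSink._locked_insert_stream consumes:
    #
    #   for kind, substream in source.get_stream(search):
    #       ...  # 'texts', 'inventories', 'signatures', 'revisions', ...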

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
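
    # Key shapes (inferred from the routing above, not a formal spec):
    # missing keys arrive prefixed with their substream kind, e.g.
    # ('inventories', 'rev-id') or ('texts', 'file-id', 'rev-id'); key[0]
    # picks the versioned file and key[1:] is the key within it.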

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventory deltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fall back to VFS to insert the deltas).
        yield ('inventory-deltas',
            self._stream_invs_as_deltas(revision_ids,
                delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method.
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
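
    # Record shape (sketch, following the code above): each yielded record is
    # a FulltextContentFactory whose bytes are a serialized delta against
    # basis_id, so a sink can rebuild the inventory with
    # add_inventory_by_delta(basis_id, delta, revision_id, parents), as
    # _extract_and_insert_inventory_deltas does.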


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
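

# Illustrative usage (hypothetical `repo` and revision ids, not from the
# original file):
#
#   partial_history = [tip_revision_id]
#   _iter_for_revno(repo, partial_history, stop_index=5)
#   # partial_history now reaches back to index 5 (six entries, newest
#   # first), unless history is shorter.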