        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candiate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candiate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candiate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering...
                if (# if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change(no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
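
    # Illustrative sketch (hypothetical caller; names like `builder` and
    # `work_tree` are not part of this module): the contract above is
    # typically consumed like this:
    #
    #   delta, version_recorded, fs_hash = builder.record_entry_contents(
    #       ie, parent_invs, path, work_tree, content_summary)
    #   if version_recorded:
    #       any_changes = True
    #   if fs_hash is not None:
    #       sha1, stat_value = fs_hash   # feed a hash cache, if desired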

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # Working data:
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        changes = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]}
        parent_entries = {}
        # {file_id -> [revision_ids]} for entries merged in from parents other
        # than parents[0].
        merged_ids = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
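
    # Illustrative sketch (hypothetical driver code): record_iter_changes is
    # a generator, so a caller must iterate it to completion for the commit
    # data to be recorded:
    #
    #   for file_id, relpath, fs_hash in builder.record_iter_changes(
    #           work_tree, basis_revision_id, iter_changes):
    #       sha1, stat_value = fs_hash   # suitable for tree._observed_sha1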

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count.
            repository.lock_read()
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)
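
    # Illustrative sketch (hypothetical paths, assuming a stacking-capable
    # format): a stacked repository registers its stacked-on repository as a
    # fallback so lookups can be satisfied from it as well:
    #
    #   fallback = bzrdir.BzrDir.open('../base').open_repository()
    #   repo.add_fallback_repository(fallback)
    #   # texts/inventories/revisions/signatures now consult the fallback too.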

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.
        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serializer.write_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)
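
    # Illustrative sketch (hypothetical ids): add_inventory must be called
    # inside a write group, and the inventory's revision_id, if set, must
    # match the insertion revid:
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   validator = repo.add_inventory('rev-1', inv, ['parent-rev'])
    #   repo.commit_write_group()
    #   repo.unlock()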

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
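
    # Illustrative sketch (hypothetical values; see Inventory.apply_delta for
    # the exact contract): a delta is a list of
    # (old_path, new_path, file_id, new_entry) tuples:
    #
    #   delta = [(None, 'new-file', 'new-file-id',
    #             inventory.InventoryFile('new-file-id', 'new-file', root_id))]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       'basis-rev', delta, 'new-rev', ['basis-rev'])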

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in the inventories store and return the sha1 of the
        inventory.
        """
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()
def _do_check_inventories(self, checker, bar):
1197
"""Helper for _check_inventories."""
1199
keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
1200
kinds = ['chk_bytes', 'texts']
1201
count = len(checker.pending_keys)
1202
bar.update("inventories", 0, 2)
1203
current_keys = checker.pending_keys
1204
checker.pending_keys = {}
1205
# Accumulate current checks.
1206
for key in current_keys:
1207
if key[0] != 'inventories' and key[0] not in kinds:
1208
checker._report_items.append('unknown key type %r' % (key,))
1209
keys[key[0]].add(key[1:])
1210
if keys['inventories']:
1211
# NB: output order *should* be roughly sorted - topo or
1212
# inverse topo depending on repository - either way decent
1213
# to just delta against. However, pre-CHK formats didn't
1214
# try to optimise inventory layout on disk. As such the
1215
# pre-CHK code path does not use inventory deltas.
1217
for record in self.inventories.check(keys=keys['inventories']):
1218
if record.storage_kind == 'absent':
1219
checker._report_items.append(
1220
'Missing inventory {%s}' % (record.key,))
1222
last_object = self._check_record('inventories', record,
1223
checker, last_object,
1224
current_keys[('inventories',) + record.key])
1225
del keys['inventories']
1228
bar.update("texts", 1)
1229
while (checker.pending_keys or keys['chk_bytes']
1231
# Something to check.
1232
current_keys = checker.pending_keys
1233
checker.pending_keys = {}
1234
# Accumulate current checks.
1235
for key in current_keys:
1236
if key[0] not in kinds:
1237
checker._report_items.append('unknown key type %r' % (key,))
1238
keys[key[0]].add(key[1:])
1239
# Check the outermost kind only - inventories || chk_bytes || texts
1243
for record in getattr(self, kind).check(keys=keys[kind]):
1244
if record.storage_kind == 'absent':
1245
checker._report_items.append(
1246
'Missing %s {%s}' % (kind, record.key,))
1248
last_object = self._check_record(kind, record,
1249
checker, last_object, current_keys[(kind,) + record.key])

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
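
    # Illustrative equivalence (sketch): for a list of strings `chunks`,
    #   osutils.sha_strings(chunks) == osutils.sha_string(''.join(chunks))
    # so the 'chunked' branch above verifies the sha1 without materialising
    # the fulltext as one string.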

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.
        """
        # In the future we will have a single api for all stores for
        # getting file texts, inventories and revisions, then
        # this construct will accept instances of those things.
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
        # Is it safe to return inventory entries directly from the entry cache,
        # rather than copying them?
        self._safe_to_return_from_cache = False

    @property
    def user_transport(self):
        return self.bzrdir.user_transport

    @property
    def control_transport(self):
        return self._transport

    def __repr__(self):
        if self._fallback_repositories:
            return '%s(%r, fallback_repositories=%r)' % (
                self.__class__.__name__,
                self.base,
                self._fallback_repositories)
        else:
            return '%s(%r)' % (self.__class__.__name__,
                self.base)

    def _has_same_fallbacks(self, other_repo):
        """Returns true if the repositories have the same fallbacks."""
        my_fb = self._fallback_repositories
        other_fb = other_repo._fallback_repositories
        if len(my_fb) != len(other_fb):
            return False
        for f, g in zip(my_fb, other_fb):
            if not f.has_same_location(g):
                return False
        return True

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.

        :return: it may return an opaque hint that can be passed to 'pack'.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.satisfy_refs_for_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)
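
    # Illustrative sketch of the write group lifecycle (hypothetical caller):
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           pass   # insert data here
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()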

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
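
    # Illustrative sketch (hypothetical repositories): fetch copies whatever
    # is needed to reconstruct the named revision, and is a no-op between
    # repositories at the same location:
    #
    #   target_repo.fetch(source_repo, revision_id='some-rev-id')
    #   target_repo.fetch(source_repo)   # or: everything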
1728
716
def create_bundle(self, target, base, fileobj, format=None):
1729
717
return serializer.write_bundle(self, target, base, fileobj, format)
1833
811
dest_repo = a_bzrdir.open_repository()
1834
812
return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]
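
    # Illustrative sketch (hypothetical ids): has_revisions probes many ids
    # in a single query, which is cheaper than calling has_revision in a
    # loop:
    #
    #   present = repo.has_revisions(['rev-a', 'rev-b', 'ghost-rev'])
    #   # `present` is the subset that exists, e.g. set(['rev-a'])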

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]
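
    # Illustrative sketch (hypothetical revision id): the returned value is a
    # TreeDelta describing the revision against its left-hand parent:
    #
    #   delta = repo.get_revision_delta('some-rev-id')
    #   for path, file_id, kind in delta.added:
    #       print 'added', path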

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys
2161
    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        return self._find_file_ids_from_xml_inventory_lines(
            w.iter_lines_added_or_present_in_keys(
                selected_keys, pb=None),
            selected_keys)
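
    # Sketch of the result shape (ids invented for illustration; not bzrlib
    # test data): each altered file-id maps to exactly the subset of the
    # requested revisions that introduced a new text of that file.
    #
    #   >>> repo.fileids_altered_by_revision_ids(['rev-2', 'rev-3'])
    #   {'file-a': set(['rev-2']), 'file-b': set(['rev-2', 'rev-3'])}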

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
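
    # Illustrative use of iter_files_bytes (file and revision ids are
    # hypothetical).  The identifier element lets callers match results back
    # to requests, since results may arrive in any order:
    #
    #   >>> wanted = [('file-a', 'rev-1', 'req-0'),
    #   ...           ('file-b', 'rev-2', 'req-1')]
    #   >>> for identifier, chunks in repo.iter_files_bytes(wanted):
    #   ...     text = ''.join(chunks)  # chunks is an iterable of bytestrings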
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()
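
    # Shape of the returned index (keys invented for illustration): text keys
    # map to their per-file graph parents, with [NULL_REVISION] marking the
    # start of a file's history.
    #
    #   {('file-a', 'rev-2'): [('file-a', 'rev-1')],
    #    ('file-a', 'rev-1'): [NULL_REVISION]}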
    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result
    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("Fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      value; instead they just use the list of revision ids and
        #      ignore missing sigs. Consider removing this work entirely.
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()
    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'.  If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
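
    # Sketch of the ordering contract (revision ids are hypothetical):
    #
    #   >>> list(repo.iter_inventories(['rev-2', 'rev-1']))
    #   # inventories for rev-2 then rev-1, buffering if the storage layer
    #   # returns them in another order
    #   >>> list(repo.iter_inventories(['rev-2', 'rev-1'], 'unordered'))
    #   # whatever order the record stream finds cheapest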
    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text
    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid).  If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost.  Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
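
    # Worked example of the contract (a hypothetical linear history of ten
    # revisions): asking for revno 7 given the known pair (10, 'rev-10')
    # walks back three steps and returns (True, 'rev-7'); if only revisions
    # 9 and 10 are present locally, the walk runs out of history and returns
    # (False, (9, 'rev-9')) instead.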

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        stop_revisions = (None, _mod_revision.NULL_REVISION)
        return graph.iter_lefthand_ancestry(revision_id, stop_revisions)

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        inv = self.get_inventory(revision_id)
        return RevisionTree(self, inv, revision_id)
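
    # Minimal usage sketch (assumes a read-locked repository and an existing
    # revision id; both are hypothetical here):
    #
    #   >>> tree = repo.revision_tree('rev-1')
    #   >>> tree.get_revision_id()
    #   'rev-1'
    #   >>> repo.revision_tree(_mod_revision.NULL_REVISION)
    #   # an empty tree whose inventory has no root entry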

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        parent_map = graph.get_parent_map(keys)
        keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock: as this is a long-running call, it's reasonable to
        lock implicitly for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack. A hint can be
            obtained from the result of commit_write_group(). Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.

        :param clean_obsolete_packs: Clean obsolete packs immediately after
            packing.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()
    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple([parent_revid
                    for (parent_revid,) in parent_keys])
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result

    def _make_parents_provider(self):
        return self

    def get_known_graph_ancestry(self, revision_ids):
        """Return the known graph for a set of revision ids and their ancestors.
        """
        st = static_tuple.StaticTuple
        revision_keys = [st(r_id).intern() for r_id in revision_ids]
        known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
        return graph.GraphThunkIdsToKeys(known_graph)

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
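
    # Sketch (repositories are hypothetical): supplying another repository
    # stacks its parents provider behind this one, so a single graph search
    # can span both:
    #
    #   >>> g = repo.get_graph(fallback_repo)
    #   >>> g.get_parent_map(['rev-1'])  # consults repo, then fallback_repo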
    def _get_versioned_file_checker(self, text_key_references=None,
        ancestors=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            built.
        :param ancestors: Optional result from
            self.get_graph().get_parent_map(self.all_revision_ids()) if already
            available.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references, ancestors=ancestors)
    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)
        InterRepository.get(other).method_name(parameters).
        """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: ignored.
        :return: None.
        """
        ui.ui_factory.warn_experimental_format_fetch(self)
        from bzrlib.fetch import RepoFetcher
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        find_ghosts=find_ghosts)
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
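
    # Illustrative call (repositories and revision id are hypothetical):
    #
    #   >>> inter = InterRepository.get(source_repo, target_repo)
    #   >>> result = inter.search_missing_revision_ids('rev-5',
    #   ...     find_ghosts=False)
    #   >>> result.get_keys()  # revision ids the target still needs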

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")

class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids, so we are assured
            # that all revisions referenced are present, as they are installed
            # in topological order; the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)

class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target.  IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True
    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees
    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
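
    # An inventory delta is a list of (old_path, new_path, file_id, new_entry)
    # tuples; for example (paths and ids invented), a rename shows up as
    #
    #   ('dir/a.txt', 'dir/b.txt', 'file-a', <updated InventoryFile entry>)
    #
    # and this helper simply picks the basis whose delta has the fewest rows.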
    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.  This can be done
        in preparation for fetching actual texts and revisions.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured.  (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree
    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert.  We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id
    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        a_graph = None
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                                                            revision_ids)
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                                             a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0
    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if its a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)

class _VersionedFileChecker(object):
4161
def __init__(self, repository, text_key_references=None, ancestors=None):
4162
self.repository = repository
4163
self.text_index = self.repository._generate_text_key_index(
4164
text_key_references=text_key_references, ancestors=ancestors)
4166
def calculate_file_version_parents(self, text_key):
4167
"""Calculate the correct parents for a file version according to
4170
parent_keys = self.text_index[text_key]
4171
if parent_keys == [_mod_revision.NULL_REVISION]:
4173
return tuple(parent_keys)
4175
def check_file_version_parents(self, texts, progress_bar=None):
4176
"""Check the parents stored in a versioned file are correct.
4178
It also detects file versions that are not referenced by their
4179
corresponding revision's inventory.
4181
:returns: A tuple of (wrong_parents, dangling_file_versions).
4182
wrong_parents is a dict mapping {revision_id: (stored_parents,
4183
correct_parents)} for each revision_id where the stored parents
4184
are not correct. dangling_file_versions is a set of (file_id,
4185
revision_id) tuples for versions that are present in this versioned
4186
file, but not used by the corresponding inventory.
4188
local_progress = None
4189
if progress_bar is None:
4190
local_progress = ui.ui_factory.nested_progress_bar()
4191
progress_bar = local_progress
4193
return self._check_file_version_parents(texts, progress_bar)
4196
local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
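
# Illustrative example with hypothetical revision ids: given
#     {'rev2': ('rev1', 'ghost'), 'rev1': ('null:',)}
# _strip_NULL_ghosts returns
#     {'rev2': ('rev1',), 'rev1': ()}
# because neither 'ghost' nor 'null:' is itself a key in the graph, so both
# are filtered out of the parent tuples.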


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys for
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set
            # the write cache size on the new pack.  This avoids poor
            # performance on transports where append is unbuffered (such as
            # RemoteTransport).  This is safe to do because nothing should
            # read back from the target repository while a stream with
            # matching serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fall back to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new
        # pack (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing.  We know we can suspend or else we would not have
                # entered this code path.  (All repositories that can handle
                # missing keys can handle suspending a write group.)
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
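
    # The ``stream`` argument above is an iterable of (substream_type,
    # records) pairs, e.g. (illustrative only, with hypothetical record
    # iterables):
    #
    #     stream = [('texts', text_records), ('inventories', inv_records),
    #               ('revisions', rev_records), ('signatures', sig_records)]
    #
    # Any other substream type makes _locked_insert_stream raise an
    # AssertionError.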

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()
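
# A minimal StreamSink usage sketch (illustrative; ``target_repo``,
# ``source``, ``stream`` and ``src_format`` are hypothetical, where
# ``source`` would be a StreamSource for the origin repository):
#
#     sink = StreamSink(target_repo)
#     tokens, missing = sink.insert_stream(stream, src_format, [])
#     if missing:
#         # The write group was suspended; stream the missing items and
#         # resume with the tokens from the first call.
#         extra = source.get_stream_for_missing_keys(missing)
#         sink.insert_stream(extra, src_format, tokens)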


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]
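
    # The list returned above has the shape
    #     [('signatures', <record stream>), ('revisions', <record stream>)]
    # with signatures deliberately first, so the sink receives them before
    # the revisions they certify.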

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink.  (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records.  The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
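
    # For example, _rich_root_upgrade() is True when streaming from a
    # pack-0.92 repository (not rich-root) into a 2a repository (rich-root),
    # and False whenever the two formats agree about root versioning.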

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventory-deltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted.  That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fall back to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision,
        # not per-repo (e.g. streaming a non-rich-root revision out of a
        # rich-root repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against.  Make a delta
                # against each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis,
                        # so we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
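
    # Illustrative behaviour: if revision X has parents P1 and P2 and both
    # inventories were already sent, the loop above serializes the smaller
    # of delta(P1 -> X) and delta(P2 -> X); if no suitable parent was sent,
    # or delta_versus_null is True, it sends a delta against the empty
    # NULL_REVISION inventory, i.e. effectively a full inventory.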


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # Skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
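
# A minimal usage sketch (illustrative; ``repo`` is assumed to be locked and
# 'rev-tip' to be a revision id present in it):
#
#     history = ['rev-tip']
#     _iter_for_revno(repo, history, stop_index=10)
#     # history now extends backwards from 'rev-tip' and holds at most
#     # stop_index + 1 == 11 revision ids.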