/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: John Arbash Meinel
  • Date: 2009-07-31 17:42:29 UTC
  • mto: This revision was merged to the branch mainline in revision 4611.
  • Revision ID: john@arbash-meinel.com-20090731174229-w2zdsdlfpeddk8gl
Now we've got to the per-workingtree tests, etc.

The main causes seem to break down into:
  bzrdir.clone() is known to be broken with respect to locking; this
  affects everything that tries to 'push'.

  The shelf code is not compatible with strict locking.

  The merge code seems to have an issue; this might actually be the
  root cause of the clone() problems.
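
For context: under strict locking, every lock_read() or lock_write() must be
balanced by exactly one unlock() before the object is discarded, which is the
discipline the failing tests check. A minimal sketch of that discipline using
the public bzrlib API (branch_url is a placeholder):

    from bzrlib import branch

    b = branch.Branch.open(branch_url)
    b.lock_read()
    try:
        last_rev = b.last_revision()   # any read-locked work goes here
    finally:
        b.unlock()   # strict locking: the lock must never be left held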

--- bzrlib/repofmt/groupcompress_repo.py (old)
+++ bzrlib/repofmt/groupcompress_repo.py (new)
@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+# Copyright (C) 2008, 2009 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@
     knit,
     osutils,
     pack,
+    remote,
     revision as _mod_revision,
     trace,
     ui,
@@ -52,7 +53,6 @@
     ResumedPack,
     Packer,
     )
-from bzrlib.static_tuple import StaticTuple
 
 
 class GCPack(NewPack):
@@ -154,8 +154,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None
 
     def _check_references(self):
         """Make sure our external references are present.
@@ -352,8 +350,7 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = knit._DirectPackAccess(index_to_pack,
-                                        reload_func=self._reload_func)
+        access = knit._DirectPackAccess(index_to_pack)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
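
In the last hunk above, the '-' side wires a reload_func into
knit._DirectPackAccess. The callback gives the pack-access layer a way to
recover when pack files vanish because another process repacked the
repository: the access layer invokes it, the pack-names list is re-read, and
the operation is retried. A hedged sketch of the callback shape
(pack_collection stands for the surrounding RepositoryPackCollection):

    def reload_func():
        # Re-read pack-names; return True if the set of packs changed,
        # so the caller knows a retry is worth attempting.
        return pack_collection.reload_pack_names()

    access = knit._DirectPackAccess(index_to_pack, reload_func=reload_func)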
@@ -413,18 +410,7 @@
 
     def _copy_inventory_texts(self):
         source_vf, target_vf = self._build_vfs('inventory', True, True)
-        # It is not sufficient to just use self.revision_keys, as stacked
-        # repositories can have more inventories than they have revisions.
-        # One alternative would be to do something with
-        # get_parent_map(self.revision_keys), but that shouldn't be any faster
-        # than this.
-        inventory_keys = source_vf.keys()
-        missing_inventories = set(self.revision_keys).difference(inventory_keys)
-        if missing_inventories:
-            missing_inventories = sorted(missing_inventories)
-            raise ValueError('We are missing inventories for revisions: %s'
-                % (missing_inventories,))
-        self._copy_stream(source_vf, target_vf, inventory_keys,
+        self._copy_stream(source_vf, target_vf, self.revision_keys,
                           'inventories', self._get_filtered_inv_stream, 2)
 
     def _copy_chk_texts(self):
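
The '-' side of the _copy_inventory_texts hunk is guarding with plain set
arithmetic: a stacked repository may hold more inventories than revisions, so
it copies everything in source_vf.keys() but first verifies that no revision
being packed lacks its inventory; the '+' side simply copies
self.revision_keys. A toy illustration of the check (the keys are made up):

    revision_keys = set([('rev-1',), ('rev-2',)])
    inventory_keys = set([('rev-1',), ('rev-2',), ('parent-0',)])
    missing = revision_keys.difference(inventory_keys)
    assert not missing   # surplus inventories are fine; missing ones are fatal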
@@ -480,15 +466,6 @@
         if not self._use_pack(self.new_pack):
             self.new_pack.abort()
             return None
-        self.new_pack.finish_content()
-        if len(self.packs) == 1:
-            old_pack = self.packs[0]
-            if old_pack.name == self.new_pack._hash.hexdigest():
-                # The single old pack was already optimally packed.
-                trace.mutter('single pack %s was already optimally packed',
-                    old_pack.name)
-                self.new_pack.abort()
-                return None
         self.pb.update('finishing repack', 6, 7)
         self.new_pack.finish()
         self._pack_collection.allocate(self.new_pack)
@@ -585,92 +562,6 @@
     pack_factory = GCPack
     resumed_pack_factory = ResumedGCPack
 
-    def _check_new_inventories(self):
-        """Detect missing inventories or chk root entries for the new revisions
-        in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
-        """
-        # Ensure that all revisions added in this write group have:
-        #   - corresponding inventories,
-        #   - chk root entries for those inventories,
-        #   - and any present parent inventories have their chk root
-        #     entries too.
-        # And all this should be independent of any fallback repository.
-        problems = []
-        key_deps = self.repo.revisions._index._key_dependencies
-        new_revisions_keys = key_deps.get_new_keys()
-        no_fallback_inv_index = self.repo.inventories._index
-        no_fallback_chk_bytes_index = self.repo.chk_bytes._index
-        no_fallback_texts_index = self.repo.texts._index
-        inv_parent_map = no_fallback_inv_index.get_parent_map(
-            new_revisions_keys)
-        # Are any inventories for corresponding to the new revisions missing?
-        corresponding_invs = set(inv_parent_map)
-        missing_corresponding = set(new_revisions_keys)
-        missing_corresponding.difference_update(corresponding_invs)
-        if missing_corresponding:
-            problems.append("inventories missing for revisions %s" %
-                (sorted(missing_corresponding),))
-            return problems
-        # Are any chk root entries missing for any inventories?  This includes
-        # any present parent inventories, which may be used when calculating
-        # deltas for streaming.
-        all_inv_keys = set(corresponding_invs)
-        for parent_inv_keys in inv_parent_map.itervalues():
-            all_inv_keys.update(parent_inv_keys)
-        # Filter out ghost parents.
-        all_inv_keys.intersection_update(
-            no_fallback_inv_index.get_parent_map(all_inv_keys))
-        parent_invs_only_keys = all_inv_keys.symmetric_difference(
-            corresponding_invs)
-        all_missing = set()
-        inv_ids = [key[-1] for key in all_inv_keys]
-        parent_invs_only_ids = [key[-1] for key in parent_invs_only_keys]
-        root_key_info = _build_interesting_key_sets(
-            self.repo, inv_ids, parent_invs_only_ids)
-        expected_chk_roots = root_key_info.all_keys()
-        present_chk_roots = no_fallback_chk_bytes_index.get_parent_map(
-            expected_chk_roots)
-        missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
-        if missing_chk_roots:
-            problems.append("missing referenced chk root keys: %s"
-                % (sorted(missing_chk_roots),))
-            # Don't bother checking any further.
-            return problems
-        # Find all interesting chk_bytes records, and make sure they are
-        # present, as well as the text keys they reference.
-        chk_bytes_no_fallbacks = self.repo.chk_bytes.without_fallbacks()
-        chk_bytes_no_fallbacks._search_key_func = \
-            self.repo.chk_bytes._search_key_func
-        chk_diff = chk_map.iter_interesting_nodes(
-            chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
-            root_key_info.uninteresting_root_keys)
-        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
-        text_keys = set()
-        try:
-            for record in _filter_text_keys(chk_diff, text_keys, bytes_to_info):
-                pass
-        except errors.NoSuchRevision, e:
-            # XXX: It would be nice if we could give a more precise error here.
-            problems.append("missing chk node(s) for id_to_entry maps")
-        chk_diff = chk_map.iter_interesting_nodes(
-            chk_bytes_no_fallbacks, root_key_info.interesting_pid_root_keys,
-            root_key_info.uninteresting_pid_root_keys)
-        try:
-            for interesting_rec, interesting_map in chk_diff:
-                pass
-        except errors.NoSuchRevision, e:
-            problems.append(
-                "missing chk node(s) for parent_id_basename_to_file_id maps")
-        present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
-        missing_text_keys = text_keys.difference(present_text_keys)
-        if missing_text_keys:
-            problems.append("missing text keys: %r"
-                % (sorted(missing_text_keys),))
-        return problems
-
     def _execute_pack_operations(self, pack_operations,
                                  _packer_class=GCCHKPacker,
                                  reload_func=None):
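
Presence testing throughout the removed _check_new_inventories leans on one
idiom: get_parent_map() only returns entries for keys the index actually
holds, so differencing the queried set against the returned mapping yields
exactly the missing keys. A small sketch of the idiom (index stands for any
bzrlib index object with get_parent_map):

    wanted = set(expected_keys)
    present = index.get_parent_map(wanted)   # dict of key -> parent keys
    missing = wanted.difference(present)
    if missing:
        problems.append("missing keys: %s" % (sorted(missing),))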
@@ -689,7 +580,7 @@
             packer = GCCHKPacker(self, packs, '.autopack',
                                  reload_func=reload_func)
             try:
-                result = packer.pack()
+                packer.pack()
             except errors.RetryWithNewPacks:
                 # An exception is propagating out of this context, make sure
                 # this packer has cleaned up. Packer() doesn't set its new_pack
@@ -698,18 +589,14 @@
                 if packer.new_pack is not None:
                     packer.new_pack.abort()
                 raise
-            if result is None:
-                return
             for pack in packs:
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)
 
 
 class CHKInventoryRepository(KnitPackRepository):
@@ -740,7 +627,7 @@
             _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
                 parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True, track_new_keys=True),
+                track_external_parent_refs=True),
             access=self._pack_collection.revision_index.data_access,
             delta=False)
         self.signatures = GroupCompressVersionedFiles(
@@ -816,16 +703,14 @@
                                  ' no new_path %r' % (file_id,))
             if new_path == '':
                 new_inv.root_id = file_id
-                parent_id_basename_key = StaticTuple('', '').intern()
+                parent_id_basename_key = ('', '')
             else:
                 utf8_entry_name = entry.name.encode('utf-8')
-                parent_id_basename_key = StaticTuple(entry.parent_id,
-                                                     utf8_entry_name).intern()
+                parent_id_basename_key = (entry.parent_id, utf8_entry_name)
             new_value = entry_to_bytes(entry)
             # Populate Caches?
             # new_inv._path_to_fileid_cache[new_path] = file_id
-            key = StaticTuple(file_id).intern()
-            id_to_entry_dict[key] = new_value
+            id_to_entry_dict[(file_id,)] = new_value
             parent_id_basename_dict[parent_id_basename_key] = file_id
 
         new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
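
The '-' side of the hunk above builds its CHK map keys as interned
StaticTuples rather than plain tuples. StaticTuple is bzrlib's memory-compact
tuple replacement, and .intern() makes equal keys share a single object,
which adds up when millions of (file_id,) keys are alive at once. A minimal
sketch:

    from bzrlib.static_tuple import StaticTuple

    key_a = StaticTuple('file-id-1').intern()
    key_b = StaticTuple('file-id-1').intern()
    assert key_a is key_b   # interned equal keys are the same object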
@@ -881,16 +766,10 @@
             if basis_tree is not None:
                 basis_tree.unlock()
 
-    def _deserialise_inventory(self, revision_id, bytes):
-        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
-            (revision_id,))
-
-    def _iter_inventories(self, revision_ids, ordering):
+    def _iter_inventories(self, revision_ids):
         """Iterate over many inventory objects."""
-        if ordering is None:
-            ordering = 'unordered'
         keys = [(revision_id,) for revision_id in revision_ids]
-        stream = self.inventories.get_record_stream(keys, ordering, True)
+        stream = self.inventories.get_record_stream(keys, 'unordered', True)
         texts = {}
         for record in stream:
             if record.storage_kind != 'absent':
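
The ordering argument that the '-' side of this hunk threads through
_iter_inventories goes straight into get_record_stream(), whose arguments are
the keys to fetch, an ordering hint ('unordered', 'topological', etc.), and
whether to include the delta closure; the '+' side hard-codes 'unordered'. A
hedged usage sketch (repo is assumed to be an open, read-locked repository of
this format):

    keys = [(rev_id,) for rev_id in ['rev-1', 'rev-2']]
    stream = repo.inventories.get_record_stream(keys, 'topological', True)
    for record in stream:
        if record.storage_kind != 'absent':
            bytes = record.get_bytes_as('fulltext')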
@@ -900,16 +779,10 @@
         for key in keys:
             yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)
 
-    def _iter_inventory_xmls(self, revision_ids, ordering):
-        # Without a native 'xml' inventory, this method doesn't make sense.
-        # However older working trees, and older bundles want it - so we supply
-        # it allowing _get_inventory_xml to work. Bundles currently use the
-        # serializer directly; this also isn't ideal, but there isn't an xml
-        # iteration interface offered at all for repositories. We could make
-        # _iter_inventory_xmls be part of the contract, even if kept private.
-        inv_to_str = self._serializer.write_inventory_to_string
-        for inv in self.iter_inventories(revision_ids, ordering=ordering):
-            yield inv_to_str(inv), inv.revision_id
+    def _iter_inventory_xmls(self, revision_ids):
+        # Without a native 'xml' inventory, this method doesn't make sense, so
+        # make it raise to trap naughty direct users.
+        raise NotImplementedError(self._iter_inventory_xmls)
 
     def _find_present_inventory_keys(self, revision_keys):
         parent_map = self.inventories.get_parent_map(revision_keys)
@@ -941,22 +814,23 @@
                                         parent_keys)
             present_parent_inv_ids = set(
                 [k[-1] for k in present_parent_inv_keys])
+            uninteresting_root_keys = set()
+            interesting_root_keys = set()
             inventories_to_read = set(revision_ids)
             inventories_to_read.update(present_parent_inv_ids)
-            root_key_info = _build_interesting_key_sets(
-                self, inventories_to_read, present_parent_inv_ids)
-            interesting_root_keys = root_key_info.interesting_root_keys
-            uninteresting_root_keys = root_key_info.uninteresting_root_keys
+            for inv in self.iter_inventories(inventories_to_read):
+                entry_chk_root_key = inv.id_to_entry.key()
+                if inv.revision_id in present_parent_inv_ids:
+                    uninteresting_root_keys.add(entry_chk_root_key)
+                else:
+                    interesting_root_keys.add(entry_chk_root_key)
+
             chk_bytes = self.chk_bytes
             for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                         interesting_root_keys, uninteresting_root_keys,
                         pb=pb):
                 for name, bytes in items:
                     (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
-                    # TODO: consider interning file_id, revision_id here, or
-                    #       pushing that intern() into bytes_to_info()
-                    # TODO: rich_root should always be True here, for all
-                    #       repositories that support chk_bytes
                     if not rich_root and name_utf8 == '':
                         continue
                     try:
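
The next hunk replaces a serializer comparison with an explicit class check,
and on its '+' side first unwraps RemoteRepositoryFormat: the remote format
is a thin proxy, so _ensure_real() forces the real vfs-backed format to be
loaded and _custom_format then exposes it for comparison. The unwrapping
pattern, restated from the diff:

    if isinstance(to_format, remote.RemoteRepositoryFormat):
        to_format._ensure_real()           # load the real format object
        to_format = to_format._custom_format
    same_format = to_format.__class__ is self._format.__class__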
@@ -1005,11 +879,14 @@
 
     def _get_source(self, to_format):
         """Return a source for streaming from this repository."""
-        if self._format._serializer == to_format._serializer:
+        if isinstance(to_format, remote.RemoteRepositoryFormat):
+            # Can't just check attributes on to_format with the current code,
+            # work around this:
+            to_format._ensure_real()
+            to_format = to_format._custom_format
+        if to_format.__class__ is self._format.__class__:
             # We must be exactly the same format, otherwise stuff like the chk
-            # page layout might be different.
-            # Actually, this test is just slightly looser than exact so that
-            # CHK2 <-> 2a transfers will work.
+            # page layout might be different
             return GroupCHKStreamSource(self, to_format)
         return super(CHKInventoryRepository, self)._get_source(to_format)
 
@@ -1090,10 +967,13 @@
         bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         chk_bytes = self.from_repository.chk_bytes
         def _filter_id_to_entry():
-            interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
-                        self._chk_id_roots, uninteresting_root_keys)
-            for record in _filter_text_keys(interesting_nodes, self._text_keys,
-                    bytes_to_info):
+            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
+                        self._chk_id_roots, uninteresting_root_keys):
+                for name, bytes in items:
+                    # Note: we don't care about name_utf8, because we are always
+                    # rich-root = True
+                    _, file_id, revision_id = bytes_to_info(bytes)
+                    self._text_keys.add((file_id, revision_id))
                 if record is not None:
                     yield record
             # Consumed
@@ -1113,10 +993,7 @@
         for stream_info in self._fetch_revision_texts(revision_ids):
             yield stream_info
         self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        self.from_repository.revisions.clear_cache()
-        self.from_repository.signatures.clear_cache()
         yield self._get_inventory_stream(self._revision_keys)
-        self.from_repository.inventories.clear_cache()
         # TODO: The keys to exclude might be part of the search recipe
         # For now, exclude all parents that are at the edge of ancestry, for
         # which we have inventories
@@ -1125,9 +1002,7 @@
                         self._revision_keys)
         for stream_info in self._get_filtered_chk_streams(parent_keys):
             yield stream_info
-        self.from_repository.chk_bytes.clear_cache()
         yield self._get_text_stream()
-        self.from_repository.texts.clear_cache()
 
     def get_stream_for_missing_keys(self, missing_keys):
         # missing keys can only occur when we are byte copying and not
@@ -1142,7 +1017,7 @@
             missing_inventory_keys.add(key[1:])
         if self._chk_id_roots or self._chk_p_id_roots:
             raise AssertionError('Cannot call get_stream_for_missing_keys'
-                ' until all of get_stream() has been consumed.')
+                ' untill all of get_stream() has been consumed.')
         # Yield the inventory stream, so we can find the chk stream
         # Some of the missing_keys will be missing because they are ghosts.
         # As such, we can ignore them. The Sink is required to verify there are
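
The long removal in the next hunk includes _filter_text_keys, a generator
with a deliberate side effect: it yields each CHK record unchanged so the
stream keeps flowing, while accumulating every (file_id, revision_id) text
key it sees into a set the caller owns. A stripped-down sketch of that
pattern in generic Python (iter_records and process are placeholders, not
bzrlib names):

    def filter_keys(records, seen):
        for record, items in records:
            for name, value in items:
                seen.add(value)   # side effect: collect keys as they stream by
            yield record          # pass the record through untouched

    seen = set()
    for record in filter_keys(iter_records(), seen):
        process(record)
    # afterwards, 'seen' holds every key observed in the stream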
@@ -1155,56 +1030,6 @@
             yield stream_info
 
 
-class _InterestingKeyInfo(object):
-    def __init__(self):
-        self.interesting_root_keys = set()
-        self.interesting_pid_root_keys = set()
-        self.uninteresting_root_keys = set()
-        self.uninteresting_pid_root_keys = set()
-
-    def all_interesting(self):
-        return self.interesting_root_keys.union(self.interesting_pid_root_keys)
-
-    def all_uninteresting(self):
-        return self.uninteresting_root_keys.union(
-            self.uninteresting_pid_root_keys)
-
-    def all_keys(self):
-        return self.all_interesting().union(self.all_uninteresting())
-
-
-def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
-    result = _InterestingKeyInfo()
-    for inv in repo.iter_inventories(inventory_ids, 'unordered'):
-        root_key = inv.id_to_entry.key()
-        pid_root_key = inv.parent_id_basename_to_file_id.key()
-        if inv.revision_id in parent_only_inv_ids:
-            result.uninteresting_root_keys.add(root_key)
-            result.uninteresting_pid_root_keys.add(pid_root_key)
-        else:
-            result.interesting_root_keys.add(root_key)
-            result.interesting_pid_root_keys.add(pid_root_key)
-    return result
-
-
-def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_info):
-    """Iterate the result of iter_interesting_nodes, yielding the records
-    and adding to text_keys.
-    """
-    for record, items in interesting_nodes_iterable:
-        for name, bytes in items:
-            # Note: we don't care about name_utf8, because groupcompress repos
-            # are always rich-root, so there are no synthesised root records to
-            # ignore.
-            _, file_id, revision_id = bytes_to_info(bytes)
-            file_id = intern(file_id)
-            revision_id = intern(revision_id)
-            text_keys.add(StaticTuple(file_id, revision_id).intern())
-        yield record
-
-
-
-
 class RepositoryFormatCHK1(RepositoryFormatPack):
     """A hashed CHK+group compress pack repository."""
 
@@ -1249,6 +1074,16 @@
         return ("Development repository format - rich roots, group compression"
             " and chk inventories")
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if (self.supports_tree_reference and 
+            not getattr(target_format, 'supports_tree_reference', False)):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
+
 
 class RepositoryFormatCHK2(RepositoryFormatCHK1):
     """A CHK repository that uses the bencode revision serializer."""
@@ -1271,7 +1106,7 @@
 
 class RepositoryFormat2a(RepositoryFormatCHK2):
     """A CHK repository that uses the bencode revision serializer.
-
+    
     This is the same as RepositoryFormatCHK2 but with a public name.
     """
 
@@ -1287,8 +1122,3 @@
 
     def get_format_string(self):
        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
-
-    def get_format_description(self):
-        """See RepositoryFormat.get_format_description()."""
-        return ("Repository format 2a - rich roots, group compression"
-            " and chk inventories")