/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to breezy/bzr/groupcompress_repo.py

  • Committer: Jelmer Vernooij
  • Date: 2020-11-19 18:28:52 UTC
  • mto: This revision was merged to the branch mainline in revision 7526.
  • Revision ID: jelmer@jelmer.uk-20201119182852-4za22s5mhakix4dq
Remove sixish data type.

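The change is almost entirely mechanical Python-3 porting: bzrlib-era absolute imports become relative breezy imports, iteritems()/itervalues() become items()/values(), `except E, e` becomes `except E as e`, pack buffers switch from str to bytes, set([x]) becomes {x}, format strings become bytes literals, and try/finally progress-bar cleanup becomes `with` blocks. A minimal sketch of the retired idioms and their replacements (hypothetical names, not from this module):

    def summarise(parent_map, chunks):
        # dict iteration: .items() replaces the removed .iteritems()
        for key, parents in parent_map.items():
            print(key, len(parents))
        # pack data is bytes on Python 3, so joins need a bytes separator
        data = b''.join(chunks)
        try:
            text = data.decode('utf-8')
        except UnicodeDecodeError as e:  # 'as e' replaces 'except ..., e'
            text = repr(e)
        return text

    print(summarise({(b'rev-1',): ((b'rev-0',),)}, [b'abc', b'def']))

The reconstructed diff follows.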
--- breezy/bzr/groupcompress_repo.py
+++ breezy/bzr/groupcompress_repo.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+# Copyright (C) 2008-2011 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -18,41 +18,46 @@
 
 import time
 
-from bzrlib import (
-    bzrdir,
-    chk_map,
-    chk_serializer,
+from .. import (
+    controldir,
     debug,
     errors,
-    index as _mod_index,
-    inventory,
-    knit,
     osutils,
-    pack,
     revision as _mod_revision,
     trace,
     ui,
     )
-from bzrlib.btree_index import (
+from ..bzr import (
+    chk_map,
+    chk_serializer,
+    index as _mod_index,
+    inventory,
+    pack,
+    versionedfile,
+    )
+from ..bzr.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
     )
-from bzrlib.groupcompress import (
+from ..bzr.groupcompress import (
    _GCGraphIndex,
    GroupCompressVersionedFiles,
    )
-from bzrlib.repofmt.pack_repo import (
+from .pack_repo import (
+    _DirectPackAccess,
     Pack,
     NewPack,
-    KnitPackRepository,
-    KnitPackStreamSource,
-    PackRootCommitBuilder,
+    PackRepository,
+    PackCommitBuilder,
     RepositoryPackCollection,
     RepositoryFormatPack,
     ResumedPack,
     Packer,
     )
-from bzrlib.static_tuple import StaticTuple
+from ..bzr.vf_repository import (
+    StreamSource,
+    )
+from ..static_tuple import StaticTuple
 
 
 class GCPack(NewPack):
@@ -80,22 +85,22 @@
         else:
             chk_index = None
         Pack.__init__(self,
-            # Revisions: parents list, no text compression.
-            index_builder_class(reference_lists=1),
-            # Inventory: We want to map compression only, but currently the
-            # knit code hasn't been updated enough to understand that, so we
-            # have a regular 2-list index giving parents and compression
-            # source.
-            index_builder_class(reference_lists=1),
-            # Texts: per file graph, for all fileids - so one reference list
-            # and two elements in the key tuple.
-            index_builder_class(reference_lists=1, key_elements=2),
-            # Signatures: Just blobs to store, no compression, no parents
-            # listing.
-            index_builder_class(reference_lists=0),
-            # CHK based storage - just blobs, no compression or parents.
-            chk_index=chk_index
-            )
+                      # Revisions: parents list, no text compression.
+                      index_builder_class(reference_lists=1),
+                      # Inventory: We want to map compression only, but currently the
+                      # knit code hasn't been updated enough to understand that, so we
+                      # have a regular 2-list index giving parents and compression
+                      # source.
+                      index_builder_class(reference_lists=1),
+                      # Texts: per file graph, for all fileids - so one reference list
+                      # and two elements in the key tuple.
+                      index_builder_class(reference_lists=1, key_elements=2),
+                      # Signatures: Just blobs to store, no compression, no parents
+                      # listing.
+                      index_builder_class(reference_lists=0),
+                      # CHK based storage - just blobs, no compression or parents.
+                      chk_index=chk_index
+                      )
         self._pack_collection = pack_collection
         # When we make readonly indices, we need this.
         self.index_class = pack_collection._index_class
@@ -126,8 +131,8 @@
             self.random_name, mode=self._file_mode)
         if 'pack' in debug.debug_flags:
             trace.mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
-                time.ctime(), self.upload_transport.base, self.random_name,
-                time.time() - self.start_time)
+                         time.ctime(), self.upload_transport.base, self.random_name,
+                         time.time() - self.start_time)
         # A list of byte sequences to be written to the new pack, and the
         # aggregate size of them.  Stored as a list rather than separate
         # variables so that the _write_data closure below can update them.
@@ -137,15 +142,16 @@
         # robertc says- this is a closure rather than a method on the object
         # so that the variables are locals, and faster than accessing object
         # members.
-        def _write_data(bytes, flush=False, _buffer=self._buffer,
-            _write=self.write_stream.write, _update=self._hash.update):
-            _buffer[0].append(bytes)
-            _buffer[1] += len(bytes)
+
+        def _write_data(data, flush=False, _buffer=self._buffer,
+                        _write=self.write_stream.write, _update=self._hash.update):
+            _buffer[0].append(data)
+            _buffer[1] += len(data)
             # buffer cap
             if _buffer[1] > self._cache_limit or flush:
-                bytes = ''.join(_buffer[0])
-                _write(bytes)
-                _update(bytes)
+                data = b''.join(_buffer[0])
+                _write(data)
+                _update(data)
                 _buffer[:] = [[], 0]
         # expose this on self, for the occasion when clients want to add data.
         self._write_data = _write_data
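The _write_data closure above batches appended chunks and flushes once the aggregate size passes self._cache_limit; since the chunks are now bytes, the flush joins with b''. A standalone sketch of the same buffering pattern, with write, update and cache_limit as stand-ins for the pack stream, hash and limit used above:

    import hashlib

    def make_buffered_writer(write, update, cache_limit):
        _buffer = [[], 0]  # [list of bytes chunks, total size]

        def _write_data(data, flush=False):
            _buffer[0].append(data)
            _buffer[1] += len(data)
            if _buffer[1] > cache_limit or flush:
                joined = b''.join(_buffer[0])  # bytes separator on Python 3
                write(joined)
                update(joined)
                _buffer[:] = [[], 0]
        return _write_data

    out = []
    h = hashlib.md5()
    w = make_buffered_writer(out.append, h.update, cache_limit=4)
    w(b'ab')
    w(b'cd')
    w(b'', flush=True)
    assert b''.join(out) == b'abcd'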
@@ -193,7 +199,8 @@
         self._pack_collection = pack_collection
         # ATM, We only support this for GCCHK repositories
         if pack_collection.chk_index is None:
-            raise AssertionError('pack_collection.chk_index should not be None')
+            raise AssertionError(
+                'pack_collection.chk_index should not be None')
         self._gather_text_refs = False
         self._chk_id_roots = []
         self._chk_p_id_roots = []
@@ -203,7 +210,8 @@
 
     def _get_progress_stream(self, source_vf, keys, message, pb):
         def pb_stream():
-            substream = source_vf.get_record_stream(keys, 'groupcompress', True)
+            substream = source_vf.get_record_stream(
+                keys, 'groupcompress', True)
             for idx, record in enumerate(substream):
                 if pb is not None:
                     pb.update(message, idx + 1, len(keys))
@@ -213,15 +221,16 @@
     def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
         """Filter the texts of inventories, to find the chk pages."""
         total_keys = len(keys)
+
         def _filtered_inv_stream():
             id_roots_set = set()
             p_id_roots_set = set()
             stream = source_vf.get_record_stream(keys, 'groupcompress', True)
             for idx, record in enumerate(stream):
                 # Inventories should always be with revisions; assume success.
-                bytes = record.get_bytes_as('fulltext')
-                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
-                                                             record.key)
+                lines = record.get_bytes_as('lines')
+                chk_inv = inventory.CHKInventory.deserialise(
+                    None, lines, record.key)
                 if pb is not None:
                     pb.update('inv', idx, total_keys)
                 key = chk_inv.id_to_entry.key()
@@ -262,16 +271,17 @@
         remaining_keys = set(keys)
         counter = [0]
         if self._gather_text_refs:
-            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
             self._text_refs = set()
+
         def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
             cur_keys = root_keys
             while cur_keys:
                 keys_by_search_prefix = {}
                 remaining_keys.difference_update(cur_keys)
                 next_keys = set()
+
                 def handle_internal_node(node):
-                    for prefix, value in node._items.iteritems():
+                    for prefix, value in node._items.items():
                         # We don't want to request the same key twice, and we
                         # want to order it by the first time it is seen.
                         # Even further, we don't want to request a key which is
@@ -283,14 +293,15 @@
                         #       always fill them in for stacked branches
                         if value not in next_keys and value in remaining_keys:
                             keys_by_search_prefix.setdefault(prefix,
-                                []).append(value)
+                                                             []).append(value)
                             next_keys.add(value)
+
                 def handle_leaf_node(node):
                     # Store is None, because we know we have a LeafNode, and we
                     # just want its entries
                     for file_id, bytes in node.iteritems(None):
-                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
-                        self._text_refs.add((file_id, revision_id))
+                        self._text_refs.add(chk_map._bytes_to_text_key(bytes))
+
                 def next_stream():
                     stream = source_vf.get_record_stream(cur_keys,
                                                          'as-requested', True)
@@ -352,8 +363,8 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = knit._DirectPackAccess(index_to_pack,
-                                        reload_func=self._reload_func)
+        access = _DirectPackAccess(index_to_pack,
+                                   reload_func=self._reload_func)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
@@ -393,15 +404,11 @@
                      pb_offset):
         trace.mutter('repacking %d %s', len(keys), message)
         self.pb.update('repacking %s' % (message,), pb_offset)
-        child_pb = ui.ui_factory.nested_progress_bar()
-        try:
+        with ui.ui_factory.nested_progress_bar() as child_pb:
             stream = vf_to_stream(source_vf, keys, message, child_pb)
-            for _ in target_vf._insert_record_stream(stream,
-                                                     random_id=True,
-                                                     reuse_blocks=False):
+            for _, _ in target_vf._insert_record_stream(
+                    stream, random_id=True, reuse_blocks=False):
                 pass
-        finally:
-            child_pb.finished()
 
     def _copy_revision_texts(self):
         source_vf, target_vf = self._build_vfs('revision', True, False)
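Many hunks in this file make the same two conversions seen just above: the `pb = ui.ui_factory.nested_progress_bar()` / try/finally `pb.finished()` pattern becomes a `with` block (the bar is assumed to act as a context manager that calls finished() on exit), and `for _ in target_vf._insert_record_stream(...)` becomes `for _, _ in ...`, matching a stream that now yields pairs. A sketch of the context-manager equivalence, with a toy ProgressBar standing in for breezy's real one:

    class ProgressBar:
        def update(self, msg, cur=None, total=None):
            print(msg, cur, total)

        def finished(self):
            print('finished')

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            self.finished()
            return False  # never swallow exceptions

    # Old style, as removed above:
    pb = ProgressBar()
    try:
        pb.update('repacking', 1, 2)
    finally:
        pb.finished()

    # New style: the same cleanup, guaranteed on error, with less nesting.
    with ProgressBar() as pb:
        pb.update('repacking', 2, 2)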
@@ -419,16 +426,29 @@
         # get_parent_map(self.revision_keys), but that shouldn't be any faster
         # than this.
         inventory_keys = source_vf.keys()
-        missing_inventories = set(self.revision_keys).difference(inventory_keys)
+        missing_inventories = set(
+            self.revision_keys).difference(inventory_keys)
         if missing_inventories:
-            missing_inventories = sorted(missing_inventories)
-            raise ValueError('We are missing inventories for revisions: %s'
-                % (missing_inventories,))
+            # Go back to the original repo, to see if these are really missing
+            # https://bugs.launchpad.net/bzr/+bug/437003
+            # If we are packing a subset of the repo, it is fine to just have
+            # the data in another Pack file, which is not included in this pack
+            # operation.
+            inv_index = self._pack_collection.repo.inventories._index
+            pmap = inv_index.get_parent_map(missing_inventories)
+            really_missing = missing_inventories.difference(pmap)
+            if really_missing:
+                missing_inventories = sorted(really_missing)
+                raise ValueError('We are missing inventories for revisions: %s'
+                                 % (missing_inventories,))
         self._copy_stream(source_vf, target_vf, inventory_keys,
                           'inventories', self._get_filtered_inv_stream, 2)
 
+    def _get_chk_vfs_for_copy(self):
+        return self._build_vfs('chk', False, False)
+
     def _copy_chk_texts(self):
-        source_vf, target_vf = self._build_vfs('chk', False, False)
+        source_vf, target_vf = self._get_chk_vfs_for_copy()
         # TODO: This is technically spurious... if it is a performance issue,
         #       remove it
         total_keys = source_vf.keys()
@@ -437,16 +457,12 @@
                      len(self._chk_id_roots), len(self._chk_p_id_roots),
                      len(total_keys))
         self.pb.update('repacking chk', 3)
-        child_pb = ui.ui_factory.nested_progress_bar()
-        try:
+        with ui.ui_factory.nested_progress_bar() as child_pb:
             for stream in self._get_chk_streams(source_vf, total_keys,
                                                 pb=child_pb):
-                for _ in target_vf._insert_record_stream(stream,
-                                                         random_id=True,
-                                                         reuse_blocks=False):
+                for _, _ in target_vf._insert_record_stream(
+                        stream, random_id=True, reuse_blocks=False):
                     pass
-        finally:
-            child_pb.finished()
 
     def _copy_text_texts(self):
         source_vf, target_vf = self._build_vfs('text', True, True)
@@ -470,7 +486,7 @@
         self.pb.update('repacking', 0, 7)
         self.new_pack = self.open_pack()
         # Is this necessary for GC ?
-        self.new_pack.set_write_cache_size(1024*1024)
+        self.new_pack.set_write_cache_size(1024 * 1024)
         self._copy_revision_texts()
         self._copy_inventory_texts()
         self._copy_chk_texts()
@@ -486,7 +502,7 @@
             if old_pack.name == self.new_pack._hash.hexdigest():
                 # The single old pack was already optimally packed.
                 trace.mutter('single pack %s was already optimally packed',
-                    old_pack.name)
+                             old_pack.name)
                 self.new_pack.abort()
                 return None
         self.pb.update('finishing repack', 6, 7)
@@ -498,7 +514,7 @@
 class GCCHKReconcilePacker(GCCHKPacker):
     """A packer which regenerates indices etc as it copies.
 
-    This is used by ``bzr reconcile`` to cause parent text pointers to be
+    This is used by ``brz reconcile`` to cause parent text pointers to be
     regenerated.
     """
 
@@ -527,7 +543,7 @@
         ancestor_keys = revision_vf.get_parent_map(revision_vf.keys())
         # Strip keys back into revision_ids.
         ancestors = dict((k[0], tuple([p[0] for p in parents]))
-                         for k, parents in ancestor_keys.iteritems())
+                         for k, parents in ancestor_keys.items())
         del ancestor_keys
         # TODO: _generate_text_key_index should be much cheaper to generate from
         #       a chk repository, rather than the current implementation
@@ -536,7 +552,7 @@
         # 2) generate a keys list that contains all the entries that can
         #    be used as-is, with corrected parents.
         ok_keys = []
-        new_parent_keys = {} # (key, parent_keys)
+        new_parent_keys = {}  # (key, parent_keys)
         discarded_keys = []
         NULL_REVISION = _mod_revision.NULL_REVISION
         for key in self._text_refs:
@@ -566,9 +582,10 @@
         del ideal_index
         del file_id_parent_map
         # 3) bulk copy the data, updating records than need it
+
         def _update_parents_for_texts():
             stream = source_vf.get_record_stream(self._text_refs,
-                'groupcompress', False)
+                                                 'groupcompress', False)
             for record in stream:
                 if record.key in new_parent_keys:
                     record.parents = new_parent_keys[record.key]
@@ -580,10 +597,117 @@
         return new_pack.data_inserted() and self._data_changed
 
 
+class GCCHKCanonicalizingPacker(GCCHKPacker):
+    """A packer that ensures inventories have canonical-form CHK maps.
+
+    Ideally this would be part of reconcile, but it's very slow and rarely
+    needed.  (It repairs repositories affected by
+    https://bugs.launchpad.net/bzr/+bug/522637).
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(GCCHKCanonicalizingPacker, self).__init__(*args, **kwargs)
+        self._data_changed = False
+
+    def _exhaust_stream(self, source_vf, keys, message, vf_to_stream, pb_offset):
+        """Create and exhaust a stream, but don't insert it.
+
+        This is useful to get the side-effects of generating a stream.
+        """
+        self.pb.update('scanning %s' % (message,), pb_offset)
+        with ui.ui_factory.nested_progress_bar() as child_pb:
+            list(vf_to_stream(source_vf, keys, message, child_pb))
+
+    def _copy_inventory_texts(self):
+        source_vf, target_vf = self._build_vfs('inventory', True, True)
+        source_chk_vf, target_chk_vf = self._get_chk_vfs_for_copy()
+        inventory_keys = source_vf.keys()
+        # First, copy the existing CHKs on the assumption that most of them
+        # will be correct.  This will save us from having to reinsert (and
+        # recompress) these records later at the cost of perhaps preserving a
+        # few unused CHKs.
+        # (Iterate but don't insert _get_filtered_inv_stream to populate the
+        # variables needed by GCCHKPacker._copy_chk_texts.)
+        self._exhaust_stream(source_vf, inventory_keys, 'inventories',
+                             self._get_filtered_inv_stream, 2)
+        GCCHKPacker._copy_chk_texts(self)
+        # Now copy and fix the inventories, and any regenerated CHKs.
+
+        def chk_canonicalizing_inv_stream(source_vf, keys, message, pb=None):
+            return self._get_filtered_canonicalizing_inv_stream(
+                source_vf, keys, message, pb, source_chk_vf, target_chk_vf)
+        self._copy_stream(source_vf, target_vf, inventory_keys,
+                          'inventories', chk_canonicalizing_inv_stream, 4)
+
+    def _copy_chk_texts(self):
+        # No-op; in this class this happens during _copy_inventory_texts.
+        pass
+
+    def _get_filtered_canonicalizing_inv_stream(self, source_vf, keys, message,
+                                                pb=None, source_chk_vf=None, target_chk_vf=None):
+        """Filter the texts of inventories, regenerating CHKs to make sure they
+        are canonical.
+        """
+        total_keys = len(keys)
+        target_chk_vf = versionedfile.NoDupeAddLinesDecorator(target_chk_vf)
+
+        def _filtered_inv_stream():
+            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
+            search_key_name = None
+            for idx, record in enumerate(stream):
+                # Inventories should always be with revisions; assume success.
+                lines = record.get_bytes_as('lines')
+                chk_inv = inventory.CHKInventory.deserialise(
+                    source_chk_vf, lines, record.key)
+                if pb is not None:
+                    pb.update('inv', idx, total_keys)
+                chk_inv.id_to_entry._ensure_root()
+                if search_key_name is None:
+                    # Find the name corresponding to the search_key_func
+                    search_key_reg = chk_map.search_key_registry
+                    for search_key_name, func in search_key_reg.items():
+                        if func == chk_inv.id_to_entry._search_key_func:
+                            break
+                canonical_inv = inventory.CHKInventory.from_inventory(
+                    target_chk_vf, chk_inv,
+                    maximum_size=chk_inv.id_to_entry._root_node._maximum_size,
+                    search_key_name=search_key_name)
+                if chk_inv.id_to_entry.key() != canonical_inv.id_to_entry.key():
+                    trace.mutter(
+                        'Non-canonical CHK map for id_to_entry of inv: %s '
+                        '(root is %s, should be %s)' % (chk_inv.revision_id,
+                                                        chk_inv.id_to_entry.key()[
+                                                            0],
+                                                        canonical_inv.id_to_entry.key()[0]))
+                    self._data_changed = True
+                p_id_map = chk_inv.parent_id_basename_to_file_id
+                p_id_map._ensure_root()
+                canon_p_id_map = canonical_inv.parent_id_basename_to_file_id
+                if p_id_map.key() != canon_p_id_map.key():
+                    trace.mutter(
+                        'Non-canonical CHK map for parent_id_to_basename of '
+                        'inv: %s (root is %s, should be %s)'
+                        % (chk_inv.revision_id, p_id_map.key()[0],
+                           canon_p_id_map.key()[0]))
+                    self._data_changed = True
+                yield versionedfile.ChunkedContentFactory(
+                    record.key, record.parents, record.sha1, canonical_inv.to_lines(),
+                    chunks_are_lines=True)
+            # We have finished processing all of the inventory records, we
+            # don't need these sets anymore
+        return _filtered_inv_stream()
+
+    def _use_pack(self, new_pack):
+        """Override _use_pack to check for reconcile having changed content."""
+        return new_pack.data_inserted() and self._data_changed
+
+
 class GCRepositoryPackCollection(RepositoryPackCollection):
 
     pack_factory = GCPack
     resumed_pack_factory = ResumedGCPack
+    normal_packer_class = GCCHKPacker
+    optimising_packer_class = GCCHKPacker
 
     def _check_new_inventories(self):
         """Detect missing inventories or chk root entries for the new revisions
@@ -612,13 +736,13 @@
         missing_corresponding.difference_update(corresponding_invs)
         if missing_corresponding:
             problems.append("inventories missing for revisions %s" %
-                (sorted(missing_corresponding),))
+                            (sorted(missing_corresponding),))
             return problems
         # Are any chk root entries missing for any inventories?  This includes
         # any present parent inventories, which may be used when calculating
         # deltas for streaming.
         all_inv_keys = set(corresponding_invs)
-        for parent_inv_keys in inv_parent_map.itervalues():
+        for parent_inv_keys in inv_parent_map.values():
             all_inv_keys.update(parent_inv_keys)
         # Filter out ghost parents.
         all_inv_keys.intersection_update(
@@ -635,7 +759,10 @@
             expected_chk_roots)
         missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
         if missing_chk_roots:
-            problems.append("missing referenced chk root keys: %s"
+            problems.append(
+                "missing referenced chk root keys: %s."
+                "Run 'brz reconcile --canonicalize-chks' on the affected "
+                "repository."
                 % (sorted(missing_chk_roots),))
             # Don't bother checking any further.
             return problems
@@ -647,12 +774,12 @@
         chk_diff = chk_map.iter_interesting_nodes(
             chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
             root_key_info.uninteresting_root_keys)
-        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         text_keys = set()
         try:
-            for record in _filter_text_keys(chk_diff, text_keys, bytes_to_info):
+            for record in _filter_text_keys(chk_diff, text_keys,
+                                            chk_map._bytes_to_text_key):
                 pass
-        except errors.NoSuchRevision, e:
+        except errors.NoSuchRevision as e:
             # XXX: It would be nice if we could give a more precise error here.
             problems.append("missing chk node(s) for id_to_entry maps")
         chk_diff = chk_map.iter_interesting_nodes(
@@ -661,107 +788,68 @@
         try:
             for interesting_rec, interesting_map in chk_diff:
                 pass
-        except errors.NoSuchRevision, e:
+        except errors.NoSuchRevision as e:
             problems.append(
                 "missing chk node(s) for parent_id_basename_to_file_id maps")
         present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
         missing_text_keys = text_keys.difference(present_text_keys)
         if missing_text_keys:
             problems.append("missing text keys: %r"
-                % (sorted(missing_text_keys),))
+                            % (sorted(missing_text_keys),))
         return problems
 
-    def _execute_pack_operations(self, pack_operations,
-                                 _packer_class=GCCHKPacker,
-                                 reload_func=None):
-        """Execute a series of pack operations.
-
-        :param pack_operations: A list of [revision_count, packs_to_combine].
-        :param _packer_class: The class of packer to use (default: Packer).
-        :return: None.
-        """
-        # XXX: Copied across from RepositoryPackCollection simply because we
-        #      want to override the _packer_class ... :(
-        for revision_count, packs in pack_operations:
-            # we may have no-ops from the setup logic
-            if len(packs) == 0:
-                continue
-            packer = GCCHKPacker(self, packs, '.autopack',
-                                 reload_func=reload_func)
-            try:
-                result = packer.pack()
-            except errors.RetryWithNewPacks:
-                # An exception is propagating out of this context, make sure
-                # this packer has cleaned up. Packer() doesn't set its new_pack
-                # state into the RepositoryPackCollection object, so we only
-                # have access to it directly here.
-                if packer.new_pack is not None:
-                    packer.new_pack.abort()
-                raise
-            if result is None:
-                return
-            for pack in packs:
-                self._remove_pack_from_memory(pack)
-        # record the newly available packs and stop advertising the old
-        # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
-
-
-class CHKInventoryRepository(KnitPackRepository):
-    """subclass of KnitPackRepository that uses CHK based inventories."""
-
-    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
-        _serializer):
+
+class CHKInventoryRepository(PackRepository):
+    """subclass of PackRepository that uses CHK based inventories."""
+
+    def __init__(self, _format, a_controldir, control_files, _commit_builder_class,
+                 _serializer):
         """Overridden to change pack collection class."""
-        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
-            _commit_builder_class, _serializer)
-        # and now replace everything it did :)
+        super(CHKInventoryRepository, self).__init__(_format, a_controldir,
+                                                     control_files, _commit_builder_class, _serializer)
         index_transport = self._transport.clone('indices')
         self._pack_collection = GCRepositoryPackCollection(self,
-            self._transport, index_transport,
-            self._transport.clone('upload'),
-            self._transport.clone('packs'),
-            _format.index_builder_class,
-            _format.index_class,
-            use_chk_index=self._format.supports_chks,
-            )
+                                                           self._transport, index_transport,
+                                                           self._transport.clone(
+                                                               'upload'),
+                                                           self._transport.clone(
+                                                               'packs'),
+                                                           _format.index_builder_class,
+                                                           _format.index_class,
+                                                           use_chk_index=self._format.supports_chks,
+                                                           )
         self.inventories = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
-                add_callback=self._pack_collection.inventory_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                          add_callback=self._pack_collection.inventory_index.add_callback,
+                          parents=True, is_locked=self.is_locked,
+                          inconsistency_fatal=False),
             access=self._pack_collection.inventory_index.data_access)
         self.revisions = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.revision_index.combined_index,
-                add_callback=self._pack_collection.revision_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True, track_new_keys=True),
+                          add_callback=self._pack_collection.revision_index.add_callback,
+                          parents=True, is_locked=self.is_locked,
+                          track_external_parent_refs=True, track_new_keys=True),
             access=self._pack_collection.revision_index.data_access,
             delta=False)
         self.signatures = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.signature_index.combined_index,
-                add_callback=self._pack_collection.signature_index.add_callback,
-                parents=False, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                          add_callback=self._pack_collection.signature_index.add_callback,
+                          parents=False, is_locked=self.is_locked,
+                          inconsistency_fatal=False),
             access=self._pack_collection.signature_index.data_access,
             delta=False)
         self.texts = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.text_index.combined_index,
-                add_callback=self._pack_collection.text_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                          add_callback=self._pack_collection.text_index.add_callback,
+                          parents=True, is_locked=self.is_locked,
+                          inconsistency_fatal=False),
             access=self._pack_collection.text_index.data_access)
         # No parents, individual CHK pages don't have specific ancestry
         self.chk_bytes = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.chk_index.combined_index,
-                add_callback=self._pack_collection.chk_index.add_callback,
-                parents=False, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                          add_callback=self._pack_collection.chk_index.add_callback,
+                          parents=False, is_locked=self.is_locked,
+                          inconsistency_fatal=False),
             access=self._pack_collection.chk_index.data_access)
         search_key_name = self._format._serializer.search_key_name
         search_key_func = chk_map.search_key_registry.get(search_key_name)
@@ -788,11 +876,11 @@
         # make inventory
         serializer = self._format._serializer
         result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
-            maximum_size=serializer.maximum_size,
-            search_key_name=serializer.search_key_name)
+                                                       maximum_size=serializer.maximum_size,
+                                                       search_key_name=serializer.search_key_name)
         inv_lines = result.to_lines()
         return self._inventory_add_lines(revision_id, parents,
-            inv_lines, check_content=False)
+                                         inv_lines, check_content=False)
 
     def _create_inv_from_null(self, delta, revision_id):
         """This will mutate new_inv directly.
@@ -816,7 +904,7 @@
                                  ' no new_path %r' % (file_id,))
             if new_path == '':
                 new_inv.root_id = file_id
-                parent_id_basename_key = StaticTuple('', '').intern()
+                parent_id_basename_key = StaticTuple(b'', b'').intern()
             else:
                 utf8_entry_name = entry.name.encode('utf-8')
                 parent_id_basename_key = StaticTuple(entry.parent_id,
@@ -829,7 +917,7 @@
             parent_id_basename_dict[parent_id_basename_key] = file_id
 
         new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
-            parent_id_basename_dict, maximum_size=serializer.maximum_size)
+                                     parent_id_basename_dict, maximum_size=serializer.maximum_size)
         return new_inv
 
     def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
@@ -861,29 +949,31 @@
             raise AssertionError("%r not in write group" % (self,))
         _mod_revision.check_not_reserved_id(new_revision_id)
         basis_tree = None
-        if basis_inv is None:
+        if basis_inv is None or not isinstance(basis_inv, inventory.CHKInventory):
             if basis_revision_id == _mod_revision.NULL_REVISION:
                 new_inv = self._create_inv_from_null(delta, new_revision_id)
+                if new_inv.root_id is None:
+                    raise errors.RootMissing()
                 inv_lines = new_inv.to_lines()
                 return self._inventory_add_lines(new_revision_id, parents,
-                    inv_lines, check_content=False), new_inv
+                                                 inv_lines, check_content=False), new_inv
             else:
                 basis_tree = self.revision_tree(basis_revision_id)
                 basis_tree.lock_read()
-                basis_inv = basis_tree.inventory
+                basis_inv = basis_tree.root_inventory
         try:
             result = basis_inv.create_by_apply_delta(delta, new_revision_id,
-                propagate_caches=propagate_caches)
+                                                     propagate_caches=propagate_caches)
             inv_lines = result.to_lines()
             return self._inventory_add_lines(new_revision_id, parents,
-                inv_lines, check_content=False), result
+                                             inv_lines, check_content=False), result
         finally:
             if basis_tree is not None:
                 basis_tree.unlock()
 
-    def _deserialise_inventory(self, revision_id, bytes):
-        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
-            (revision_id,))
+    def _deserialise_inventory(self, revision_id, lines):
+        return inventory.CHKInventory.deserialise(self.chk_bytes, lines,
                                                  (revision_id,))
 
     def _iter_inventories(self, revision_ids, ordering):
         """Iterate over many inventory objects."""
@@ -894,22 +984,26 @@
         texts = {}
         for record in stream:
             if record.storage_kind != 'absent':
-                texts[record.key] = record.get_bytes_as('fulltext')
+                texts[record.key] = record.get_bytes_as('lines')
             else:
-                raise errors.NoSuchRevision(self, record.key)
+                texts[record.key] = None
         for key in keys:
-            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)
+            lines = texts[key]
+            if lines is None:
+                yield (None, key[-1])
+            else:
+                yield (inventory.CHKInventory.deserialise(
+                    self.chk_bytes, lines, key), key[-1])
 
-    def _iter_inventory_xmls(self, revision_ids, ordering):
+    def _get_inventory_xml(self, revision_id):
+        """Get serialized inventory as a string."""
         # Without a native 'xml' inventory, this method doesn't make sense.
         # However older working trees, and older bundles want it - so we supply
         # it allowing _get_inventory_xml to work. Bundles currently use the
         # serializer directly; this also isn't ideal, but there isn't an xml
-        # iteration interface offered at all for repositories. We could make
-        # _iter_inventory_xmls be part of the contract, even if kept private.
-        inv_to_str = self._serializer.write_inventory_to_string
-        for inv in self.iter_inventories(revision_ids, ordering=ordering):
-            yield inv_to_str(inv), inv.revision_id
+        # iteration interface offered at all for repositories.
+        return self._serializer.write_inventory_to_lines(
+            self.get_inventory(revision_id))
 
     def _find_present_inventory_keys(self, revision_keys):
         parent_map = self.inventories.get_parent_map(revision_keys)
@@ -929,8 +1023,7 @@
         rich_root = self.supports_rich_root()
         bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         file_id_revisions = {}
-        pb = ui.ui_factory.nested_progress_bar()
-        try:
+        with ui.ui_factory.nested_progress_bar() as pb:
             revision_keys = [(r,) for r in revision_ids]
             parent_keys = self._find_parent_keys_of_revisions(revision_keys)
             # TODO: instead of using _find_present_inventory_keys, change the
@@ -938,9 +1031,8 @@
             #       However, we only want to tolerate missing parent
             #       inventories, not missing inventories for revision_ids
             present_parent_inv_keys = self._find_present_inventory_keys(
-                                        parent_keys)
-            present_parent_inv_ids = set(
-                [k[-1] for k in present_parent_inv_keys])
+                parent_keys)
+            present_parent_inv_ids = {k[-1] for k in present_parent_inv_keys}
             inventories_to_read = set(revision_ids)
             inventories_to_read.update(present_parent_inv_ids)
             root_key_info = _build_interesting_key_sets(
@@ -949,8 +1041,8 @@
             uninteresting_root_keys = root_key_info.uninteresting_root_keys
             chk_bytes = self.chk_bytes
             for record, items in chk_map.iter_interesting_nodes(chk_bytes,
-                        interesting_root_keys, uninteresting_root_keys,
-                        pb=pb):
+                                                                interesting_root_keys, uninteresting_root_keys,
+                                                                pb=pb):
                 for name, bytes in items:
                     (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
                     # TODO: consider interning file_id, revision_id here, or
@@ -962,9 +1054,7 @@
                     try:
                         file_id_revisions[file_id].add(revision_id)
                     except KeyError:
-                        file_id_revisions[file_id] = set([revision_id])
-        finally:
-            pb.finished()
+                        file_id_revisions[file_id] = {revision_id}
         return file_id_revisions
 
     def find_text_key_references(self):
@@ -982,8 +1072,7 @@
         revision_keys = self.revisions.keys()
         result = {}
         rich_roots = self.supports_rich_root()
-        pb = ui.ui_factory.nested_progress_bar()
-        try:
+        with ui.ui_factory.nested_progress_bar() as pb:
             all_revs = self.all_revision_ids()
             total = len(all_revs)
             for pos, inv in enumerate(self.iter_inventories(all_revs)):
@@ -996,13 +1085,25 @@
                     if entry.revision == inv.revision_id:
                         result[key] = True
             return result
-        finally:
-            pb.finished()
+
+    def reconcile_canonicalize_chks(self):
+        """Reconcile this repository to make sure all CHKs are in canonical
+        form.
+        """
+        from .reconcile import PackReconciler
+        with self.lock_write():
+            reconciler = PackReconciler(
+                self, thorough=True, canonicalize_chks=True)
+            return reconciler.reconcile()
 
     def _reconcile_pack(self, collection, packs, extension, revs, pb):
         packer = GCCHKReconcilePacker(collection, packs, extension)
         return packer.pack(pb)
 
+    def _canonicalize_chks_pack(self, collection, packs, extension, revs, pb):
+        packer = GCCHKCanonicalizingPacker(collection, packs, extension, revs)
+        return packer.pack(pb)
+
     def _get_source(self, to_format):
         """Return a source for streaming from this repository."""
         if self._format._serializer == to_format._serializer:
@@ -1013,8 +1114,39 @@
             return GroupCHKStreamSource(self, to_format)
         return super(CHKInventoryRepository, self)._get_source(to_format)
 
-
-class GroupCHKStreamSource(KnitPackStreamSource):
+    def _find_inconsistent_revision_parents(self, revisions_iterator=None):
+        """Find revisions with different parent lists in the revision object
+        and in the index graph.
+
+        :param revisions_iterator: None, or an iterator of (revid,
+            Revision-or-None). This iterator controls the revisions checked.
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise AssertionError()
+        vf = self.revisions
+        if revisions_iterator is None:
+            revisions_iterator = self.iter_revisions(self.all_revision_ids())
+        for revid, revision in revisions_iterator:
+            if revision is None:
+                pass
+            parent_map = vf.get_parent_map([(revid,)])
+            parents_according_to_index = tuple(parent[-1] for parent in
+                                               parent_map[(revid,)])
+            parents_according_to_revision = tuple(revision.parent_ids)
+            if parents_according_to_index != parents_according_to_revision:
+                yield (revid, parents_according_to_index,
+                       parents_according_to_revision)
+
+    def _check_for_inconsistent_revision_parents(self):
+        inconsistencies = list(self._find_inconsistent_revision_parents())
+        if inconsistencies:
+            raise errors.BzrCheckError(
+                "Revision index has inconsistent parents.")
+
+
+class GroupCHKStreamSource(StreamSource):
     """Used when both the source and target repo are GroupCHK repos."""
 
     def __init__(self, from_repository, to_format):
@@ -1034,6 +1166,7 @@
         """
         self._chk_id_roots = []
         self._chk_p_id_roots = []
+
         def _filtered_inv_stream():
             id_roots_set = set()
             p_id_roots_set = set()
@@ -1046,8 +1179,8 @@
                         continue
                     else:
                         raise errors.NoSuchRevision(self, record.key)
-                bytes = record.get_bytes_as('fulltext')
-                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
+                lines = record.get_bytes_as('lines')
+                chk_inv = inventory.CHKInventory.deserialise(None, lines,
                                                              record.key)
                 key = chk_inv.id_to_entry.key()
                 if key not in id_roots_set:
@@ -1079,7 +1212,7 @@
             # TODO: Update Repository.iter_inventories() to add
             #       ignore_missing=True
             present_keys = self.from_repository._find_present_inventory_keys(
-                            excluded_revision_keys)
+                excluded_revision_keys)
             present_ids = [k[-1] for k in present_keys]
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
@@ -1087,23 +1220,24 @@
                 uninteresting_root_keys.add(inv.id_to_entry.key())
                 uninteresting_pid_root_keys.add(
                     inv.parent_id_basename_to_file_id.key())
-        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         chk_bytes = self.from_repository.chk_bytes
+
         def _filter_id_to_entry():
             interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
-                        self._chk_id_roots, uninteresting_root_keys)
+                                                               self._chk_id_roots, uninteresting_root_keys)
             for record in _filter_text_keys(interesting_nodes, self._text_keys,
-                    bytes_to_info):
+                                            chk_map._bytes_to_text_key):
                 if record is not None:
                     yield record
             # Consumed
             self._chk_id_roots = None
         yield 'chk_bytes', _filter_id_to_entry()
+
        def _get_parent_id_basename_to_file_id_pages():
             for record, items in chk_map.iter_interesting_nodes(chk_bytes,
-                        self._chk_p_id_roots, uninteresting_pid_root_keys):
+                                                                self._chk_p_id_roots, uninteresting_pid_root_keys):
                 if record is not None:
                     yield record
             # Consumed
             self._chk_p_id_roots = None
         yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()
@@ -1110,24 +1244,52 @@
 
+    def _get_text_stream(self):
+        # Note: We know we don't have to handle adding root keys, because both
+        # the source and target are the identical network name.
+        text_stream = self.from_repository.texts.get_record_stream(
+            self._text_keys, self._text_fetch_order, False)
+        return ('texts', text_stream)
+
     def get_stream(self, search):
+        def wrap_and_count(pb, rc, stream):
+            """Yield records from stream while showing progress."""
+            count = 0
+            for record in stream:
+                if count == rc.STEP:
+                    rc.increment(count)
+                    pb.update('Estimate', rc.current, rc.max)
+                    count = 0
+                count += 1
+                yield record
+
         revision_ids = search.get_keys()
-        for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
-        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        self.from_repository.revisions.clear_cache()
-        self.from_repository.signatures.clear_cache()
-        yield self._get_inventory_stream(self._revision_keys)
-        self.from_repository.inventories.clear_cache()
-        # TODO: The keys to exclude might be part of the search recipe
-        # For now, exclude all parents that are at the edge of ancestry, for
-        # which we have inventories
-        from_repo = self.from_repository
-        parent_keys = from_repo._find_parent_keys_of_revisions(
-                        self._revision_keys)
-        for stream_info in self._get_filtered_chk_streams(parent_keys):
-            yield stream_info
-        self.from_repository.chk_bytes.clear_cache()
-        yield self._get_text_stream()
-        self.from_repository.texts.clear_cache()
+        with ui.ui_factory.nested_progress_bar() as pb:
+            rc = self._record_counter
+            self._record_counter.setup(len(revision_ids))
+            for stream_info in self._fetch_revision_texts(revision_ids):
+                yield (stream_info[0],
+                       wrap_and_count(pb, rc, stream_info[1]))
+            self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+            # TODO: The keys to exclude might be part of the search recipe
+            # For now, exclude all parents that are at the edge of ancestry, for
+            # which we have inventories
+            from_repo = self.from_repository
+            parent_keys = from_repo._find_parent_keys_of_revisions(
+                self._revision_keys)
+            self.from_repository.revisions.clear_cache()
+            self.from_repository.signatures.clear_cache()
+            # Clear the repo's get_parent_map cache too.
+            self.from_repository._unstacked_provider.disable_cache()
+            self.from_repository._unstacked_provider.enable_cache()
+            s = self._get_inventory_stream(self._revision_keys)
+            yield (s[0], wrap_and_count(pb, rc, s[1]))
+            self.from_repository.inventories.clear_cache()
+            for stream_info in self._get_filtered_chk_streams(parent_keys):
+                yield (stream_info[0], wrap_and_count(pb, rc, stream_info[1]))
+            self.from_repository.chk_bytes.clear_cache()
+            s = self._get_text_stream()
+            yield (s[0], wrap_and_count(pb, rc, s[1]))
+            self.from_repository.texts.clear_cache()
+            pb.update('Done', rc.max, rc.max)
 
     def get_stream_for_missing_keys(self, missing_keys):
         # missing keys can only occur when we are byte copying and not
@@ -1137,12 +1299,12 @@
         for key in missing_keys:
             if key[0] != 'inventories':
                 raise AssertionError('The only missing keys we should'
-                    ' be filling in are inventory keys, not %s'
-                    % (key[0],))
+                                     ' be filling in are inventory keys, not %s'
+                                     % (key[0],))
             missing_inventory_keys.add(key[1:])
         if self._chk_id_roots or self._chk_p_id_roots:
             raise AssertionError('Cannot call get_stream_for_missing_keys'
-                ' until all of get_stream() has been consumed.')
+                                 ' until all of get_stream() has been consumed.')
         # Yield the inventory stream, so we can find the chk stream
         # Some of the missing_keys will be missing because they are ghosts.
         # As such, we can ignore them. The Sink is required to verify there are
@@ -1187,35 +1349,25 @@
     return result
 
 
-def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_info):
+def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_text_key):
     """Iterate the result of iter_interesting_nodes, yielding the records
     and adding to text_keys.
     """
+    text_keys_update = text_keys.update
     for record, items in interesting_nodes_iterable:
-        for name, bytes in items:
-            # Note: we don't care about name_utf8, because groupcompress repos
-            # are always rich-root, so there are no synthesised root records to
-            # ignore.
-            _, file_id, revision_id = bytes_to_info(bytes)
-            file_id = intern(file_id)
-            revision_id = intern(revision_id)
-            text_keys.add(StaticTuple(file_id, revision_id).intern())
+        text_keys_update([bytes_to_text_key(b) for n, b in items])
         yield record
 
 
-
-
-class RepositoryFormatCHK1(RepositoryFormatPack):
-    """A hashed CHK+group compress pack repository."""
+class RepositoryFormat2a(RepositoryFormatPack):
+    """A CHK repository that uses the bencode revision serializer."""
 
     repository_class = CHKInventoryRepository
     supports_external_lookups = True
     supports_chks = True
-    # For right now, setting this to True gives us InterModel1And2 rather
-    # than InterDifferingSerializer
-    _commit_builder_class = PackRootCommitBuilder
+    _commit_builder_class = PackCommitBuilder
     rich_root_data = True
-    _serializer = chk_serializer.chk_serializer_255_bigpage
+    _serializer = chk_serializer.chk_bencode_serializer
     _commit_inv_deltas = True
     # What index classes to use
     index_builder_class = BTreeBuilder
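In _filter_text_keys above, the per-entry loop (parse, intern(), set.add) collapses into one set.update call per node, with parsing delegated to chk_map._bytes_to_text_key. A self-contained sketch of the batching pattern; the item encoding and parse below are illustrative, not breezy's real format:

    def collect_text_keys(items_per_node, parse):
        text_keys = set()
        text_keys_update = text_keys.update  # bind the method once
        for items in items_per_node:
            # one update() call per node instead of one add() per entry
            text_keys_update([parse(b) for n, b in items])
        return text_keys

    items_per_node = [
        [(b'a', b'file-1\x00rev-1'), (b'b', b'file-2\x00rev-1')],
        [(b'c', b'file-1\x00rev-2')],
    ]
    parse = lambda b: tuple(b.split(b'\x00'))
    assert collect_text_keys(items_per_node, parse) == {
        (b'file-1', b'rev-1'), (b'file-2', b'rev-1'), (b'file-1', b'rev-2')}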
@@ -1227,68 +1379,53 @@
     # multiple in-a-row (and sharing strings). Topological is better
     # for remote, because we access less data.
     _fetch_order = 'unordered'
-    _fetch_uses_deltas = False # essentially ignored by the groupcompress code.
+    # essentially ignored by the groupcompress code.
+    _fetch_uses_deltas = False
     fast_deltas = True
     pack_compresses = True
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development6-rich-root')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        """See RepositoryFormat.get_format_string()."""
-        return ('Bazaar development format - group compression and chk inventory'
-                ' (needs bzr.dev from 1.14)\n')
-
-    def get_format_description(self):
-        """See RepositoryFormat.get_format_description()."""
-        return ("Development repository format - rich roots, group compression"
-            " and chk inventories")
-
-
-class RepositoryFormatCHK2(RepositoryFormatCHK1):
-    """A CHK repository that uses the bencode revision serializer."""
-
-    _serializer = chk_serializer.chk_bencode_serializer
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development7-rich-root')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        """See RepositoryFormat.get_format_string()."""
-        return ('Bazaar development format - chk repository with bencode '
-                'revision serialization (needs bzr.dev from 1.16)\n')
-
-
-class RepositoryFormat2a(RepositoryFormatCHK2):
-    """A CHK repository that uses the bencode revision serializer.
-
-    This is the same as RepositoryFormatCHK2 but with a public name.
-    """
-
-    _serializer = chk_serializer.chk_bencode_serializer
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('2a')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
+    supports_tree_reference = True
+
+    def _get_matching_bzrdir(self):
+        return controldir.format_registry.make_controldir('2a')
+
+    def _ignore_setting_bzrdir(self, format):
+        pass
+
+    _matchingcontroldir = property(
+        _get_matching_bzrdir, _ignore_setting_bzrdir)
+
+    @classmethod
+    def get_format_string(cls):
+        return b'Bazaar repository format 2a (needs bzr 1.16 or later)\n'
 
     def get_format_description(self):
         """See RepositoryFormat.get_format_description()."""
         return ("Repository format 2a - rich roots, group compression"
-            " and chk inventories")
+                " and chk inventories")
+
+
+class RepositoryFormat2aSubtree(RepositoryFormat2a):
+    """A 2a repository format that supports nested trees.
+
+    """
+
+    def _get_matching_bzrdir(self):
+        return controldir.format_registry.make_controldir('development-subtree')
+
+    def _ignore_setting_bzrdir(self, format):
+        pass
+
+    _matchingcontroldir = property(
+        _get_matching_bzrdir, _ignore_setting_bzrdir)
+
+    @classmethod
+    def get_format_string(cls):
+        return b'Bazaar development format 8\n'
+
+    def get_format_description(self):
+        """See RepositoryFormat.get_format_description()."""
+        return ("Development repository format 8 - nested trees, "
+                "group compression and chk inventories")
+
+    experimental = True
+    supports_tree_reference = True
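The tail of the diff folds the old RepositoryFormatCHK1/RepositoryFormatCHK2/RepositoryFormat2a inheritance chain into a single RepositoryFormat2a (plus a new experimental RepositoryFormat2aSubtree), and get_format_string becomes a classmethod returning bytes, since format markers are read from and written to disk. A sketch of how such a marker might be matched, using the 2a string from the diff (the probe_format helper is hypothetical, not part of breezy):

    FORMAT_2A = b'Bazaar repository format 2a (needs bzr 1.16 or later)\n'

    def probe_format(marker: bytes) -> str:
        # on-disk markers are bytes; compare bytes to bytes
        if marker == FORMAT_2A:
            return 'RepositoryFormat2a'
        raise KeyError('unknown repository format: %r' % (marker,))

    assert probe_format(FORMAT_2A) == 'RepositoryFormat2a'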