/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-03-24 17:01:50 UTC
  • mfrom: (4178.3.7 lru_cache_linked_lst)
  • Revision ID: pqm@pqm.ubuntu.com-20090324170150-9wtdpv5w7192zdwy
(jam) Improvements to LRUCache structure, use a double-linked-list
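The LRUCache change this merge describes lives in bzrlib/lru_cache.py; the diff below only covers what the merge touched in bzrlib/repofmt/pack_repo.py. As a rough illustration of the technique named in the commit message (a hypothetical sketch, not bzrlib's actual lru_cache code), an LRU cache pairs a dict with a double-linked list: the dict gives O(1) lookup by key, and the list keeps entries in recency order, so bumping an entry to the most-recently-used end on a hit and evicting from the least-recently-used end are both O(1) pointer updates rather than O(n) list scans.

# Hypothetical sketch of an LRU cache over a double-linked list; an
# illustration of the idea only, not the bzrlib.lru_cache implementation.

class _Node(object):
    """One cache entry, linked to its recency-order neighbours."""

    __slots__ = ('key', 'value', 'prev', 'next')

    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.prev = None
        self.next = None


class LRUCache(object):
    """Map keys to values, discarding the least recently used entry."""

    def __init__(self, max_cache=100):
        self._cache = {}        # key -> _Node, for O(1) lookup
        self._max_cache = max_cache
        self._head = None       # most recently used
        self._tail = None       # least recently used

    def _unlink(self, node):
        # Splice a node out of the list in O(1).
        if node.prev is not None:
            node.prev.next = node.next
        else:
            self._head = node.next
        if node.next is not None:
            node.next.prev = node.prev
        else:
            self._tail = node.prev
        node.prev = node.next = None

    def _push_front(self, node):
        # Make node the most-recently-used entry in O(1).
        node.next = self._head
        if self._head is not None:
            self._head.prev = node
        self._head = node
        if self._tail is None:
            self._tail = node

    def __getitem__(self, key):
        node = self._cache[key]   # raises KeyError on a miss
        self._unlink(node)
        self._push_front(node)    # a hit refreshes recency
        return node.value

    def __setitem__(self, key, value):
        if key in self._cache:
            node = self._cache[key]
            node.value = value
            self._unlink(node)
        else:
            node = _Node(key, value)
            self._cache[key] = node
        self._push_front(node)
        if len(self._cache) > self._max_cache:
            lru = self._tail      # evict the least recently used
            self._unlink(lru)
            del self._cache[lru.key]

With a plain list, the recency bump on every hit would cost an O(n) remove(); the extra prev/next pointers are what make both the bump and the eviction constant-time.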

=== modified file 'bzrlib/repofmt/pack_repo.py'
--- bzrlib/repofmt/pack_repo.py
+++ bzrlib/repofmt/pack_repo.py
1
 
# Copyright (C) 2007-2010 Canonical Ltd
 
1
# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
2
2
#
3
3
# This program is free software; you can redistribute it and/or modify
4
4
# it under the terms of the GNU General Public License as published by
23
23
import time
24
24
 
25
25
from bzrlib import (
26
 
    chk_map,
27
 
    cleanup,
28
26
    debug,
29
27
    graph,
30
28
    osutils,
37
35
    )
38
36
from bzrlib.index import (
39
37
    CombinedGraphIndex,
 
38
    GraphIndex,
 
39
    GraphIndexBuilder,
40
40
    GraphIndexPrefixAdapter,
 
41
    InMemoryGraphIndex,
41
42
    )
42
43
from bzrlib.knit import (
43
44
    KnitPlainFactory,
52
53
    errors,
53
54
    lockable_files,
54
55
    lockdir,
55
 
    revision as _mod_revision,
 
56
    symbol_versioning,
56
57
    )
57
58
 
58
 
from bzrlib.decorators import needs_write_lock, only_raises
 
59
from bzrlib.decorators import needs_write_lock
59
60
from bzrlib.btree_index import (
60
61
    BTreeGraphIndex,
61
62
    BTreeBuilder,
70
71
    MetaDirRepositoryFormat,
71
72
    RepositoryFormat,
72
73
    RootCommitBuilder,
73
 
    StreamSource,
74
74
    )
 
75
import bzrlib.revision as _mod_revision
75
76
from bzrlib.trace import (
76
77
    mutter,
77
 
    note,
78
78
    warning,
79
79
    )
80
80
 
131
131
    # A map of index 'type' to the file extension and position in the
132
132
    # index_sizes array.
133
133
    index_definitions = {
134
 
        'chk': ('.cix', 4),
135
134
        'revision': ('.rix', 0),
136
135
        'inventory': ('.iix', 1),
137
136
        'text': ('.tix', 2),
139
138
        }
140
139
 
141
140
    def __init__(self, revision_index, inventory_index, text_index,
142
 
        signature_index, chk_index=None):
 
141
        signature_index):
143
142
        """Create a pack instance.
144
143
 
145
144
        :param revision_index: A GraphIndex for determining what revisions are
152
151
            texts/deltas (via (fileid, revisionid) tuples).
153
152
        :param signature_index: A GraphIndex for determining what signatures are
154
153
            present in the Pack and accessing the locations of their texts.
155
 
        :param chk_index: A GraphIndex for accessing content by CHK, if the
156
 
            pack has one.
157
154
        """
158
155
        self.revision_index = revision_index
159
156
        self.inventory_index = inventory_index
160
157
        self.text_index = text_index
161
158
        self.signature_index = signature_index
162
 
        self.chk_index = chk_index
163
159
 
164
160
    def access_tuple(self):
165
161
        """Return a tuple (transport, name) for the pack content."""
226
222
        return self.index_name('text', name)
227
223
 
228
224
    def _replace_index_with_readonly(self, index_type):
229
 
        unlimited_cache = False
230
 
        if index_type == 'chk':
231
 
            unlimited_cache = True
232
225
        setattr(self, index_type + '_index',
233
226
            self.index_class(self.index_transport,
234
227
                self.index_name(index_type, self.name),
235
 
                self.index_sizes[self.index_offset(index_type)],
236
 
                unlimited_cache=unlimited_cache))
 
228
                self.index_sizes[self.index_offset(index_type)]))
237
229
 
238
230
 
239
231
class ExistingPack(Pack):
240
232
    """An in memory proxy for an existing .pack and its disk indices."""
241
233
 
242
234
    def __init__(self, pack_transport, name, revision_index, inventory_index,
243
 
        text_index, signature_index, chk_index=None):
 
235
        text_index, signature_index):
244
236
        """Create an ExistingPack object.
245
237
 
246
238
        :param pack_transport: The transport where the pack file resides.
247
239
        :param name: The name of the pack on disk in the pack_transport.
248
240
        """
249
241
        Pack.__init__(self, revision_index, inventory_index, text_index,
250
 
            signature_index, chk_index)
 
242
            signature_index)
251
243
        self.name = name
252
244
        self.pack_transport = pack_transport
253
245
        if None in (revision_index, inventory_index, text_index,
270
262
 
271
263
    def __init__(self, name, revision_index, inventory_index, text_index,
272
264
        signature_index, upload_transport, pack_transport, index_transport,
273
 
        pack_collection, chk_index=None):
 
265
        pack_collection):
274
266
        """Create a ResumedPack object."""
275
267
        ExistingPack.__init__(self, pack_transport, name, revision_index,
276
 
            inventory_index, text_index, signature_index,
277
 
            chk_index=chk_index)
 
268
            inventory_index, text_index, signature_index)
278
269
        self.upload_transport = upload_transport
279
270
        self.index_transport = index_transport
280
271
        self.index_sizes = [None, None, None, None]
284
275
            ('text', text_index),
285
276
            ('signature', signature_index),
286
277
            ]
287
 
        if chk_index is not None:
288
 
            indices.append(('chk', chk_index))
289
 
            self.index_sizes.append(None)
290
278
        for index_type, index in indices:
291
279
            offset = self.index_offset(index_type)
292
280
            self.index_sizes[offset] = index._size
307
295
        self.upload_transport.delete(self.file_name())
308
296
        indices = [self.revision_index, self.inventory_index, self.text_index,
309
297
            self.signature_index]
310
 
        if self.chk_index is not None:
311
 
            indices.append(self.chk_index)
312
298
        for index in indices:
313
299
            index._transport.delete(index._name)
314
300
 
315
301
    def finish(self):
316
302
        self._check_references()
317
 
        index_types = ['revision', 'inventory', 'text', 'signature']
318
 
        if self.chk_index is not None:
319
 
            index_types.append('chk')
320
 
        for index_type in index_types:
 
303
        new_name = '../packs/' + self.file_name()
 
304
        self.upload_transport.rename(self.file_name(), new_name)
 
305
        for index_type in ['revision', 'inventory', 'text', 'signature']:
321
306
            old_name = self.index_name(index_type, self.name)
322
307
            new_name = '../indices/' + old_name
323
308
            self.upload_transport.rename(old_name, new_name)
324
309
            self._replace_index_with_readonly(index_type)
325
 
        new_name = '../packs/' + self.file_name()
326
 
        self.upload_transport.rename(self.file_name(), new_name)
327
310
        self._state = 'finished'
328
311
 
329
312
    def _get_external_refs(self, index):
330
 
        """Return compression parents for this index that are not present.
331
 
 
332
 
        This returns any compression parents that are referenced by this index,
333
 
        which are not contained *in* this index. They may be present elsewhere.
334
 
        """
335
313
        return index.external_references(1)
336
314
 
337
315
 
349
327
        # The relative locations of the packs are constrained, but all are
350
328
        # passed in because the caller has them, so as to avoid object churn.
351
329
        index_builder_class = pack_collection._index_builder_class
352
 
        if pack_collection.chk_index is not None:
353
 
            chk_index = index_builder_class(reference_lists=0)
354
 
        else:
355
 
            chk_index = None
356
330
        Pack.__init__(self,
357
331
            # Revisions: parents list, no text compression.
358
332
            index_builder_class(reference_lists=1),
367
341
            # Signatures: Just blobs to store, no compression, no parents
368
342
            # listing.
369
343
            index_builder_class(reference_lists=0),
370
 
            # CHK based storage - just blobs, no compression or parents.
371
 
            chk_index=chk_index
372
344
            )
373
345
        self._pack_collection = pack_collection
374
346
        # When we make readonly indices, we need this.
383
355
        self._file_mode = file_mode
384
356
        # tracks the content written to the .pack file.
385
357
        self._hash = osutils.md5()
386
 
        # a tuple with the length in bytes of the indices, once the pack
387
 
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
 
358
        # a four-tuple with the length in bytes of the indices, once the pack
 
359
        # is finalised. (rev, inv, text, sigs)
388
360
        self.index_sizes = None
389
361
        # How much data to cache when writing packs. Note that this is not
390
362
        # synchronised with reads, because it's not in the transport layer, so
428
400
        self._writer.begin()
429
401
        # what state is the pack in? (open, finished, aborted)
430
402
        self._state = 'open'
431
 
        # no name until we finish writing the content
432
 
        self.name = None
433
403
 
434
404
    def abort(self):
435
405
        """Cancel creating this pack."""
453
423
        return bool(self.get_revision_count() or
454
424
            self.inventory_index.key_count() or
455
425
            self.text_index.key_count() or
456
 
            self.signature_index.key_count() or
457
 
            (self.chk_index is not None and self.chk_index.key_count()))
458
 
 
459
 
    def finish_content(self):
460
 
        if self.name is not None:
461
 
            return
462
 
        self._writer.end()
463
 
        if self._buffer[1]:
464
 
            self._write_data('', flush=True)
465
 
        self.name = self._hash.hexdigest()
 
426
            self.signature_index.key_count())
466
427
 
467
428
    def finish(self, suspend=False):
468
429
        """Finish the new pack.
475
436
         - stores the index size tuple for the pack in the index_sizes
476
437
           attribute.
477
438
        """
478
 
        self.finish_content()
 
439
        self._writer.end()
 
440
        if self._buffer[1]:
 
441
            self._write_data('', flush=True)
 
442
        self.name = self._hash.hexdigest()
479
443
        if not suspend:
480
444
            self._check_references()
481
445
        # write indices
490
454
        self._write_index('text', self.text_index, 'file texts', suspend)
491
455
        self._write_index('signature', self.signature_index,
492
456
            'revision signatures', suspend)
493
 
        if self.chk_index is not None:
494
 
            self.index_sizes.append(None)
495
 
            self._write_index('chk', self.chk_index,
496
 
                'content hash bytes', suspend)
497
457
        self.write_stream.close()
498
458
        # Note that this will clobber an existing pack with the same name,
499
459
        # without checking for hash collisions. While this is undesirable this
572
532
    # XXX: Probably 'can be written to' could/should be separated from 'acts
573
533
    # like a knit index' -- mbp 20071024
574
534
 
575
 
    def __init__(self, reload_func=None, flush_func=None):
 
535
    def __init__(self, reload_func=None):
576
536
        """Create an AggregateIndex.
577
537
 
578
538
        :param reload_func: A function to call if we find we are missing an
583
543
        self.index_to_pack = {}
584
544
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
585
545
        self.data_access = _DirectPackAccess(self.index_to_pack,
586
 
                                             reload_func=reload_func,
587
 
                                             flush_func=flush_func)
 
546
                                             reload_func=reload_func)
 
547
        self.add_callback = None
 
548
 
 
549
    def replace_indices(self, index_to_pack, indices):
 
550
        """Replace the current mappings with fresh ones.
 
551
 
 
552
        This should probably not be used eventually, rather incremental add and
 
553
        removal of indices. It has been added during refactoring of existing
 
554
        code.
 
555
 
 
556
        :param index_to_pack: A mapping from index objects to
 
557
            (transport, name) tuples for the pack file data.
 
558
        :param indices: A list of indices.
 
559
        """
 
560
        # refresh the revision pack map dict without replacing the instance.
 
561
        self.index_to_pack.clear()
 
562
        self.index_to_pack.update(index_to_pack)
 
563
        # XXX: API break - clearly a 'replace' method would be good?
 
564
        self.combined_index._indices[:] = indices
 
565
        # the current add nodes callback for the current writable index if
 
566
        # there is one.
588
567
        self.add_callback = None
589
568
 
590
569
    def add_index(self, index, pack):
599
578
        # expose it to the index map
600
579
        self.index_to_pack[index] = pack.access_tuple()
601
580
        # put it at the front of the linear index list
602
 
        self.combined_index.insert_index(0, index, pack.name)
 
581
        self.combined_index.insert_index(0, index)
603
582
 
604
583
    def add_writable_index(self, index, pack):
605
584
        """Add an index which is able to have data added to it.
625
604
        self.data_access.set_writer(None, None, (None, None))
626
605
        self.index_to_pack.clear()
627
606
        del self.combined_index._indices[:]
628
 
        del self.combined_index._index_names[:]
629
607
        self.add_callback = None
630
608
 
631
 
    def remove_index(self, index):
 
609
    def remove_index(self, index, pack):
632
610
        """Remove index from the indices used to answer queries.
633
611
 
634
612
        :param index: An index from the pack parameter.
 
613
        :param pack: A Pack instance.
635
614
        """
636
615
        del self.index_to_pack[index]
637
 
        pos = self.combined_index._indices.index(index)
638
 
        del self.combined_index._indices[pos]
639
 
        del self.combined_index._index_names[pos]
 
616
        self.combined_index._indices.remove(index)
640
617
        if (self.add_callback is not None and
641
618
            getattr(index, 'add_nodes', None) == self.add_callback):
642
619
            self.add_callback = None
748
725
 
749
726
    def open_pack(self):
750
727
        """Open a pack for the pack we are creating."""
751
 
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
752
 
                upload_suffix=self.suffix,
 
728
        new_pack = NewPack(self._pack_collection, upload_suffix=self.suffix,
753
729
                file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
754
730
        # We know that we will process all nodes in order, and don't need to
755
731
        # query, so don't combine any indices spilled to disk until we are done
920
896
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
921
897
                new_pack.signature_index.key_count(),
922
898
                time.time() - new_pack.start_time)
923
 
        # copy chk contents
924
 
        # NB XXX: how to check CHK references are present? perhaps by yielding
925
 
        # the items? How should that interact with stacked repos?
926
 
        if new_pack.chk_index is not None:
927
 
            self._copy_chks()
928
 
            if 'pack' in debug.debug_flags:
929
 
                mutter('%s: create_pack: chk content copied: %s%s %d items t+%6.3fs',
930
 
                    time.ctime(), self._pack_collection._upload_transport.base,
931
 
                    new_pack.random_name,
932
 
                    new_pack.chk_index.key_count(),
933
 
                    time.time() - new_pack.start_time)
934
899
        new_pack._check_references()
935
900
        if not self._use_pack(new_pack):
936
901
            new_pack.abort()
940
905
        self._pack_collection.allocate(new_pack)
941
906
        return new_pack
942
907
 
943
 
    def _copy_chks(self, refs=None):
944
 
        # XXX: Todo, recursive follow-pointers facility when fetching some
945
 
        # revisions only.
946
 
        chk_index_map, chk_indices = self._pack_map_and_index_list(
947
 
            'chk_index')
948
 
        chk_nodes = self._index_contents(chk_indices, refs)
949
 
        new_refs = set()
950
 
        # TODO: This isn't strictly tasteful as we are accessing some private
951
 
        #       variables (_serializer). Perhaps a better way would be to have
952
 
        #       Repository._deserialise_chk_node()
953
 
        search_key_func = chk_map.search_key_registry.get(
954
 
            self._pack_collection.repo._serializer.search_key_name)
955
 
        def accumlate_refs(lines):
956
 
            # XXX: move to a generic location
957
 
            # Yay mismatch:
958
 
            bytes = ''.join(lines)
959
 
            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
960
 
            new_refs.update(node.refs())
961
 
        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
962
 
            self.new_pack.chk_index, output_lines=accumlate_refs)
963
 
        return new_refs
964
 
 
965
 
    def _copy_nodes(self, nodes, index_map, writer, write_index,
966
 
        output_lines=None):
967
 
        """Copy knit nodes between packs with no graph references.
968
 
 
969
 
        :param output_lines: Output full texts of copied items.
970
 
        """
 
908
    def _copy_nodes(self, nodes, index_map, writer, write_index):
 
909
        """Copy knit nodes between packs with no graph references."""
971
910
        pb = ui.ui_factory.nested_progress_bar()
972
911
        try:
973
912
            return self._do_copy_nodes(nodes, index_map, writer,
974
 
                write_index, pb, output_lines=output_lines)
 
913
                write_index, pb)
975
914
        finally:
976
915
            pb.finished()
977
916
 
978
 
    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
979
 
        output_lines=None):
 
917
    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
980
918
        # for record verification
981
919
        knit = KnitVersionedFiles(None, None)
982
920
        # plan a readv on each source pack:
1016
954
                izip(reader.iter_records(), pack_readv_requests):
1017
955
                raw_data = read_func(None)
1018
956
                # check the header only
1019
 
                if output_lines is not None:
1020
 
                    output_lines(knit._parse_record(key[-1], raw_data)[0])
1021
 
                else:
1022
 
                    df, _ = knit._parse_record_header(key, raw_data)
1023
 
                    df.close()
 
957
                df, _ = knit._parse_record_header(key, raw_data)
 
958
                df.close()
1024
959
                pos, size = writer.add_bytes_record(raw_data, names)
1025
960
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
1026
961
                pb.update("Copied record", record_index)
1100
1035
            iterator is a tuple with:
1101
1036
            index, readv_vector, node_vector. readv_vector is a list ready to
1102
1037
            hand to the transport readv method, and node_vector is a list of
1103
 
            (key, eol_flag, references) for the node retrieved by the
 
1038
            (key, eol_flag, references) for the the node retrieved by the
1104
1039
            matching readv_vector.
1105
1040
        """
1106
1041
        # group by pack so we do one readv per pack
1297
1232
        # space (we only topo sort the revisions, which is smaller).
1298
1233
        topo_order = tsort.topo_sort(ancestors)
1299
1234
        rev_order = dict(zip(topo_order, range(len(topo_order))))
1300
 
        bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
 
1235
        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
1301
1236
        transaction = repo.get_transaction()
1302
1237
        file_id_index = GraphIndexPrefixAdapter(
1303
1238
            self.new_pack.text_index,
1356
1291
    :ivar _names: map of {pack_name: (index_size,)}
1357
1292
    """
1358
1293
 
1359
 
    pack_factory = NewPack
1360
 
    resumed_pack_factory = ResumedPack
1361
 
 
1362
1294
    def __init__(self, repo, transport, index_transport, upload_transport,
1363
 
                 pack_transport, index_builder_class, index_class,
1364
 
                 use_chk_index):
 
1295
                 pack_transport, index_builder_class, index_class):
1365
1296
        """Create a new RepositoryPackCollection.
1366
1297
 
1367
1298
        :param transport: Addresses the repository base directory
1372
1303
        :param pack_transport: Addresses the directory of existing complete packs.
1373
1304
        :param index_builder_class: The index builder class to use.
1374
1305
        :param index_class: The index class to use.
1375
 
        :param use_chk_index: Whether to setup and manage a CHK index.
1376
1306
        """
1377
1307
        # XXX: This should call self.reset()
1378
1308
        self.repo = repo
1382
1312
        self._pack_transport = pack_transport
1383
1313
        self._index_builder_class = index_builder_class
1384
1314
        self._index_class = index_class
1385
 
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
1386
 
            '.cix': 4}
 
1315
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
1387
1316
        self.packs = []
1388
1317
        # name:Pack mapping
1389
1318
        self._names = None
1393
1322
        # when a pack is being created by this object, the state of that pack.
1394
1323
        self._new_pack = None
1395
1324
        # aggregated revision index data
1396
 
        flush = self._flush_new_pack
1397
 
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
1398
 
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
1399
 
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
1400
 
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
1401
 
        all_indices = [self.revision_index, self.inventory_index,
1402
 
                self.text_index, self.signature_index]
1403
 
        if use_chk_index:
1404
 
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
1405
 
            all_indices.append(self.chk_index)
1406
 
        else:
1407
 
            # used to determine if we're using a chk_index elsewhere.
1408
 
            self.chk_index = None
1409
 
        # Tell all the CombinedGraphIndex objects about each other, so they can
1410
 
        # share hints about which pack names to search first.
1411
 
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
1412
 
        for combined_idx in all_combined:
1413
 
            combined_idx.set_sibling_indices(
1414
 
                set(all_combined).difference([combined_idx]))
 
1325
        self.revision_index = AggregateIndex(self.reload_pack_names)
 
1326
        self.inventory_index = AggregateIndex(self.reload_pack_names)
 
1327
        self.text_index = AggregateIndex(self.reload_pack_names)
 
1328
        self.signature_index = AggregateIndex(self.reload_pack_names)
1415
1329
        # resumed packs
1416
1330
        self._resumed_packs = []
1417
1331
 
1418
 
    def __repr__(self):
1419
 
        return '%s(%r)' % (self.__class__.__name__, self.repo)
1420
 
 
1421
1332
    def add_pack_to_memory(self, pack):
1422
1333
        """Make a Pack object available to the repository to satisfy queries.
1423
1334
 
1432
1343
        self.inventory_index.add_index(pack.inventory_index, pack)
1433
1344
        self.text_index.add_index(pack.text_index, pack)
1434
1345
        self.signature_index.add_index(pack.signature_index, pack)
1435
 
        if self.chk_index is not None:
1436
 
            self.chk_index.add_index(pack.chk_index, pack)
1437
1346
 
1438
1347
    def all_packs(self):
1439
1348
        """Return a list of all the Pack objects this repository has.
1461
1370
        in synchronisation with certain steps. Otherwise the names collection
1462
1371
        is not flushed.
1463
1372
 
1464
 
        :return: Something evaluating true if packing took place.
 
1373
        :return: True if packing took place.
1465
1374
        """
1466
1375
        while True:
1467
1376
            try:
1468
1377
                return self._do_autopack()
1469
 
            except errors.RetryAutopack:
 
1378
            except errors.RetryAutopack, e:
1470
1379
                # If we get a RetryAutopack exception, we should abort the
1471
1380
                # current action, and retry.
1472
1381
                pass
1476
1385
        total_revisions = self.revision_index.combined_index.key_count()
1477
1386
        total_packs = len(self._names)
1478
1387
        if self._max_pack_count(total_revisions) >= total_packs:
1479
 
            return None
 
1388
            return False
 
1389
        # XXX: the following may want to be a class, to pack with a given
 
1390
        # policy.
1480
1391
        # determine which packs need changing
1481
1392
        pack_distribution = self.pack_distribution(total_revisions)
1482
1393
        existing_packs = []
1504
1415
            'containing %d revisions. Packing %d files into %d affecting %d'
1505
1416
            ' revisions', self, total_packs, total_revisions, num_old_packs,
1506
1417
            num_new_packs, num_revs_affected)
1507
 
        result = self._execute_pack_operations(pack_operations,
 
1418
        self._execute_pack_operations(pack_operations,
1508
1419
                                      reload_func=self._restart_autopack)
1509
 
        mutter('Auto-packing repository %s completed', self)
1510
 
        return result
 
1420
        return True
1511
1421
 
1512
1422
    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
1513
1423
                                 reload_func=None):
1515
1425
 
1516
1426
        :param pack_operations: A list of [revision_count, packs_to_combine].
1517
1427
        :param _packer_class: The class of packer to use (default: Packer).
1518
 
        :return: The new pack names.
 
1428
        :return: None.
1519
1429
        """
1520
1430
        for revision_count, packs in pack_operations:
1521
1431
            # we may have no-ops from the setup logic
1537
1447
                self._remove_pack_from_memory(pack)
1538
1448
        # record the newly available packs and stop advertising the old
1539
1449
        # packs
1540
 
        to_be_obsoleted = []
1541
 
        for _, packs in pack_operations:
1542
 
            to_be_obsoleted.extend(packs)
1543
 
        result = self._save_pack_names(clear_obsolete_packs=True,
1544
 
                                       obsolete_packs=to_be_obsoleted)
1545
 
        return result
1546
 
 
1547
 
    def _flush_new_pack(self):
1548
 
        if self._new_pack is not None:
1549
 
            self._new_pack.flush()
 
1450
        self._save_pack_names(clear_obsolete_packs=True)
 
1451
        # Move the old packs out of the way now they are no longer referenced.
 
1452
        for revision_count, packs in pack_operations:
 
1453
            self._obsolete_packs(packs)
1550
1454
 
1551
1455
    def lock_names(self):
1552
1456
        """Acquire the mutex around the pack-names index.
1556
1460
        """
1557
1461
        self.repo.control_files.lock_write()
1558
1462
 
1559
 
    def _already_packed(self):
1560
 
        """Is the collection already packed?"""
1561
 
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
1562
 
 
1563
 
    def pack(self, hint=None, clean_obsolete_packs=False):
 
1463
    def pack(self):
1564
1464
        """Pack the pack collection totally."""
1565
1465
        self.ensure_loaded()
1566
1466
        total_packs = len(self._names)
1567
 
        if self._already_packed():
 
1467
        if total_packs < 2:
 
1468
            # This is arguably wrong because we might not be optimal, but for
 
1469
            # now lets leave it in. (e.g. reconcile -> one pack. But not
 
1470
            # optimal.
1568
1471
            return
1569
1472
        total_revisions = self.revision_index.combined_index.key_count()
1570
1473
        # XXX: the following may want to be a class, to pack with a given
1571
1474
        # policy.
1572
1475
        mutter('Packing repository %s, which has %d pack files, '
1573
 
            'containing %d revisions with hint %r.', self, total_packs,
1574
 
            total_revisions, hint)
 
1476
            'containing %d revisions into 1 packs.', self, total_packs,
 
1477
            total_revisions)
1575
1478
        # determine which packs need changing
 
1479
        pack_distribution = [1]
1576
1480
        pack_operations = [[0, []]]
1577
1481
        for pack in self.all_packs():
1578
 
            if hint is None or pack.name in hint:
1579
 
                # Either no hint was provided (so we are packing everything),
1580
 
                # or this pack was included in the hint.
1581
 
                pack_operations[-1][0] += pack.get_revision_count()
1582
 
                pack_operations[-1][1].append(pack)
 
1482
            pack_operations[-1][0] += pack.get_revision_count()
 
1483
            pack_operations[-1][1].append(pack)
1583
1484
        self._execute_pack_operations(pack_operations, OptimisingPacker)
1584
1485
 
1585
 
        if clean_obsolete_packs:
1586
 
            self._clear_obsolete_packs()
1587
 
 
1588
1486
    def plan_autopack_combinations(self, existing_packs, pack_distribution):
1589
1487
        """Plan a pack operation.
1590
1488
 
1677
1575
            inv_index = self._make_index(name, '.iix')
1678
1576
            txt_index = self._make_index(name, '.tix')
1679
1577
            sig_index = self._make_index(name, '.six')
1680
 
            if self.chk_index is not None:
1681
 
                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
1682
 
            else:
1683
 
                chk_index = None
1684
1578
            result = ExistingPack(self._pack_transport, name, rev_index,
1685
 
                inv_index, txt_index, sig_index, chk_index)
 
1579
                inv_index, txt_index, sig_index)
1686
1580
            self.add_pack_to_memory(result)
1687
1581
            return result
1688
1582
 
1702
1596
            inv_index = self._make_index(name, '.iix', resume=True)
1703
1597
            txt_index = self._make_index(name, '.tix', resume=True)
1704
1598
            sig_index = self._make_index(name, '.six', resume=True)
1705
 
            if self.chk_index is not None:
1706
 
                chk_index = self._make_index(name, '.cix', resume=True,
1707
 
                                             unlimited_cache=True)
1708
 
            else:
1709
 
                chk_index = None
1710
 
            result = self.resumed_pack_factory(name, rev_index, inv_index,
1711
 
                txt_index, sig_index, self._upload_transport,
1712
 
                self._pack_transport, self._index_transport, self,
1713
 
                chk_index=chk_index)
 
1599
            result = ResumedPack(name, rev_index, inv_index, txt_index,
 
1600
                sig_index, self._upload_transport, self._pack_transport,
 
1601
                self._index_transport, self)
1714
1602
        except errors.NoSuchFile, e:
1715
1603
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
1716
1604
        self.add_pack_to_memory(result)
1740
1628
        return self._index_class(self.transport, 'pack-names', None
1741
1629
                ).iter_all_entries()
1742
1630
 
1743
 
    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
 
1631
    def _make_index(self, name, suffix, resume=False):
1744
1632
        size_offset = self._suffix_offsets[suffix]
1745
1633
        index_name = name + suffix
1746
1634
        if resume:
1749
1637
        else:
1750
1638
            transport = self._index_transport
1751
1639
            index_size = self._names[name][size_offset]
1752
 
        return self._index_class(transport, index_name, index_size,
1753
 
                                 unlimited_cache=unlimited_cache)
 
1640
        return self._index_class(transport, index_name, index_size)
1754
1641
 
1755
1642
    def _max_pack_count(self, total_revisions):
1756
1643
        """Return the maximum number of packs to use for total revisions.
1784
1671
        :param return: None.
1785
1672
        """
1786
1673
        for pack in packs:
1787
 
            try:
1788
 
                pack.pack_transport.rename(pack.file_name(),
1789
 
                    '../obsolete_packs/' + pack.file_name())
1790
 
            except (errors.PathError, errors.TransportError), e:
1791
 
                # TODO: Should these be warnings or mutters?
1792
 
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
1793
 
                       % (e,))
 
1674
            pack.pack_transport.rename(pack.file_name(),
 
1675
                '../obsolete_packs/' + pack.file_name())
1794
1676
            # TODO: Probably needs to know all possible indices for this pack
1795
1677
            # - or maybe list the directory and move all indices matching this
1796
1678
            # name whether we recognize it or not?
1797
 
            suffixes = ['.iix', '.six', '.tix', '.rix']
1798
 
            if self.chk_index is not None:
1799
 
                suffixes.append('.cix')
1800
 
            for suffix in suffixes:
1801
 
                try:
1802
 
                    self._index_transport.rename(pack.name + suffix,
1803
 
                        '../obsolete_packs/' + pack.name + suffix)
1804
 
                except (errors.PathError, errors.TransportError), e:
1805
 
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
1806
 
                           % (e,))
 
1679
            for suffix in ('.iix', '.six', '.tix', '.rix'):
 
1680
                self._index_transport.rename(pack.name + suffix,
 
1681
                    '../obsolete_packs/' + pack.name + suffix)
1807
1682
 
1808
1683
    def pack_distribution(self, total_revisions):
1809
1684
        """Generate a list of the number of revisions to put in each pack.
1835
1710
        self._remove_pack_indices(pack)
1836
1711
        self.packs.remove(pack)
1837
1712
 
1838
 
    def _remove_pack_indices(self, pack, ignore_missing=False):
1839
 
        """Remove the indices for pack from the aggregated indices.
1840
 
        
1841
 
        :param ignore_missing: Suppress KeyErrors from calling remove_index.
1842
 
        """
1843
 
        for index_type in Pack.index_definitions.keys():
1844
 
            attr_name = index_type + '_index'
1845
 
            aggregate_index = getattr(self, attr_name)
1846
 
            if aggregate_index is not None:
1847
 
                pack_index = getattr(pack, attr_name)
1848
 
                try:
1849
 
                    aggregate_index.remove_index(pack_index)
1850
 
                except KeyError:
1851
 
                    if ignore_missing:
1852
 
                        continue
1853
 
                    raise
 
1713
    def _remove_pack_indices(self, pack):
 
1714
        """Remove the indices for pack from the aggregated indices."""
 
1715
        self.revision_index.remove_index(pack.revision_index, pack)
 
1716
        self.inventory_index.remove_index(pack.inventory_index, pack)
 
1717
        self.text_index.remove_index(pack.text_index, pack)
 
1718
        self.signature_index.remove_index(pack.signature_index, pack)
1854
1719
 
1855
1720
    def reset(self):
1856
1721
        """Clear all cached data."""
1857
1722
        # cached revision data
 
1723
        self.repo._revision_knit = None
1858
1724
        self.revision_index.clear()
1859
1725
        # cached signature data
 
1726
        self.repo._signature_knit = None
1860
1727
        self.signature_index.clear()
1861
1728
        # cached file text data
1862
1729
        self.text_index.clear()
 
1730
        self.repo._text_knit = None
1863
1731
        # cached inventory data
1864
1732
        self.inventory_index.clear()
1865
 
        # cached chk data
1866
 
        if self.chk_index is not None:
1867
 
            self.chk_index.clear()
1868
1733
        # remove the open pack
1869
1734
        self._new_pack = None
1870
1735
        # information about packs.
1889
1754
        disk_nodes = set()
1890
1755
        for index, key, value in self._iter_disk_pack_index():
1891
1756
            disk_nodes.add((key, value))
1892
 
        orig_disk_nodes = set(disk_nodes)
1893
1757
 
1894
1758
        # do a two-way diff against our original content
1895
1759
        current_nodes = set()
1908
1772
        disk_nodes.difference_update(deleted_nodes)
1909
1773
        disk_nodes.update(new_nodes)
1910
1774
 
1911
 
        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
 
1775
        return disk_nodes, deleted_nodes, new_nodes
1912
1776
 
1913
1777
    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
1914
1778
        """Given the correct set of pack files, update our saved info.
1954
1818
                added.append(name)
1955
1819
        return removed, added, modified
1956
1820
 
1957
 
    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
 
1821
    def _save_pack_names(self, clear_obsolete_packs=False):
1958
1822
        """Save the list of packs.
1959
1823
 
1960
1824
        This will take out the mutex around the pack names list for the
1964
1828
 
1965
1829
        :param clear_obsolete_packs: If True, clear out the contents of the
1966
1830
            obsolete_packs directory.
1967
 
        :param obsolete_packs: Packs that are obsolete once the new pack-names
1968
 
            file has been written.
1969
 
        :return: A list of the names saved that were not previously on disk.
1970
1831
        """
1971
 
        already_obsolete = []
1972
1832
        self.lock_names()
1973
1833
        try:
1974
1834
            builder = self._index_builder_class()
1975
 
            (disk_nodes, deleted_nodes, new_nodes,
1976
 
             orig_disk_nodes) = self._diff_pack_names()
 
1835
            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
1977
1836
            # TODO: handle same-name, index-size-changes here -
1978
1837
            # e.g. use the value from disk, not ours, *unless* we're the one
1979
1838
            # changing it.
1981
1840
                builder.add_node(key, value)
1982
1841
            self.transport.put_file('pack-names', builder.finish(),
1983
1842
                mode=self.repo.bzrdir._get_file_mode())
 
1843
            # move the baseline forward
1984
1844
            self._packs_at_load = disk_nodes
1985
1845
            if clear_obsolete_packs:
1986
 
                to_preserve = None
1987
 
                if obsolete_packs:
1988
 
                    to_preserve = set([o.name for o in obsolete_packs])
1989
 
                already_obsolete = self._clear_obsolete_packs(to_preserve)
 
1846
                self._clear_obsolete_packs()
1990
1847
        finally:
1991
1848
            self._unlock_names()
1992
1849
        # synchronise the memory packs list with what we just wrote:
1993
1850
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
1994
 
        if obsolete_packs:
1995
 
            # TODO: We could add one more condition here. "if o.name not in
1996
 
            #       orig_disk_nodes and o != the new_pack we haven't written to
1997
 
            #       disk yet. However, the new pack object is not easily
1998
 
            #       accessible here (it would have to be passed through the
1999
 
            #       autopacking code, etc.)
2000
 
            obsolete_packs = [o for o in obsolete_packs
2001
 
                              if o.name not in already_obsolete]
2002
 
            self._obsolete_packs(obsolete_packs)
2003
 
        return [new_node[0][0] for new_node in new_nodes]
2004
1851
 
2005
1852
    def reload_pack_names(self):
2006
1853
        """Sync our pack listing with what is present in the repository.
2020
1867
        if first_read:
2021
1868
            return True
2022
1869
        # out the new value.
2023
 
        (disk_nodes, deleted_nodes, new_nodes,
2024
 
         orig_disk_nodes) = self._diff_pack_names()
2025
 
        # _packs_at_load is meant to be the explicit list of names in
2026
 
        # 'pack-names' at then start. As such, it should not contain any
2027
 
        # pending names that haven't been written out yet.
2028
 
        self._packs_at_load = orig_disk_nodes
 
1870
        disk_nodes, _, _ = self._diff_pack_names()
 
1871
        self._packs_at_load = disk_nodes
2029
1872
        (removed, added,
2030
1873
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
2031
1874
        if removed or added or modified:
2040
1883
            raise
2041
1884
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())
2042
1885
 
2043
 
    def _clear_obsolete_packs(self, preserve=None):
 
1886
    def _clear_obsolete_packs(self):
2044
1887
        """Delete everything from the obsolete-packs directory.
2045
 
 
2046
 
        :return: A list of pack identifiers (the filename without '.pack') that
2047
 
            were found in obsolete_packs.
2048
1888
        """
2049
 
        found = []
2050
1889
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
2051
 
        if preserve is None:
2052
 
            preserve = set()
2053
1890
        for filename in obsolete_pack_transport.list_dir('.'):
2054
 
            name, ext = osutils.splitext(filename)
2055
 
            if ext == '.pack':
2056
 
                found.append(name)
2057
 
            if name in preserve:
2058
 
                continue
2059
1891
            try:
2060
1892
                obsolete_pack_transport.delete(filename)
2061
1893
            except (errors.PathError, errors.TransportError), e:
2062
 
                warning("couldn't delete obsolete pack, skipping it:\n%s"
2063
 
                        % (e,))
2064
 
        return found
 
1894
                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
2065
1895
 
2066
1896
    def _start_write_group(self):
2067
1897
        # Do not permit preparation for writing if we're not in a 'write lock'.
2068
1898
        if not self.repo.is_write_locked():
2069
1899
            raise errors.NotWriteLocked(self)
2070
 
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
 
1900
        self._new_pack = NewPack(self, upload_suffix='.pack',
2071
1901
            file_mode=self.repo.bzrdir._get_file_mode())
2072
1902
        # allow writing: queue writes to a new index
2073
1903
        self.revision_index.add_writable_index(self._new_pack.revision_index,
2076
1906
            self._new_pack)
2077
1907
        self.text_index.add_writable_index(self._new_pack.text_index,
2078
1908
            self._new_pack)
2079
 
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
2080
1909
        self.signature_index.add_writable_index(self._new_pack.signature_index,
2081
1910
            self._new_pack)
2082
 
        if self.chk_index is not None:
2083
 
            self.chk_index.add_writable_index(self._new_pack.chk_index,
2084
 
                self._new_pack)
2085
 
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
2086
 
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)
2087
1911
 
2088
1912
        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
2089
1913
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
2094
1918
        # FIXME: just drop the transient index.
2095
1919
        # forget what names there are
2096
1920
        if self._new_pack is not None:
2097
 
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
2098
 
            operation.add_cleanup(setattr, self, '_new_pack', None)
2099
 
            # If we aborted while in the middle of finishing the write
2100
 
            # group, _remove_pack_indices could fail because the indexes are
2101
 
            # already gone.  But they're not there we shouldn't fail in this
2102
 
            # case, so we pass ignore_missing=True.
2103
 
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
2104
 
                ignore_missing=True)
2105
 
            operation.run_simple()
 
1921
            try:
 
1922
                self._new_pack.abort()
 
1923
            finally:
 
1924
                # XXX: If we aborted while in the middle of finishing the write
 
1925
                # group, _remove_pack_indices can fail because the indexes are
 
1926
                # already gone.  If they're not there we shouldn't fail in this
 
1927
                # case.  -- mbp 20081113
 
1928
                self._remove_pack_indices(self._new_pack)
 
1929
                self._new_pack = None
2106
1930
        for resumed_pack in self._resumed_packs:
2107
 
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
2108
 
            # See comment in previous finally block.
2109
 
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
2110
 
                ignore_missing=True)
2111
 
            operation.run_simple()
 
1931
            try:
 
1932
                resumed_pack.abort()
 
1933
            finally:
 
1934
                # See comment in previous finally block.
 
1935
                try:
 
1936
                    self._remove_pack_indices(resumed_pack)
 
1937
                except KeyError:
 
1938
                    pass
2112
1939
        del self._resumed_packs[:]
 
1940
        self.repo._text_knit = None
2113
1941
 
2114
1942
    def _remove_resumed_pack_indices(self):
2115
1943
        for resumed_pack in self._resumed_packs:
2116
1944
            self._remove_pack_indices(resumed_pack)
2117
1945
        del self._resumed_packs[:]
2118
1946
 
2119
 
    def _check_new_inventories(self):
2120
 
        """Detect missing inventories in this write group.
2121
 
 
2122
 
        :returns: list of strs, summarising any problems found.  If the list is
2123
 
            empty no problems were found.
2124
 
        """
2125
 
        # The base implementation does no checks.  GCRepositoryPackCollection
2126
 
        # overrides this.
2127
 
        return []
2128
 
        
2129
1947
    def _commit_write_group(self):
2130
1948
        all_missing = set()
2131
1949
        for prefix, versioned_file in (
2140
1958
            raise errors.BzrCheckError(
2141
1959
                "Repository %s has missing compression parent(s) %r "
2142
1960
                 % (self.repo, sorted(all_missing)))
2143
 
        problems = self._check_new_inventories()
2144
 
        if problems:
2145
 
            problems_summary = '\n'.join(problems)
2146
 
            raise errors.BzrCheckError(
2147
 
                "Cannot add revision(s) to repository: " + problems_summary)
2148
1961
        self._remove_pack_indices(self._new_pack)
2149
 
        any_new_content = False
 
1962
        should_autopack = False
2150
1963
        if self._new_pack.data_inserted():
2151
1964
            # get all the data to disk and read to use
2152
1965
            self._new_pack.finish()
2153
1966
            self.allocate(self._new_pack)
2154
1967
            self._new_pack = None
2155
 
            any_new_content = True
 
1968
            should_autopack = True
2156
1969
        else:
2157
1970
            self._new_pack.abort()
2158
1971
            self._new_pack = None
2163
1976
            self._remove_pack_from_memory(resumed_pack)
2164
1977
            resumed_pack.finish()
2165
1978
            self.allocate(resumed_pack)
2166
 
            any_new_content = True
 
1979
            should_autopack = True
2167
1980
        del self._resumed_packs[:]
2168
 
        if any_new_content:
2169
 
            result = self.autopack()
2170
 
            if not result:
 
1981
        if should_autopack:
 
1982
            if not self.autopack():
2171
1983
                # when autopack takes no steps, the names list is still
2172
1984
                # unsaved.
2173
 
                return self._save_pack_names()
2174
 
            return result
2175
 
        return []
 
1985
                self._save_pack_names()
 
1986
        self.repo._text_knit = None
2176
1987
 
2177
1988
    def _suspend_write_group(self):
2178
1989
        tokens = [pack.name for pack in self._resumed_packs]
2186
1997
            self._new_pack.abort()
2187
1998
            self._new_pack = None
2188
1999
        self._remove_resumed_pack_indices()
 
2000
        self.repo._text_knit = None
2189
2001
        return tokens
2190
2002
 
2191
2003
    def _resume_write_group(self, tokens):
2228
2040
            self._transport.clone('upload'),
2229
2041
            self._transport.clone('packs'),
2230
2042
            _format.index_builder_class,
2231
 
            _format.index_class,
2232
 
            use_chk_index=self._format.supports_chks,
2233
 
            )
 
2043
            _format.index_class)
2234
2044
        self.inventories = KnitVersionedFiles(
2235
2045
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
2236
2046
                add_callback=self._pack_collection.inventory_index.add_callback,
2240
2050
        self.revisions = KnitVersionedFiles(
2241
2051
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
2242
2052
                add_callback=self._pack_collection.revision_index.add_callback,
2243
 
                deltas=False, parents=True, is_locked=self.is_locked,
2244
 
                track_external_parent_refs=True),
 
2053
                deltas=False, parents=True, is_locked=self.is_locked),
2245
2054
            data_access=self._pack_collection.revision_index.data_access,
2246
2055
            max_delta_chain=0)
2247
2056
        self.signatures = KnitVersionedFiles(
2256
2065
                deltas=True, parents=True, is_locked=self.is_locked),
2257
2066
            data_access=self._pack_collection.text_index.data_access,
2258
2067
            max_delta_chain=200)
2259
 
        if _format.supports_chks:
2260
 
            # No graph, no compression:- references from chks are between
2261
 
            # different objects not temporal versions of the same; and without
2262
 
            # some sort of temporal structure knit compression will just fail.
2263
 
            self.chk_bytes = KnitVersionedFiles(
2264
 
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
2265
 
                    add_callback=self._pack_collection.chk_index.add_callback,
2266
 
                    deltas=False, parents=False, is_locked=self.is_locked),
2267
 
                data_access=self._pack_collection.chk_index.data_access,
2268
 
                max_delta_chain=0)
2269
 
        else:
2270
 
            self.chk_bytes = None
2271
2068
        # True when the repository object is 'write locked' (as opposed to the
2272
2069
        # physical lock only taken out around changes to the pack-names list.)
2273
2070
        # Another way to represent this would be a decorator around the control
2280
2077
        self._reconcile_fixes_text_parents = True
2281
2078
        self._reconcile_backsup_inventory = False
2282
2079
 
2283
 
    def _warn_if_deprecated(self, branch=None):
 
2080
    def _warn_if_deprecated(self):
2284
2081
        # This class isn't deprecated, but one sub-format is
2285
2082
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
2286
 
            super(KnitPackRepository, self)._warn_if_deprecated(branch)
 
2083
            from bzrlib import repository
 
2084
            if repository._deprecation_warning_done:
 
2085
                return
 
2086
            repository._deprecation_warning_done = True
 
2087
            warning("Format %s for %s is deprecated - please use"
 
2088
                    " 'bzr upgrade --1.6.1-rich-root'"
 
2089
                    % (self._format, self.bzrdir.transport.base))
2287
2090
 
2288
2091
    def _abort_write_group(self):
2289
 
        self.revisions._index._key_dependencies.clear()
2290
2092
        self._pack_collection._abort_write_group()
2291
2093
 
2292
 
    def _get_source(self, to_format):
2293
 
        if to_format.network_name() == self._format.network_name():
2294
 
            return KnitPackStreamSource(self, to_format)
2295
 
        return super(KnitPackRepository, self)._get_source(to_format)
 
2094
    def _find_inconsistent_revision_parents(self):
 
2095
        """Find revisions with incorrectly cached parents.
 
2096
 
 
2097
        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
 
2098
            parents-in-revision).
 
2099
        """
 
2100
        if not self.is_locked():
 
2101
            raise errors.ObjectNotLocked(self)
 
2102
        pb = ui.ui_factory.nested_progress_bar()
 
2103
        result = []
 
2104
        try:
 
2105
            revision_nodes = self._pack_collection.revision_index \
 
2106
                .combined_index.iter_all_entries()
 
2107
            index_positions = []
 
2108
            # Get the cached index values for all revisions, and also the location
 
2109
            # in each index of the revision text so we can perform linear IO.
 
2110
            for index, key, value, refs in revision_nodes:
 
2111
                pos, length = value[1:].split(' ')
 
2112
                index_positions.append((index, int(pos), key[0],
 
2113
                    tuple(parent[0] for parent in refs[0])))
 
2114
                pb.update("Reading revision index", 0, 0)
 
2115
            index_positions.sort()
 
2116
            batch_count = len(index_positions) / 1000 + 1
 
2117
            pb.update("Checking cached revision graph", 0, batch_count)
 
2118
            for offset in xrange(batch_count):
 
2119
                pb.update("Checking cached revision graph", offset)
 
2120
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
 
2121
                if not to_query:
 
2122
                    break
 
2123
                rev_ids = [item[2] for item in to_query]
 
2124
                revs = self.get_revisions(rev_ids)
 
2125
                for revision, item in zip(revs, to_query):
 
2126
                    index_parents = item[3]
 
2127
                    rev_parents = tuple(revision.parent_ids)
 
2128
                    if index_parents != rev_parents:
 
2129
                        result.append((revision.revision_id, index_parents, rev_parents))
 
2130
        finally:
 
2131
            pb.finished()
 
2132
        return result
2296
2133
 
2297
2134
    def _make_parents_provider(self):
2298
2135
        return graph.CachingParentsProvider(self)
2306
2143
        self._pack_collection._start_write_group()
2307
2144
 
2308
2145
    def _commit_write_group(self):
2309
 
        hint = self._pack_collection._commit_write_group()
2310
 
        self.revisions._index._key_dependencies.clear()
2311
 
        return hint
 
2146
        return self._pack_collection._commit_write_group()
2312
2147
 
2313
2148
    def suspend_write_group(self):
2314
2149
        # XXX check self._write_group is self.get_transaction()?
2315
2150
        tokens = self._pack_collection._suspend_write_group()
2316
 
        self.revisions._index._key_dependencies.clear()
2317
2151
        self._write_group = None
2318
2152
        return tokens
2319
2153
 
2320
2154
    def _resume_write_group(self, tokens):
2321
2155
        self._start_write_group()
2322
 
        try:
2323
 
            self._pack_collection._resume_write_group(tokens)
2324
 
        except errors.UnresumableWriteGroup:
2325
 
            self._abort_write_group()
2326
 
            raise
2327
 
        for pack in self._pack_collection._resumed_packs:
2328
 
            self.revisions._index.scan_unvalidated_index(pack.revision_index)
 
2156
        self._pack_collection._resume_write_group(tokens)

    def get_transaction(self):
        if self._write_lock_count:
    # ...
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()

    def lock_read(self):
    # ...
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
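
    # Example (a minimal sketch of the lock counting above, assuming `repo`
    # is a KnitPackRepository): locks nest via _write_lock_count, and the
    # -Drelock debug flag notes repeated same-mode locking.
    #
    #   repo.lock_write()   # count 0 -> 1: lock taken, fallbacks
    #                       # read-locked, data refreshed
    #   repo.lock_write()   # count 1 -> 2: no new physical lock
    #   repo.unlock()       # count 2 -> 1
    #   repo.unlock()       # count 1 -> 0: lock released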

    def leave_lock_in_place(self):
    # ...
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self, hint=None, clean_obsolete_packs=False):
        # was: def pack(self):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack(hint=hint,
            clean_obsolete_packs=clean_obsolete_packs)
        # was: self._pack_collection.pack()
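
    # Example (a minimal sketch, assuming `repo` is a KnitPackRepository on a
    # writable transport): @needs_write_lock acquires and releases the write
    # lock around the call, so packing is a single call.
    #
    #   repo.pack()   # combine all pack files into one new pack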

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
    # ...
        reconciler.reconcile()
        return reconciler

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        packer = ReconcilePacker(collection, packs, extension, revs)
        return packer.pack(pb)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
    # ...
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
                # was: for repo in self._fallback_repositories:
                #          repo.unlock()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            for repo in self._fallback_repositories:
                repo.unlock()


class KnitPackStreamSource(StreamSource):
    """A StreamSource used to transfer data between same-format KnitPack repos.

    This source assumes:
        1) Same serialization format for all objects
        2) Same root information
        3) XML format inventories
        4) Atomic inserts (so we can stream inventory texts before text
           content)
        5) No chk_bytes
    """

    def __init__(self, from_repository, to_format):
        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
        self._text_keys = None
        self._text_fetch_order = 'unordered'

    def _get_filtered_inv_stream(self, revision_ids):
        from_repo = self.from_repository
        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
        parent_keys = [(p,) for p in parent_ids]
        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
        parent_text_keys = set(find_text_keys(
            from_repo._inventory_xml_lines_for_keys(parent_keys)))
        content_text_keys = set()
        knit = KnitVersionedFiles(None, None)
        factory = KnitPlainFactory()
        def find_text_keys_from_content(record):
            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
                raise ValueError("Unknown content storage kind for"
                    " inventory text: %s" % (record.storage_kind,))
            # It's a knit record; it has a _raw_record field (even if it was
            # reconstituted from a network stream).
            raw_data = record._raw_record
            # Read the entire thing.
            revision_id = record.key[-1]
            content, _ = knit._parse_record(revision_id, raw_data)
            if record.storage_kind == 'knit-delta-gz':
                line_iterator = factory.get_linedelta_content(content)
            elif record.storage_kind == 'knit-ft-gz':
                line_iterator = factory.get_fulltext_content(content)
            content_text_keys.update(find_text_keys(
                [(line, revision_id) for line in line_iterator]))
        revision_keys = [(r,) for r in revision_ids]
        def _filtered_inv_stream():
            source_vf = from_repo.inventories
            stream = source_vf.get_record_stream(revision_keys,
                                                 'unordered', False)
            for record in stream:
                if record.storage_kind == 'absent':
                    raise errors.NoSuchRevision(from_repo, record.key)
                find_text_keys_from_content(record)
                yield record
            self._text_keys = content_text_keys - parent_text_keys
        return ('inventories', _filtered_inv_stream())
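
    # Example (a minimal, self-contained sketch of the filtering above): text
    # keys already reachable from the parent inventories are subtracted, so
    # only texts introduced by the fetched revisions are streamed.
    #
    #   parent_text_keys = set([('file-a', 'rev-1')])
    #   content_text_keys = set([('file-a', 'rev-1'), ('file-b', 'rev-2')])
    #   text_keys = content_text_keys - parent_text_keys
    #   # text_keys == set([('file-b', 'rev-2')])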

    def _get_text_stream(self):
        # Note: we know we don't have to handle adding root keys, because the
        # source and target have the identical network name.
        text_stream = self.from_repository.texts.get_record_stream(
                        self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()
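
    # Example (a minimal sketch, assuming `source` is a KnitPackStreamSource
    # and `search` a search result providing get_keys()): substreams arrive
    # as (name, record_iterator) pairs, and inventories must be consumed
    # before texts because _get_filtered_inv_stream computes self._text_keys
    # as a side effect.
    #
    #   for substream_kind, records in source.get_stream(search):
    #       for record in records:
    #           pass  # e.g. insert into the target repository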


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.
    # ...
    supports_ghosts = True
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # Most pack formats do not use chk lookups.
    supports_chks = False
    # What index classes to use
    index_builder_class = None
    index_class = None
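
    # Example (a hypothetical subclass, for illustration only): concrete
    # formats fill in the two index attributes, e.g. with the btree classes
    # used by the 1.9-era formats later in this file.
    #
    #   class SomeBTreePackFormat(RepositoryFormatPack):
    #       index_builder_class = BTreeBuilder
    #       index_class = BTreeGraphIndex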
    # ...
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository
        # was: return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().
    # ...
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass
2351
class RepositoryFormatKnitPack3(RepositoryFormatPack):
2608
2352
    """A subtrees parameterized Pack repository.
2617
2361
    repository_class = KnitPackRepository
2618
2362
    _commit_builder_class = PackRootCommitBuilder
2619
2363
    rich_root_data = True
2620
 
    experimental = True
2621
2364
    supports_tree_reference = True
2622
2365
    @property
2623
2366
    def _serializer(self):
2635
2378
 
2636
2379
    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2637
2380
 
 
2381
    def check_conversion_target(self, target_format):
 
2382
        if not target_format.rich_root_data:
 
2383
            raise errors.BadConversionTarget(
 
2384
                'Does not support rich root data.', target_format)
 
2385
        if not getattr(target_format, 'supports_tree_reference', False):
 
2386
            raise errors.BadConversionTarget(
 
2387
                'Does not support nested trees', target_format)
 
2388
 
2638
2389
    def get_format_string(self):
2639
2390
        """See RepositoryFormat.get_format_string()."""
2640
2391
        return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
    # ...
    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
    # ...
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.
    # ...
    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
    # ...
    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
    # ...
        """See RepositoryFormat.get_format_description()."""
        return "Packs 6 (uses btree indexes, requires bzr 1.9)"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
    """A repository with rich roots, no subtrees, stacking and btree indexes.
    # ...
    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
    # ...
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex
    # Set to true to get the fast-commit code path tested until a really fast
    # format lands in trunk. Not actually fast in this format.
    fast_deltas = True

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree [as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    development.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    experimental = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    # ...

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')  # was: 'development2-subtree'

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
    # ...
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1-subtree with B+Tree indices.\n")