/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Aaron Bentley
  • Date: 2009-03-30 20:59:42 UTC
  • mto: This revision was merged to the branch mainline in revision 4241.
  • Revision ID: aaron@aaronbentley.com-20090330205942-irvhmm42k6d4kfdd
Add tests of guess_renames output.

The diff below is rendered in unified form: '-' marks lines present only on the left side of the original side-by-side view (shown as removed), '+' marks lines present only on the right side (shown as added), and '@@ -left +right @@' headers carry the starting line numbers from each side.

@@ -1 +1 @@
-# Copyright (C) 2007-2010 Canonical Ltd
+# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -23 +23 @@
 import time

 from bzrlib import (
-    chk_map,
-    cleanup,
     debug,
     graph,
     osutils,
@@ -37 +35 @@
     )
 from bzrlib.index import (
     CombinedGraphIndex,
+    GraphIndex,
+    GraphIndexBuilder,
     GraphIndexPrefixAdapter,
+    InMemoryGraphIndex,
     )
 from bzrlib.knit import (
     KnitPlainFactory,
@@ -52 +53 @@
     errors,
     lockable_files,
     lockdir,
-    revision as _mod_revision,
+    symbol_versioning,
     )

-from bzrlib.decorators import needs_write_lock, only_raises
+from bzrlib.decorators import needs_write_lock
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -69 +70 @@
     CommitBuilder,
     MetaDirRepositoryFormat,
     RepositoryFormat,
-    RepositoryWriteLockResult,
     RootCommitBuilder,
-    StreamSource,
     )
+import bzrlib.revision as _mod_revision
 from bzrlib.trace import (
     mutter,
-    note,
     warning,
     )

@@ -132 +131 @@
     # A map of index 'type' to the file extension and position in the
     # index_sizes array.
     index_definitions = {
-        'chk': ('.cix', 4),
         'revision': ('.rix', 0),
         'inventory': ('.iix', 1),
         'text': ('.tix', 2),
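
The hunk above changes the `index_definitions` table that drives both index file naming and the layout of a pack's `index_sizes` list; one side of the diff carries a fifth 'chk' slot. A minimal, self-contained sketch of how such a table is used (illustrative only, not bzrlib code; the 'signature' entry is assumed from the '.six' suffix seen elsewhere in this diff):

# Illustrative sketch only (not bzrlib code): how a definitions table like
# Pack.index_definitions can drive both index file naming and the slots of
# the index_sizes list.
index_definitions = {
    'revision': ('.rix', 0),
    'inventory': ('.iix', 1),
    'text': ('.tix', 2),
    'signature': ('.six', 3),   # assumed from the '.six' suffix in this diff
    'chk': ('.cix', 4),         # the extra CHK slot on one side of the diff
}

def index_name(index_type, pack_name):
    # e.g. index_name('text', 'abc123') -> 'abc123.tix'
    suffix, _offset = index_definitions[index_type]
    return pack_name + suffix

def index_offset(index_type):
    # position of this index's size within index_sizes
    _suffix, offset = index_definitions[index_type]
    return offset

index_sizes = [None] * len(index_definitions)
index_sizes[index_offset('text')] = 4096   # hypothetical size in bytes
print(index_name('text', 'abc123'), index_sizes)
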
@@ -140 +138 @@
         }

     def __init__(self, revision_index, inventory_index, text_index,
-        signature_index, chk_index=None):
+        signature_index):
         """Create a pack instance.

         :param revision_index: A GraphIndex for determining what revisions are
@@ -153 +151 @@
             texts/deltas (via (fileid, revisionid) tuples).
         :param signature_index: A GraphIndex for determining what signatures are
             present in the Pack and accessing the locations of their texts.
-        :param chk_index: A GraphIndex for accessing content by CHK, if the
-            pack has one.
         """
         self.revision_index = revision_index
         self.inventory_index = inventory_index
         self.text_index = text_index
         self.signature_index = signature_index
-        self.chk_index = chk_index

     def access_tuple(self):
         """Return a tuple (transport, name) for the pack content."""
@@ -227 +222 @@
         return self.index_name('text', name)

     def _replace_index_with_readonly(self, index_type):
-        unlimited_cache = False
-        if index_type == 'chk':
-            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+                self.index_sizes[self.index_offset(index_type)]))


 class ExistingPack(Pack):
     """An in memory proxy for an existing .pack and its disk indices."""

     def __init__(self, pack_transport, name, revision_index, inventory_index,
-        text_index, signature_index, chk_index=None):
+        text_index, signature_index):
         """Create an ExistingPack object.

         :param pack_transport: The transport where the pack file resides.
         :param name: The name of the pack on disk in the pack_transport.
         """
         Pack.__init__(self, revision_index, inventory_index, text_index,
-            signature_index, chk_index)
+            signature_index)
         self.name = name
         self.pack_transport = pack_transport
         if None in (revision_index, inventory_index, text_index,
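
In the `_replace_index_with_readonly` hunk above, the CHK-aware side asks for an unbounded page cache only for the 'chk' index when swapping an index for its finished read-only form. A runnable sketch of that pattern, with stubs standing in for the real `index_class` and pack:

# Sketch (stubbed, not bzrlib code): only the 'chk' index is opened with an
# unbounded cache when an index is replaced by its read-only on-disk form.
class StubIndex:
    def __init__(self, transport, name, size, unlimited_cache=False):
        self.name = name
        self.unlimited_cache = unlimited_cache

class StubPack:
    name = 'abc123'
    index_class = StubIndex

def replace_index_with_readonly(pack, index_type):
    unlimited_cache = (index_type == 'chk')   # CHK pages are re-read constantly
    setattr(pack, index_type + '_index',
            pack.index_class(None, pack.name + '.' + index_type, 0,
                             unlimited_cache=unlimited_cache))

pack = StubPack()
replace_index_with_readonly(pack, 'chk')
replace_index_with_readonly(pack, 'text')
print(pack.chk_index.unlimited_cache, pack.text_index.unlimited_cache)  # True False
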
@@ -271 +262 @@

     def __init__(self, name, revision_index, inventory_index, text_index,
         signature_index, upload_transport, pack_transport, index_transport,
-        pack_collection, chk_index=None):
+        pack_collection):
         """Create a ResumedPack object."""
         ExistingPack.__init__(self, pack_transport, name, revision_index,
-            inventory_index, text_index, signature_index,
-            chk_index=chk_index)
+            inventory_index, text_index, signature_index)
         self.upload_transport = upload_transport
         self.index_transport = index_transport
         self.index_sizes = [None, None, None, None]
@@ -285 +275 @@
             ('text', text_index),
             ('signature', signature_index),
             ]
-        if chk_index is not None:
-            indices.append(('chk', chk_index))
-            self.index_sizes.append(None)
         for index_type, index in indices:
             offset = self.index_offset(index_type)
             self.index_sizes[offset] = index._size
@@ -308 +295 @@
         self.upload_transport.delete(self.file_name())
         indices = [self.revision_index, self.inventory_index, self.text_index,
             self.signature_index]
-        if self.chk_index is not None:
-            indices.append(self.chk_index)
         for index in indices:
             index._transport.delete(index._name)

     def finish(self):
         self._check_references()
-        index_types = ['revision', 'inventory', 'text', 'signature']
-        if self.chk_index is not None:
-            index_types.append('chk')
-        for index_type in index_types:
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
+        for index_type in ['revision', 'inventory', 'text', 'signature']:
             old_name = self.index_name(index_type, self.name)
             new_name = '../indices/' + old_name
             self.upload_transport.rename(old_name, new_name)
             self._replace_index_with_readonly(index_type)
-        new_name = '../packs/' + self.file_name()
-        self.upload_transport.rename(self.file_name(), new_name)
         self._state = 'finished'

     def _get_external_refs(self, index):
-        """Return compression parents for this index that are not present.
-
-        This returns any compression parents that are referenced by this index,
-        which are not contained *in* this index. They may be present elsewhere.
-        """
         return index.external_references(1)


@@ -350 +327 @@
         # The relative locations of the packs are constrained, but all are
         # passed in because the caller has them, so as to avoid object churn.
         index_builder_class = pack_collection._index_builder_class
-        if pack_collection.chk_index is not None:
-            chk_index = index_builder_class(reference_lists=0)
-        else:
-            chk_index = None
         Pack.__init__(self,
             # Revisions: parents list, no text compression.
             index_builder_class(reference_lists=1),
@@ -368 +341 @@
             # Signatures: Just blobs to store, no compression, no parents
             # listing.
             index_builder_class(reference_lists=0),
-            # CHK based storage - just blobs, no compression or parents.
-            chk_index=chk_index
             )
         self._pack_collection = pack_collection
         # When we make readonly indices, we need this.
@@ -384 +355 @@
         self._file_mode = file_mode
         # tracks the content written to the .pack file.
         self._hash = osutils.md5()
-        # a tuple with the length in bytes of the indices, once the pack
-        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
+        # a four-tuple with the length in bytes of the indices, once the pack
+        # is finalised. (rev, inv, text, sigs)
         self.index_sizes = None
         # How much data to cache when writing packs. Note that this is not
         # synchronised with reads, because it's not in the transport layer, so
@@ -429 +400 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None

     def abort(self):
         """Cancel creating this pack."""
@@ -454 +423 @@
         return bool(self.get_revision_count() or
             self.inventory_index.key_count() or
             self.text_index.key_count() or
-            self.signature_index.key_count() or
-            (self.chk_index is not None and self.chk_index.key_count()))
-
-    def finish_content(self):
-        if self.name is not None:
-            return
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
+            self.signature_index.key_count())

     def finish(self, suspend=False):
         """Finish the new pack.
@@ -476 +436 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self.finish_content()
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
         if not suspend:
             self._check_references()
         # write indices
@@ -491 +454 @@
         self._write_index('text', self.text_index, 'file texts', suspend)
         self._write_index('signature', self.signature_index,
             'revision signatures', suspend)
-        if self.chk_index is not None:
-            self.index_sizes.append(None)
-            self._write_index('chk', self.chk_index,
-                'content hash bytes', suspend)
         self.write_stream.close()
         # Note that this will clobber an existing pack with the same name,
         # without checking for hash collisions. While this is undesirable this
@@ -573 +532 @@
     # XXX: Probably 'can be written to' could/should be separated from 'acts
     # like a knit index' -- mbp 20071024

-    def __init__(self, reload_func=None, flush_func=None):
+    def __init__(self, reload_func=None):
         """Create an AggregateIndex.

         :param reload_func: A function to call if we find we are missing an
@@ -584 +543 @@
         self.index_to_pack = {}
         self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
         self.data_access = _DirectPackAccess(self.index_to_pack,
-                                             reload_func=reload_func,
-                                             flush_func=flush_func)
+                                             reload_func=reload_func)
+        self.add_callback = None
+
+    def replace_indices(self, index_to_pack, indices):
+        """Replace the current mappings with fresh ones.
+
+        This should probably not be used eventually, rather incremental add and
+        removal of indices. It has been added during refactoring of existing
+        code.
+
+        :param index_to_pack: A mapping from index objects to
+            (transport, name) tuples for the pack file data.
+        :param indices: A list of indices.
+        """
+        # refresh the revision pack map dict without replacing the instance.
+        self.index_to_pack.clear()
+        self.index_to_pack.update(index_to_pack)
+        # XXX: API break - clearly a 'replace' method would be good?
+        self.combined_index._indices[:] = indices
+        # the current add nodes callback for the current writable index if
+        # there is one.
         self.add_callback = None

     def add_index(self, index, pack):
@@ -600 +578 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index, pack.name)
+        self.combined_index.insert_index(0, index)

     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -626 +604 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
-        del self.combined_index._index_names[:]
         self.add_callback = None

-    def remove_index(self, index):
+    def remove_index(self, index, pack):
         """Remove index from the indices used to answer queries.

         :param index: An index from the pack parameter.
+        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        pos = self.combined_index._indices.index(index)
-        del self.combined_index._indices[pos]
-        del self.combined_index._index_names[pos]
+        self.combined_index._indices.remove(index)
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
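
The `remove_index` change above reflects that the CHK-aware `CombinedGraphIndex` keeps a parallel `_index_names` list alongside `_indices`; removing by position keeps the two aligned, where `list.remove` on one list alone would not. A small illustration with plain lists:

# Plain-list illustration: _indices and _index_names must stay in step, so
# removal is by position rather than list.remove on a single list.
_indices = ['idx-a', 'idx-b', 'idx-c']
_index_names = ['pack-a', 'pack-b', 'pack-c']

def remove_index(index):
    pos = _indices.index(index)
    del _indices[pos]
    del _index_names[pos]

remove_index('idx-b')
assert _indices == ['idx-a', 'idx-c']
assert _index_names == ['pack-a', 'pack-c']
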
@@ -749 +725 @@

     def open_pack(self):
         """Open a pack for the pack we are creating."""
-        new_pack = self._pack_collection.pack_factory(self._pack_collection,
-                upload_suffix=self.suffix,
+        new_pack = NewPack(self._pack_collection, upload_suffix=self.suffix,
                 file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
         # We know that we will process all nodes in order, and don't need to
         # query, so don't combine any indices spilled to disk until we are done
@@ -921 +896 @@
                 time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                 new_pack.signature_index.key_count(),
                 time.time() - new_pack.start_time)
-        # copy chk contents
-        # NB XXX: how to check CHK references are present? perhaps by yielding
-        # the items? How should that interact with stacked repos?
-        if new_pack.chk_index is not None:
-            self._copy_chks()
-            if 'pack' in debug.debug_flags:
-                mutter('%s: create_pack: chk content copied: %s%s %d items t+%6.3fs',
-                    time.ctime(), self._pack_collection._upload_transport.base,
-                    new_pack.random_name,
-                    new_pack.chk_index.key_count(),
-                    time.time() - new_pack.start_time)
         new_pack._check_references()
         if not self._use_pack(new_pack):
             new_pack.abort()
@@ -941 +905 @@
         self._pack_collection.allocate(new_pack)
         return new_pack

-    def _copy_chks(self, refs=None):
-        # XXX: Todo, recursive follow-pointers facility when fetching some
-        # revisions only.
-        chk_index_map, chk_indices = self._pack_map_and_index_list(
-            'chk_index')
-        chk_nodes = self._index_contents(chk_indices, refs)
-        new_refs = set()
-        # TODO: This isn't strictly tasteful as we are accessing some private
-        #       variables (_serializer). Perhaps a better way would be to have
-        #       Repository._deserialise_chk_node()
-        search_key_func = chk_map.search_key_registry.get(
-            self._pack_collection.repo._serializer.search_key_name)
-        def accumlate_refs(lines):
-            # XXX: move to a generic location
-            # Yay mismatch:
-            bytes = ''.join(lines)
-            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
-            new_refs.update(node.refs())
-        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
-            self.new_pack.chk_index, output_lines=accumlate_refs)
-        return new_refs
-
-    def _copy_nodes(self, nodes, index_map, writer, write_index,
-        output_lines=None):
-        """Copy knit nodes between packs with no graph references.
-
-        :param output_lines: Output full texts of copied items.
-        """
+    def _copy_nodes(self, nodes, index_map, writer, write_index):
+        """Copy knit nodes between packs with no graph references."""
         pb = ui.ui_factory.nested_progress_bar()
         try:
             return self._do_copy_nodes(nodes, index_map, writer,
-                write_index, pb, output_lines=output_lines)
+                write_index, pb)
         finally:
             pb.finished()

-    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
-        output_lines=None):
+    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
         # for record verification
         knit = KnitVersionedFiles(None, None)
         # plan a readv on each source pack:
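
The `output_lines` hook threaded through `_copy_nodes` on the CHK side lets `_copy_chks` observe the full text of every record as it streams between packs, accumulating the CHK references that still need copying. A self-contained sketch of the callback shape (the 'ref:' record format below is invented for illustration, not the knit format):

# Sketch of the output_lines callback shape used by the CHK copy pass.
def copy_records(records, output_lines=None):
    copied = []
    for raw_data in records:
        if output_lines is not None:
            # hand the full text to the hook while copying
            output_lines(raw_data.splitlines(True))
        # (the real code otherwise only sanity-checks the record header)
        copied.append(raw_data)
    return copied

new_refs = set()
def accumulate_refs(lines):
    for line in lines:
        if line.startswith('ref:'):
            new_refs.add(line.split(':', 1)[1].strip())

copy_records(['ref: sha1-aaa\n', 'plain text\n'], output_lines=accumulate_refs)
print(new_refs)   # {'sha1-aaa'}
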
@@ -1017 +954 @@
                 izip(reader.iter_records(), pack_readv_requests):
                 raw_data = read_func(None)
                 # check the header only
-                if output_lines is not None:
-                    output_lines(knit._parse_record(key[-1], raw_data)[0])
-                else:
-                    df, _ = knit._parse_record_header(key, raw_data)
-                    df.close()
+                df, _ = knit._parse_record_header(key, raw_data)
+                df.close()
                 pos, size = writer.add_bytes_record(raw_data, names)
                 write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                 pb.update("Copied record", record_index)
@@ -1101 +1035 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the node retrieved by the
+            (key, eol_flag, references) for the the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1298 +1232 @@
         # space (we only topo sort the revisions, which is smaller).
         topo_order = tsort.topo_sort(ancestors)
         rev_order = dict(zip(topo_order, range(len(topo_order))))
-        bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
+        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
         transaction = repo.get_transaction()
         file_id_index = GraphIndexPrefixAdapter(
             self.new_pack.text_index,
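
The one-line sort change above is more than style: `rev_order.get(key, 0)` tolerates texts whose revision is missing from the topological order (sorting them first), while `rev_order[key]` raises KeyError for them. For example:

rev_order = {'rev-a': 0, 'rev-b': 1}
keys = [(('file-id', 'rev-b'),), (('file-id', 'rev-unknown'),)]
keys.sort(key=lambda key: rev_order.get(key[0][1], 0))   # unknown revision sorts first
try:
    keys.sort(key=lambda key: rev_order[key[0][1]])
except KeyError as e:
    print('strict lookup fails for', e)                  # 'rev-unknown'
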
@@ -1357 +1291 @@
     :ivar _names: map of {pack_name: (index_size,)}
     """

-    pack_factory = NewPack
-    resumed_pack_factory = ResumedPack
-
     def __init__(self, repo, transport, index_transport, upload_transport,
-                 pack_transport, index_builder_class, index_class,
-                 use_chk_index):
+                 pack_transport, index_builder_class, index_class):
         """Create a new RepositoryPackCollection.

         :param transport: Addresses the repository base directory
@@ -1373 +1303 @@
         :param pack_transport: Addresses the directory of existing complete packs.
         :param index_builder_class: The index builder class to use.
         :param index_class: The index class to use.
-        :param use_chk_index: Whether to setup and manage a CHK index.
         """
         # XXX: This should call self.reset()
         self.repo = repo
@@ -1383 +1312 @@
         self._pack_transport = pack_transport
         self._index_builder_class = index_builder_class
         self._index_class = index_class
-        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
-            '.cix': 4}
+        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
         self.packs = []
         # name:Pack mapping
         self._names = None
@@ -1394 +1322 @@
         # when a pack is being created by this object, the state of that pack.
         self._new_pack = None
         # aggregated revision index data
-        flush = self._flush_new_pack
-        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
-        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
-        self.text_index = AggregateIndex(self.reload_pack_names, flush)
-        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
-        all_indices = [self.revision_index, self.inventory_index,
-                self.text_index, self.signature_index]
-        if use_chk_index:
-            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
-            all_indices.append(self.chk_index)
-        else:
-            # used to determine if we're using a chk_index elsewhere.
-            self.chk_index = None
-        # Tell all the CombinedGraphIndex objects about each other, so they can
-        # share hints about which pack names to search first.
-        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
-        for combined_idx in all_combined:
-            combined_idx.set_sibling_indices(
-                set(all_combined).difference([combined_idx]))
+        self.revision_index = AggregateIndex(self.reload_pack_names)
+        self.inventory_index = AggregateIndex(self.reload_pack_names)
+        self.text_index = AggregateIndex(self.reload_pack_names)
+        self.signature_index = AggregateIndex(self.reload_pack_names)
         # resumed packs
         self._resumed_packs = []

-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, self.repo)
-
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
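
On the side with `flush_func`, each `AggregateIndex` is constructed with both the collection's reload callback and its `_flush_new_pack` bound method, presumably so a reader that needs bytes still buffered in the open pack can force a flush first. A stub sketch of that wiring (stand-in classes, not bzrlib's):

# Stub sketch of the reload/flush wiring; classes are stand-ins.
class AggregateIndex:
    def __init__(self, reload_func=None, flush_func=None):
        self.reload_func, self.flush_func = reload_func, flush_func

class Collection:
    def __init__(self):
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self._reload, flush)
        self.text_index = AggregateIndex(self._reload, flush)
        self._new_pack = None
    def _reload(self):
        return True
    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

Collection().revision_index.flush_func()   # no open pack: a no-op
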
 
@@ -1433 +1343 @@
         self.inventory_index.add_index(pack.inventory_index, pack)
         self.text_index.add_index(pack.text_index, pack)
         self.signature_index.add_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.add_index(pack.chk_index, pack)

     def all_packs(self):
         """Return a list of all the Pack objects this repository has.
@@ -1462 +1370 @@
         in synchronisation with certain steps. Otherwise the names collection
         is not flushed.

-        :return: Something evaluating true if packing took place.
+        :return: True if packing took place.
         """
         while True:
             try:
                 return self._do_autopack()
-            except errors.RetryAutopack:
+            except errors.RetryAutopack, e:
                 # If we get a RetryAutopack exception, we should abort the
                 # current action, and retry.
                 pass
@@ -1477 +1385 @@
         total_revisions = self.revision_index.combined_index.key_count()
         total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
-            return None
+            return False
+        # XXX: the following may want to be a class, to pack with a given
+        # policy.
         # determine which packs need changing
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
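
Both sides keep `autopack`'s documented contract, "something evaluating true if packing took place": one returns True/False, the other the list of newly saved pack names or None. Truthiness-based callers work against either, as this toy comparison shows (both functions are hypothetical stand-ins):

def autopack_bool(packed):
    return packed                              # True / False

def autopack_names(packed):
    return ['pack-1234'] if packed else None   # new pack names / None

for impl in (autopack_bool, autopack_names):
    assert bool(impl(True)) and not bool(impl(False))
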
@@ -1505 +1415 @@
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        result = self._execute_pack_operations(pack_operations,
+        self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
-        mutter('Auto-packing repository %s completed', self)
-        return result
+        return True

     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
@@ -1516 +1425 @@

         :param pack_operations: A list of [revision_count, packs_to_combine].
         :param _packer_class: The class of packer to use (default: Packer).
-        :return: The new pack names.
+        :return: None.
         """
         for revision_count, packs in pack_operations:
             # we may have no-ops from the setup logic
@@ -1538 +1447 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
-
-    def _flush_new_pack(self):
-        if self._new_pack is not None:
-            self._new_pack.flush()
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)

     def lock_names(self):
         """Acquire the mutex around the pack-names index.
@@ -1557 +1460 @@
         """
         self.repo.control_files.lock_write()

-    def _already_packed(self):
-        """Is the collection already packed?"""
-        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
-
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
-        if self._already_packed():
+        if total_packs < 2:
+            # This is arguably wrong because we might not be optimal, but for
+            # now lets leave it in. (e.g. reconcile -> one pack. But not
+            # optimal.
             return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions with hint %r.', self, total_packs,
-            total_revisions, hint)
+            'containing %d revisions into 1 packs.', self, total_packs,
+            total_revisions)
         # determine which packs need changing
+        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if hint is None or pack.name in hint:
-                # Either no hint was provided (so we are packing everything),
-                # or this pack was included in the hint.
-                pack_operations[-1][0] += pack.get_revision_count()
-                pack_operations[-1][1].append(pack)
+            pack_operations[-1][0] += pack.get_revision_count()
+            pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)

-        if clean_obsolete_packs:
-            self._clear_obsolete_packs()
-
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
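
The `pack(hint=...)` variant above filters the total repack down to the packs named in the hint. A runnable sketch of just that selection logic, with stub packs:

# Stub sketch of hint filtering in a total repack.
class StubPack:
    def __init__(self, name, revs):
        self.name, self._revs = name, revs
    def get_revision_count(self):
        return self._revs

def plan_total_pack(all_packs, hint=None):
    pack_operations = [[0, []]]
    for pack in all_packs:
        if hint is None or pack.name in hint:
            pack_operations[-1][0] += pack.get_revision_count()
            pack_operations[-1][1].append(pack)
    return pack_operations

packs = [StubPack('a', 3), StubPack('b', 5)]
print(plan_total_pack(packs)[0][0])              # 8 revisions, both packs
print(plan_total_pack(packs, hint=['b'])[0][0])  # 5 revisions, only pack 'b'
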
 
@@ -1678 +1575 @@
             inv_index = self._make_index(name, '.iix')
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
-            if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
-            else:
-                chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
-                inv_index, txt_index, sig_index, chk_index)
+                inv_index, txt_index, sig_index)
             self.add_pack_to_memory(result)
             return result

@@ -1703 +1596 @@
             inv_index = self._make_index(name, '.iix', resume=True)
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
-            if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
-            else:
-                chk_index = None
-            result = self.resumed_pack_factory(name, rev_index, inv_index,
-                txt_index, sig_index, self._upload_transport,
-                self._pack_transport, self._index_transport, self,
-                chk_index=chk_index)
+            result = ResumedPack(name, rev_index, inv_index, txt_index,
+                sig_index, self._upload_transport, self._pack_transport,
+                self._index_transport, self)
         except errors.NoSuchFile, e:
             raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
         self.add_pack_to_memory(result)
@@ -1741 +1628 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()

-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1750 +1637 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        return self._index_class(transport, index_name, index_size)

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1785 +1671 @@
         :param return: None.
         """
         for pack in packs:
-            try:
-                pack.pack_transport.rename(pack.file_name(),
-                    '../obsolete_packs/' + pack.file_name())
-            except (errors.PathError, errors.TransportError), e:
-                # TODO: Should these be warnings or mutters?
-                mutter("couldn't rename obsolete pack, skipping it:\n%s"
-                       % (e,))
+            pack.pack_transport.rename(pack.file_name(),
+                '../obsolete_packs/' + pack.file_name())
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
-            suffixes = ['.iix', '.six', '.tix', '.rix']
-            if self.chk_index is not None:
-                suffixes.append('.cix')
-            for suffix in suffixes:
-                try:
-                    self._index_transport.rename(pack.name + suffix,
-                        '../obsolete_packs/' + pack.name + suffix)
-                except (errors.PathError, errors.TransportError), e:
-                    mutter("couldn't rename obsolete index, skipping it:\n%s"
-                           % (e,))
+            for suffix in ('.iix', '.six', '.tix', '.rix'):
+                self._index_transport.rename(pack.name + suffix,
+                    '../obsolete_packs/' + pack.name + suffix)

     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1836 +1710 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)

-    def _remove_pack_indices(self, pack, ignore_missing=False):
-        """Remove the indices for pack from the aggregated indices.
-
-        :param ignore_missing: Suppress KeyErrors from calling remove_index.
-        """
-        for index_type in Pack.index_definitions.keys():
-            attr_name = index_type + '_index'
-            aggregate_index = getattr(self, attr_name)
-            if aggregate_index is not None:
-                pack_index = getattr(pack, attr_name)
-                try:
-                    aggregate_index.remove_index(pack_index)
-                except KeyError:
-                    if ignore_missing:
-                        continue
-                    raise
+    def _remove_pack_indices(self, pack):
+        """Remove the indices for pack from the aggregated indices."""
+        self.revision_index.remove_index(pack.revision_index, pack)
+        self.inventory_index.remove_index(pack.inventory_index, pack)
+        self.text_index.remove_index(pack.text_index, pack)
+        self.signature_index.remove_index(pack.signature_index, pack)

     def reset(self):
         """Clear all cached data."""
         # cached revision data
+        self.repo._revision_knit = None
         self.revision_index.clear()
         # cached signature data
+        self.repo._signature_knit = None
         self.signature_index.clear()
         # cached file text data
         self.text_index.clear()
+        self.repo._text_knit = None
         # cached inventory data
         self.inventory_index.clear()
-        # cached chk data
-        if self.chk_index is not None:
-            self.chk_index.clear()
         # remove the open pack
         self._new_pack = None
         # information about packs.
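
The data-driven `_remove_pack_indices` above walks `Pack.index_definitions` instead of naming the four aggregate indices by hand, skips an unconfigured chk aggregate, and can suppress `KeyError` for indices already gone. Sketch (sets stand in for AggregateIndex objects):

# Sketch of the data-driven index removal; sets stand in for AggregateIndex.
index_definitions = ('revision', 'inventory', 'text', 'signature', 'chk')
aggregates = {t: {'%s-of-pack1' % t} for t in index_definitions[:4]}
aggregates['chk'] = None   # this format has no chk index configured

def remove_pack_indices(pack_name, ignore_missing=False):
    for index_type in index_definitions:
        aggregate = aggregates[index_type]
        if aggregate is None:
            continue
        try:
            aggregate.remove('%s-of-%s' % (index_type, pack_name))
        except KeyError:
            if ignore_missing:
                continue
            raise

remove_pack_indices('pack1')
remove_pack_indices('pack1', ignore_missing=True)   # second call is harmless
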
@@ -1890 +1754 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
-        orig_disk_nodes = set(disk_nodes)

         # do a two-way diff against our original content
         current_nodes = set()
@@ -1909 +1772 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)

-        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
+        return disk_nodes, deleted_nodes, new_nodes

     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1955 +1818 @@
                 added.append(name)
         return removed, added, modified

-    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
+    def _save_pack_names(self, clear_obsolete_packs=False):
         """Save the list of packs.

         This will take out the mutex around the pack names list for the
@@ -1965 +1828 @@

         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
-        :param obsolete_packs: Packs that are obsolete once the new pack-names
-            file has been written.
-        :return: A list of the names saved that were not previously on disk.
         """
-        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            (disk_nodes, deleted_nodes, new_nodes,
-             orig_disk_nodes) = self._diff_pack_names()
+            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1982 +1840 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
+            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                to_preserve = None
-                if obsolete_packs:
-                    to_preserve = set([o.name for o in obsolete_packs])
-                already_obsolete = self._clear_obsolete_packs(to_preserve)
+                self._clear_obsolete_packs()
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
-        if obsolete_packs:
-            # TODO: We could add one more condition here. "if o.name not in
-            #       orig_disk_nodes and o != the new_pack we haven't written to
-            #       disk yet. However, the new pack object is not easily
-            #       accessible here (it would have to be passed through the
-            #       autopacking code, etc.)
-            obsolete_packs = [o for o in obsolete_packs
-                              if o.name not in already_obsolete]
-            self._obsolete_packs(obsolete_packs)
-        return [new_node[0][0] for new_node in new_nodes]

     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
@@ -2021 +1867 @@
         if first_read:
             return True
         # out the new value.
-        (disk_nodes, deleted_nodes, new_nodes,
-         orig_disk_nodes) = self._diff_pack_names()
-        # _packs_at_load is meant to be the explicit list of names in
-        # 'pack-names' at then start. As such, it should not contain any
-        # pending names that haven't been written out yet.
-        self._packs_at_load = orig_disk_nodes
+        disk_nodes, _, _ = self._diff_pack_names()
+        self._packs_at_load = disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -2041 +1883 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())

-    def _clear_obsolete_packs(self, preserve=None):
+    def _clear_obsolete_packs(self):
         """Delete everything from the obsolete-packs directory.
-
-        :return: A list of pack identifiers (the filename without '.pack') that
-            were found in obsolete_packs.
         """
-        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
-        if preserve is None:
-            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
-            name, ext = osutils.splitext(filename)
-            if ext == '.pack':
-                found.append(name)
-            if name in preserve:
-                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s"
-                        % (e,))
-        return found
+                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
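
`_clear_obsolete_packs(preserve=...)` on one side reports every '.pack' name it finds but leaves the preserved names on disk; `_save_pack_names` uses that to avoid double-obsoleting packs it is about to move itself. Illustrative sketch with a plain list standing in for the obsolete_packs directory:

import os

def clear_obsolete_packs(listing, preserve=None):
    if preserve is None:
        preserve = set()
    found, kept = [], []
    for filename in listing:
        name, ext = os.path.splitext(filename)
        if ext == '.pack':
            found.append(name)
        if name in preserve:
            kept.append(filename)
            continue
        # a real implementation would delete `filename` here
    return found, kept

found, kept = clear_obsolete_packs(
    ['old.pack', 'old.rix', 'new.pack'], preserve={'new'})
print(found)  # ['old', 'new']
print(kept)   # ['new.pack']
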
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
         if not self.repo.is_write_locked():
             raise errors.NotWriteLocked(self)
-        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
+        self._new_pack = NewPack(self, upload_suffix='.pack',
             file_mode=self.repo.bzrdir._get_file_mode())
         # allow writing: queue writes to a new index
         self.revision_index.add_writable_index(self._new_pack.revision_index,
@@ -2077 +1906 @@
             self._new_pack)
         self.text_index.add_writable_index(self._new_pack.text_index,
             self._new_pack)
-        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
         self.signature_index.add_writable_index(self._new_pack.signature_index,
             self._new_pack)
-        if self.chk_index is not None:
-            self.chk_index.add_writable_index(self._new_pack.chk_index,
-                self._new_pack)
-            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
-            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

         self.repo.inventories._index._add_callback = self.inventory_index.add_callback
         self.repo.revisions._index._add_callback = self.revision_index.add_callback
@@ -2095 +1918 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
-            operation.add_cleanup(setattr, self, '_new_pack', None)
-            # If we aborted while in the middle of finishing the write
-            # group, _remove_pack_indices could fail because the indexes are
-            # already gone.  But they're not there we shouldn't fail in this
-            # case, so we pass ignore_missing=True.
-            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                self._new_pack.abort()
+            finally:
+                # XXX: If we aborted while in the middle of finishing the write
+                # group, _remove_pack_indices can fail because the indexes are
+                # already gone.  If they're not there we shouldn't fail in this
+                # case.  -- mbp 20081113
+                self._remove_pack_indices(self._new_pack)
+                self._new_pack = None
         for resumed_pack in self._resumed_packs:
-            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
-            # See comment in previous finally block.
-            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                resumed_pack.abort()
+            finally:
+                # See comment in previous finally block.
+                try:
+                    self._remove_pack_indices(resumed_pack)
+                except KeyError:
+                    pass
         del self._resumed_packs[:]
+        self.repo._text_knit = None

     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]

-    def _check_new_inventories(self):
-        """Detect missing inventories in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
-        """
-        # The base implementation does no checks.  GCRepositoryPackCollection
-        # overrides this.
-        return []
-
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
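
The abort path above swaps nested try/finally blocks for `bzrlib.cleanup.OperationWithCleanups`, which registers a list of cleanups to run after the primary action so that one failing cleanup does not prevent the rest from running. A minimal stand-in for that idea (not the real API):

# Minimal stand-in: run a primary action, then every cleanup, regardless of
# individual cleanup failures. (bzrlib's version logs cleanup errors rather
# than swallowing them silently, which this sketch does not attempt.)
def run_with_cleanups(func, cleanups):
    try:
        return func()
    finally:
        for cleanup, args in cleanups:
            try:
                cleanup(*args)
            except Exception:
                pass   # assumed simplification; see note above

state = {'new_pack': object()}
run_with_cleanups(lambda: None,
                  [(state.pop, ('new_pack',))])   # cleanup: drop the pack
assert 'new_pack' not in state
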
@@ -2141 +1958 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
-        problems = self._check_new_inventories()
-        if problems:
-            problems_summary = '\n'.join(problems)
-            raise errors.BzrCheckError(
-                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        any_new_content = False
+        should_autopack = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            any_new_content = True
+            should_autopack = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2164 +1976 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            any_new_content = True
+            should_autopack = True
         del self._resumed_packs[:]
-        if any_new_content:
-            result = self.autopack()
-            if not result:
+        if should_autopack:
+            if not self.autopack():
                 # when autopack takes no steps, the names list is still
                 # unsaved.
-                return self._save_pack_names()
-            return result
-        return []
+                self._save_pack_names()
+        self.repo._text_knit = None

     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2187 +1997 @@
             self._new_pack.abort()
             self._new_pack = None
         self._remove_resumed_pack_indices()
+        self.repo._text_knit = None
         return tokens

     def _resume_write_group(self, tokens):
@@ -2229 +2040 @@
             self._transport.clone('upload'),
             self._transport.clone('packs'),
             _format.index_builder_class,
-            _format.index_class,
-            use_chk_index=self._format.supports_chks,
-            )
+            _format.index_class)
         self.inventories = KnitVersionedFiles(
             _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                 add_callback=self._pack_collection.inventory_index.add_callback,
@@ -2241 +2050 @@
         self.revisions = KnitVersionedFiles(
             _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
-                deltas=False, parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True),
+                deltas=False, parents=True, is_locked=self.is_locked),
             data_access=self._pack_collection.revision_index.data_access,
             max_delta_chain=0)
         self.signatures = KnitVersionedFiles(
@@ -2257 +2065 @@
                 deltas=True, parents=True, is_locked=self.is_locked),
             data_access=self._pack_collection.text_index.data_access,
             max_delta_chain=200)
-        if _format.supports_chks:
-            # No graph, no compression:- references from chks are between
-            # different objects not temporal versions of the same; and without
-            # some sort of temporal structure knit compression will just fail.
-            self.chk_bytes = KnitVersionedFiles(
-                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
-                    add_callback=self._pack_collection.chk_index.add_callback,
-                    deltas=False, parents=False, is_locked=self.is_locked),
-                data_access=self._pack_collection.chk_index.data_access,
-                max_delta_chain=0)
-        else:
-            self.chk_bytes = None
         # True when the repository object is 'write locked' (as opposed to the
         # physical lock only taken out around changes to the pack-names list.)
         # Another way to represent this would be a decorator around the control
@@ -2281 +2077 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False

-    def _warn_if_deprecated(self, branch=None):
+    def _warn_if_deprecated(self):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            super(KnitPackRepository, self)._warn_if_deprecated(branch)
+            from bzrlib import repository
+            if repository._deprecation_warning_done:
+                return
+            repository._deprecation_warning_done = True
+            warning("Format %s for %s is deprecated - please use"
+                    " 'bzr upgrade --1.6.1-rich-root'"
+                    % (self._format, self.bzrdir.transport.base))

     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()

-    def _get_source(self, to_format):
-        if to_format.network_name() == self._format.network_name():
-            return KnitPackStreamSource(self, to_format)
-        return super(KnitPackRepository, self)._get_source(to_format)
+    def _find_inconsistent_revision_parents(self):
+        """Find revisions with incorrectly cached parents.
+
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+        pb = ui.ui_factory.nested_progress_bar()
+        result = []
+        try:
+            revision_nodes = self._pack_collection.revision_index \
+                .combined_index.iter_all_entries()
+            index_positions = []
+            # Get the cached index values for all revisions, and also the location
+            # in each index of the revision text so we can perform linear IO.
+            for index, key, value, refs in revision_nodes:
+                pos, length = value[1:].split(' ')
+                index_positions.append((index, int(pos), key[0],
+                    tuple(parent[0] for parent in refs[0])))
+                pb.update("Reading revision index", 0, 0)
+            index_positions.sort()
+            batch_count = len(index_positions) / 1000 + 1
+            pb.update("Checking cached revision graph", 0, batch_count)
+            for offset in xrange(batch_count):
+                pb.update("Checking cached revision graph", offset)
+                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
+                if not to_query:
+                    break
+                rev_ids = [item[2] for item in to_query]
+                revs = self.get_revisions(rev_ids)
+                for revision, item in zip(revs, to_query):
+                    index_parents = item[3]
+                    rev_parents = tuple(revision.parent_ids)
+                    if index_parents != rev_parents:
+                        result.append((revision.revision_id, index_parents, rev_parents))
+        finally:
+            pb.finished()
+        return result

     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
2307
2143
        self._pack_collection._start_write_group()
2308
2144
 
2309
2145
    def _commit_write_group(self):
2310
 
        hint = self._pack_collection._commit_write_group()
2311
 
        self.revisions._index._key_dependencies.clear()
2312
 
        return hint
 
2146
        return self._pack_collection._commit_write_group()

     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens

     def _resume_write_group(self, tokens):
         self._start_write_group()
+        try:
+            self._pack_collection._resume_write_group(tokens)
+        except errors.UnresumableWriteGroup:
+            self._abort_write_group()
+            raise
+        for pack in self._pack_collection._resumed_packs:
+            self.revisions._index.scan_unvalidated_index(pack.revision_index)
-        self._pack_collection._resume_write_group(tokens)
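A sketch of the suspend/resume cycle these methods support, via the public
Repository wrappers; `repo` is assumed to be a write-locked pack repository:

    repo.start_write_group()
    # ... insert revisions, inventories, texts ...
    tokens = repo.suspend_write_group()  # names of the suspended packs
    # later, under the same physical lock:
    repo.resume_write_group(tokens)      # the added lines above re-scan
                                         # the resumed revision indexes
    repo.commit_write_group()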

     def get_transaction(self):
         if self._write_lock_count:
...
         self._write_lock_count += 1
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
+        if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
-        if not locked:
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)

     def lock_read(self):
         locked = self.is_locked()
...
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
+        if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
-                # Writes don't affect fallback repos
                 repo.lock_read()
-        if not locked:
             self._refresh_data()
+        return self
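The 'relock' flag consulted by both lock methods is a global debug flag; a
sketch of enabling it programmatically (-Drelock on the command line is
assumed equivalent):

    from bzrlib import debug
    debug.debug_flags.add('relock')
    repo.lock_read()
    repo.lock_read()    # second call is noted: '... was read locked again'
    repo.unlock()
    repo.unlock()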

     def leave_lock_in_place(self):
         # not supported - raise an error
...
         raise NotImplementedError(self.dont_leave_lock_in_place)

     @needs_write_lock
+    def pack(self, hint=None, clean_obsolete_packs=False):
-    def pack(self):
         """Compress the data within the repository.

         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
-        self._pack_collection.pack()
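A sketch tying the new parameters to the write-group changes above,
assuming the public commit_write_group() passes the hint through: the hint
names packs worth repacking, so pack(hint=...) can avoid rewriting
everything:

    repo.start_write_group()
    # ... insert data ...
    hint = repo.commit_write_group()
    repo.pack(hint=hint, clean_obsolete_packs=True)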

     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
...
         reconciler.reconcile()
         return reconciler

+    def _reconcile_pack(self, collection, packs, extension, revs, pb):
+        packer = ReconcilePacker(collection, packs, extension, revs)
+        return packer.pack(pb)
+
+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
...
                 transaction = self._transaction
                 self._transaction = None
                 transaction.finish()
-                for repo in self._fallback_repositories:
-                    repo.unlock()
         else:
             self.control_files.unlock()
+
+        if not self.is_locked():
             for repo in self._fallback_repositories:
                 repo.unlock()
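The @only_raises decorator added above declares that unlock() may raise
only LockNotHeld and LockBroken; any other exception raised during unlock
is logged and suppressed, so cleanup cannot mask an earlier error. A sketch
(do_work is hypothetical):

    repo.lock_write()
    try:
        do_work(repo)   # may raise
    finally:
        repo.unlock()   # a failure here no longer shadows do_work's error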


+class KnitPackStreamSource(StreamSource):
+    """A StreamSource used to transfer data between same-format KnitPack repos.
+
+    This source assumes:
+        1) Same serialization format for all objects
+        2) Same root information
+        3) XML format inventories
+        4) Atomic inserts (so we can stream inventory texts before text
+           content)
+        5) No chk_bytes
+    """
+
+    def __init__(self, from_repository, to_format):
+        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
+        self._text_keys = None
+        self._text_fetch_order = 'unordered'
+
+    def _get_filtered_inv_stream(self, revision_ids):
+        from_repo = self.from_repository
+        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
+        parent_keys = [(p,) for p in parent_ids]
+        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
+        parent_text_keys = set(find_text_keys(
+            from_repo._inventory_xml_lines_for_keys(parent_keys)))
+        content_text_keys = set()
+        knit = KnitVersionedFiles(None, None)
+        factory = KnitPlainFactory()
+        def find_text_keys_from_content(record):
+            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
+                raise ValueError("Unknown content storage kind for"
+                    " inventory text: %s" % (record.storage_kind,))
+            # It's a knit record, it has a _raw_record field (even if it was
+            # reconstituted from a network stream).
+            raw_data = record._raw_record
+            # read the entire thing
+            revision_id = record.key[-1]
+            content, _ = knit._parse_record(revision_id, raw_data)
+            if record.storage_kind == 'knit-delta-gz':
+                line_iterator = factory.get_linedelta_content(content)
+            elif record.storage_kind == 'knit-ft-gz':
+                line_iterator = factory.get_fulltext_content(content)
+            content_text_keys.update(find_text_keys(
+                [(line, revision_id) for line in line_iterator]))
+        revision_keys = [(r,) for r in revision_ids]
+        def _filtered_inv_stream():
+            source_vf = from_repo.inventories
+            stream = source_vf.get_record_stream(revision_keys,
+                                                 'unordered', False)
+            for record in stream:
+                if record.storage_kind == 'absent':
+                    raise errors.NoSuchRevision(from_repo, record.key)
+                find_text_keys_from_content(record)
+                yield record
+            self._text_keys = content_text_keys - parent_text_keys
+        return ('inventories', _filtered_inv_stream())
+
+    def _get_text_stream(self):
+        # Note: We know we don't have to handle adding root keys, because
+        # the source and target have the identical network name.
+        text_stream = self.from_repository.texts.get_record_stream(
+                        self._text_keys, self._text_fetch_order, False)
+        return ('texts', text_stream)
+
+    def get_stream(self, search):
+        revision_ids = search.get_keys()
+        for stream_info in self._fetch_revision_texts(revision_ids):
+            yield stream_info
+        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+        yield self._get_filtered_inv_stream(revision_ids)
+        yield self._get_text_stream()
+
+
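A sketch of driving the stream source added above; `source_repo`,
`target_format` and `search` are assumed (a KnitPack repository, a matching
format, and a SearchResult naming the wanted revisions):

    source = KnitPackStreamSource(source_repo, target_format)
    for kind, substream in source.get_stream(search):
        # revision/signature substreams come first, then 'inventories'
        # (which computes self._text_keys as a side effect), then 'texts'
        for record in substream:
            pass  # a real sink copies each record into the target repo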
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.

...
     supports_ghosts = True
     # External references are not supported in pack repositories yet.
     supports_external_lookups = False
+    # Most pack formats do not use chk lookups.
+    supports_chks = False
     # What index classes to use
     index_builder_class = None
     index_class = None
...
         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
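A sketch of the hook the added lines fire. The wiring is an assumption
here: the hook point is taken to be 'post_repo_init' on
bzrlib.bzrdir.BzrDir.hooks, with a params object carrying the new
repository:

    from bzrlib import bzrdir

    def announce_repo(params):
        # params.repository is the repository initialize() just created
        print "initialized %r (shared=%s)" % (params.repository, params.shared)

    bzrdir.BzrDir.hooks.install_named_hook(
        'post_repo_init', announce_repo, 'announce new repositories')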

     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
...
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"

-    def check_conversion_target(self, target_format):
-        pass
-

 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
...
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
...

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
...

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
...
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"

-    def check_conversion_target(self, target_format):
-        pass
-

 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
...

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
...

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
...
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"

-    def check_conversion_target(self, target_format):
-        pass
-

 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
...

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
...
         return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


-class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
-    """A no-subtrees development repository.
-
-    This format should be retained until the second release after bzr 1.7.
-
-    This is pack-1.6.1 with B+Tree indices.
-    """
-
-    repository_class = KnitPackRepository
-    _commit_builder_class = PackCommitBuilder
-    supports_external_lookups = True
-    # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
-    # Set to true to get the fast-commit code path tested until a really fast
-    # format lands in trunk. Not actually fast in this format.
-    fast_deltas = True
-
-    @property
-    def _serializer(self):
-        return xml5.serializer_v5
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development2')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        """See RepositoryFormat.get_format_string()."""
-        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"
-
-    def get_format_description(self):
-        """See RepositoryFormat.get_format_description()."""
-        return ("Development repository format, currently the same as "
-            "1.6.1 with B+Trees.\n")
-
-    def check_conversion_target(self, target_format):
-        pass
-
-
 class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
     """A subtrees development repository.

     This format should be retained until the second release after bzr 1.7.

     1.6.1-subtree[as it might have been] with B+Tree indices.
+
+    This is [now] retained until we have a CHK based subtree format in
+    development.
     """

     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
...

     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir(
+            'development-subtree')
-            'development2-subtree')

     def _ignore_setting_bzrdir(self, format):
         pass

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "
...
         """See RepositoryFormat.get_format_description()."""
         return ("Development repository format, currently the same as "
             "1.6.1-subtree with B+Tree indices.\n")