/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-03-12 03:39:10 UTC
  • mfrom: (4103.3.5 progress)
  • Revision ID: pqm@pqm.ubuntu.com-20090312033910-9umj7rwjo98zl7up
(mbp) small progress improvements

Diff shown in unified form ('-' = removed, '+' = added):

--- bzrlib/repofmt/pack_repo.py (old)
+++ bzrlib/repofmt/pack_repo.py (new)
@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -12,7 +12,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
 import re
 import sys
@@ -23,6 +23,8 @@
 import time
 
 from bzrlib import (
+    chk_map,
+    cleanup,
     debug,
     graph,
     osutils,
@@ -35,10 +37,7 @@
     )
 from bzrlib.index import (
     CombinedGraphIndex,
-    GraphIndex,
-    GraphIndexBuilder,
     GraphIndexPrefixAdapter,
-    InMemoryGraphIndex,
     )
 from bzrlib.knit import (
     KnitPlainFactory,
@@ -53,10 +52,10 @@
     errors,
     lockable_files,
     lockdir,
-    symbol_versioning,
+    revision as _mod_revision,
     )
 
-from bzrlib.decorators import needs_write_lock
+from bzrlib.decorators import needs_write_lock, only_raises
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -71,10 +70,11 @@
     MetaDirRepositoryFormat,
     RepositoryFormat,
     RootCommitBuilder,
+    StreamSource,
     )
-import bzrlib.revision as _mod_revision
 from bzrlib.trace import (
     mutter,
+    note,
     warning,
     )
 
@@ -131,6 +131,7 @@
     # A map of index 'type' to the file extension and position in the
     # index_sizes array.
     index_definitions = {
+        'chk': ('.cix', 4),
         'revision': ('.rix', 0),
         'inventory': ('.iix', 1),
         'text': ('.tix', 2),
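
The new 'chk' entry above assigns CHK indices the '.cix' suffix and slot 4 in the index_sizes list. A minimal standalone sketch of how such a map can drive both index file naming and size bookkeeping (not the bzrlib implementation; the 'signature' entry is inferred from the '.six': 3 suffix-offset table that appears later in this diff):

    # Hypothetical stand-in for Pack.index_definitions and its helpers.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),  # assumed from the suffix-offset map below
    }

    def index_name(index_type, pack_name):
        suffix, _ = index_definitions[index_type]
        return pack_name + suffix

    def index_offset(index_type):
        # Position of this index's size within a pack's index_sizes list.
        return index_definitions[index_type][1]

    assert index_name('revision', 'a1b2') == 'a1b2.rix'
    assert index_offset('chk') == 4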
@@ -138,7 +139,7 @@
         }
 
     def __init__(self, revision_index, inventory_index, text_index,
-        signature_index):
+        signature_index, chk_index=None):
         """Create a pack instance.
 
         :param revision_index: A GraphIndex for determining what revisions are
@@ -151,11 +152,14 @@
             texts/deltas (via (fileid, revisionid) tuples).
         :param signature_index: A GraphIndex for determining what signatures are
             present in the Pack and accessing the locations of their texts.
+        :param chk_index: A GraphIndex for accessing content by CHK, if the
+            pack has one.
         """
         self.revision_index = revision_index
         self.inventory_index = inventory_index
         self.text_index = text_index
         self.signature_index = signature_index
+        self.chk_index = chk_index
 
     def access_tuple(self):
         """Return a tuple (transport, name) for the pack content."""
@@ -222,24 +226,28 @@
         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))
 
 
 class ExistingPack(Pack):
     """An in memory proxy for an existing .pack and its disk indices."""
 
     def __init__(self, pack_transport, name, revision_index, inventory_index,
-        text_index, signature_index):
+        text_index, signature_index, chk_index=None):
         """Create an ExistingPack object.
 
         :param pack_transport: The transport where the pack file resides.
         :param name: The name of the pack on disk in the pack_transport.
         """
         Pack.__init__(self, revision_index, inventory_index, text_index,
-            signature_index)
+            signature_index, chk_index)
         self.name = name
         self.pack_transport = pack_transport
         if None in (revision_index, inventory_index, text_index,
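
_replace_index_with_readonly now opens CHK indices with unlimited_cache=True: CHK pages are addressed by content hash, so lookups have little locality for a size-capped page cache to exploit. A small sketch of the flag selection, assuming only that the underlying index class accepts such a keyword (as BTreeGraphIndex does in this diff):

    def readonly_index_options(index_type):
        # Only the CHK index keeps every node cached; the rest use the
        # default bounded cache.
        return {'unlimited_cache': index_type == 'chk'}

    assert readonly_index_options('chk') == {'unlimited_cache': True}
    assert readonly_index_options('text') == {'unlimited_cache': False}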
@@ -262,10 +270,11 @@
 
     def __init__(self, name, revision_index, inventory_index, text_index,
         signature_index, upload_transport, pack_transport, index_transport,
-        pack_collection):
+        pack_collection, chk_index=None):
         """Create a ResumedPack object."""
         ExistingPack.__init__(self, pack_transport, name, revision_index,
-            inventory_index, text_index, signature_index)
+            inventory_index, text_index, signature_index,
+            chk_index=chk_index)
         self.upload_transport = upload_transport
         self.index_transport = index_transport
         self.index_sizes = [None, None, None, None]
@@ -275,6 +284,9 @@
             ('text', text_index),
             ('signature', signature_index),
             ]
+        if chk_index is not None:
+            indices.append(('chk', chk_index))
+            self.index_sizes.append(None)
         for index_type, index in indices:
             offset = self.index_offset(index_type)
             self.index_sizes[offset] = index._size
@@ -295,21 +307,31 @@
         self.upload_transport.delete(self.file_name())
         indices = [self.revision_index, self.inventory_index, self.text_index,
             self.signature_index]
+        if self.chk_index is not None:
+            indices.append(self.chk_index)
         for index in indices:
             index._transport.delete(index._name)
 
     def finish(self):
         self._check_references()
-        new_name = '../packs/' + self.file_name()
-        self.upload_transport.rename(self.file_name(), new_name)
-        for index_type in ['revision', 'inventory', 'text', 'signature']:
+        index_types = ['revision', 'inventory', 'text', 'signature']
+        if self.chk_index is not None:
+            index_types.append('chk')
+        for index_type in index_types:
             old_name = self.index_name(index_type, self.name)
             new_name = '../indices/' + old_name
             self.upload_transport.rename(old_name, new_name)
             self._replace_index_with_readonly(index_type)
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
         self._state = 'finished'
 
     def _get_external_refs(self, index):
+        """Return compression parents for this index that are not present.
+
+        This returns any compression parents that are referenced by this index,
+        which are not contained *in* this index. They may be present elsewhere.
+        """
         return index.external_references(1)
 
 
@@ -327,6 +349,10 @@
         # The relative locations of the packs are constrained, but all are
         # passed in because the caller has them, so as to avoid object churn.
         index_builder_class = pack_collection._index_builder_class
+        if pack_collection.chk_index is not None:
+            chk_index = index_builder_class(reference_lists=0)
+        else:
+            chk_index = None
         Pack.__init__(self,
             # Revisions: parents list, no text compression.
             index_builder_class(reference_lists=1),
@@ -341,6 +367,8 @@
             # Signatures: Just blobs to store, no compression, no parents
             # listing.
             index_builder_class(reference_lists=0),
+            # CHK based storage - just blobs, no compression or parents.
+            chk_index=chk_index
             )
         self._pack_collection = pack_collection
         # When we make readonly indices, we need this.
@@ -355,8 +383,8 @@
         self._file_mode = file_mode
         # tracks the content written to the .pack file.
         self._hash = osutils.md5()
-        # a four-tuple with the length in bytes of the indices, once the pack
-        # is finalised. (rev, inv, text, sigs)
+        # a tuple with the length in bytes of the indices, once the pack
+        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
         self.index_sizes = None
         # How much data to cache when writing packs. Note that this is not
         # synchronised with reads, because it's not in the transport layer, so
@@ -400,6 +428,8 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
+        # no name until we finish writing the content
+        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -423,7 +453,16 @@
         return bool(self.get_revision_count() or
             self.inventory_index.key_count() or
             self.text_index.key_count() or
-            self.signature_index.key_count())
+            self.signature_index.key_count() or
+            (self.chk_index is not None and self.chk_index.key_count()))
+
+    def finish_content(self):
+        if self.name is not None:
+            return
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
 
     def finish(self, suspend=False):
         """Finish the new pack.
@@ -436,10 +475,7 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
+        self.finish_content()
         if not suspend:
             self._check_references()
         # write indices
@@ -454,6 +490,10 @@
         self._write_index('text', self.text_index, 'file texts', suspend)
         self._write_index('signature', self.signature_index,
             'revision signatures', suspend)
+        if self.chk_index is not None:
+            self.index_sizes.append(None)
+            self._write_index('chk', self.chk_index,
+                'content hash bytes', suspend)
         self.write_stream.close()
         # Note that this will clobber an existing pack with the same name,
         # without checking for hash collisions. While this is undesirable this
@@ -532,7 +572,7 @@
     # XXX: Probably 'can be written to' could/should be separated from 'acts
     # like a knit index' -- mbp 20071024
 
-    def __init__(self, reload_func=None):
+    def __init__(self, reload_func=None, flush_func=None):
         """Create an AggregateIndex.
 
         :param reload_func: A function to call if we find we are missing an
@@ -543,27 +583,8 @@
         self.index_to_pack = {}
         self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
         self.data_access = _DirectPackAccess(self.index_to_pack,
-                                             reload_func=reload_func)
-        self.add_callback = None
-
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # there is one.
+                                             reload_func=reload_func,
+                                             flush_func=flush_func)
         self.add_callback = None
 
     def add_index(self, index, pack):
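
AggregateIndex now also accepts a flush_func, which _DirectPackAccess can call before reading so that data still buffered by an in-progress pack writer becomes visible. A rough standalone model of that callback wiring (hypothetical classes, not the bzrlib ones):

    class BufferedWriter(object):
        def __init__(self):
            self.buffered = []
            self.flushed = []

        def flush(self):
            self.flushed.extend(self.buffered)
            del self.buffered[:]

    class Reader(object):
        def __init__(self, flush_func=None):
            self._flush_func = flush_func

        def read_all(self, writer):
            if self._flush_func is not None:
                self._flush_func()  # make pending writes visible first
            return list(writer.flushed)

    writer = BufferedWriter()
    writer.buffered.append('record-1')
    reader = Reader(flush_func=writer.flush)
    assert reader.read_all(writer) == ['record-1']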
@@ -578,7 +599,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -604,16 +625,18 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
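
Because CombinedGraphIndex now keeps a parallel _index_names list alongside _indices, remove_index looks the index up once by value and then deletes by position from both lists, keeping them aligned. The same pattern in isolation:

    indices = ['idx-a', 'idx-b', 'idx-c']
    names = ['pack-a', 'pack-b', 'pack-c']

    def remove(index):
        pos = indices.index(index)  # find once ...
        del indices[pos]            # ... delete by position in both lists
        del names[pos]

    remove('idx-b')
    assert indices == ['idx-a', 'idx-c']
    assert names == ['pack-a', 'pack-c']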
@@ -725,8 +748,16 @@
 
     def open_pack(self):
         """Open a pack for the pack we are creating."""
-        return NewPack(self._pack_collection, upload_suffix=self.suffix,
+        new_pack = self._pack_collection.pack_factory(self._pack_collection,
+                upload_suffix=self.suffix,
                 file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
+        # We know that we will process all nodes in order, and don't need to
+        # query, so don't combine any indices spilled to disk until we are done
+        new_pack.revision_index.set_optimize(combine_backing_indices=False)
+        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
+        new_pack.text_index.set_optimize(combine_backing_indices=False)
+        new_pack.signature_index.set_optimize(combine_backing_indices=False)
+        return new_pack
 
     def _update_pack_order(self, entries, index_to_pack_map):
         """Determine how we want our packs to be ordered.
@@ -889,6 +920,17 @@
                 time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                 new_pack.signature_index.key_count(),
                 time.time() - new_pack.start_time)
+        # copy chk contents
+        # NB XXX: how to check CHK references are present? perhaps by yielding
+        # the items? How should that interact with stacked repos?
+        if new_pack.chk_index is not None:
+            self._copy_chks()
+            if 'pack' in debug.debug_flags:
+                mutter('%s: create_pack: chk content copied: %s%s %d items t+%6.3fs',
+                    time.ctime(), self._pack_collection._upload_transport.base,
+                    new_pack.random_name,
+                    new_pack.chk_index.key_count(),
+                    time.time() - new_pack.start_time)
         new_pack._check_references()
         if not self._use_pack(new_pack):
             new_pack.abort()
@@ -898,16 +940,43 @@
         self._pack_collection.allocate(new_pack)
         return new_pack
 
-    def _copy_nodes(self, nodes, index_map, writer, write_index):
-        """Copy knit nodes between packs with no graph references."""
+    def _copy_chks(self, refs=None):
+        # XXX: Todo, recursive follow-pointers facility when fetching some
+        # revisions only.
+        chk_index_map, chk_indices = self._pack_map_and_index_list(
+            'chk_index')
+        chk_nodes = self._index_contents(chk_indices, refs)
+        new_refs = set()
+        # TODO: This isn't strictly tasteful as we are accessing some private
+        #       variables (_serializer). Perhaps a better way would be to have
+        #       Repository._deserialise_chk_node()
+        search_key_func = chk_map.search_key_registry.get(
+            self._pack_collection.repo._serializer.search_key_name)
+        def accumlate_refs(lines):
+            # XXX: move to a generic location
+            # Yay mismatch:
+            bytes = ''.join(lines)
+            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
+            new_refs.update(node.refs())
+        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
+            self.new_pack.chk_index, output_lines=accumlate_refs)
+        return new_refs
+
+    def _copy_nodes(self, nodes, index_map, writer, write_index,
+        output_lines=None):
+        """Copy knit nodes between packs with no graph references.
+
+        :param output_lines: Output full texts of copied items.
+        """
         pb = ui.ui_factory.nested_progress_bar()
         try:
             return self._do_copy_nodes(nodes, index_map, writer,
-                write_index, pb)
+                write_index, pb, output_lines=output_lines)
         finally:
             pb.finished()
 
-    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
+    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
+        output_lines=None):
         # for record verification
         knit = KnitVersionedFiles(None, None)
         # plan a readv on each source pack:
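
The output_lines hook added to _copy_nodes lets a caller receive the full text of every copied record; _copy_chks uses it to accumulate the CHK references reachable from the copied nodes. A simplified standalone model of the hook (record parsing and the bzrlib types are elided; all names here are illustrative):

    def copy_records(records, output_lines=None):
        copied = []
        for key, lines in records:
            if output_lines is not None:
                output_lines(lines)  # caller wants the full text, e.g. for refs
            copied.append(key)
        return copied

    new_refs = set()
    def accumulate_refs(lines):
        for line in lines:
            if line.startswith('ref:'):
                new_refs.add(line[4:])

    records = [('k1', ['ref:sha1-a', 'data']), ('k2', ['data'])]
    copy_records(records, output_lines=accumulate_refs)
    assert new_refs == set(['sha1-a'])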
@@ -947,8 +1016,11 @@
                 izip(reader.iter_records(), pack_readv_requests):
                 raw_data = read_func(None)
                 # check the header only
-                df, _ = knit._parse_record_header(key, raw_data)
-                df.close()
+                if output_lines is not None:
+                    output_lines(knit._parse_record(key[-1], raw_data)[0])
+                else:
+                    df, _ = knit._parse_record_header(key, raw_data)
+                    df.close()
                 pos, size = writer.add_bytes_record(raw_data, names)
                 write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                 pb.update("Copied record", record_index)
@@ -1028,7 +1100,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the the node retrieved by the
+            (key, eol_flag, references) for the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1225,7 +1297,7 @@
         # space (we only topo sort the revisions, which is smaller).
         topo_order = tsort.topo_sort(ancestors)
         rev_order = dict(zip(topo_order, range(len(topo_order))))
-        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
+        bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
         transaction = repo.get_transaction()
         file_id_index = GraphIndexPrefixAdapter(
             self.new_pack.text_index,
@@ -1284,8 +1356,12 @@
     :ivar _names: map of {pack_name: (index_size,)}
     """
 
+    pack_factory = NewPack
+    resumed_pack_factory = ResumedPack
+
     def __init__(self, repo, transport, index_transport, upload_transport,
-                 pack_transport, index_builder_class, index_class):
+                 pack_transport, index_builder_class, index_class,
+                 use_chk_index):
         """Create a new RepositoryPackCollection.
 
         :param transport: Addresses the repository base directory
@@ -1296,7 +1372,9 @@
         :param pack_transport: Addresses the directory of existing complete packs.
         :param index_builder_class: The index builder class to use.
         :param index_class: The index class to use.
+        :param use_chk_index: Whether to setup and manage a CHK index.
         """
+        # XXX: This should call self.reset()
         self.repo = repo
         self.transport = transport
         self._index_transport = index_transport
@@ -1304,22 +1382,42 @@
         self._pack_transport = pack_transport
         self._index_builder_class = index_builder_class
         self._index_class = index_class
-        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
+        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
+            '.cix': 4}
         self.packs = []
         # name:Pack mapping
+        self._names = None
         self._packs_by_name = {}
         # the previous pack-names content
         self._packs_at_load = None
         # when a pack is being created by this object, the state of that pack.
         self._new_pack = None
         # aggregated revision index data
-        self.revision_index = AggregateIndex(self.reload_pack_names)
-        self.inventory_index = AggregateIndex(self.reload_pack_names)
-        self.text_index = AggregateIndex(self.reload_pack_names)
-        self.signature_index = AggregateIndex(self.reload_pack_names)
+        flush = self._flush_new_pack
+        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
+        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
+        self.text_index = AggregateIndex(self.reload_pack_names, flush)
+        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
+        if use_chk_index:
+            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
+        else:
+            # used to determine if we're using a chk_index elsewhere.
+            self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
+
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
@@ -1334,6 +1432,8 @@
         self.inventory_index.add_index(pack.inventory_index, pack)
         self.text_index.add_index(pack.text_index, pack)
         self.signature_index.add_index(pack.signature_index, pack)
+        if self.chk_index is not None:
+            self.chk_index.add_index(pack.chk_index, pack)
 
     def all_packs(self):
         """Return a list of all the Pack objects this repository has.
@@ -1361,12 +1461,12 @@
         in synchronisation with certain steps. Otherwise the names collection
         is not flushed.
 
-        :return: True if packing took place.
+        :return: Something evaluating true if packing took place.
         """
         while True:
             try:
                 return self._do_autopack()
-            except errors.RetryAutopack, e:
+            except errors.RetryAutopack:
                 # If we get a RetryAutopack exception, we should abort the
                 # current action, and retry.
                 pass
@@ -1376,9 +1476,7 @@
         total_revisions = self.revision_index.combined_index.key_count()
         total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
-            return False
-        # XXX: the following may want to be a class, to pack with a given
-        # policy.
+            return None
         # determine which packs need changing
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
@@ -1406,9 +1504,10 @@
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        self._execute_pack_operations(pack_operations,
+        result = self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
-        return True
+        mutter('Auto-packing repository %s completed', self)
+        return result
 
     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
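
autopack and _execute_pack_operations now return the new pack names rather than a bare True/False; None or an empty result still evaluates false, so existing boolean callers keep working. A sketch of that calling convention (illustrative names only, not the bzrlib functions):

    def autopack_names(total_packs, max_packs):
        if max_packs >= total_packs:
            return None            # nothing to do; still falsy
        return ['new-pack-name']   # names double as the "packed" flag

    result = autopack_names(total_packs=10, max_packs=3)
    if result:
        packed = ', '.join(result)
    assert result == ['new-pack-name']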
@@ -1416,7 +1515,7 @@
 
         :param pack_operations: A list of [revision_count, packs_to_combine].
         :param _packer_class: The class of packer to use (default: Packer).
-        :return: None.
+        :return: The new pack names.
         """
         for revision_count, packs in pack_operations:
             # we may have no-ops from the setup logic
@@ -1438,10 +1537,16 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
+        return result
+
+    def _flush_new_pack(self):
+        if self._new_pack is not None:
+            self._new_pack.flush()
 
     def lock_names(self):
         """Acquire the mutex around the pack-names index.
@@ -1451,29 +1556,35 @@
         """
         self.repo.control_files.lock_write()
 
-    def pack(self):
+    def _already_packed(self):
+        """Is the collection already packed?"""
+        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
+
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
-        if total_packs < 2:
-            # This is arguably wrong because we might not be optimal, but for
-            # now lets leave it in. (e.g. reconcile -> one pack. But not
-            # optimal.
+        if self._already_packed():
             return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions into 1 packs.', self, total_packs,
-            total_revisions)
+            'containing %d revisions with hint %r.', self, total_packs,
+            total_revisions, hint)
         # determine which packs need changing
-        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            pack_operations[-1][0] += pack.get_revision_count()
-            pack_operations[-1][1].append(pack)
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
+                pack_operations[-1][0] += pack.get_revision_count()
+                pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
+
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
 
@@ -1527,6 +1638,10 @@
         return [[final_rev_count, final_pack_list]]
 
     def ensure_loaded(self):
+        """Ensure we have read names from disk.
+
+        :return: True if the disk names had not been previously read.
+        """
         # NB: if you see an assertion error here, its probably access against
         # an unlocked repo. Naughty.
         if not self.repo.is_locked():
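
The pack() change two hunks above introduced a hint parameter: with no hint every pack is repacked, otherwise only the packs whose names appear in the hint. The selection logic in isolation:

    def select_packs(all_pack_names, hint=None):
        return [name for name in all_pack_names
                if hint is None or name in hint]

    assert select_packs(['a', 'b', 'c']) == ['a', 'b', 'c']
    assert select_packs(['a', 'b', 'c'], hint=['b']) == ['b']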
@@ -1538,8 +1653,12 @@
                 name = key[0]
                 self._names[name] = self._parse_index_sizes(value)
                 self._packs_at_load.add((key, value))
+            result = True
+        else:
+            result = False
         # populate all the metadata.
         self.all_packs()
+        return result
 
     def _parse_index_sizes(self, value):
         """Parse a string of index sizes."""
@@ -1558,8 +1677,12 @@
             inv_index = self._make_index(name, '.iix')
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
+            if self.chk_index is not None:
+                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+            else:
+                chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
-                inv_index, txt_index, sig_index)
+                inv_index, txt_index, sig_index, chk_index)
             self.add_pack_to_memory(result)
             return result
 
@@ -1579,9 +1702,15 @@
             inv_index = self._make_index(name, '.iix', resume=True)
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
-            result = ResumedPack(name, rev_index, inv_index, txt_index,
-                sig_index, self._upload_transport, self._pack_transport,
-                self._index_transport, self)
+            if self.chk_index is not None:
+                chk_index = self._make_index(name, '.cix', resume=True,
+                                             unlimited_cache=True)
+            else:
+                chk_index = None
+            result = self.resumed_pack_factory(name, rev_index, inv_index,
+                txt_index, sig_index, self._upload_transport,
+                self._pack_transport, self._index_transport, self,
+                chk_index=chk_index)
         except errors.NoSuchFile, e:
             raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
         self.add_pack_to_memory(result)
@@ -1611,7 +1740,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1620,7 +1749,8 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1654,14 +1784,26 @@
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
-            for suffix in ('.iix', '.six', '.tix', '.rix'):
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+            suffixes = ['.iix', '.six', '.tix', '.rix']
+            if self.chk_index is not None:
+                suffixes.append('.cix')
+            for suffix in suffixes:
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1693,26 +1835,36 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise
 
     def reset(self):
         """Clear all cached data."""
         # cached revision data
-        self.repo._revision_knit = None
         self.revision_index.clear()
         # cached signature data
-        self.repo._signature_knit = None
         self.signature_index.clear()
         # cached file text data
         self.text_index.clear()
-        self.repo._text_knit = None
         # cached inventory data
         self.inventory_index.clear()
+        # cached chk data
+        if self.chk_index is not None:
+            self.chk_index.clear()
         # remove the open pack
         self._new_pack = None
         # information about packs.
@@ -1737,6 +1889,7 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1755,7 +1908,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1801,7 +1954,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1811,11 +1964,16 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
+        :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
        try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1823,12 +1981,26 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = None
+                if obsolete_packs:
+                    to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
+        return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
@@ -1838,11 +2008,24 @@
         This should be called when we find out that something we thought was
         present is now missing. This happens when another process re-packs the
         repository, etc.
+
+        :return: True if the in-memory list of packs has been altered at all.
         """
-        # This is functionally similar to _save_pack_names, but we don't write
+        # The ensure_loaded call is to handle the case where the first call
+        # made involving the collection was to reload_pack_names, where we 
+        # don't have a view of disk contents. Its a bit of a bandaid, and
+        # causes two reads of pack-names, but its a rare corner case not struck
+        # with regular push/pull etc.
+        first_read = self.ensure_loaded()
+        if first_read:
+            return True
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at then start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -1857,21 +2040,34 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
+        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if ext == '.pack':
+                found.append(name)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
+        return found
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
         if not self.repo.is_write_locked():
             raise errors.NotWriteLocked(self)
-        self._new_pack = NewPack(self, upload_suffix='.pack',
+        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
             file_mode=self.repo.bzrdir._get_file_mode())
         # allow writing: queue writes to a new index
         self.revision_index.add_writable_index(self._new_pack.revision_index,
@@ -1880,8 +2076,14 @@
             self._new_pack)
         self.text_index.add_writable_index(self._new_pack.text_index,
             self._new_pack)
+        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
         self.signature_index.add_writable_index(self._new_pack.signature_index,
             self._new_pack)
+        if self.chk_index is not None:
+            self.chk_index.add_writable_index(self._new_pack.chk_index,
+                self._new_pack)
+            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
+            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)
 
         self.repo.inventories._index._add_callback = self.inventory_index.add_callback
         self.repo.revisions._index._add_callback = self.revision_index.add_callback
@@ -1892,32 +2094,38 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone.  If they're not there we shouldn't fail in this
-                # case.  -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone.  But they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                try:
-                    self._remove_pack_indices(resumed_pack)
-                except KeyError:
-                    pass
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
-        self.repo._text_knit = None
 
     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
 
+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+
+        :returns: list of strs, summarising any problems found.  If the list is
+            empty no problems were found.
+        """
+        # The base implementation does no checks.  GCRepositoryPackCollection
+        # overrides this.
+        return []
+
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
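
_abort_write_group now builds its teardown with cleanup.OperationWithCleanups instead of nested try/finally blocks, so every registered cleanup runs even when the primary abort raises. The pattern reduced to plain Python (a simplified stand-in for the bzrlib.cleanup helper, which also logs cleanup failures):

    def run_with_cleanups(func, cleanups):
        try:
            return func()
        finally:
            for cleanup_func, args in cleanups:
                try:
                    cleanup_func(*args)
                except Exception:
                    pass  # a failing cleanup must not mask the first error

    state = {'new_pack': 'pending'}
    run_with_cleanups(lambda: None, [(state.pop, ('new_pack',))])
    assert 'new_pack' not in state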
2140
1932
            raise errors.BzrCheckError(
2141
1933
                "Repository %s has missing compression parent(s) %r "
2142
1934
                 % (self.repo, sorted(all_missing)))
2143
 
        problems = self._check_new_inventories()
2144
 
        if problems:
2145
 
            problems_summary = '\n'.join(problems)
2146
 
            raise errors.BzrCheckError(
2147
 
                "Cannot add revision(s) to repository: " + problems_summary)
2148
1935
        self._remove_pack_indices(self._new_pack)
2149
 
        any_new_content = False
 
1936
        should_autopack = False
2150
1937
        if self._new_pack.data_inserted():
2151
1938
            # get all the data to disk and read to use
2152
1939
            self._new_pack.finish()
2153
1940
            self.allocate(self._new_pack)
2154
1941
            self._new_pack = None
2155
 
            any_new_content = True
 
1942
            should_autopack = True
2156
1943
        else:
2157
1944
            self._new_pack.abort()
2158
1945
            self._new_pack = None
2163
1950
            self._remove_pack_from_memory(resumed_pack)
2164
1951
            resumed_pack.finish()
2165
1952
            self.allocate(resumed_pack)
2166
 
            any_new_content = True
 
1953
            should_autopack = True
2167
1954
        del self._resumed_packs[:]
2168
 
        if any_new_content:
2169
 
            result = self.autopack()
2170
 
            if not result:
 
1955
        if should_autopack:
 
1956
            if not self.autopack():
2171
1957
                # when autopack takes no steps, the names list is still
2172
1958
                # unsaved.
2173
 
                return self._save_pack_names()
2174
 
            return result
2175
 
        return []
 
1959
                self._save_pack_names()
 
1960
        self.repo._text_knit = None
2176
1961
 
2177
1962
    def _suspend_write_group(self):
2178
1963
        tokens = [pack.name for pack in self._resumed_packs]
2186
1971
            self._new_pack.abort()
2187
1972
            self._new_pack = None
2188
1973
        self._remove_resumed_pack_indices()
 
1974
        self.repo._text_knit = None
2189
1975
        return tokens
2190
1976
 
2191
1977
    def _resume_write_group(self, tokens):
2228
2014
            self._transport.clone('upload'),
2229
2015
            self._transport.clone('packs'),
2230
2016
            _format.index_builder_class,
2231
 
            _format.index_class,
2232
 
            use_chk_index=self._format.supports_chks,
2233
 
            )
 
2017
            _format.index_class)
2234
2018
        self.inventories = KnitVersionedFiles(
2235
2019
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
2236
2020
                add_callback=self._pack_collection.inventory_index.add_callback,
2240
2024
        self.revisions = KnitVersionedFiles(
2241
2025
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
2242
2026
                add_callback=self._pack_collection.revision_index.add_callback,
2243
 
                deltas=False, parents=True, is_locked=self.is_locked,
2244
 
                track_external_parent_refs=True),
 
2027
                deltas=False, parents=True, is_locked=self.is_locked),
2245
2028
            data_access=self._pack_collection.revision_index.data_access,
2246
2029
            max_delta_chain=0)
2247
2030
        self.signatures = KnitVersionedFiles(
2256
2039
                deltas=True, parents=True, is_locked=self.is_locked),
2257
2040
            data_access=self._pack_collection.text_index.data_access,
2258
2041
            max_delta_chain=200)
2259
 
        if _format.supports_chks:
2260
 
            # No graph, no compression:- references from chks are between
2261
 
            # different objects not temporal versions of the same; and without
2262
 
            # some sort of temporal structure knit compression will just fail.
2263
 
            self.chk_bytes = KnitVersionedFiles(
2264
 
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
2265
 
                    add_callback=self._pack_collection.chk_index.add_callback,
2266
 
                    deltas=False, parents=False, is_locked=self.is_locked),
2267
 
                data_access=self._pack_collection.chk_index.data_access,
2268
 
                max_delta_chain=0)
2269
 
        else:
2270
 
            self.chk_bytes = None
2271
2042
        # True when the repository object is 'write locked' (as opposed to the
2272
2043
        # physical lock only taken out around changes to the pack-names list.)
2273
2044
        # Another way to represent this would be a decorator around the control
2280
2051
        self._reconcile_fixes_text_parents = True
2281
2052
        self._reconcile_backsup_inventory = False
2282
2053
 
2283
 
    def _warn_if_deprecated(self, branch=None):
 
2054
    def _warn_if_deprecated(self):
2284
2055
        # This class isn't deprecated, but one sub-format is
2285
2056
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
2286
 
            super(KnitPackRepository, self)._warn_if_deprecated(branch)
 
2057
            from bzrlib import repository
 
2058
            if repository._deprecation_warning_done:
 
2059
                return
 
2060
            repository._deprecation_warning_done = True
 
2061
            warning("Format %s for %s is deprecated - please use"
 
2062
                    " 'bzr upgrade --1.6.1-rich-root'"
 
2063
                    % (self._format, self.bzrdir.transport.base))
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
 
-    def _get_source(self, to_format):
-        if to_format.network_name() == self._format.network_name():
-            return KnitPackStreamSource(self, to_format)
-        return super(KnitPackRepository, self)._get_source(to_format)
+    def _find_inconsistent_revision_parents(self):
+        """Find revisions with incorrectly cached parents.
+
+        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+        pb = ui.ui_factory.nested_progress_bar()
+        result = []
+        try:
+            revision_nodes = self._pack_collection.revision_index \
+                .combined_index.iter_all_entries()
+            index_positions = []
+            # Get the cached index values for all revisions, and also the location
+            # in each index of the revision text so we can perform linear IO.
+            for index, key, value, refs in revision_nodes:
+                pos, length = value[1:].split(' ')
+                index_positions.append((index, int(pos), key[0],
+                    tuple(parent[0] for parent in refs[0])))
+                pb.update("Reading revision index", 0, 0)
+            index_positions.sort()
+            batch_count = len(index_positions) / 1000 + 1
+            pb.update("Checking cached revision graph", 0, batch_count)
+            for offset in xrange(batch_count):
+                pb.update("Checking cached revision graph", offset)
+                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
+                if not to_query:
+                    break
+                rev_ids = [item[2] for item in to_query]
+                revs = self.get_revisions(rev_ids)
+                for revision, item in zip(revs, to_query):
+                    index_parents = item[3]
+                    rev_parents = tuple(revision.parent_ids)
+                    if index_parents != rev_parents:
+                        result.append((revision.revision_id, index_parents, rev_parents))
+        finally:
+            pb.finished()
+        return result
+
+    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
+    def get_parents(self, revision_ids):
+        """See graph._StackedParentsProvider.get_parents."""
+        parent_map = self.get_parent_map(revision_ids)
+        return [parent_map.get(r, None) for r in revision_ids]
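The added _find_inconsistent_revision_parents is a consistency check: it compares the parent pointers cached in the revision index with the parents recorded in the revision texts themselves, sorting by index position and querying in batches of 1000 so IO stays linear and memory stays bounded. A stripped-down sketch of the same batch-and-compare shape over plain dicts (all names here are illustrative):

# Batch-and-compare sketch: report keys whose cached parents disagree
# with the authoritative record.
def find_inconsistent(cached, authoritative, batch_size=1000):
    keys = sorted(cached)
    result = []
    for offset in range(0, len(keys), batch_size):
        for key in keys[offset:offset + batch_size]:
            index_parents = tuple(cached[key])
            real_parents = tuple(authoritative[key])
            if index_parents != real_parents:
                result.append((key, index_parents, real_parents))
    return result

cached = {'rev-2': ('rev-1',), 'rev-3': ('rev-2', 'rev-x')}
authoritative = {'rev-2': ('rev-1',), 'rev-3': ('rev-2',)}
print(find_inconsistent(cached, authoritative))
# -> [('rev-3', ('rev-2', 'rev-x'), ('rev-2',))]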
 
     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
 
     def _refresh_data(self):
-        if not self.is_locked():
-            return
-        self._pack_collection.reload_pack_names()
+        if self._write_lock_count == 1 or (
+            self.control_files._lock_count == 1 and
+            self.control_files._lock_mode == 'r'):
+            # forget what names there are
+            self._pack_collection.reset()
+            # XXX: Better to do an in-memory merge when acquiring a new lock -
+            # factor out code from _save_pack_names.
+            self._pack_collection.ensure_loaded()
 
     def _start_write_group(self):
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        hint = self._pack_collection._commit_write_group()
-        self.revisions._index._key_dependencies.clear()
-        return hint
+        return self._pack_collection._commit_write_group()
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens
 
     def _resume_write_group(self, tokens):
         self._start_write_group()
-        try:
-            self._pack_collection._resume_write_group(tokens)
-        except errors.UnresumableWriteGroup:
-            self._abort_write_group()
-            raise
-        for pack in self._pack_collection._resumed_packs:
-            self.revisions._index.scan_unvalidated_index(pack.revision_index)
+        self._pack_collection._resume_write_group(tokens)
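The removed _resume_write_group body is the more defensive shape: attempt the resume, and if it fails with UnresumableWriteGroup, abort the write group that was just started before re-raising, so no half-open group leaks. A hedged, self-contained sketch of that cleanup-on-failure pattern (the class and exception here are illustrative, not bzrlib's):

# Cleanup-on-failure sketch: if resuming fails, roll back the group we
# just opened, then let the error propagate.
class UnresumableWriteGroup(Exception):
    pass

class WriteGroupHolder(object):
    def start(self):
        self.active = True

    def abort(self):
        self.active = False

    def resume(self, tokens):
        self.start()
        try:
            self._do_resume(tokens)
        except UnresumableWriteGroup:
            self.abort()  # never leave a half-open write group behind
            raise

    def _do_resume(self, tokens):
        if not tokens:
            raise UnresumableWriteGroup("nothing to resume")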
 
     def get_transaction(self):
         if self._write_lock_count:
@@ -2340 +2153 @@
         return self._write_lock_count
 
     def lock_write(self, token=None):
-        locked = self.is_locked()
-        if not self._write_lock_count and locked:
+        if not self._write_lock_count and self.is_locked():
             raise errors.ReadOnlyError(self)
         self._write_lock_count += 1
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
-        if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
-                note('%r was write locked again', self)
-            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
-            self._refresh_data()
+        self._refresh_data()
 
     def lock_read(self):
-        locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
-        if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
-                note('%r was read locked again', self)
-            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
+                # Writes don't affect fallback repos
                 repo.lock_read()
-            self._refresh_data()
+        self._refresh_data()
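Both lock methods rest on the same bookkeeping: _write_lock_count makes the write lock re-entrant, and a read lock requested while a write lock is held just bumps that counter instead of taking a second physical lock. A toy model of the counting scheme (self-contained, illustrative names):

# Toy model of re-entrant repository locking via lock counters.
class LockCounter(object):
    def __init__(self):
        self.write_count = 0
        self.read_count = 0

    def lock_write(self):
        self.write_count += 1      # re-entrant: just count

    def lock_read(self):
        if self.write_count:
            self.write_count += 1  # the write lock also covers reads
        else:
            self.read_count += 1

    def unlock(self):
        if self.write_count:
            self.write_count -= 1
        else:
            self.read_count -= 1

    def is_locked(self):
        return bool(self.write_count or self.read_count)

lock = LockCounter()
lock.lock_write(); lock.lock_read()  # nested acquisition under the write lock
lock.unlock(); lock.unlock()
assert not lock.is_locked()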
 
     def leave_lock_in_place(self):
         # not supported - raise an error
@@ -2378 +2182 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
+        self._pack_collection.pack()
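pack is guarded by @needs_write_lock, which wraps the method so the repository is write-locked for the duration of the call and unlocked afterwards even on error. A minimal sketch of how such a decorator can be built; this illustrates the pattern rather than reproducing bzrlib's actual decorator:

# Sketch of a needs_write_lock-style decorator: take the lock, run the
# method, always unlock.
import functools

def needs_write_lock(unbound):
    @functools.wraps(unbound)
    def wrapper(self, *args, **kwargs):
        self.lock_write()
        try:
            return unbound(self, *args, **kwargs)
        finally:
            self.unlock()
    return wrapper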
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2394 +2198 @@
         reconciler.reconcile()
         return reconciler
 
-    def _reconcile_pack(self, collection, packs, extension, revs, pb):
-        packer = ReconcilePacker(collection, packs, extension, revs)
-        return packer.pack(pb)
-
-    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
@@ -2413 +2212 @@
                 transaction = self._transaction
                 self._transaction = None
                 transaction.finish()
+                for repo in self._fallback_repositories:
+                    repo.unlock()
         else:
             self.control_files.unlock()
-
-        if not self.is_locked():
             for repo in self._fallback_repositories:
                 repo.unlock()
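The removed @only_raises(errors.LockNotHeld, errors.LockBroken) line constrains which exceptions unlock may propagate, so a secondary failure during cleanup cannot mask the error that triggered it. A hedged sketch of what such a decorator can look like (illustrative, not bzrlib's exact implementation):

# Sketch of an only_raises-style decorator: exceptions outside the
# allowed types are logged and swallowed; the wrapper then returns None.
import functools
import logging

def only_raises(*allowed):
    def decorate(unbound):
        @functools.wraps(unbound)
        def wrapper(*args, **kwargs):
            try:
                return unbound(*args, **kwargs)
            except allowed:
                raise
            except Exception:
                logging.exception("error suppressed by only_raises")
        return wrapper
    return decorate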
 
 
-class KnitPackStreamSource(StreamSource):
-    """A StreamSource used to transfer data between same-format KnitPack repos.
-
-    This source assumes:
-        1) Same serialization format for all objects
-        2) Same root information
-        3) XML format inventories
-        4) Atomic inserts (so we can stream inventory texts before text
-           content)
-        5) No chk_bytes
-    """
-
-    def __init__(self, from_repository, to_format):
-        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
-        self._text_keys = None
-        self._text_fetch_order = 'unordered'
-
-    def _get_filtered_inv_stream(self, revision_ids):
-        from_repo = self.from_repository
-        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
-        parent_keys = [(p,) for p in parent_ids]
-        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
-        parent_text_keys = set(find_text_keys(
-            from_repo._inventory_xml_lines_for_keys(parent_keys)))
-        content_text_keys = set()
-        knit = KnitVersionedFiles(None, None)
-        factory = KnitPlainFactory()
-        def find_text_keys_from_content(record):
-            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
-                raise ValueError("Unknown content storage kind for"
-                    " inventory text: %s" % (record.storage_kind,))
-            # It's a knit record, it has a _raw_record field (even if it was
-            # reconstituted from a network stream).
-            raw_data = record._raw_record
-            # read the entire thing
-            revision_id = record.key[-1]
-            content, _ = knit._parse_record(revision_id, raw_data)
-            if record.storage_kind == 'knit-delta-gz':
-                line_iterator = factory.get_linedelta_content(content)
-            elif record.storage_kind == 'knit-ft-gz':
-                line_iterator = factory.get_fulltext_content(content)
-            content_text_keys.update(find_text_keys(
-                [(line, revision_id) for line in line_iterator]))
-        revision_keys = [(r,) for r in revision_ids]
-        def _filtered_inv_stream():
-            source_vf = from_repo.inventories
-            stream = source_vf.get_record_stream(revision_keys,
-                                                 'unordered', False)
-            for record in stream:
-                if record.storage_kind == 'absent':
-                    raise errors.NoSuchRevision(from_repo, record.key)
-                find_text_keys_from_content(record)
-                yield record
-            self._text_keys = content_text_keys - parent_text_keys
-        return ('inventories', _filtered_inv_stream())
-
-    def _get_text_stream(self):
-        # Note: We know we don't have to handle adding root keys, because both
-        # the source and target have the identical network name.
-        text_stream = self.from_repository.texts.get_record_stream(
-                        self._text_keys, self._text_fetch_order, False)
-        return ('texts', text_stream)
-
-    def get_stream(self, search):
-        revision_ids = search.get_keys()
-        for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
-        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        yield self._get_filtered_inv_stream(revision_ids)
-        yield self._get_text_stream()
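Note how the removed _get_filtered_inv_stream and _get_text_stream cooperate: the inventory stream is a generator that, as a side effect of being consumed, collects every text key referenced by the streamed inventories and then subtracts the keys already present in the parent inventories; self._text_keys is therefore only valid once the inventory stream has been fully drained, and get_stream yields the streams in exactly that order. A small self-contained sketch of the consume-then-read contract (illustrative names throughout):

# Sketch: a generator that computes state as a side effect; the second
# stream is only valid after the first has been drained.
class TwoPhaseSource(object):
    def __init__(self, inventories, parent_keys):
        self._inventories = inventories   # {inventory: set of text keys}
        self._parent_keys = set(parent_keys)
        self._text_keys = None

    def inventory_stream(self):
        seen = set()
        for inv, keys in sorted(self._inventories.items()):
            seen.update(keys)
            yield inv
        # Keep only keys the parents did not already have.
        self._text_keys = seen - self._parent_keys

    def text_stream(self):
        assert self._text_keys is not None, "drain inventory_stream first"
        return iter(sorted(self._text_keys))

source = TwoPhaseSource({'inv-1': {'f1', 'f2'}, 'inv-2': {'f2', 'f3'}},
                        parent_keys={'f1'})
list(source.inventory_stream())       # must be consumed first
print(list(source.text_stream()))     # ['f2', 'f3']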
-
-
-
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.
 
@@ -2523 +2249 @@
     supports_ghosts = True
     # External references are not supported in pack repositories yet.
     supports_external_lookups = False
-    # Most pack formats do not use chk lookups.
-    supports_chks = False
     # What index classes to use
     index_builder_class = None
     index_class = None
     _fetch_uses_deltas = True
-    fast_deltas = False
 
     def initialize(self, a_bzrdir, shared=False):
         """Create a pack based repository.
@@ -2546 +2269 @@
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
-        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
-        return repository
+        return self.open(a_bzrdir=a_bzrdir, _found=True)
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2603 +2324 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
+    def check_conversion_target(self, target_format):
+        pass
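check_conversion_target is the hook a format uses to veto conversions that would lose data: the base pack format accepts anything (pass), while the rich-root and subtree formats below raise BadConversionTarget when the target cannot represent their model. A compact sketch of the idea; the exception class and attributes mirror the diff, the rest is illustrative:

# Sketch of a conversion-target veto, modelled on the methods in this
# diff.  BadConversionTarget stands in for errors.BadConversionTarget.
class BadConversionTarget(Exception):
    pass

class RichRootSubtreeFormat(object):
    rich_root_data = True
    supports_tree_reference = True

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise BadConversionTarget(
                'Does not support nested trees', target_format)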
 
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2617 +2341 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2635 +2358 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2673 +2404 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2719 +2455 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2751 +2490 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2797 +2541 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2869 +2621 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2877 +2634 @@
         return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"
 
 
+class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
+    """A no-subtrees development repository.
+
+    This format should be retained until the second release after bzr 1.7.
+
+    This is pack-1.6.1 with B+Tree indices.
+    """
+
+    repository_class = KnitPackRepository
+    _commit_builder_class = PackCommitBuilder
+    supports_external_lookups = True
+    # What index classes to use
+    index_builder_class = BTreeBuilder
+    index_class = BTreeGraphIndex
+
+    @property
+    def _serializer(self):
+        return xml5.serializer_v5
+
+    def _get_matching_bzrdir(self):
+        return bzrdir.format_registry.make_bzrdir('development2')
+
+    def _ignore_setting_bzrdir(self, format):
+        pass
+
+    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+    def get_format_string(self):
+        """See RepositoryFormat.get_format_string()."""
+        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"
+
+    def get_format_description(self):
+        """See RepositoryFormat.get_format_description()."""
+        return ("Development repository format, currently the same as "
+            "1.6.1 with B+Trees.\n")
+
+    def check_conversion_target(self, target_format):
+        pass
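A small idiom recurs in all of these format classes: _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir) publishes a computed attribute whose setter deliberately does nothing, so code that assigns to it is tolerated rather than rejected. A minimal demonstration of the pattern:

# The property(getter, no-op setter) idiom behind _matchingbzrdir:
# reads are computed fresh, writes are accepted and dropped.
class Format(object):
    def _get_matching_bzrdir(self):
        return 'development2'  # stand-in for the registry lookup

    def _ignore_setting_bzrdir(self, value):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

fmt = Format()
print(fmt._matchingbzrdir)    # 'development2'
fmt._matchingbzrdir = 'xyz'   # no error, no effect
print(fmt._matchingbzrdir)    # still 'development2'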
 
+
+
 class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
     """A subtrees development repository.
 
     This format should be retained until the second release after bzr 1.7.
 
     1.6.1-subtree [as it might have been] with B+Tree indices.
-
-    This is [now] retained until we have a CHK based subtree format in
-    development.
     """
 
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
@@ -2904 +2697 @@
 
     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir(
-            'development-subtree')
+            'development2-subtree')
 
     def _ignore_setting_bzrdir(self, format):
         pass
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "
@@ -2920 +2721 @@
         """See RepositoryFormat.get_format_description()."""
         return ("Development repository format, currently the same as "
             "1.6.1-subtree with B+Tree indices.\n")