/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-05-28 14:25:00 UTC
  • mfrom: (4354.4.9 commit-preview)
  • Revision ID: pqm@pqm.ubuntu.com-20090528142500-n7ki7gucmkxzx611
(abentley) move get_file_with_stat from MutableTree to Tree.

=== modified file 'bzrlib/repofmt/pack_repo.py'
@@ -1,4 +1,4 @@
-# Copyright (C) 2007-2010 Canonical Ltd
+# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,7 +24,6 @@
 
 from bzrlib import (
     chk_map,
-    cleanup,
     debug,
     graph,
     osutils,
@@ -37,7 +36,10 @@
     )
 from bzrlib.index import (
     CombinedGraphIndex,
+    GraphIndex,
+    GraphIndexBuilder,
     GraphIndexPrefixAdapter,
+    InMemoryGraphIndex,
     )
 from bzrlib.knit import (
     KnitPlainFactory,
@@ -53,9 +55,10 @@
     lockable_files,
     lockdir,
     revision as _mod_revision,
+    symbol_versioning,
     )
 
-from bzrlib.decorators import needs_write_lock, only_raises
+from bzrlib.decorators import needs_write_lock
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -69,13 +72,11 @@
     CommitBuilder,
     MetaDirRepositoryFormat,
     RepositoryFormat,
-    RepositoryWriteLockResult,
     RootCommitBuilder,
-    StreamSource,
     )
+import bzrlib.revision as _mod_revision
 from bzrlib.trace import (
     mutter,
-    note,
     warning,
     )
 
@@ -227,14 +228,10 @@
         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
-        unlimited_cache = False
-        if index_type == 'chk':
-            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+                self.index_sizes[self.index_offset(index_type)]))
 
 
 class ExistingPack(Pack):
@@ -271,11 +268,10 @@
 
     def __init__(self, name, revision_index, inventory_index, text_index,
         signature_index, upload_transport, pack_transport, index_transport,
-        pack_collection, chk_index=None):
+        pack_collection):
         """Create a ResumedPack object."""
         ExistingPack.__init__(self, pack_transport, name, revision_index,
-            inventory_index, text_index, signature_index,
-            chk_index=chk_index)
+            inventory_index, text_index, signature_index)
         self.upload_transport = upload_transport
         self.index_transport = index_transport
         self.index_sizes = [None, None, None, None]
@@ -285,9 +281,6 @@
             ('text', text_index),
             ('signature', signature_index),
             ]
-        if chk_index is not None:
-            indices.append(('chk', chk_index))
-            self.index_sizes.append(None)
         for index_type, index in indices:
             offset = self.index_offset(index_type)
             self.index_sizes[offset] = index._size
@@ -308,31 +301,21 @@
         self.upload_transport.delete(self.file_name())
         indices = [self.revision_index, self.inventory_index, self.text_index,
             self.signature_index]
-        if self.chk_index is not None:
-            indices.append(self.chk_index)
         for index in indices:
             index._transport.delete(index._name)
 
     def finish(self):
         self._check_references()
-        index_types = ['revision', 'inventory', 'text', 'signature']
-        if self.chk_index is not None:
-            index_types.append('chk')
-        for index_type in index_types:
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
+        for index_type in ['revision', 'inventory', 'text', 'signature']:
             old_name = self.index_name(index_type, self.name)
             new_name = '../indices/' + old_name
             self.upload_transport.rename(old_name, new_name)
             self._replace_index_with_readonly(index_type)
-        new_name = '../packs/' + self.file_name()
-        self.upload_transport.rename(self.file_name(), new_name)
         self._state = 'finished'
 
     def _get_external_refs(self, index):
-        """Return compression parents for this index that are not present.
-
-        This returns any compression parents that are referenced by this index,
-        which are not contained *in* this index. They may be present elsewhere.
-        """
         return index.external_references(1)
 
 
@@ -429,8 +412,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -457,14 +438,6 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))
 
-    def finish_content(self):
-        if self.name is not None:
-            return
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
-
     def finish(self, suspend=False):
         """Finish the new pack.
 
@@ -476,7 +449,10 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self.finish_content()
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
         if not suspend:
             self._check_references()
         # write indices
@@ -588,6 +564,26 @@
                                              flush_func=flush_func)
         self.add_callback = None
 
+    def replace_indices(self, index_to_pack, indices):
+        """Replace the current mappings with fresh ones.
+
+        This should probably not be used eventually, rather incremental add and
+        removal of indices. It has been added during refactoring of existing
+        code.
+
+        :param index_to_pack: A mapping from index objects to
+            (transport, name) tuples for the pack file data.
+        :param indices: A list of indices.
+        """
+        # refresh the revision pack map dict without replacing the instance.
+        self.index_to_pack.clear()
+        self.index_to_pack.update(index_to_pack)
+        # XXX: API break - clearly a 'replace' method would be good?
+        self.combined_index._indices[:] = indices
+        # the current add nodes callback for the current writable index if
+        # there is one.
+        self.add_callback = None
+
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
 
@@ -600,7 +596,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index, pack.name)
+        self.combined_index.insert_index(0, index)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -626,18 +622,16 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
-        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index):
+    def remove_index(self, index, pack):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
+        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        pos = self.combined_index._indices.index(index)
-        del self.combined_index._indices[pos]
-        del self.combined_index._index_names[pos]
+        self.combined_index._indices.remove(index)
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
@@ -1101,7 +1095,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the node retrieved by the
+            (key, eol_flag, references) for the the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1298,7 +1292,7 @@
         # space (we only topo sort the revisions, which is smaller).
         topo_order = tsort.topo_sort(ancestors)
         rev_order = dict(zip(topo_order, range(len(topo_order))))
-        bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
+        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
         transaction = repo.get_transaction()
         file_id_index = GraphIndexPrefixAdapter(
             self.new_pack.text_index,
@@ -1358,7 +1352,6 @@
     """
 
     pack_factory = NewPack
-    resumed_pack_factory = ResumedPack
 
     def __init__(self, repo, transport, index_transport, upload_transport,
                  pack_transport, index_builder_class, index_class,
@@ -1399,26 +1392,14 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
-        all_indices = [self.revision_index, self.inventory_index,
-                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
-            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
-        # Tell all the CombinedGraphIndex objects about each other, so they can
-        # share hints about which pack names to search first.
-        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
-        for combined_idx in all_combined:
-            combined_idx.set_sibling_indices(
-                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, self.repo)
-
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
@@ -1462,12 +1443,12 @@
         in synchronisation with certain steps. Otherwise the names collection
         is not flushed.
 
-        :return: Something evaluating true if packing took place.
+        :return: True if packing took place.
         """
         while True:
             try:
                 return self._do_autopack()
-            except errors.RetryAutopack:
+            except errors.RetryAutopack, e:
                 # If we get a RetryAutopack exception, we should abort the
                 # current action, and retry.
                 pass
@@ -1477,7 +1458,7 @@
         total_revisions = self.revision_index.combined_index.key_count()
         total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
-            return None
+            return False
         # determine which packs need changing
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
@@ -1505,10 +1486,10 @@
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        result = self._execute_pack_operations(pack_operations,
+        self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
         mutter('Auto-packing repository %s completed', self)
-        return result
+        return True
 
     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
@@ -1516,7 +1497,7 @@
 
         :param pack_operations: A list of [revision_count, packs_to_combine].
         :param _packer_class: The class of packer to use (default: Packer).
-        :return: The new pack names.
+        :return: None.
         """
         for revision_count, packs in pack_operations:
             # we may have no-ops from the setup logic
@@ -1538,12 +1519,10 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)
 
     def _flush_new_pack(self):
         if self._new_pack is not None:
@@ -1559,33 +1538,31 @@
 
     def _already_packed(self):
         """Is the collection already packed?"""
-        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
+        return len(self._names) < 2
 
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
         if self._already_packed():
+            # This is arguably wrong because we might not be optimal, but for
+            # now lets leave it in. (e.g. reconcile -> one pack. But not
+            # optimal.
             return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions with hint %r.', self, total_packs,
-            total_revisions, hint)
+            'containing %d revisions into 1 packs.', self, total_packs,
+            total_revisions)
         # determine which packs need changing
+        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if hint is None or pack.name in hint:
-                # Either no hint was provided (so we are packing everything),
-                # or this pack was included in the hint.
-                pack_operations[-1][0] += pack.get_revision_count()
-                pack_operations[-1][1].append(pack)
+            pack_operations[-1][0] += pack.get_revision_count()
+            pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
-        if clean_obsolete_packs:
-            self._clear_obsolete_packs()
-
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
 
@@ -1679,7 +1656,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix')
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1703,15 +1680,9 @@
             inv_index = self._make_index(name, '.iix', resume=True)
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
-            if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
-            else:
-                chk_index = None
-            result = self.resumed_pack_factory(name, rev_index, inv_index,
-                txt_index, sig_index, self._upload_transport,
-                self._pack_transport, self._index_transport, self,
-                chk_index=chk_index)
+            result = ResumedPack(name, rev_index, inv_index, txt_index,
+                sig_index, self._upload_transport, self._pack_transport,
+                self._index_transport, self)
         except errors.NoSuchFile, e:
             raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
         self.add_pack_to_memory(result)
@@ -1741,7 +1712,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1750,8 +1721,7 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        return self._index_class(transport, index_name, index_size)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1785,13 +1755,8 @@
         :param return: None.
         """
         for pack in packs:
-            try:
-                pack.pack_transport.rename(pack.file_name(),
-                    '../obsolete_packs/' + pack.file_name())
-            except (errors.PathError, errors.TransportError), e:
-                # TODO: Should these be warnings or mutters?
-                mutter("couldn't rename obsolete pack, skipping it:\n%s"
-                       % (e,))
+            pack.pack_transport.rename(pack.file_name(),
+                '../obsolete_packs/' + pack.file_name())
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1799,12 +1764,8 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                try:
-                    self._index_transport.rename(pack.name + suffix,
-                        '../obsolete_packs/' + pack.name + suffix)
-                except (errors.PathError, errors.TransportError), e:
-                    mutter("couldn't rename obsolete index, skipping it:\n%s"
-                           % (e,))
+                self._index_transport.rename(pack.name + suffix,
+                    '../obsolete_packs/' + pack.name + suffix)
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1836,31 +1797,26 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack, ignore_missing=False):
-        """Remove the indices for pack from the aggregated indices.
-
-        :param ignore_missing: Suppress KeyErrors from calling remove_index.
-        """
-        for index_type in Pack.index_definitions.keys():
-            attr_name = index_type + '_index'
-            aggregate_index = getattr(self, attr_name)
-            if aggregate_index is not None:
-                pack_index = getattr(pack, attr_name)
-                try:
-                    aggregate_index.remove_index(pack_index)
-                except KeyError:
-                    if ignore_missing:
-                        continue
-                    raise
+    def _remove_pack_indices(self, pack):
+        """Remove the indices for pack from the aggregated indices."""
+        self.revision_index.remove_index(pack.revision_index, pack)
+        self.inventory_index.remove_index(pack.inventory_index, pack)
+        self.text_index.remove_index(pack.text_index, pack)
+        self.signature_index.remove_index(pack.signature_index, pack)
+        if self.chk_index is not None:
+            self.chk_index.remove_index(pack.chk_index, pack)
 
     def reset(self):
         """Clear all cached data."""
         # cached revision data
+        self.repo._revision_knit = None
         self.revision_index.clear()
         # cached signature data
+        self.repo._signature_knit = None
         self.signature_index.clear()
         # cached file text data
         self.text_index.clear()
+        self.repo._text_knit = None
         # cached inventory data
         self.inventory_index.clear()
         # cached chk data
@@ -1890,7 +1846,6 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
-        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1909,7 +1864,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
+        return disk_nodes, deleted_nodes, new_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1955,7 +1910,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
+    def _save_pack_names(self, clear_obsolete_packs=False):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1965,16 +1920,11 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
-        :param obsolete_packs: Packs that are obsolete once the new pack-names
-            file has been written.
-        :return: A list of the names saved that were not previously on disk.
         """
-        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            (disk_nodes, deleted_nodes, new_nodes,
-             orig_disk_nodes) = self._diff_pack_names()
+            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1982,26 +1932,14 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
+            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                to_preserve = None
-                if obsolete_packs:
-                    to_preserve = set([o.name for o in obsolete_packs])
-                already_obsolete = self._clear_obsolete_packs(to_preserve)
+                self._clear_obsolete_packs()
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
-        if obsolete_packs:
-            # TODO: We could add one more condition here. "if o.name not in
-            #       orig_disk_nodes and o != the new_pack we haven't written to
-            #       disk yet. However, the new pack object is not easily
-            #       accessible here (it would have to be passed through the
-            #       autopacking code, etc.)
-            obsolete_packs = [o for o in obsolete_packs
-                              if o.name not in already_obsolete]
-            self._obsolete_packs(obsolete_packs)
-        return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
@@ -2021,12 +1959,8 @@
         if first_read:
             return True
         # out the new value.
-        (disk_nodes, deleted_nodes, new_nodes,
-         orig_disk_nodes) = self._diff_pack_names()
-        # _packs_at_load is meant to be the explicit list of names in
-        # 'pack-names' at then start. As such, it should not contain any
-        # pending names that haven't been written out yet.
-        self._packs_at_load = orig_disk_nodes
+        disk_nodes, _, _ = self._diff_pack_names()
+        self._packs_at_load = disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -2041,28 +1975,15 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self, preserve=None):
+    def _clear_obsolete_packs(self):
         """Delete everything from the obsolete-packs directory.
-
-        :return: A list of pack identifiers (the filename without '.pack') that
-            were found in obsolete_packs.
         """
-        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
-        if preserve is None:
-            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
-            name, ext = osutils.splitext(filename)
-            if ext == '.pack':
-                found.append(name)
-            if name in preserve:
-                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s"
-                        % (e,))
-        return found
+                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
@@ -2095,38 +2016,32 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
-            operation.add_cleanup(setattr, self, '_new_pack', None)
-            # If we aborted while in the middle of finishing the write
-            # group, _remove_pack_indices could fail because the indexes are
-            # already gone.  But they're not there we shouldn't fail in this
-            # case, so we pass ignore_missing=True.
-            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                self._new_pack.abort()
+            finally:
+                # XXX: If we aborted while in the middle of finishing the write
+                # group, _remove_pack_indices can fail because the indexes are
+                # already gone.  If they're not there we shouldn't fail in this
+                # case.  -- mbp 20081113
+                self._remove_pack_indices(self._new_pack)
+                self._new_pack = None
         for resumed_pack in self._resumed_packs:
-            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
-            # See comment in previous finally block.
-            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                resumed_pack.abort()
+            finally:
+                # See comment in previous finally block.
+                try:
+                    self._remove_pack_indices(resumed_pack)
+                except KeyError:
+                    pass
         del self._resumed_packs[:]
+        self.repo._text_knit = None
 
     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
 
-    def _check_new_inventories(self):
-        """Detect missing inventories in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
-        """
-        # The base implementation does no checks.  GCRepositoryPackCollection
-        # overrides this.
-        return []
-
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2141,19 +2056,14 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
-        problems = self._check_new_inventories()
-        if problems:
-            problems_summary = '\n'.join(problems)
-            raise errors.BzrCheckError(
-                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        any_new_content = False
+        should_autopack = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            any_new_content = True
+            should_autopack = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2164,16 +2074,14 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            any_new_content = True
+            should_autopack = True
         del self._resumed_packs[:]
-        if any_new_content:
-            result = self.autopack()
-            if not result:
+        if should_autopack:
+            if not self.autopack():
                 # when autopack takes no steps, the names list is still
                 # unsaved.
-                return self._save_pack_names()
-            return result
-        return []
+                self._save_pack_names()
+        self.repo._text_knit = None
 
     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2187,6 +2095,7 @@
             self._new_pack.abort()
             self._new_pack = None
         self._remove_resumed_pack_indices()
+        self.repo._text_knit = None
         return tokens
 
     def _resume_write_group(self, tokens):
@@ -2281,19 +2190,65 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
 
-    def _warn_if_deprecated(self, branch=None):
+    def _warn_if_deprecated(self):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            super(KnitPackRepository, self)._warn_if_deprecated(branch)
+            from bzrlib import repository
+            if repository._deprecation_warning_done:
+                return
+            repository._deprecation_warning_done = True
+            warning("Format %s for %s is deprecated - please use"
+                    " 'bzr upgrade --1.6.1-rich-root'"
+                    % (self._format, self.bzrdir.transport.base))
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
 
-    def _get_source(self, to_format):
-        if to_format.network_name() == self._format.network_name():
-            return KnitPackStreamSource(self, to_format)
-        return super(KnitPackRepository, self)._get_source(to_format)
+    def _find_inconsistent_revision_parents(self):
+        """Find revisions with incorrectly cached parents.
+
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+        pb = ui.ui_factory.nested_progress_bar()
+        result = []
+        try:
+            revision_nodes = self._pack_collection.revision_index \
+                .combined_index.iter_all_entries()
+            index_positions = []
+            # Get the cached index values for all revisions, and also the
+            # location in each index of the revision text so we can perform
+            # linear IO.
+            for index, key, value, refs in revision_nodes:
+                node = (index, key, value, refs)
+                index_memo = self.revisions._index._node_to_position(node)
+                if index_memo[0] != index:
+                    raise AssertionError('%r != %r' % (index_memo[0], index))
+                index_positions.append((index_memo, key[0],
+                                       tuple(parent[0] for parent in refs[0])))
+                pb.update("Reading revision index", 0, 0)
+            index_positions.sort()
+            batch_size = 1000
+            pb.update("Checking cached revision graph", 0,
+                      len(index_positions))
+            for offset in xrange(0, len(index_positions), 1000):
+                pb.update("Checking cached revision graph", offset)
+                to_query = index_positions[offset:offset + batch_size]
+                if not to_query:
+                    break
+                rev_ids = [item[1] for item in to_query]
+                revs = self.get_revisions(rev_ids)
+                for revision, item in zip(revs, to_query):
+                    index_parents = item[2]
+                    rev_parents = tuple(revision.parent_ids)
+                    if index_parents != rev_parents:
+                        result.append((revision.revision_id, index_parents,
+                                       rev_parents))
+        finally:
+            pb.finished()
+        return result
 
     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
@@ -2307,24 +2262,17 @@
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        hint = self._pack_collection._commit_write_group()
-        self.revisions._index._key_dependencies.clear()
-        return hint
+        return self._pack_collection._commit_write_group()
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens
 
     def _resume_write_group(self, tokens):
         self._start_write_group()
-        try:
-            self._pack_collection._resume_write_group(tokens)
-        except errors.UnresumableWriteGroup:
-            self._abort_write_group()
-            raise
+        self._pack_collection._resume_write_group(tokens)
         for pack in self._pack_collection._resumed_packs:
             self.revisions._index.scan_unvalidated_index(pack.revision_index)
 
@@ -2347,15 +2295,11 @@
         self._write_lock_count += 1
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
-        if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
-                note('%r was write locked again', self)
-            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
+        if not locked:
             self._refresh_data()
-        return RepositoryWriteLockResult(self.unlock, None)
 
     def lock_read(self):
         locked = self.is_locked()
@@ -2363,14 +2307,11 @@
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
-        if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
-                note('%r was read locked again', self)
-            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
+                # Writes don't affect fallback repos
                 repo.lock_read()
+        if not locked:
             self._refresh_data()
-        return self
 
     def leave_lock_in_place(self):
         # not supported - raise an error
@@ -2381,13 +2322,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
+        self._pack_collection.pack()
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2401,7 +2342,6 @@
         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)
 
-    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
@@ -2416,87 +2356,14 @@
                 transaction = self._transaction
                 self._transaction = None
                 transaction.finish()
+                for repo in self._fallback_repositories:
+                    repo.unlock()
         else:
             self.control_files.unlock()
-
-        if not self.is_locked():
             for repo in self._fallback_repositories:
                 repo.unlock()
 
 
-class KnitPackStreamSource(StreamSource):
-    """A StreamSource used to transfer data between same-format KnitPack repos.
-
-    This source assumes:
-        1) Same serialization format for all objects
-        2) Same root information
-        3) XML format inventories
-        4) Atomic inserts (so we can stream inventory texts before text
-           content)
-        5) No chk_bytes
-    """
-
-    def __init__(self, from_repository, to_format):
-        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
-        self._text_keys = None
-        self._text_fetch_order = 'unordered'
-
-    def _get_filtered_inv_stream(self, revision_ids):
-        from_repo = self.from_repository
-        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
-        parent_keys = [(p,) for p in parent_ids]
-        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
-        parent_text_keys = set(find_text_keys(
-            from_repo._inventory_xml_lines_for_keys(parent_keys)))
-        content_text_keys = set()
-        knit = KnitVersionedFiles(None, None)
-        factory = KnitPlainFactory()
-        def find_text_keys_from_content(record):
-            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
-                raise ValueError("Unknown content storage kind for"
-                    " inventory text: %s" % (record.storage_kind,))
-            # It's a knit record, it has a _raw_record field (even if it was
-            # reconstituted from a network stream).
-            raw_data = record._raw_record
-            # read the entire thing
-            revision_id = record.key[-1]
-            content, _ = knit._parse_record(revision_id, raw_data)
-            if record.storage_kind == 'knit-delta-gz':
-                line_iterator = factory.get_linedelta_content(content)
-            elif record.storage_kind == 'knit-ft-gz':
-                line_iterator = factory.get_fulltext_content(content)
-            content_text_keys.update(find_text_keys(
-                [(line, revision_id) for line in line_iterator]))
-        revision_keys = [(r,) for r in revision_ids]
-        def _filtered_inv_stream():
-            source_vf = from_repo.inventories
-            stream = source_vf.get_record_stream(revision_keys,
-                                                 'unordered', False)
-            for record in stream:
-                if record.storage_kind == 'absent':
-                    raise errors.NoSuchRevision(from_repo, record.key)
-                find_text_keys_from_content(record)
-                yield record
-            self._text_keys = content_text_keys - parent_text_keys
-        return ('inventories', _filtered_inv_stream())
-
-    def _get_text_stream(self):
-        # Note: We know we don't have to handle adding root keys, because both
-        # the source and target are the identical network name.
-        text_stream = self.from_repository.texts.get_record_stream(
-                        self._text_keys, self._text_fetch_order, False)
-        return ('texts', text_stream)
-
-    def get_stream(self, search):
-        revision_ids = search.get_keys()
-        for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
-        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        yield self._get_filtered_inv_stream(revision_ids)
-        yield self._get_text_stream()
-
-
-
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.
 
@@ -2549,9 +2416,7 @@
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
-        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
-        return repository
+        return self.open(a_bzrdir=a_bzrdir, _found=True)
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2606,6 +2471,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2620,7 +2488,6 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2638,6 +2505,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2676,6 +2551,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2722,6 +2602,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2754,6 +2637,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2800,6 +2688,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2843,6 +2736,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2872,6 +2768,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2894,7 +2795,6 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
@@ -2914,6 +2814,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "