/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Ian Clatworthy
  • Date: 2009-06-10 23:29:48 UTC
  • mfrom: (4423.2.2 eol-none-bug)
  • mto: This revision was merged to the branch mainline in revision 4428.
  • Revision ID: ian.clatworthy@canonical.com-20090610232948-srfxg31kurqa769c
(igc) fix rule handling so that eol is optional

@@ -1,4 +1,4 @@
-# Copyright (C) 2007-2010 Canonical Ltd
+# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,7 +24,6 @@
 
 from bzrlib import (
     chk_map,
-    cleanup,
     debug,
     graph,
     osutils,
@@ -37,7 +36,10 @@
     )
 from bzrlib.index import (
     CombinedGraphIndex,
+    GraphIndex,
+    GraphIndexBuilder,
     GraphIndexPrefixAdapter,
+    InMemoryGraphIndex,
     )
 from bzrlib.knit import (
     KnitPlainFactory,
@@ -53,9 +55,10 @@
     lockable_files,
     lockdir,
     revision as _mod_revision,
+    symbol_versioning,
     )
 
-from bzrlib.decorators import needs_write_lock, only_raises
+from bzrlib.decorators import needs_write_lock
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -70,11 +73,10 @@
     MetaDirRepositoryFormat,
     RepositoryFormat,
     RootCommitBuilder,
-    StreamSource,
     )
+import bzrlib.revision as _mod_revision
 from bzrlib.trace import (
     mutter,
-    note,
     warning,
     )
 
@@ -226,14 +228,10 @@
         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
-        unlimited_cache = False
-        if index_type == 'chk':
-            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+                self.index_sizes[self.index_offset(index_type)]))
 
 
 class ExistingPack(Pack):
@@ -314,6 +312,8 @@
 
     def finish(self):
         self._check_references()
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
         index_types = ['revision', 'inventory', 'text', 'signature']
         if self.chk_index is not None:
             index_types.append('chk')
@@ -322,8 +322,6 @@
             new_name = '../indices/' + old_name
             self.upload_transport.rename(old_name, new_name)
             self._replace_index_with_readonly(index_type)
-        new_name = '../packs/' + self.file_name()
-        self.upload_transport.rename(self.file_name(), new_name)
         self._state = 'finished'
 
     def _get_external_refs(self, index):
@@ -428,8 +426,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -456,14 +452,6 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))
 
-    def finish_content(self):
-        if self.name is not None:
-            return
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
-
     def finish(self, suspend=False):
         """Finish the new pack.
 
@@ -475,7 +463,10 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self.finish_content()
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
         if not suspend:
             self._check_references()
         # write indices
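One side of the hunks above factors the final "seal the content" steps of finish() into a separate finish_content() method that is safe to call more than once, guarded by whether self.name has already been computed. Below is a minimal standalone sketch of that pattern; it is not bzrlib code, and SketchPack and its attributes are illustrative stand-ins.

import hashlib

class SketchPack(object):
    """Hypothetical stand-in for bzrlib's NewPack (names are illustrative)."""

    def __init__(self):
        self._hash = hashlib.md5()
        self._buffered = b''   # bytes not yet hashed
        self.name = None       # no name until content is finished

    def write(self, data):
        self._buffered += data

    def _flush(self):
        self._hash.update(self._buffered)
        self._buffered = b''

    def finish_content(self):
        if self.name is not None:
            return             # already finished; safe to call again
        if self._buffered:
            self._flush()
        self.name = self._hash.hexdigest()

    def finish(self, suspend=False):
        self.finish_content()  # idempotent, so finish() stays cheap to re-run
        if not suspend:
            pass               # reference checks would happen here

pack = SketchPack()
pack.write(b'some pack bytes')
pack.finish()
assert pack.name is not None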
@@ -587,6 +578,26 @@
                                              flush_func=flush_func)
         self.add_callback = None
 
+    def replace_indices(self, index_to_pack, indices):
+        """Replace the current mappings with fresh ones.
+
+        This should probably not be used eventually, rather incremental add and
+        removal of indices. It has been added during refactoring of existing
+        code.
+
+        :param index_to_pack: A mapping from index objects to
+            (transport, name) tuples for the pack file data.
+        :param indices: A list of indices.
+        """
+        # refresh the revision pack map dict without replacing the instance.
+        self.index_to_pack.clear()
+        self.index_to_pack.update(index_to_pack)
+        # XXX: API break - clearly a 'replace' method would be good?
+        self.combined_index._indices[:] = indices
+        # the current add nodes callback for the current writable index if
+        # there is one.
+        self.add_callback = None
+
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
 
@@ -599,7 +610,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index, pack.name)
+        self.combined_index.insert_index(0, index)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -625,18 +636,16 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
-        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index):
+    def remove_index(self, index, pack):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
+        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        pos = self.combined_index._indices.index(index)
-        del self.combined_index._indices[pos]
-        del self.combined_index._index_names[pos]
+        self.combined_index._indices.remove(index)
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
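The remove_index() hunk above also shows why the signatures differ: one side keeps a parallel _index_names list next to _indices, so removal must find the position once and delete from both lists to keep them in step, while the other side can simply call list.remove(). A standalone sketch of the parallel-list variant, with illustrative names (not bzrlib):

class CombinedIndexSketch(object):
    def __init__(self):
        self._indices = []
        self._index_names = []

    def insert_index(self, pos, index, name=None):
        self._indices.insert(pos, index)
        self._index_names.insert(pos, name)

    def remove_index(self, index):
        pos = self._indices.index(index)   # single lookup serves both lists
        del self._indices[pos]
        del self._index_names[pos]

combined = CombinedIndexSketch()
combined.insert_index(0, 'rev-index-a', 'pack-a')
combined.insert_index(0, 'rev-index-b', 'pack-b')
combined.remove_index('rev-index-a')
assert combined._indices == ['rev-index-b']
assert combined._index_names == ['pack-b']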
@@ -1100,7 +1109,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the node retrieved by the
+            (key, eol_flag, references) for the the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1398,26 +1407,14 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
-        all_indices = [self.revision_index, self.inventory_index,
-                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
-            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
-        # Tell all the CombinedGraphIndex objects about each other, so they can
-        # share hints about which pack names to search first.
-        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
-        for combined_idx in all_combined:
-            combined_idx.set_sibling_indices(
-                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, self.repo)
-
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
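The removed block above wires every CombinedGraphIndex up with the set of all its siblings, so search-order hints learned by one index family can be shared with the others. A standalone sketch of that wiring (not bzrlib; class and attribute names are illustrative):

class CombinedIndexSketch(object):
    def __init__(self, name):
        self.name = name
        self.siblings = set()

    def set_sibling_indices(self, siblings):
        self.siblings = siblings

all_combined = [CombinedIndexSketch(n)
                for n in ('revision', 'inventory', 'text', 'signature')]
for combined_idx in all_combined:
    combined_idx.set_sibling_indices(
        set(all_combined).difference([combined_idx]))

assert all(len(idx.siblings) == 3 for idx in all_combined)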
@@ -1461,12 +1458,12 @@
         in synchronisation with certain steps. Otherwise the names collection
         is not flushed.
 
-        :return: Something evaluating true if packing took place.
+        :return: True if packing took place.
         """
         while True:
             try:
                 return self._do_autopack()
-            except errors.RetryAutopack:
+            except errors.RetryAutopack, e:
                 # If we get a RetryAutopack exception, we should abort the
                 # current action, and retry.
                 pass
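Both sides of the autopack() hunk implement the same retry loop; they differ only in the except clause. The `except errors.RetryAutopack, e:` spelling is the old Python 2 syntax and binds an exception object that is never used; the bare `except errors.RetryAutopack:` form is equivalent here. A runnable standalone sketch of the loop (RetryAutopack below is a stand-in, not bzrlib.errors):

class RetryAutopack(Exception):
    """Hypothetical stand-in for bzrlib.errors.RetryAutopack."""

def autopack(do_autopack):
    """Keep retrying do_autopack() until it finishes without interruption."""
    while True:
        try:
            return do_autopack()
        except RetryAutopack:
            # Abort the current attempt and retry from the top.
            pass

attempts = []

def flaky_autopack():
    attempts.append(1)
    if len(attempts) < 3:
        raise RetryAutopack()
    return True

assert autopack(flaky_autopack) is True
assert len(attempts) == 3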
@@ -1476,7 +1473,7 @@
         total_revisions = self.revision_index.combined_index.key_count()
         total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
-            return None
+            return False
         # determine which packs need changing
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
@@ -1504,10 +1501,10 @@
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        result = self._execute_pack_operations(pack_operations,
+        self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
         mutter('Auto-packing repository %s completed', self)
-        return result
+        return True
 
     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
@@ -1515,7 +1512,7 @@
 
         :param pack_operations: A list of [revision_count, packs_to_combine].
         :param _packer_class: The class of packer to use (default: Packer).
-        :return: The new pack names.
+        :return: None.
         """
         for revision_count, packs in pack_operations:
             # we may have no-ops from the setup logic
@@ -1537,12 +1534,10 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)
 
     def _flush_new_pack(self):
         if self._new_pack is not None:
@@ -1558,33 +1553,31 @@
 
     def _already_packed(self):
         """Is the collection already packed?"""
-        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
+        return len(self._names) < 2
 
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
         if self._already_packed():
+            # This is arguably wrong because we might not be optimal, but for
+            # now lets leave it in. (e.g. reconcile -> one pack. But not
+            # optimal.
            return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions with hint %r.', self, total_packs,
-            total_revisions, hint)
+            'containing %d revisions into 1 packs.', self, total_packs,
+            total_revisions)
         # determine which packs need changing
+        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if hint is None or pack.name in hint:
-                # Either no hint was provided (so we are packing everything),
-                # or this pack was included in the hint.
-                pack_operations[-1][0] += pack.get_revision_count()
-                pack_operations[-1][1].append(pack)
+            pack_operations[-1][0] += pack.get_revision_count()
+            pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
-        if clean_obsolete_packs:
-            self._clear_obsolete_packs()
-
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
 
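The pack() hunk above differs in whether a hint of pack names can restrict what gets repacked: with hint=None every pack is selected, otherwise only packs whose names appear in the hint. A standalone sketch of that selection logic (illustrative names, not bzrlib):

def select_packs(all_pack_names, hint=None):
    """Return the pack names that pack() would operate on."""
    selected = []
    for name in all_pack_names:
        if hint is None or name in hint:
            # Either no hint was provided (pack everything), or this pack
            # was explicitly included in the hint.
            selected.append(name)
    return selected

names = ['pack-a', 'pack-b', 'pack-c']
assert select_packs(names) == names
assert select_packs(names, hint=['pack-b']) == ['pack-b']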
@@ -1678,7 +1671,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix')
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1703,8 +1696,7 @@
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix', resume=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1740,7 +1732,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1749,8 +1741,7 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        return self._index_class(transport, index_name, index_size)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1784,13 +1775,8 @@
         :param return: None.
         """
         for pack in packs:
-            try:
-                pack.pack_transport.rename(pack.file_name(),
-                    '../obsolete_packs/' + pack.file_name())
-            except (errors.PathError, errors.TransportError), e:
-                # TODO: Should these be warnings or mutters?
-                mutter("couldn't rename obsolete pack, skipping it:\n%s"
-                       % (e,))
+            pack.pack_transport.rename(pack.file_name(),
+                '../obsolete_packs/' + pack.file_name())
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1798,12 +1784,8 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                try:
-                    self._index_transport.rename(pack.name + suffix,
-                        '../obsolete_packs/' + pack.name + suffix)
-                except (errors.PathError, errors.TransportError), e:
-                    mutter("couldn't rename obsolete index, skipping it:\n%s"
-                           % (e,))
+                self._index_transport.rename(pack.name + suffix,
+                    '../obsolete_packs/' + pack.name + suffix)
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1835,22 +1817,14 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack, ignore_missing=False):
-        """Remove the indices for pack from the aggregated indices.
-
-        :param ignore_missing: Suppress KeyErrors from calling remove_index.
-        """
-        for index_type in Pack.index_definitions.keys():
-            attr_name = index_type + '_index'
-            aggregate_index = getattr(self, attr_name)
-            if aggregate_index is not None:
-                pack_index = getattr(pack, attr_name)
-                try:
-                    aggregate_index.remove_index(pack_index)
-                except KeyError:
-                    if ignore_missing:
-                        continue
-                    raise
+    def _remove_pack_indices(self, pack):
+        """Remove the indices for pack from the aggregated indices."""
+        self.revision_index.remove_index(pack.revision_index, pack)
+        self.inventory_index.remove_index(pack.inventory_index, pack)
+        self.text_index.remove_index(pack.text_index, pack)
+        self.signature_index.remove_index(pack.signature_index, pack)
+        if self.chk_index is not None:
+            self.chk_index.remove_index(pack.chk_index, pack)
 
     def reset(self):
         """Clear all cached data."""
@@ -1889,7 +1863,6 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
-        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1908,7 +1881,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
+        return disk_nodes, deleted_nodes, new_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1954,7 +1927,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
+    def _save_pack_names(self, clear_obsolete_packs=False):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1964,16 +1937,11 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
-        :param obsolete_packs: Packs that are obsolete once the new pack-names
-            file has been written.
-        :return: A list of the names saved that were not previously on disk.
         """
-        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            (disk_nodes, deleted_nodes, new_nodes,
-             orig_disk_nodes) = self._diff_pack_names()
+            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1981,26 +1949,14 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
+            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                to_preserve = None
-                if obsolete_packs:
-                    to_preserve = set([o.name for o in obsolete_packs])
-                already_obsolete = self._clear_obsolete_packs(to_preserve)
+                self._clear_obsolete_packs()
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
-        if obsolete_packs:
-            # TODO: We could add one more condition here. "if o.name not in
-            #       orig_disk_nodes and o != the new_pack we haven't written to
-            #       disk yet. However, the new pack object is not easily
-            #       accessible here (it would have to be passed through the
-            #       autopacking code, etc.)
-            obsolete_packs = [o for o in obsolete_packs
-                              if o.name not in already_obsolete]
-            self._obsolete_packs(obsolete_packs)
-        return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
@@ -2020,12 +1976,8 @@
         if first_read:
             return True
         # out the new value.
-        (disk_nodes, deleted_nodes, new_nodes,
-         orig_disk_nodes) = self._diff_pack_names()
-        # _packs_at_load is meant to be the explicit list of names in
-        # 'pack-names' at then start. As such, it should not contain any
-        # pending names that haven't been written out yet.
-        self._packs_at_load = orig_disk_nodes
+        disk_nodes, _, _ = self._diff_pack_names()
+        self._packs_at_load = disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -2040,28 +1992,15 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self, preserve=None):
+    def _clear_obsolete_packs(self):
         """Delete everything from the obsolete-packs directory.
-
-        :return: A list of pack identifiers (the filename without '.pack') that
-            were found in obsolete_packs.
         """
-        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
-        if preserve is None:
-            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
-            name, ext = osutils.splitext(filename)
-            if ext == '.pack':
-                found.append(name)
-            if name in preserve:
-                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s"
-                        % (e,))
-        return found
+                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
@@ -2094,21 +2033,24 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
-            operation.add_cleanup(setattr, self, '_new_pack', None)
-            # If we aborted while in the middle of finishing the write
-            # group, _remove_pack_indices could fail because the indexes are
-            # already gone.  But they're not there we shouldn't fail in this
-            # case, so we pass ignore_missing=True.
-            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                self._new_pack.abort()
+            finally:
+                # XXX: If we aborted while in the middle of finishing the write
+                # group, _remove_pack_indices can fail because the indexes are
+                # already gone.  If they're not there we shouldn't fail in this
+                # case.  -- mbp 20081113
+                self._remove_pack_indices(self._new_pack)
+                self._new_pack = None
         for resumed_pack in self._resumed_packs:
-            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
-            # See comment in previous finally block.
-            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                resumed_pack.abort()
+            finally:
+                # See comment in previous finally block.
+                try:
+                    self._remove_pack_indices(resumed_pack)
+                except KeyError:
+                    pass
         del self._resumed_packs[:]
 
     def _remove_resumed_pack_indices(self):
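One side of the _abort_write_group() hunk replaces nested try/finally blocks with cleanup.OperationWithCleanups, which runs an operation followed by a recorded list of cleanups. A standalone sketch of such a helper (not bzrlib.cleanup; the error handling here is deliberately simplified):

class OperationWithCleanupsSketch(object):
    """Illustrative stand-in for bzrlib's cleanup.OperationWithCleanups."""

    def __init__(self, func):
        self._func = func
        self._cleanups = []

    def add_cleanup(self, func, *args, **kwargs):
        self._cleanups.append((func, args, kwargs))

    def run_simple(self):
        try:
            return self._func()
        finally:
            for func, args, kwargs in self._cleanups:
                try:
                    func(*args, **kwargs)
                except Exception:
                    pass   # a real implementation would report, not swallow

calls = []
operation = OperationWithCleanupsSketch(lambda: calls.append('abort'))
operation.add_cleanup(calls.append, 'drop indices')
operation.add_cleanup(calls.append, 'forget new pack')
operation.run_simple()
assert calls == ['abort', 'drop indices', 'forget new pack']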
@@ -2116,16 +2058,6 @@
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
 
-    def _check_new_inventories(self):
-        """Detect missing inventories in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
-        """
-        # The base implementation does no checks.  GCRepositoryPackCollection
-        # overrides this.
-        return []
-
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2140,19 +2072,14 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
-        problems = self._check_new_inventories()
-        if problems:
-            problems_summary = '\n'.join(problems)
-            raise errors.BzrCheckError(
-                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        any_new_content = False
+        should_autopack = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            any_new_content = True
+            should_autopack = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2163,16 +2090,13 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            any_new_content = True
+            should_autopack = True
         del self._resumed_packs[:]
-        if any_new_content:
-            result = self.autopack()
-            if not result:
+        if should_autopack:
+            if not self.autopack():
                 # when autopack takes no steps, the names list is still
                 # unsaved.
-                return self._save_pack_names()
-            return result
-        return []
+                self._save_pack_names()
 
     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2280,19 +2204,66 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
 
-    def _warn_if_deprecated(self, branch=None):
+    def _warn_if_deprecated(self):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            super(KnitPackRepository, self)._warn_if_deprecated(branch)
+            from bzrlib import repository
+            if repository._deprecation_warning_done:
+                return
+            repository._deprecation_warning_done = True
+            warning("Format %s for %s is deprecated - please use"
+                    " 'bzr upgrade --1.6.1-rich-root'"
+                    % (self._format, self.bzrdir.transport.base))
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.clear()
+        self.revisions._index._key_dependencies.refs.clear()
         self._pack_collection._abort_write_group()
 
-    def _get_source(self, to_format):
-        if to_format.network_name() == self._format.network_name():
-            return KnitPackStreamSource(self, to_format)
-        return super(KnitPackRepository, self)._get_source(to_format)
+    def _find_inconsistent_revision_parents(self):
+        """Find revisions with incorrectly cached parents.
+
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+        pb = ui.ui_factory.nested_progress_bar()
+        result = []
+        try:
+            revision_nodes = self._pack_collection.revision_index \
+                .combined_index.iter_all_entries()
+            index_positions = []
+            # Get the cached index values for all revisions, and also the
+            # location in each index of the revision text so we can perform
+            # linear IO.
+            for index, key, value, refs in revision_nodes:
+                node = (index, key, value, refs)
+                index_memo = self.revisions._index._node_to_position(node)
+                if index_memo[0] != index:
+                    raise AssertionError('%r != %r' % (index_memo[0], index))
+                index_positions.append((index_memo, key[0],
+                                       tuple(parent[0] for parent in refs[0])))
+                pb.update("Reading revision index", 0, 0)
+            index_positions.sort()
+            batch_size = 1000
+            pb.update("Checking cached revision graph", 0,
+                      len(index_positions))
+            for offset in xrange(0, len(index_positions), 1000):
+                pb.update("Checking cached revision graph", offset)
+                to_query = index_positions[offset:offset + batch_size]
+                if not to_query:
+                    break
+                rev_ids = [item[1] for item in to_query]
+                revs = self.get_revisions(rev_ids)
+                for revision, item in zip(revs, to_query):
+                    index_parents = item[2]
+                    rev_parents = tuple(revision.parent_ids)
+                    if index_parents != rev_parents:
+                        result.append((revision.revision_id, index_parents,
+                                       rev_parents))
+        finally:
+            pb.finished()
+        return result
 
     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
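_find_inconsistent_revision_parents(), added above, sorts the cached index entries and checks them against the real revision parents in batches of 1000 so the IO stays linear. A standalone sketch of that batched comparison with illustrative data (not bzrlib):

def find_inconsistent(index_positions, get_parents, batch_size=1000):
    """index_positions: list of (sort_key, rev_id, cached_parents) tuples.

    get_parents(rev_ids) returns {rev_id: actual_parents}.
    """
    result = []
    index_positions.sort()
    for offset in range(0, len(index_positions), batch_size):
        to_query = index_positions[offset:offset + batch_size]
        rev_ids = [item[1] for item in to_query]
        actual = get_parents(rev_ids)
        for _, rev_id, cached_parents in to_query:
            if cached_parents != actual[rev_id]:
                result.append((rev_id, cached_parents, actual[rev_id]))
    return result

positions = [(0, 'rev-1', ('rev-0',)), (1, 'rev-2', ('rev-1',))]
truth = {'rev-1': ('rev-0',), 'rev-2': ('rev-0',)}   # rev-2 cache is wrong
bad = find_inconsistent(positions, lambda ids: {r: truth[r] for r in ids})
assert bad == [('rev-2', ('rev-1',), ('rev-0',))]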
@@ -2306,14 +2277,13 @@
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        hint = self._pack_collection._commit_write_group()
-        self.revisions._index._key_dependencies.clear()
-        return hint
+        self.revisions._index._key_dependencies.refs.clear()
+        return self._pack_collection._commit_write_group()
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.clear()
+        self.revisions._index._key_dependencies.refs.clear()
         self._write_group = None
         return tokens
 
@@ -2347,9 +2317,6 @@
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
         if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
-                note('%r was write locked again', self)
-            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
@@ -2362,9 +2329,6 @@
         else:
             self.control_files.lock_read()
         if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
-                note('%r was read locked again', self)
-            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
@@ -2378,13 +2342,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
+        self._pack_collection.pack()
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2398,7 +2362,6 @@
         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)
 
-    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
@@ -2421,79 +2384,6 @@
                 repo.unlock()
 
 
-class KnitPackStreamSource(StreamSource):
-    """A StreamSource used to transfer data between same-format KnitPack repos.
-
-    This source assumes:
-        1) Same serialization format for all objects
-        2) Same root information
-        3) XML format inventories
-        4) Atomic inserts (so we can stream inventory texts before text
-           content)
-        5) No chk_bytes
-    """
-
-    def __init__(self, from_repository, to_format):
-        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
-        self._text_keys = None
-        self._text_fetch_order = 'unordered'
-
-    def _get_filtered_inv_stream(self, revision_ids):
-        from_repo = self.from_repository
-        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
-        parent_keys = [(p,) for p in parent_ids]
-        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
-        parent_text_keys = set(find_text_keys(
-            from_repo._inventory_xml_lines_for_keys(parent_keys)))
-        content_text_keys = set()
-        knit = KnitVersionedFiles(None, None)
-        factory = KnitPlainFactory()
-        def find_text_keys_from_content(record):
-            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
-                raise ValueError("Unknown content storage kind for"
-                    " inventory text: %s" % (record.storage_kind,))
-            # It's a knit record, it has a _raw_record field (even if it was
-            # reconstituted from a network stream).
-            raw_data = record._raw_record
-            # read the entire thing
-            revision_id = record.key[-1]
-            content, _ = knit._parse_record(revision_id, raw_data)
-            if record.storage_kind == 'knit-delta-gz':
-                line_iterator = factory.get_linedelta_content(content)
-            elif record.storage_kind == 'knit-ft-gz':
-                line_iterator = factory.get_fulltext_content(content)
-            content_text_keys.update(find_text_keys(
-                [(line, revision_id) for line in line_iterator]))
-        revision_keys = [(r,) for r in revision_ids]
-        def _filtered_inv_stream():
-            source_vf = from_repo.inventories
-            stream = source_vf.get_record_stream(revision_keys,
-                                                 'unordered', False)
-            for record in stream:
-                if record.storage_kind == 'absent':
-                    raise errors.NoSuchRevision(from_repo, record.key)
-                find_text_keys_from_content(record)
-                yield record
-            self._text_keys = content_text_keys - parent_text_keys
-        return ('inventories', _filtered_inv_stream())
-
-    def _get_text_stream(self):
-        # Note: We know we don't have to handle adding root keys, because both
-        # the source and target are the identical network name.
-        text_stream = self.from_repository.texts.get_record_stream(
-                        self._text_keys, self._text_fetch_order, False)
-        return ('texts', text_stream)
-
-    def get_stream(self, search):
-        revision_ids = search.get_keys()
-        for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
-        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        yield self._get_filtered_inv_stream(revision_ids)
-        yield self._get_text_stream()
-
-
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.
 
@@ -2546,9 +2436,7 @@
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
-        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
-        return repository
+        return self.open(a_bzrdir=a_bzrdir, _found=True)
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2603,6 +2491,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2617,7 +2508,6 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2635,6 +2525,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2673,6 +2571,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2719,6 +2622,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2751,6 +2657,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2797,6 +2708,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2840,6 +2756,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2869,6 +2788,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2891,7 +2815,6 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
    supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
@@ -2911,6 +2834,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "