/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-07-30 14:24:06 UTC
  • mfrom: (4576.1.1 export-to-dir)
  • Revision ID: pqm@pqm.ubuntu.com-20090730142406-wg8gmxpcjz4c1z00
(bialix) Allow 'bzr export' to export into an existing (but empty) directory
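
The merged change itself touches bzr's export code rather than this file. As a rough sketch of the behaviour the message describes (a hypothetical helper, not the actual bzrlib implementation), the target check would accept a missing or empty directory and refuse a non-empty one:

import errno
import os

def check_export_target(path):
    # Hypothetical sketch only: create the export target, or accept it
    # when it already exists but contains nothing.
    try:
        os.mkdir(path)
    except OSError, e:
        if e.errno != errno.EEXIST:
            raise
        if os.listdir(path):
            raise OSError(errno.ENOTEMPTY,
                          'cannot export to a non-empty directory', path)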

The diff follows in unified format: lines prefixed '-' were removed, lines prefixed '+' were added, and '@@ -old,count +new,count @@' headers give each side's starting line and length.

@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
 
 from bzrlib import (
     chk_map,
+    cleanup,
     debug,
     graph,
     osutils,
@@ -54,7 +55,7 @@
     revision as _mod_revision,
     )
 
-from bzrlib.decorators import needs_write_lock
+from bzrlib.decorators import needs_write_lock, only_raises
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -73,6 +74,7 @@
     )
 from bzrlib.trace import (
     mutter,
+    note,
     warning,
     )
 
@@ -224,10 +226,14 @@
         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))
 
 
 class ExistingPack(Pack):
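
The hunk above threads a new unlimited_cache flag through to the index constructor so that only '.cix' (chk) indexes get an unbounded node cache. A minimal sketch of that kind of size policy (illustrative only, not bzrlib's BTreeGraphIndex):

class _LeafNodeCacheSketch(object):
    # Bounded by default, unbounded when unlimited_cache=True, mirroring
    # the flag the hunk above passes for chk indexes.
    def __init__(self, unlimited_cache=False, max_entries=100):
        self._unlimited = unlimited_cache
        self._max_entries = max_entries
        self._entries = {}

    def add(self, key, value):
        if not self._unlimited and len(self._entries) >= self._max_entries:
            self._entries.popitem()  # evict something; a real cache picks LRU
        self._entries[key] = value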
@@ -422,6 +428,8 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
+        # no name until we finish writing the content
+        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -448,6 +456,14 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))
 
+    def finish_content(self):
+        if self.name is not None:
+            return
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
+
     def finish(self, suspend=False):
         """Finish the new pack.
 
@@ -459,10 +475,7 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
+        self.finish_content()
         if not suspend:
             self._check_references()
         # write indices
@@ -574,26 +587,6 @@
                                              flush_func=flush_func)
         self.add_callback = None
 
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # there is one.
-        self.add_callback = None
-
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
 
@@ -606,7 +599,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -632,16 +625,18 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
@@ -1105,7 +1100,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the the node retrieved by the
+            (key, eol_flag, references) for the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1403,14 +1398,26 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
+
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
@@ -1530,10 +1537,11 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
         return result
 
     def _flush_new_pack(self):
@@ -1552,7 +1560,7 @@
         """Is the collection already packed?"""
         return not (self.repo._format.pack_compresses or (len(self._names) > 1))
 
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
@@ -1567,11 +1575,16 @@
         # determine which packs need changing
        pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if not hint or pack.name in hint:
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
                 pack_operations[-1][0] += pack.get_revision_count()
                 pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
+
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
 
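Taken together with the repository-level change further down, the new clean_obsolete_packs flag lets one call both repack everything and empty obsolete_packs/. A usage sketch, where repo stands in for a write-locked pack-format repository object:

# Usage sketch (hypothetical `repo` object):
repo.lock_write()
try:
    repo._pack_collection.pack(hint=None, clean_obsolete_packs=True)
finally:
    repo.unlock()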
@@ -1665,7 +1678,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix')
+                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1690,7 +1703,8 @@
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True)
+                chk_index = self._make_index(name, '.cix', resume=True,
+                                             unlimited_cache=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1726,7 +1740,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1735,7 +1749,8 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1769,8 +1784,13 @@
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1778,8 +1798,12 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1811,14 +1835,22 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise
 
     def reset(self):
         """Clear all cached data."""
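
The rewritten _remove_pack_indices replaces five near-identical remove_index calls with a loop over Pack.index_definitions (presumably the revision, inventory, text, signature and chk index types), and folds the callers' try/except KeyError handling into an ignore_missing flag. The same table-driven shape as a standalone sketch:

def remove_pack_indices(collection, pack, index_types, ignore_missing=False):
    # Sketch of the loop above: derive '<type>_index' attribute names and
    # look them up on both the collection and the pack.
    for index_type in index_types:
        attr_name = index_type + '_index'
        aggregate_index = getattr(collection, attr_name, None)
        if aggregate_index is None:
            continue
        try:
            aggregate_index.remove_index(getattr(pack, attr_name))
        except KeyError:
            if ignore_missing:
                continue
            raise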
@@ -1857,6 +1889,7 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1875,7 +1908,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1921,7 +1954,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1931,12 +1964,16 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1944,14 +1981,25 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = None
+                if obsolete_packs:
+                    to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
@@ -1972,8 +2020,12 @@
         if first_read:
             return True
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at then start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -1988,15 +2040,28 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
+        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if ext == '.pack':
+                found.append(name)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
+        return found
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
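
_clear_obsolete_packs now reports every '.pack' name it sees and skips names in the preserve set, so _save_pack_names can avoid re-obsoleting packs that were already moved aside. The filtering, modelled standalone (a sketch; the real method deletes files through a bzrlib transport):

import os

def clear_obsolete(filenames, preserve=None):
    # Returns (found, deleted): every pack name seen, and which files
    # would actually be removed.
    if preserve is None:
        preserve = set()
    found, deleted = [], []
    for filename in filenames:
        name, ext = os.path.splitext(filename)
        if ext == '.pack':
            found.append(name)
        if name in preserve:
            continue
        deleted.append(filename)
    return found, deleted

# clear_obsolete(['a.pack', 'a.rix', 'b.pack'], preserve=set(['b']))
# -> (['a', 'b'], ['a.pack', 'a.rix'])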
@@ -2029,24 +2094,21 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone.  If they're not there we shouldn't fail in this
-                # case.  -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone.  But they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                try:
-                    self._remove_pack_indices(resumed_pack)
-                except KeyError:
-                    pass
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
 
     def _remove_resumed_pack_indices(self):
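
The abort path now builds a bzrlib.cleanup.OperationWithCleanups instead of nesting try/finally blocks: the main callable runs first, then every registered cleanup runs even if an earlier one fails. A simplified model of those semantics (a sketch, not the real class, which reports cleanup failures through bzrlib's logging):

class OperationWithCleanupsSketch(object):
    def __init__(self, func):
        self._func = func
        self._cleanups = []

    def add_cleanup(self, fn, *args, **kwargs):
        self._cleanups.append((fn, args, kwargs))

    def run_simple(self):
        try:
            return self._func()
        finally:
            for fn, args, kwargs in self._cleanups:
                try:
                    fn(*args, **kwargs)
                except Exception:
                    pass  # sketch: the real class logs instead of discarding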
@@ -2054,6 +2116,16 @@
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
 
+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+
+        :returns: list of strs, summarising any problems found.  If the list is
+            empty no problems were found.
+        """
+        # The base implementation does no checks.  GCRepositoryPackCollection
+        # overrides this.
+        return []
+
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2068,14 +2140,19 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+        if problems:
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2086,13 +2163,16 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            should_autopack = True
+            any_new_content = True
         del self._resumed_packs[:]
-        if should_autopack:
-            if not self.autopack():
+        if any_new_content:
+            result = self.autopack()
+            if not result:
                 # when autopack takes no steps, the names list is still
                 # unsaved.
                 return self._save_pack_names()
+            return result
+        return []
 
     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2200,67 +2280,15 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
 
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                    " 'bzr upgrade --1.6.1-rich-root'"
-                    % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
 
-    def _find_inconsistent_revision_parents(self):
-        """Find revisions with incorrectly cached parents.
-
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        """
-        if not self.is_locked():
-            raise errors.ObjectNotLocked(self)
-        pb = ui.ui_factory.nested_progress_bar()
-        result = []
-        try:
-            revision_nodes = self._pack_collection.revision_index \
-                .combined_index.iter_all_entries()
-            index_positions = []
-            # Get the cached index values for all revisions, and also the
-            # location in each index of the revision text so we can perform
-            # linear IO.
-            for index, key, value, refs in revision_nodes:
-                node = (index, key, value, refs)
-                index_memo = self.revisions._index._node_to_position(node)
-                if index_memo[0] != index:
-                    raise AssertionError('%r != %r' % (index_memo[0], index))
-                index_positions.append((index_memo, key[0],
-                                       tuple(parent[0] for parent in refs[0])))
-                pb.update("Reading revision index", 0, 0)
-            index_positions.sort()
-            batch_size = 1000
-            pb.update("Checking cached revision graph", 0,
-                      len(index_positions))
-            for offset in xrange(0, len(index_positions), 1000):
-                pb.update("Checking cached revision graph", offset)
-                to_query = index_positions[offset:offset + batch_size]
-                if not to_query:
-                    break
-                rev_ids = [item[1] for item in to_query]
-                revs = self.get_revisions(rev_ids)
-                for revision, item in zip(revs, to_query):
-                    index_parents = item[2]
-                    rev_parents = tuple(revision.parent_ids)
-                    if index_parents != rev_parents:
-                        result.append((revision.revision_id, index_parents,
-                                       rev_parents))
-        finally:
-            pb.finished()
-        return result
-
     def _get_source(self, to_format):
         if to_format.network_name() == self._format.network_name():
             return KnitPackStreamSource(self, to_format)
@@ -2278,13 +2306,14 @@
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()
+        return hint
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens
 
@@ -2318,6 +2347,9 @@
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
@@ -2330,6 +2362,9 @@
         else:
             self.control_files.lock_read()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
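
The new relock notices fire only when the 'relock' debug flag is set; the flag can be enabled programmatically (or, presumably, via bzr's global -Drelock option):

from bzrlib import debug

# debug.debug_flags is the same set consulted in the lock_write()/
# lock_read() hunks above.
debug.debug_flags.add('relock')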
@@ -2343,13 +2378,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2363,6 +2398,7 @@
         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)
 
+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
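
only_raises, imported in the first hunk of this diff, declares that unlock may only propagate the listed lock errors, so a failure during unlock cannot mask the exception that triggered it. A sketch of such a decorator, assuming non-listed exceptions are logged and suppressed:

def only_raises_sketch(*allowed):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except allowed:
                raise
            except Exception:
                pass  # assumption: the real decorator logs via bzrlib.trace
        return wrapper
    return decorator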
@@ -2510,7 +2546,9 @@
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2565,9 +2603,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
-    def check_conversion_target(self, target_format):
-        pass
-
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2582,6 +2617,7 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2599,14 +2635,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2645,11 +2673,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2696,9 +2719,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
-    def check_conversion_target(self, target_format):
-        pass
-
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2731,11 +2751,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2782,11 +2797,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2830,9 +2840,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
-    def check_conversion_target(self, target_format):
-        pass
-
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2862,11 +2869,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2889,6 +2891,7 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
    supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
@@ -2908,14 +2911,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "