/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Vincent Ladeuil
  • Date: 2011-07-06 08:58:15 UTC
  • mfrom: (5609.48.2 2.3)
  • mto: (6012.1.1 trunk)
  • mto: This revision was merged to the branch mainline in revision 6013.
  • Revision ID: v.ladeuil+lp@free.fr-20110706085815-6leauod52jq2u43d
Merging in 2.3

@@ -49,6 +49,7 @@
 """)
 from bzrlib import (
     bzrdir,
+    btree_index,
     errors,
     lockable_files,
     lockdir,
@@ -56,10 +57,6 @@
     )
 
 from bzrlib.decorators import needs_write_lock, only_raises
-from bzrlib.btree_index import (
-    BTreeGraphIndex,
-    BTreeBuilder,
-    )
 from bzrlib.index import (
     GraphIndex,
     InMemoryGraphIndex,
@@ -231,11 +228,13 @@
         unlimited_cache = False
         if index_type == 'chk':
             unlimited_cache = True
-        setattr(self, index_type + '_index',
-            self.index_class(self.index_transport,
-                self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+        index = self.index_class(self.index_transport,
+                    self.index_name(index_type, self.name),
+                    self.index_sizes[self.index_offset(index_type)],
+                    unlimited_cache=unlimited_cache)
+        if index_type == 'chk':
+            index._leaf_factory = btree_index._gcchk_factory
+        setattr(self, index_type + '_index', index)
 
 
 class ExistingPack(Pack):
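
The rewritten block builds the index first, switches it to the groupcompress chk leaf parser (btree_index._gcchk_factory) when the 'chk' index is being opened, and only then binds it with setattr. A minimal sketch of that construct-then-specialise pattern, using invented stand-in classes rather than bzrlib's real index types:

# Hedged sketch of the pattern above; the classes are illustrative stand-ins,
# not bzrlib APIs.

class LeafNode(object):
    """Default leaf parser."""

class GCCHKLeafNode(LeafNode):
    """Specialised leaf parser for chk (content-hash-key) pages."""

class IndexStub(object):
    _leaf_factory = LeafNode   # class-level default, overridable per instance

    def __init__(self, name, unlimited_cache=False):
        self.name = name
        self.unlimited_cache = unlimited_cache

class PackStub(object):
    index_class = IndexStub

    def _get_index(self, index_type):
        # chk indices want the whole index cached and the specialised parser.
        unlimited_cache = (index_type == 'chk')
        index = self.index_class(index_type + '-index',
                                 unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = GCCHKLeafNode
        setattr(self, index_type + '_index', index)

pack = PackStub()
pack._get_index('chk')
pack._get_index('text')
assert pack.chk_index._leaf_factory is GCCHKLeafNode
assert pack.text_index._leaf_factory is LeafNode
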
@@ -723,7 +722,7 @@
         :return: A Pack object, or None if nothing was copied.
         """
         # open a pack - using the same name as the last temporary file
-        # - which has already been flushed, so its safe.
+        # - which has already been flushed, so it's safe.
         # XXX: - duplicate code warning with start_write_group; fix before
         #      considering 'done'.
         if self._pack_collection._new_pack is not None:
@@ -1293,7 +1292,7 @@
         # reinserted, and if d3 has incorrect parents it will also be
         # reinserted. If we insert d3 first, d2 is present (as it was bulk
         # copied), so we will try to delta, but d2 is not currently able to be
-        # extracted because it's basis d1 is not present. Topologically sorting
+        # extracted because its basis d1 is not present. Topologically sorting
         # addresses this. The following generates a sort for all the texts that
         # are being inserted without having to reference the entire text key
         # space (we only topo sort the revisions, which is smaller).
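
The comment above (only an apostrophe changes in this hunk) carries the algorithmic point: insert text deltas in topological order of their revisions and every delta's basis is already present when the delta arrives. A small illustrative sketch of that ordering, with made-up keys rather than bzrlib's internals:

# Illustrative sketch (not bzrlib code): order text insertions by a
# topological sort of their revisions so that a delta's basis text is
# inserted before the delta that needs it.

def topo_sort(parents_map):
    """Return revisions ordered so every parent precedes its children."""
    order, done = [], set()
    def visit(rev):
        if rev in done:
            return
        for parent in parents_map.get(rev, ()):
            visit(parent)
        done.add(rev)
        order.append(rev)
    for rev in parents_map:
        visit(rev)
    return order

# d1 <- d2 <- d3: each delta's basis is the text in its parent revision.
revision_parents = {'d1': (), 'd2': ('d1',), 'd3': ('d2',)}
text_keys = {'d1': ('file-id', 'd1'), 'd2': ('file-id', 'd2'),
             'd3': ('file-id', 'd3')}

insert_order = [text_keys[rev] for rev in topo_sort(revision_parents)
                if rev in text_keys]
assert insert_order == [('file-id', 'd1'), ('file-id', 'd2'),
                        ('file-id', 'd3')]
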
@@ -1613,9 +1612,9 @@
         pack_operations = [[0, []]]
         # plan out what packs to keep, and what to reorganise
         while len(existing_packs):
-            # take the largest pack, and if its less than the head of the
+            # take the largest pack, and if it's less than the head of the
             # distribution chart we will include its contents in the new pack
-            # for that position. If its larger, we remove its size from the
+            # for that position. If it's larger, we remove its size from the
             # distribution chart
             next_pack_rev_count, next_pack = existing_packs.pop(0)
             if next_pack_rev_count >= pack_distribution[0]:
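
These comments (again only apostrophe fixes) describe the repacking plan: walk the packs from largest to smallest and either fold a pack into the pack being assembled for the current distribution slot or, if it is already at least that big, keep it and charge its size against the distribution chart. A simplified, hedged sketch of that planning strategy follows; it is illustrative only, not bzrlib's actual implementation:

# Simplified sketch of the planning strategy described above; invented data
# and structure, not bzrlib's real autopack planning code.

def plan_pack_operations(existing_packs, pack_distribution):
    """existing_packs: [(rev_count, name), ...] sorted largest first.
    pack_distribution: target revision counts per slot, largest first.
    Returns [[planned_rev_count, [pack names to combine]], ...]."""
    existing_packs = list(existing_packs)
    pack_distribution = list(pack_distribution)
    pack_operations = [[0, []]]
    while len(existing_packs):
        next_pack_rev_count, next_pack = existing_packs.pop(0)
        if pack_distribution and next_pack_rev_count >= pack_distribution[0]:
            # Already at least as big as this slot: keep the pack as it is
            # and consume its size from the distribution chart.
            pack_distribution[0] -= next_pack_rev_count
            if pack_distribution[0] <= 0:
                pack_distribution.pop(0)
                pack_operations.append([0, []])
        else:
            # Smaller than the current slot: fold it into the pack being
            # planned for that position.
            pack_operations[-1][0] += next_pack_rev_count
            pack_operations[-1][1].append(next_pack)
    return pack_operations

# Four packs of 900/90/9/1 revisions get combined into one 1000-revision pack.
print(plan_pack_operations([(900, 'a'), (90, 'b'), (9, 'c'), (1, 'd')],
                           [1000, 100, 10, 1]))
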
@@ -1656,7 +1655,7 @@
 
         :return: True if the disk names had not been previously read.
         """
-        # NB: if you see an assertion error here, its probably access against
+        # NB: if you see an assertion error here, it's probably access against
         # an unlocked repo. Naughty.
         if not self.repo.is_locked():
             raise errors.ObjectNotLocked(self.repo)
@@ -1692,7 +1691,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix', is_chk=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1718,7 +1717,7 @@
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
                 chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
+                                             is_chk=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1754,7 +1753,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False, is_chk=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1763,8 +1762,11 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        index = self._index_class(transport, index_name, index_size,
+                                  unlimited_cache=is_chk)
+        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
+            index._leaf_factory = btree_index._gcchk_factory
+        return index
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1956,7 +1958,7 @@
                     # disk index because the set values are the same, unless
                     # the only index shows up as deleted by the set difference
                     # - which it may. Until there is a specific test for this,
-                    # assume its broken. RBC 20071017.
+                    # assume it's broken. RBC 20071017.
                     self._remove_pack_from_memory(self.get_pack_by_name(name))
                     self._names[name] = sizes
                     self.get_pack_by_name(name)
@@ -2027,9 +2029,9 @@
         """
         # The ensure_loaded call is to handle the case where the first call
         # made involving the collection was to reload_pack_names, where we
-        # don't have a view of disk contents. Its a bit of a bandaid, and
-        # causes two reads of pack-names, but its a rare corner case not struck
-        # with regular push/pull etc.
+        # don't have a view of disk contents. It's a bit of a bandaid, and
+        # causes two reads of pack-names, but it's a rare corner case not
+        # struck with regular push/pull etc.
         first_read = self.ensure_loaded()
         if first_read:
             return True
@@ -2849,8 +2851,8 @@
     _commit_builder_class = PackCommitBuilder
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2885,8 +2887,8 @@
     supports_tree_reference = False # no subtrees
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2912,12 +2914,10 @@
 class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
     """A subtrees development repository.
 
-    This format should be retained until the second release after bzr 1.7.
+    This format should be retained in 2.3, to provide an upgrade path from this
+    to RepositoryFormat2aSubtree.  It can be removed in later releases.
 
     1.6.1-subtree[as it might have been] with B+Tree indices.
-
-    This is [now] retained until we have a CHK based subtree format in
-    development.
     """
 
     repository_class = KnitPackRepository
@@ -2927,8 +2927,8 @@
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2936,7 +2936,7 @@
 
     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir(
-            'development-subtree')
+            'development5-subtree')
 
     def _ignore_setting_bzrdir(self, format):
         pass