/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Vincent Ladeuil
  • Date: 2010-10-07 14:34:39 UTC
  • mto: This revision was merged to the branch mainline in revision 5469.
  • Revision ID: v.ladeuil+lp@free.fr-20101007143439-1mncq23hk2h9xxl3
Bump API version to 2.3.0.

[old 49 / new 49]
 """)
 from bzrlib import (
     bzrdir,
+    btree_index,
     errors,
     lockable_files,
     lockdir,

[old 56 / new 57]
     )

 from bzrlib.decorators import needs_write_lock, only_raises
-from bzrlib.btree_index import (
-    BTreeGraphIndex,
-    BTreeBuilder,
-    )
 from bzrlib.index import (
     GraphIndex,
     InMemoryGraphIndex,
     )
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
     CommitBuilder,
     MetaDirRepositoryFormat,
     RepositoryFormat,
+    RepositoryWriteLockResult,
     RootCommitBuilder,
     StreamSource,
     )

[old 229 / new 228]
         unlimited_cache = False
         if index_type == 'chk':
             unlimited_cache = True
-        setattr(self, index_type + '_index',
-            self.index_class(self.index_transport,
-                self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+        index = self.index_class(self.index_transport,
+                    self.index_name(index_type, self.name),
+                    self.index_sizes[self.index_offset(index_type)],
+                    unlimited_cache=unlimited_cache)
+        if index_type == 'chk':
+            index._leaf_factory = btree_index._gcchk_factory
+        setattr(self, index_type + '_index', index)


 class ExistingPack(Pack):
[old 721 / new 722]
         :return: A Pack object, or None if nothing was copied.
         """
         # open a pack - using the same name as the last temporary file
-        # - which has already been flushed, so its safe.
+        # - which has already been flushed, so it's safe.
         # XXX: - duplicate code warning with start_write_group; fix before
         #      considering 'done'.
         if self._pack_collection._new_pack is not None:

[old 1291 / new 1292]
         # reinserted, and if d3 has incorrect parents it will also be
         # reinserted. If we insert d3 first, d2 is present (as it was bulk
         # copied), so we will try to delta, but d2 is not currently able to be
-        # extracted because it's basis d1 is not present. Topologically sorting
+        # extracted because its basis d1 is not present. Topologically sorting
         # addresses this. The following generates a sort for all the texts that
         # are being inserted without having to reference the entire text key
         # space (we only topo sort the revisions, which is smaller).

[old 1599 / new 1600]
         pack_operations = [[0, []]]
         # plan out what packs to keep, and what to reorganise
         while len(existing_packs):
-            # take the largest pack, and if its less than the head of the
+            # take the largest pack, and if it's less than the head of the
             # distribution chart we will include its contents in the new pack
-            # for that position. If its larger, we remove its size from the
+            # for that position. If it's larger, we remove its size from the
             # distribution chart
             next_pack_rev_count, next_pack = existing_packs.pop(0)
             if next_pack_rev_count >= pack_distribution[0]:

[old 1642 / new 1643]

         :return: True if the disk names had not been previously read.
         """
-        # NB: if you see an assertion error here, its probably access against
+        # NB: if you see an assertion error here, it's probably access against
         # an unlocked repo. Naughty.
         if not self.repo.is_locked():
             raise errors.ObjectNotLocked(self.repo)

[old 1678 / new 1679]
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix', is_chk=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,

[old 1704 / new 1705]
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
                 chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
+                                             is_chk=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
[old 1740 / new 1741]
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()

-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False, is_chk=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:

[old 1749 / new 1750]
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        index = self._index_class(transport, index_name, index_size,
+                                  unlimited_cache=is_chk)
+        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
+            index._leaf_factory = btree_index._gcchk_factory
+        return index

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
[old 1942 / new 1946]
                     # disk index because the set values are the same, unless
                     # the only index shows up as deleted by the set difference
                     # - which it may. Until there is a specific test for this,
-                    # assume its broken. RBC 20071017.
+                    # assume it's broken. RBC 20071017.
                     self._remove_pack_from_memory(self.get_pack_by_name(name))
                     self._names[name] = sizes
                     self.get_pack_by_name(name)

[old 2013 / new 2017]
         """
         # The ensure_loaded call is to handle the case where the first call
         # made involving the collection was to reload_pack_names, where we
-        # don't have a view of disk contents. Its a bit of a bandaid, and
-        # causes two reads of pack-names, but its a rare corner case not struck
-        # with regular push/pull etc.
+        # don't have a view of disk contents. It's a bit of a bandaid, and
+        # causes two reads of pack-names, but it's a rare corner case not
+        # struck with regular push/pull etc.
         first_read = self.ensure_loaded()
         if first_read:
             return True
[old 2340 / new 2344]
         return self._write_lock_count

     def lock_write(self, token=None):
+        """Lock the repository for writes.
+
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)

[old 2354 / new 2362]
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)

     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1

[old 2368 / new 2381]
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
+        return LogicalLockResult(self.unlock)

     def leave_lock_in_place(self):
         # not supported - raise an error
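The lock_write()/lock_read() hunks above make both methods return a result object, as documented in the new docstrings. A minimal sketch of how calling code can use those return values (repo is assumed to be an already-open pack repository; this example is not part of the diff):

    # Sketch only: the returned objects carry the matching unlock callable.
    write_result = repo.lock_write()   # a bzrlib.repository.RepositoryWriteLockResult
    try:
        pass  # mutate the repository while the write lock is held
    finally:
        write_result.unlock()

    read_result = repo.lock_read()     # a bzrlib.lock.LogicalLockResult
    try:
        pass  # read while the read lock is held
    finally:
        read_result.unlock()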
[old 2817 / new 2831]
     _commit_builder_class = PackCommitBuilder
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex

     @property
     def _serializer(self):

[old 2853 / new 2867]
     supports_tree_reference = False # no subtrees
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex

     @property
     def _serializer(self):

[old 2895 / new 2909]
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex

     @property
     def _serializer(self):

[old 2904 / new 2918]

     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir(
-            'development-subtree')
+            'development5-subtree')

     def _ignore_setting_bzrdir(self, format):
         pass