/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Martin Pool
  • Date: 2010-10-14 21:23:08 UTC
  • mto: This revision was merged to the branch mainline in revision 5495.
  • Revision ID: mbp@sourcefrog.net-20101014212308-avfax5qlqwo1rrb1
Also mention news about hooks

=== modified file 'bzrlib/repofmt/pack_repo.py'
@@ -49,6 +49,7 @@
 """)
 from bzrlib import (
     bzrdir,
+    btree_index,
     errors,
     lockable_files,
     lockdir,
@@ -56,14 +57,11 @@
     )
 
 from bzrlib.decorators import needs_write_lock, only_raises
-from bzrlib.btree_index import (
-    BTreeGraphIndex,
-    BTreeBuilder,
-    )
 from bzrlib.index import (
     GraphIndex,
     InMemoryGraphIndex,
     )
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
     CommitBuilder,
@@ -230,11 +228,13 @@
         unlimited_cache = False
         if index_type == 'chk':
             unlimited_cache = True
-        setattr(self, index_type + '_index',
-            self.index_class(self.index_transport,
-                self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+        index = self.index_class(self.index_transport,
+                    self.index_name(index_type, self.name),
+                    self.index_sizes[self.index_offset(index_type)],
+                    unlimited_cache=unlimited_cache)
+        if index_type == 'chk':
+            index._leaf_factory = btree_index._gcchk_factory
+        setattr(self, index_type + '_index', index)
 
 
 class ExistingPack(Pack):
@@ -722,7 +722,7 @@
         :return: A Pack object, or None if nothing was copied.
         """
         # open a pack - using the same name as the last temporary file
-        # - which has already been flushed, so its safe.
+        # - which has already been flushed, so it's safe.
         # XXX: - duplicate code warning with start_write_group; fix before
         #      considering 'done'.
         if self._pack_collection._new_pack is not None:
@@ -1292,7 +1292,7 @@
         # reinserted, and if d3 has incorrect parents it will also be
         # reinserted. If we insert d3 first, d2 is present (as it was bulk
         # copied), so we will try to delta, but d2 is not currently able to be
-        # extracted because it's basis d1 is not present. Topologically sorting
+        # extracted because its basis d1 is not present. Topologically sorting
         # addresses this. The following generates a sort for all the texts that
         # are being inserted without having to reference the entire text key
         # space (we only topo sort the revisions, which is smaller).
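The comment block in this hunk is the rationale for inserting texts in topological order of their compression parents: a delta such as d2 can only be expanded once its basis d1 is already present. A minimal illustrative sketch of that ordering, using the hypothetical d1/d2/d3 keys from the comment rather than bzrlib's real data structures:

# Hypothetical compression-parent map: each text key maps to the key its
# delta is built on (None for a full text).  Mirrors the d1/d2/d3 example
# in the comment above; this is not bzrlib's actual topo-sort helper.
basis_of = {'d1': None, 'd2': 'd1', 'd3': 'd2'}

def insertion_order(keys, basis_of):
    """Return keys ordered so every delta basis precedes its dependents."""
    order = []
    emitted = set()
    def visit(key):
        if key is None or key in emitted:
            return
        visit(basis_of.get(key))  # emit the basis first
        emitted.add(key)
        order.append(key)
    for key in keys:
        visit(key)
    return order

print(insertion_order(['d3', 'd2', 'd1'], basis_of))
# -> ['d1', 'd2', 'd3']: d1 is inserted before d2, so d2's delta can always
#    be extracted, and likewise d2 before d3.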
@@ -1600,9 +1600,9 @@
         pack_operations = [[0, []]]
         # plan out what packs to keep, and what to reorganise
         while len(existing_packs):
-            # take the largest pack, and if its less than the head of the
+            # take the largest pack, and if it's less than the head of the
             # distribution chart we will include its contents in the new pack
-            # for that position. If its larger, we remove its size from the
+            # for that position. If it's larger, we remove its size from the
             # distribution chart
             next_pack_rev_count, next_pack = existing_packs.pop(0)
             if next_pack_rev_count >= pack_distribution[0]:
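The pack_distribution list consumed in this hunk is the autopack target the comment calls the "distribution chart": a repository with N revisions is kept to roughly sum-of-the-digits-of-N packs, bucketed by decimal magnitude (compare the _max_pack_count docstring further down). A hedged sketch of that bucketing, offered as an illustration of the policy rather than bzrlib's exact implementation:

def pack_distribution(total_revisions):
    # Illustration of the autopack policy referred to above: one bucket of
    # size 10**k per decimal digit of the revision count, largest first,
    # so the number of buckets equals the sum of the digits.
    if total_revisions == 0:
        return [0]
    buckets = []
    for exponent, digit in enumerate(reversed(str(total_revisions))):
        buckets.extend([10 ** exponent] * int(digit))
    return list(reversed(buckets))

print(pack_distribution(2321))
# -> [1000, 1000, 100, 100, 100, 10, 10, 1]: at most 2+3+2+1 == 8 packs.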
@@ -1643,7 +1643,7 @@
 
         :return: True if the disk names had not been previously read.
         """
-        # NB: if you see an assertion error here, its probably access against
+        # NB: if you see an assertion error here, it's probably access against
         # an unlocked repo. Naughty.
         if not self.repo.is_locked():
             raise errors.ObjectNotLocked(self.repo)
@@ -1679,7 +1679,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix', is_chk=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1705,7 +1705,7 @@
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
                 chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
+                                             is_chk=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1741,7 +1741,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False, is_chk=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1750,8 +1750,11 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        index = self._index_class(transport, index_name, index_size,
+                                  unlimited_cache=is_chk)
+        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
+            index._leaf_factory = btree_index._gcchk_factory
+        return index
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1943,7 +1946,7 @@
                     # disk index because the set values are the same, unless
                     # the only index shows up as deleted by the set difference
                     # - which it may. Until there is a specific test for this,
-                    # assume its broken. RBC 20071017.
+                    # assume it's broken. RBC 20071017.
                     self._remove_pack_from_memory(self.get_pack_by_name(name))
                     self._names[name] = sizes
                     self.get_pack_by_name(name)
@@ -2014,9 +2017,9 @@
         """
         # The ensure_loaded call is to handle the case where the first call
         # made involving the collection was to reload_pack_names, where we
-        # don't have a view of disk contents. Its a bit of a bandaid, and
-        # causes two reads of pack-names, but its a rare corner case not struck
-        # with regular push/pull etc.
+        # don't have a view of disk contents. It's a bit of a bandaid, and
+        # causes two reads of pack-names, but it's a rare corner case not
+        # struck with regular push/pull etc.
         first_read = self.ensure_loaded()
         if first_read:
             return True
@@ -2341,6 +2344,10 @@
         return self._write_lock_count
 
     def lock_write(self, token=None):
+        """Lock the repository for writes.
+
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
@@ -2358,6 +2365,10 @@
         return RepositoryWriteLockResult(self.unlock, None)
 
     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
@@ -2370,7 +2381,7 @@
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
-        return self
+        return LogicalLockResult(self.unlock)
 
     def leave_lock_in_place(self):
         # not supported - raise an error
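The new docstrings in the two hunks above record the changed return values: lock_write now returns a bzrlib.repository.RepositoryWriteLockResult and lock_read a bzrlib.lock.LogicalLockResult instead of the repository itself. A hedged usage sketch, assuming repo is an already-open pack repository:

# 'repo' is assumed to be an already-open KnitPackRepository.  Both result
# objects expose .unlock; the write result also carries .repository_token
# (None here, since the diff passes None to RepositoryWriteLockResult).
read_lock = repo.lock_read()
try:
    pass  # read-only work against repo
finally:
    read_lock.unlock()

write_lock = repo.lock_write()
try:
    token = write_lock.repository_token
finally:
    write_lock.unlock()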
@@ -2820,8 +2831,8 @@
     _commit_builder_class = PackCommitBuilder
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2856,8 +2867,8 @@
     supports_tree_reference = False # no subtrees
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2898,8 +2909,8 @@
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2907,7 +2918,7 @@
 
     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir(
-            'development-subtree')
+            'development5-subtree')
 
     def _ignore_setting_bzrdir(self, format):
         pass