/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to breezy/bzr/pack_repo.py

  • Committer: Gustav Hartvigsson
  • Date: 2021-01-09 21:36:27 UTC
  • Revision ID: gustav.hartvigsson@gmail.com-20210109213627-h1xwcutzy9m7a99b
Added 'Case Preserving Working Tree Use Cases' from Canonical Wiki

* Added a page from the Canonical Bazaar wiki
  with information on the semantics of case
  preserving filesystems and how a case
  insensitive filesystem works.

  * Needs re-work, but this will do as it is the
    same information as what was on the linked
    page in the current documentation.

--- breezy/bzr/pack_repo.py
+++ breezy/bzr/pack_repo.py
@@ -14,17 +14,15 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
-from __future__ import absolute_import
-
 import re
 import sys
 
 from ..lazy_import import lazy_import
 lazy_import(globals(), """
+import contextlib
 import time
 
 from breezy import (
-    cleanup,
     config,
     debug,
     graph,
@@ -60,10 +58,6 @@
     MetaDirRepository,
     RepositoryFormatMetaDir,
     )
-from ..sixish import (
-    reraise,
-    viewitems,
-    )
 from ..bzr.vf_repository import (
     MetaDirVersionedFileRepository,
     MetaDirVersionedFileRepositoryFormat,
@@ -1337,7 +1331,7 @@
 
         # do a two-way diff against our original content
         current_nodes = set()
-        for name, sizes in viewitems(self._names):
+        for name, sizes in self._names.items():
             current_nodes.add(
                 (name, b' '.join(b'%d' % size for size in sizes)))
 
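
The hunk above drops breezy's sixish.viewitems shim in favour of a plain dict.items() call. On Python 3 the two are interchangeable, because dict.items() already returns a dynamic view; the shim only existed to get view semantics on Python 2. A minimal standalone sketch (hypothetical data, not from this branch):

    names = {'pack-1': [10, 20], 'pack-2': [30]}

    # Python 3: items() is a live view, which is what sixish.viewitems provided.
    view = names.items()
    names['pack-3'] = [40]
    assert ('pack-3', [40]) in view  # the view sees later insertions

    # The loop body from the hunk works unchanged on the plain view:
    current_nodes = set()
    for name, sizes in names.items():
        current_nodes.add((name, b' '.join(b'%d' % size for size in sizes)))
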
@@ -1550,21 +1544,21 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
-            operation.add_cleanup(setattr, self, '_new_pack', None)
-            # If we aborted while in the middle of finishing the write
-            # group, _remove_pack_indices could fail because the indexes are
-            # already gone.  But they're not there we shouldn't fail in this
-            # case, so we pass ignore_missing=True.
-            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
-                                  ignore_missing=True)
-            operation.run_simple()
+            with contextlib.ExitStack() as stack:
+                stack.callback(setattr, self, '_new_pack', None)
+                # If we aborted while in the middle of finishing the write
+                # group, _remove_pack_indices could fail because the indexes are
+                # already gone.  But they're not there we shouldn't fail in this
+                # case, so we pass ignore_missing=True.
+                stack.callback(self._remove_pack_indices, self._new_pack,
+                               ignore_missing=True)
+                self._new_pack.abort()
         for resumed_pack in self._resumed_packs:
-            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
-            # See comment in previous finally block.
-            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
-                                  ignore_missing=True)
-            operation.run_simple()
+            with contextlib.ExitStack() as stack:
+                # See comment in previous finally block.
+                stack.callback(self._remove_pack_indices, resumed_pack,
+                               ignore_missing=True)
+                resumed_pack.abort()
         del self._resumed_packs[:]
 
     def _remove_resumed_pack_indices(self):
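
This hunk replaces breezy's cleanup.OperationWithCleanups helper with the standard library's contextlib.ExitStack. Callbacks registered via stack.callback() run in last-in-first-out order when the with block exits, whether the body returned normally or raised, which is the same guarantee the removed helper gave around abort(). A minimal standalone sketch (hypothetical names, not from this branch):

    import contextlib

    def abort():
        # Stand-in for self._new_pack.abort(); fails part-way through.
        raise RuntimeError('abort failed')

    try:
        with contextlib.ExitStack() as stack:
            stack.callback(print, 'cleanup one')  # registered first, runs last
            stack.callback(print, 'cleanup two')  # registered second, runs first
            abort()  # raises, yet both callbacks still run
    except RuntimeError:
        print('abort error propagated after the cleanups ran')
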
@@ -1819,11 +1813,10 @@
 
     def reconcile(self, other=None, thorough=False):
         """Reconcile this repository."""
-        from breezy.reconcile import PackReconciler
+        from .reconcile import PackReconciler
         with self.lock_write():
             reconciler = PackReconciler(self, thorough=thorough)
-            reconciler.reconcile()
-            return reconciler
+            return reconciler.reconcile()
 
     def _reconcile_pack(self, collection, packs, extension, revs, pb):
         raise NotImplementedError(self._reconcile_pack)
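
Note the small behaviour change in this hunk: reconcile() now returns the result of PackReconciler.reconcile() rather than the reconciler object itself, so callers read the outcome directly. A hypothetical call site (the repo name is assumed, not from this branch):

    # Before this change the return value was the PackReconciler instance;
    # after it, the return value is whatever PackReconciler.reconcile() produces.
    result = repo.reconcile(thorough=True)
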
@@ -1965,6 +1958,23 @@
         self._reload_func = reload_func
         self._flush_func = flush_func
 
+    def add_raw_record(self, key, size, raw_data):
+        """Add raw knit bytes to a storage area.
+
+        The data is spooled to the container writer in one bytes-record per
+        raw data item.
+
+        :param key: key of the data segment
+        :param size: length of the data segment
+        :param raw_data: A bytestring containing the data.
+        :return: An opaque index memo For _DirectPackAccess the memo is
+            (index, pos, length), where the index field is the write_index
+            object supplied to the PackAccess object.
+        """
+        p_offset, p_length = self._container_writer.add_bytes_record(
+            raw_data, size, [])
+        return (self._write_index, p_offset, p_length)
+
     def add_raw_records(self, key_sizes, raw_data):
         """Add raw knit bytes to a storage area.
 
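
The new add_raw_record method factors the single-record write out of add_raw_records; the next hunk rewires add_raw_records to call it with a one-element list of byte chunks (the docstring still says a bytestring, which appears to be carried over). A hypothetical sketch of the call shape, mirroring that caller (the access object is assumed, not from this branch):

    payload = b'raw knit bytes'
    # `access` stands in for the pack-access object this method is defined on.
    memo = access.add_raw_record(b'key', len(payload), [payload])
    write_index, pos, length = memo  # the opaque (index, pos, length) memo
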
@@ -1979,16 +1989,16 @@
             length), where the index field is the write_index object supplied
             to the PackAccess object.
         """
+        raw_data = b''.join(raw_data)
         if not isinstance(raw_data, bytes):
             raise AssertionError(
                 'data must be plain bytes was %s' % type(raw_data))
         result = []
         offset = 0
         for key, size in key_sizes:
-            p_offset, p_length = self._container_writer.add_bytes_record(
-                raw_data[offset:offset + size], [])
+            result.append(
+                self.add_raw_record(key, size, [raw_data[offset:offset + size]]))
             offset += size
-            result.append((self._write_index, p_offset, p_length))
         return result
 
     def flush(self):
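
After this hunk add_raw_records joins raw_data up front, so callers may pass an iterable of byte chunks, and each record is then sliced out and handed to add_raw_record above. A minimal standalone sketch of the slicing logic (hypothetical data, not from this branch):

    raw_data = b''.join([b'abc', b'defgh'])  # chunks are joined first
    key_sizes = [(b'k1', 3), (b'k2', 5)]
    records = []
    offset = 0
    for key, size in key_sizes:
        records.append((key, raw_data[offset:offset + size]))
        offset += size
    assert records == [(b'k1', b'abc'), (b'k2', b'defgh')]
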
@@ -2083,4 +2093,4 @@
                 is_error = True
         if is_error:
             # GZ 2017-03-27: No real reason this needs the original traceback.
-            reraise(*retry_exc.exc_info)
+            raise retry_exc.exc_info[1]
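
The last hunk removes sixish.reraise, which existed because Python 2 needed special syntax to re-raise a stored (type, value, traceback) triple. On Python 3 the exception object alone can be raised again, and the comment above the change already notes the original traceback is not needed here. A standalone sketch (not from this branch):

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_info = sys.exc_info()  # (type, value, traceback), as stored on retry_exc

    try:
        raise exc_info[1]  # Python 3: re-raise the stored exception object
    except ZeroDivisionError as caught:
        assert caught is exc_info[1]

    # If the original traceback mattered, it could be chained back on:
    #     raise exc_info[1].with_traceback(exc_info[2])
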