/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/groupcompress.py

  • Committer: Michael Hudson
  • Date: 2009-10-23 04:22:05 UTC
  • mto: This revision was merged to the branch mainline in revision 4778.
  • Revision ID: michael.hudson@canonical.com-20091023042205-aryfoiqnzaik91r6
handle different escaping of vfs paths

=== modified file 'bzrlib/groupcompress.py'
@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009 Canonical Ltd
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -31,6 +31,7 @@
     knit,
     osutils,
     pack,
+    static_tuple,
     trace,
     )
 from bzrlib.btree_index import BTreeBuilder
@@ -1269,6 +1270,7 @@
         """See VersionedFiles.clear_cache()"""
         self._group_cache.clear()
         self._index._graph_index.clear_cache()
+        self._index._int_cache.clear()

     def _check_add(self, key, lines, random_id, check_content):
         """check that version_id and lines are safe to add."""
@@ -1629,6 +1631,7 @@
         keys_to_add = []
         def flush():
             bytes = self._compressor.flush().to_bytes()
+            self._compressor = GroupCompressor()
             index, start, length = self._access.add_raw_records(
                 [(None, len(bytes))], bytes)[0]
             nodes = []
@@ -1637,7 +1640,6 @@
             self._index.add_records(nodes, random_id=random_id)
             self._unadded_refs = {}
             del keys_to_add[:]
-            self._compressor = GroupCompressor()

         last_prefix = None
         max_fulltext_len = 0
@@ -1745,8 +1747,13 @@
                 key = record.key
             self._unadded_refs[key] = record.parents
             yield found_sha1
-            keys_to_add.append((key, '%d %d' % (start_point, end_point),
-                (record.parents,)))
+            as_st = static_tuple.StaticTuple.from_sequence
+            if record.parents is not None:
+                parents = as_st([as_st(p) for p in record.parents])
+            else:
+                parents = None
+            refs = static_tuple.StaticTuple(parents)
+            keys_to_add.append((key, '%d %d' % (start_point, end_point), refs))
         if len(keys_to_add):
             flush()
         self._compressor = None
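
The hunk above swaps the plain (record.parents,) refs for deep-converted StaticTuples before queuing index entries. A minimal sketch of the conversion, assuming bzrlib is importable; the sample parent keys are made up for illustration:

    from bzrlib import static_tuple

    as_st = static_tuple.StaticTuple.from_sequence

    # Hypothetical parent keys, for illustration only.
    record_parents = (('file-id', 'rev-1'), ('file-id', 'rev-2'))

    # Deep-convert: the outer sequence and every parent key become
    # StaticTuples.  StaticTuple is an immutable, tuple-like type that
    # is not tracked by the garbage collector and supports interning
    # via .intern(), so equal keys can share a single object.
    parents = as_st([as_st(p) for p in record_parents])
    refs = static_tuple.StaticTuple(parents)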
@@ -1832,6 +1839,9 @@
         self.has_graph = parents
         self._is_locked = is_locked
         self._inconsistency_fatal = inconsistency_fatal
+        # GroupCompress records tend to have the same 'group' start + offset
+        # repeated over and over, this creates a surplus of ints
+        self._int_cache = {}
         if track_external_parent_refs:
             self._key_dependencies = knit._KeyRefs(
                 track_new_keys=track_new_keys)
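
The new _int_cache is a plain dict used for interning: dict.setdefault(value, value) stores the first int object seen for a given value and hands back that same object for every later duplicate. A standalone sketch of the pattern (not bzrlib code):

    _int_cache = {}

    def intern_int(value):
        # Return one canonical int object per distinct value; later
        # duplicate objects are dropped in favour of the cached one.
        return _int_cache.setdefault(value, value)

    a = intern_int(4096000)
    b = intern_int(int('4096000'))  # a distinct object with the same value
    assert a == b
    assert a is b  # both names now point at one shared int object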
@@ -1873,8 +1883,11 @@
         if not random_id:
             present_nodes = self._get_entries(keys)
             for (index, key, value, node_refs) in present_nodes:
-                if node_refs != keys[key][1]:
-                    details = '%s %s %s' % (key, (value, node_refs), keys[key])
+                # Sometimes these are passed as a list rather than a tuple
+                node_refs = static_tuple.as_tuples(node_refs)
+                passed = static_tuple.as_tuples(keys[key])
+                if node_refs != passed[1]:
+                    details = '%s %s %s' % (key, (value, node_refs), passed)
                     if self._inconsistency_fatal:
                         raise errors.KnitCorrupt(self, "inconsistent details"
                                                  " in add_records: %s" %
@@ -2013,11 +2026,24 @@
         """Convert an index value to position details."""
         bits = node[2].split(' ')
         # It would be nice not to read the entire gzip.
+        # start and stop are put into _int_cache because they are very common.
+        # They define the 'group' that an entry is in, and many groups can have
+        # thousands of objects.
+        # Branching Launchpad, for example, saves ~600k integers, at 12 bytes
+        # each, or about 7MB. Note that it might be even more when you consider
+        # how PyInt is allocated in separate slabs. And you can't return a slab
+        # to the OS if even 1 int on it is in use. Note though that Python uses
+        # a LIFO when re-using PyInt slots, which probably causes more
+        # fragmentation.
         start = int(bits[0])
+        start = self._int_cache.setdefault(start, start)
         stop = int(bits[1])
+        stop = self._int_cache.setdefault(stop, stop)
         basis_end = int(bits[2])
         delta_end = int(bits[3])
-        return node[0], start, stop, basis_end, delta_end
+        # We can't use StaticTuple here, because node[0] is a BTreeGraphIndex
+        # instance...
+        return (node[0], start, stop, basis_end, delta_end)

     def scan_unvalidated_index(self, graph_index):
         """Inform this _GCGraphIndex that there is an unvalidated index.