# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Persistent maps from tuple_of_strings->string using CHK stores.

Overview and current status:

The CHKMap class implements a dict from tuple_of_strings->string by using a trie
with internal nodes of 8-bit fan out. The key tuples are mapped to strings by
joining them by \x00, and \x00 padding shorter keys out to the length of the
longest key. Leaf nodes are packed as densely as possible, and internal nodes
are all an additional 8 bits wide, leading to a sparse upper tree.

Updates to a CHKMap are done preferentially via the apply_delta method, to
allow optimisation of the update operation; but individual map/unmap calls are
possible and supported. Individual changes via map/unmap are buffered in memory
until the _save method is called to force serialisation of the tree.
apply_delta records its changes immediately by performing an implicit _save.

TODO:
-----

Densely packed upper nodes.

"""

import heapq

from bzrlib import lazy_import
lazy_import.lazy_import(globals(), """
from bzrlib import (
    errors,
    versionedfile,
    )
""")
from bzrlib import (
    lru_cache,
    osutils,
    registry,
    trace,
    )

# approx 4MB
# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
# We are caching bytes so len(value) is perfectly accurate
_page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)

# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20
# If we delete more than this many nodes applying a delta, we check for a remap
_INTERESTING_DELETES_LIMIT = 5


def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)
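# For example (illustrative, not in the original source):
#   _search_key_plain(('foo', 'bar')) == 'foo\x00bar'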


search_key_registry = registry.Registry()
search_key_registry.register('plain', _search_key_plain)


class CHKMap(object):
    """A persistent map from string to string backed by a CHK store."""

    def __init__(self, store, root_key, search_key_func=None):
        """Create a CHKMap object.

        :param store: The store the CHKMap is stored in.
        :param root_key: The root key of the map. None to create an empty
            CHKMap.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        """
        self._store = store
        if search_key_func is None:
            search_key_func = _search_key_plain
        self._search_key_func = search_key_func
        if root_key is None:
            self._root_node = LeafNode(search_key_func=search_key_func)
        else:
            self._root_node = self._node_key(root_key)

    def apply_delta(self, delta):
        """Apply a delta to the map.

        :param delta: An iterable of old_key, new_key, new_value tuples.
            If new_key is not None, then new_key->new_value is inserted
            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        delete_count = 0
        # Check preconditions first.
        new_items = set([key for (old, key, value) in delta if key is not None
            and old is None])
        existing_new = list(self.iteritems(key_filter=new_items))
        if existing_new:
            raise errors.InconsistentDeltaDelta(delta,
                "New items are already in the map %r." % existing_new)
        # Now apply changes.
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                delete_count += 1
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if delete_count > _INTERESTING_DELETES_LIMIT:
            trace.mutter("checking remap as %d deletions", delete_count)
            self._check_remap()
        return self._save()

    def _ensure_root(self):
        """Ensure that the root node is an object not a key."""
        if type(self._root_node) is tuple:
            # Demand-load the root
            self._root_node = self._get_node(self._root_node)

    def _get_node(self, node):
        """Get a node.

        Note that this does not update the _items dict in objects containing a
        reference to this node. As such it does not prevent subsequent IO being
        performed.

        :param node: A tuple key or node object.
        :return: A node object.
        """
        if type(node) is tuple:
            bytes = self._read_bytes(node)
            return _deserialise(bytes, node,
                search_key_func=self._search_key_func)
        else:
            return node

    def _read_bytes(self, key):
        try:
            return _page_cache[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _page_cache[key] = bytes
            return bytes

    def _dump_tree(self, include_keys=False):
        """Return the tree in a string representation."""
        self._ensure_root()
        res = self._dump_tree_node(self._root_node, prefix='', indent='',
                                   include_keys=include_keys)
        res.append('') # Give a trailing '\n'
        return '\n'.join(res)

    def _dump_tree_node(self, node, prefix, indent, include_keys=True):
        """For this node and all children, generate a string representation."""
        result = []
        if not include_keys:
            key_str = ''
        else:
            node_key = node.key()
            if node_key is not None:
                key_str = ' %s' % (node_key[0],)
            else:
                key_str = ' None'
        result.append('%s%r %s%s' % (indent, prefix, node.__class__.__name__,
                                     key_str))
        if type(node) is InternalNode:
            # Trigger all child nodes to get loaded
            list(node._iter_nodes(self._store))
            for prefix, sub in sorted(node._items.iteritems()):
                result.extend(self._dump_tree_node(sub, prefix, indent + '  ',
                                                   include_keys=include_keys))
        else:
            for key, value in sorted(node._items.iteritems()):
                # Don't use prefix nor indent here to line up when used in
                # tests in conjunction with assertEqualDiff
                result.append('      %r %r' % (key, value))
        return result

    @classmethod
    def from_dict(klass, store, initial_value, maximum_size=0, key_width=1,
        search_key_func=None):
        """Create a CHKMap in store with initial_value as the content.

        :param store: The store to record initial_value in, a VersionedFiles
            object with 1-tuple keys supporting CHK key generation.
        :param initial_value: A dict to store in store. Its keys and values
            must be bytestrings.
        :param maximum_size: The maximum_size rule to apply to nodes. This
            determines the size at which no new data is added to a single node.
        :param key_width: The number of elements in each key_tuple being stored
            in this map.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        :return: The root chk of the resulting CHKMap.
        """
        root_key = klass._create_directly(store, initial_value,
            maximum_size=maximum_size, key_width=key_width,
            search_key_func=search_key_func)
        return root_key

    @classmethod
    def _create_via_map(klass, store, initial_value, maximum_size=0,
                        key_width=1, search_key_func=None):
        result = klass(store, None, search_key_func=search_key_func)
        result._root_node.set_maximum_size(maximum_size)
        result._root_node._key_width = key_width
        delta = []
        for key, value in initial_value.items():
            delta.append((None, key, value))
        root_key = result.apply_delta(delta)
        return root_key

    @classmethod
    def _create_directly(klass, store, initial_value, maximum_size=0,
                         key_width=1, search_key_func=None):
        node = LeafNode(search_key_func=search_key_func)
        node.set_maximum_size(maximum_size)
        node._key_width = key_width
        node._items = dict(initial_value)
        node._raw_size = sum([node._key_value_len(key, value)
                              for key,value in initial_value.iteritems()])
        node._len = len(node._items)
        node._compute_search_prefix()
        node._compute_serialised_prefix()
        if (node._len > 1
            and maximum_size
            and node._current_size() > maximum_size):
            prefix, node_details = node._split(store)
            if len(node_details) == 1:
                raise AssertionError('Failed to split using node._split')
            node = InternalNode(prefix, search_key_func=search_key_func)
            node.set_maximum_size(maximum_size)
            node._key_width = key_width
            for split, subnode in node_details:
                node.add_node(split, subnode)
        keys = list(node.serialise(store))
        return keys[-1]

    def iter_changes(self, basis):
        """Iterate over the changes between basis and self.

        :return: An iterator of tuples: (key, old_value, new_value). Old_value
            is None for keys only in self; new_value is None for keys only in
            basis.
        """
        # Overview:
        # Read both trees in lexicographic, highest-first order.
        # Any identical nodes we skip
        # Any unique prefixes we output immediately.
        # values in a leaf node are treated as single-value nodes in the tree
        # which allows them to be not-special-cased. We know to output them
        # because their value is a string, not a key(tuple) or node.
        #
        # corner cases to beware of when considering this function:
        # *) common references are at different heights.
        #    consider two trees:
        #    {'a': LeafNode={'aaa':'foo', 'aab':'bar'}, 'b': LeafNode={'b'}}
        #    {'a': InternalNode={'aa':LeafNode={'aaa':'foo', 'aab':'bar'},
        #                        'ab':LeafNode={'ab':'bar'}}
        #     'b': LeafNode={'b'}}
        #    the node with aaa/aab will only be encountered in the second tree
        #    after reading the 'a' subtree, but it is encountered in the first
        #    tree immediately. Variations on this may have read internal nodes
        #    like this.  we want to cut the entire pending subtree when we
        #    realise we have a common node.  For this we use a list of keys -
        #    the path to a node - and check the entire path is clean as we
        #    process each item.
        if self._node_key(self._root_node) == self._node_key(basis._root_node):
            return
        self._ensure_root()
        basis._ensure_root()
        excluded_keys = set()
        self_node = self._root_node
        basis_node = basis._root_node
        # A heap, each element is prefix, node(tuple/NodeObject/string),
        # key_path (a list of tuples, tail-sharing down the tree.)
        self_pending = []
        basis_pending = []
        def process_node(node, path, a_map, pending):
            # take a node and expand it
            node = a_map._get_node(node)
            if type(node) == LeafNode:
                path = (node._key, path)
                for key, value in node._items.items():
                    # For a LeafNode, the key is a serialized_key, rather than
                    # a search_key, but the heap is using search_keys
                    search_key = node._search_key_func(key)
                    heapq.heappush(pending, (search_key, key, value, path))
            else:
                # type(node) == InternalNode
                path = (node._key, path)
                for prefix, child in node._items.items():
                    heapq.heappush(pending, (prefix, None, child, path))
        def process_common_internal_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for prefix, child in self_items - basis_items:
                heapq.heappush(self_pending, (prefix, None, child, path))
            path = (basis_node._key, None)
            for prefix, child in basis_items - self_items:
                heapq.heappush(basis_pending, (prefix, None, child, path))
        def process_common_leaf_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for key, value in self_items - basis_items:
                prefix = self._search_key_func(key)
                heapq.heappush(self_pending, (prefix, key, value, path))
            path = (basis_node._key, None)
            for key, value in basis_items - self_items:
                prefix = basis._search_key_func(key)
                heapq.heappush(basis_pending, (prefix, key, value, path))
        def process_common_prefix_nodes(self_node, self_path,
                                        basis_node, basis_path):
            # Would it be more efficient if we could request both at the same
            # time?
            self_node = self._get_node(self_node)
            basis_node = basis._get_node(basis_node)
            if (type(self_node) == InternalNode
                and type(basis_node) == InternalNode):
                # Matching internal nodes
                process_common_internal_nodes(self_node, basis_node)
            elif (type(self_node) == LeafNode
                  and type(basis_node) == LeafNode):
                process_common_leaf_nodes(self_node, basis_node)
            else:
                process_node(self_node, self_path, self, self_pending)
                process_node(basis_node, basis_path, basis, basis_pending)
        process_common_prefix_nodes(self_node, None, basis_node, None)
        self_seen = set()
        basis_seen = set()
        excluded_keys = set()
        def check_excluded(key_path):
            # Note that this is N^2, it depends on us trimming trees
            # aggressively to not become slow.
            # A better implementation would probably have a reverse map
            # back to the children of a node, and jump straight to it when
            # a common node is detected, then proceed to remove the already
            # pending children. bzrlib.graph has a searcher module with a
            # similar problem.
            while key_path is not None:
                key, key_path = key_path
                if key in excluded_keys:
                    return True
            return False

        loop_counter = 0
        while self_pending or basis_pending:
            loop_counter += 1
            if not self_pending:
                # self is exhausted: output remainder of basis
                for prefix, key, node, path in basis_pending:
                    if check_excluded(path):
                        continue
                    node = basis._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(basis._store):
                            yield (key, value, None)
                return
            elif not basis_pending:
                # basis is exhausted: output remainder of self.
                for prefix, key, node, path in self_pending:
                    if check_excluded(path):
                        continue
                    node = self._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(self._store):
                            yield (key, None, value)
                return
            else:
                # XXX: future optimisation - yield the smaller items
                # immediately rather than pushing everything on/off the
                # heaps. Applies to both internal nodes and leafnodes.
                if self_pending[0][0] < basis_pending[0][0]:
                    # expand self
                    prefix, key, node, path = heapq.heappop(self_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        process_node(node, path, self, self_pending)
                        continue
                elif self_pending[0][0] > basis_pending[0][0]:
                    # expand basis
                    prefix, key, node, path = heapq.heappop(basis_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        process_node(node, path, basis, basis_pending)
                        continue
                else:
                    # common prefix: possibly expand both
                    if self_pending[0][1] is None:
                        # process next self
                        read_self = True
                    else:
                        read_self = False
                    if basis_pending[0][1] is None:
                        # process next basis
                        read_basis = True
                    else:
                        read_basis = False
                    if not read_self and not read_basis:
                        # compare a common value
                        self_details = heapq.heappop(self_pending)
                        basis_details = heapq.heappop(basis_pending)
                        if self_details[2] != basis_details[2]:
                            yield (self_details[1],
                                basis_details[2], self_details[2])
                        continue
                    # At least one side wasn't a simple value
                    if (self._node_key(self_pending[0][2]) ==
                        self._node_key(basis_pending[0][2])):
                        # Identical pointers, skip (and don't bother adding to
                        # excluded, it won't turn up again.)
                        heapq.heappop(self_pending)
                        heapq.heappop(basis_pending)
                        continue
                    # Now we need to expand this node before we can continue
                    if read_self and read_basis:
                        # Both sides start with the same prefix, so process
                        # them in parallel
                        self_prefix, _, self_node, self_path = heapq.heappop(
                            self_pending)
                        basis_prefix, _, basis_node, basis_path = heapq.heappop(
                            basis_pending)
                        if self_prefix != basis_prefix:
                            raise AssertionError(
                                '%r != %r' % (self_prefix, basis_prefix))
                        process_common_prefix_nodes(
                            self_node, self_path,
                            basis_node, basis_path)
                        continue
                    if read_self:
                        prefix, key, node, path = heapq.heappop(self_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, self, self_pending)
                    if read_basis:
                        prefix, key, node, path = heapq.heappop(basis_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, basis, basis_pending)
        # print loop_counter

    def iteritems(self, key_filter=None):
        """Iterate over the entire CHKMap's contents."""
        self._ensure_root()
        return self._root_node.iteritems(self._store, key_filter=key_filter)

    def key(self):
        """Return the key for this map."""
        if type(self._root_node) is tuple:
            return self._root_node
        else:
            return self._root_node._key

    def __len__(self):
        self._ensure_root()
        return len(self._root_node)

    def map(self, key, value):
        """Map a key tuple to value.

        :param key: A key to map.
        :param value: The value to assign to key.
        """
        # Need a root object.
        self._ensure_root()
        prefix, node_details = self._root_node.map(self._store, key, value)
        if len(node_details) == 1:
            self._root_node = node_details[0][1]
        else:
            self._root_node = InternalNode(prefix,
                                search_key_func=self._search_key_func)
            self._root_node.set_maximum_size(node_details[0][1].maximum_size)
            self._root_node._key_width = node_details[0][1]._key_width
            for split, node in node_details:
                self._root_node.add_node(split, node)

    def _node_key(self, node):
        """Get the key for a node whether it's a tuple or node."""
        if type(node) is tuple:
            return node
        else:
            return node._key

    def unmap(self, key, check_remap=True):
        """Remove key from the map."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            unmapped = self._root_node.unmap(self._store, key,
                check_remap=check_remap)
        else:
            unmapped = self._root_node.unmap(self._store, key)
        self._root_node = unmapped

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        :return: The key of the root node.
        """
        if type(self._root_node) is tuple:
            # Already saved.
            return self._root_node
        keys = list(self._root_node.serialise(self._store))
        return keys[-1]


class Node(object):
    """Base class defining the protocol for CHK Map nodes.

    :ivar _raw_size: The total size of the serialized key:value data, before
        adding the header bytes, and without prefix compression.
    """

    def __init__(self, key_width=1):
        """Create a node.

        :param key_width: The width of keys for this node.
        """
        self._key = None
        # Current number of elements
        self._len = 0
        self._maximum_size = 0
        self._key_width = key_width
        # current size in bytes
        self._raw_size = 0
        # The pointers/values this node has - meaning defined by child classes.
        self._items = {}
        # The common search prefix
        self._search_prefix = None

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return '%s(key:%s len:%s size:%s max:%s prefix:%s items:%s)' % (
            self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, items_str)

    def key(self):
        return self._key

    def __len__(self):
        return self._len

    @property
    def maximum_size(self):
        """What is the upper limit for adding references to a node."""
        return self._maximum_size

    def set_maximum_size(self, new_size):
        """Set the size threshold for nodes.

        :param new_size: The size at which no data is added to a node. 0 for
            unlimited.
        """
        self._maximum_size = new_size

    @classmethod
    def common_prefix(cls, prefix, key):
        """Given 2 strings, return the longest prefix common to both.

        :param prefix: This has been the common prefix for other keys, so it is
            more likely to be the common prefix in this case as well.
        :param key: Another string to compare to
        """
        if key.startswith(prefix):
            return prefix
        pos = -1
        # Is there a better way to do this?
        for pos, (left, right) in enumerate(zip(prefix, key)):
            if left != right:
                pos -= 1
                break
        common = prefix[:pos+1]
        return common

    @classmethod
    def common_prefix_for_keys(cls, keys):
        """Given a list of keys, find their common prefix.

        :param keys: An iterable of strings.
        :return: The longest common prefix of all keys.
        """
        common_prefix = None
        for key in keys:
            if common_prefix is None:
                common_prefix = key
                continue
            common_prefix = cls.common_prefix(common_prefix, key)
            if not common_prefix:
                # if common_prefix is the empty string, then we know it won't
                # change further
                return ''
        return common_prefix


# Singleton indicating we have not computed _search_prefix yet
_unknown = object()


class LeafNode(Node):
    """A node containing actual key:value pairs.

    :ivar _items: A dict of key->value items. The key is in tuple form.
    :ivar _size: The number of bytes that would be used by serializing all of
        the key/value pairs.
    """

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        self._serialise_key = '\x00'.join
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return \
            '%s(key:%s len:%s size:%s max:%s prefix:%s keywidth:%s items:%s)' \
            % (self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, self._key_width, items_str)

    def _current_size(self):
        """Answer the current serialised size of this node.

        This differs from self._raw_size in that it includes the bytes used for
        the header.
        """
        if self._common_serialised_prefix is None:
            bytes_for_items = 0
            prefix_len = 0
        else:
            # We will store a single string with the common prefix
            # And then that common prefix will not be stored in any of the
            # entry lines
            prefix_len = len(self._common_serialised_prefix)
            bytes_for_items = (self._raw_size - (prefix_len * self._len))
        return (9 # 'chkleaf:\n'
            + len(str(self._maximum_size)) + 1
            + len(str(self._key_width)) + 1
            + len(str(self._len)) + 1
            + prefix_len + 1
            + bytes_for_items)

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes, with key key, into a LeafNode.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        """
        return _deserialise_leaf_node(bytes, key,
                                      search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        """Iterate over items in the node.

        :param key_filter: A filter to apply to the node. It should be a
            list/set/dict or similar repeatedly iterable container.
        """
        if key_filter is not None:
            # Adjust the filter - short elements go to a prefix filter. All
            # other items are looked up directly.
            # XXX: perhaps defaultdict? Profiling<rinse and repeat>
            filters = {}
            for key in key_filter:
                if len(key) == self._key_width:
                    # This filter is meant to match exactly one key, yield it
                    # if we have it.
                    try:
                        yield key, self._items[key]
                    except KeyError:
                        # This key is not present in this map, continue
                        pass
                else:
                    # Short items, we need to match based on a prefix
                    length_filter = filters.setdefault(len(key), set())
                    length_filter.add(key)
            if filters:
                filters = filters.items()
                for item in self._items.iteritems():
                    for length, length_filter in filters:
                        if item[0][:length] in length_filter:
                            yield item
                            break
        else:
            for item in self._items.iteritems():
                yield item

    def _key_value_len(self, key, value):
        # TODO: Should probably be done without actually joining the key, but
        #       then that can be done via the C extension
        return (len(self._serialise_key(key)) + 1
                + len(str(value.count('\n'))) + 1
                + len(value) + 1)

    def _search_key(self, key):
        return self._search_key_func(key)

    def _map_no_split(self, key, value):
        """Map a key to a value.

        This assumes either the key does not already exist, or you have already
        removed its size and length from self.

        :return: True if adding this node should cause us to split.
        """
        self._items[key] = value
        self._raw_size += self._key_value_len(key, value)
        self._len += 1
        serialised_key = self._serialise_key(key)
        if self._common_serialised_prefix is None:
            self._common_serialised_prefix = serialised_key
        else:
            self._common_serialised_prefix = self.common_prefix(
                self._common_serialised_prefix, serialised_key)
        search_key = self._search_key(key)
        if self._search_prefix is _unknown:
            self._compute_search_prefix()
        if self._search_prefix is None:
            self._search_prefix = search_key
        else:
            self._search_prefix = self.common_prefix(
                self._search_prefix, search_key)
        if (self._len > 1
            and self._maximum_size
            and self._current_size() > self._maximum_size):
            # Check to see if all of the search_keys for this node are
            # identical. We allow the node to grow under that circumstance
            # (we could track this as common state, but it is infrequent)
            if (search_key != self._search_prefix
                or not self._are_search_keys_identical()):
                return True
        return False

    def _split(self, store):
        """We have overflowed.

        Split this node into multiple LeafNodes, return it up the stack so that
        the next layer creates a new InternalNode and references the new nodes.

        :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
        """
        if self._search_prefix is _unknown:
            raise AssertionError('Search prefix must be known')
        common_prefix = self._search_prefix
        split_at = len(common_prefix) + 1
        result = {}
        for key, value in self._items.iteritems():
            search_key = self._search_key(key)
            prefix = search_key[:split_at]
            # TODO: Generally only 1 key can be exactly the right length,
            #       which means we can only have 1 key in the node pointed
            #       at by the 'prefix\0' key. We might want to consider
            #       folding it into the containing InternalNode rather than
            #       having a fixed length-1 node.
            #       Note this is probably not true for hash keys, as they
            #       may get a '\00' node anywhere, but won't have keys of
            #       different lengths.
            if len(prefix) < split_at:
                prefix += '\x00'*(split_at - len(prefix))
            if prefix not in result:
                node = LeafNode(search_key_func=self._search_key_func)
                node.set_maximum_size(self._maximum_size)
                node._key_width = self._key_width
                result[prefix] = node
            else:
                node = result[prefix]
            sub_prefix, node_details = node.map(store, key, value)
            if len(node_details) > 1:
                if prefix != sub_prefix:
                    # This node has been split and is now found via a different
                    # path
                    result.pop(prefix)
                new_node = InternalNode(sub_prefix,
                    search_key_func=self._search_key_func)
                new_node.set_maximum_size(self._maximum_size)
                new_node._key_width = self._key_width
                for split, node in node_details:
                    new_node.add_node(split, node)
                result[prefix] = new_node
        return common_prefix, result.items()

    def map(self, store, key, value):
        """Map key to value."""
        if key in self._items:
            self._raw_size -= self._key_value_len(key, self._items[key])
            self._len -= 1
        self._key = None
        if self._map_no_split(key, value):
            return self._split(store)
        else:
            if self._search_prefix is _unknown:
                raise AssertionError('%r must be known' % self._search_prefix)
            return self._search_prefix, [("", self)]

    def serialise(self, store):
        """Serialise the LeafNode to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        lines = ["chkleaf:\n"]
        lines.append("%d\n" % self._maximum_size)
        lines.append("%d\n" % self._key_width)
        lines.append("%d\n" % self._len)
        if self._common_serialised_prefix is None:
            lines.append('\n')
            if len(self._items) != 0:
                raise AssertionError('If _common_serialised_prefix is None'
                    ' we should have no items')
        else:
            lines.append('%s\n' % (self._common_serialised_prefix,))
            prefix_len = len(self._common_serialised_prefix)
        for key, value in sorted(self._items.items()):
            # Always add a final newline
            value_lines = osutils.chunks_to_lines([value + '\n'])
            serialized = "%s\x00%s\n" % (self._serialise_key(key),
                                         len(value_lines))
            if not serialized.startswith(self._common_serialised_prefix):
                raise AssertionError('We thought the common prefix was %r'
                    ' but entry %r does not have it in common'
                    % (self._common_serialised_prefix, serialized))
            lines.append(serialized[prefix_len:])
            lines.extend(value_lines)
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = ("sha1:" + sha1,)
        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _page_cache.add(self._key, bytes)
        return [self._key]

    def refs(self):
        """Return the references to other CHK's held by this node."""
        return []

    def _compute_search_prefix(self):
        """Determine the common search prefix for all keys in this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        search_keys = [self._search_key_func(key) for key in self._items]
        self._search_prefix = self.common_prefix_for_keys(search_keys)
        return self._search_prefix

    def _are_search_keys_identical(self):
        """Check to see if the search keys for all entries are the same.

        When using a hash as the search_key it is possible for non-identical
        keys to collide. If that happens enough, we may try to overflow a
        LeafNode, but as all are collisions, we must not split.
        """
        common_search_key = None
        for key in self._items:
            search_key = self._search_key(key)
            if common_search_key is None:
                common_search_key = search_key
            elif search_key != common_search_key:
                return False
        return True

    def _compute_serialised_prefix(self):
        """Determine the common prefix for serialised keys in this node.

        :return: A bytestring of the longest serialised key prefix that is
            unique within this node.
        """
        serialised_keys = [self._serialise_key(key) for key in self._items]
        self._common_serialised_prefix = self.common_prefix_for_keys(
            serialised_keys)
        return self._common_serialised_prefix

    def unmap(self, store, key):
        """Unmap key from the node."""
        try:
            self._raw_size -= self._key_value_len(key, self._items[key])
        except KeyError:
            trace.mutter("key %s not found in %r", key, self._items)
            raise
        self._len -= 1
        del self._items[key]
        self._key = None
        # Recompute from scratch
        self._compute_search_prefix()
        self._compute_serialised_prefix()
        return self


class InternalNode(Node):
    """A node that contains references to other nodes.

    An InternalNode is responsible for mapping search key prefixes to child
    nodes.

    :ivar _items: serialised_key => node dictionary. node may be a tuple,
        LeafNode or InternalNode.
    """

    def __init__(self, prefix='', search_key_func=None):
        Node.__init__(self)
        # The size of an internalnode with default values and no children.
        # How many octets key prefixes within this node are.
        self._node_width = 0
        self._search_prefix = prefix
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def add_node(self, prefix, node):
        """Add a child node with prefix prefix, and node node.

        :param prefix: The search key prefix for node.
        :param node: The node being added.
        """
        if self._search_prefix is None:
            raise AssertionError("_search_prefix should not be None")
        if not prefix.startswith(self._search_prefix):
            raise AssertionError("prefixes mismatch: %s must start with %s"
                % (prefix,self._search_prefix))
        if len(prefix) != len(self._search_prefix) + 1:
            raise AssertionError("prefix wrong length: len(%s) is not %d" %
                (prefix, len(self._search_prefix) + 1))
        self._len += len(node)
        if not len(self._items):
            self._node_width = len(prefix)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        self._items[prefix] = node
        self._key = None

    def _current_size(self):
        """Answer the current serialised size of this node."""
        return (self._raw_size + len(str(self._len)) + len(str(self._key_width)) +
            len(str(self._maximum_size)))

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes to an InternalNode, with key key.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        :return: An InternalNode instance.
        """
        return _deserialise_internal_node(bytes, key,
                                          search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        for node, node_filter in self._iter_nodes(store, key_filter=key_filter):
            for item in node.iteritems(store, key_filter=node_filter):
                yield item

    def _iter_nodes(self, store, key_filter=None, batch_size=None):
        """Iterate over node objects which match key_filter.

        :param store: A store to use for accessing content.
        :param key_filter: A key filter to filter nodes. Only nodes that might
            contain a key in key_filter will be returned.
        :param batch_size: If not None, then we will return the nodes that had
            to be read using get_record_stream in batches, rather than reading
            them all at once.
        :return: An iterable of nodes. This function does not have to be fully
            consumed.  (There will be no pending I/O when items are being returned.)
        """
        # Map from chk key ('sha1:...',) to (prefix, key_filter)
        # prefix is the key in self._items to use, key_filter is the key_filter
        # entries that would match this node
        keys = {}
        shortcut = False
        if key_filter is None:
            # yielding all nodes, yield whatever we have, and queue up a read
            # for whatever we are missing
            shortcut = True
            for prefix, node in self._items.iteritems():
                if node.__class__ is tuple:
                    keys[node] = (prefix, None)
                else:
                    yield node, None
        elif len(key_filter) == 1:
            # Technically, this path could also be handled by the first check
            # in 'self._node_width' in length_filters. However, we can handle
            # this case without spending any time building up the
            # prefix_to_keys, etc state.

            # This is a bit ugly, but TIMEIT showed it to be by far the fastest
            # 0.626us   list(key_filter)[0]
            #       is a func() for list(), 2 mallocs, and a getitem
            # 0.489us   [k for k in key_filter][0]
            #       still has the mallocs, avoids the func() call
            # 0.350us   iter(key_filter).next()
            #       has a func() call, and mallocs an iterator
            # 0.125us   for key in key_filter: pass
            #       no func() overhead, might malloc an iterator
            # 0.105us   for key in key_filter: break
            #       no func() overhead, might malloc an iterator, probably
            #       avoids checking an 'else' clause as part of the for
            for key in key_filter:
                break
            search_prefix = self._search_prefix_filter(key)
            if len(search_prefix) == self._node_width:
                # This item will match exactly, so just do a dict lookup, and
                # see what we can return
                shortcut = True
                try:
                    node = self._items[search_prefix]
                except KeyError:
                    # A given key can only match 1 child node, if it isn't
                    # there, then we can just return nothing
                    return
                if node.__class__ is tuple:
                    keys[node] = (search_prefix, [key])
                else:
                    # This is loaded, and the only thing that can match,
                    # return
                    yield node, [key]
                    return
        if not shortcut:
            # First, convert all keys into a list of search prefixes
            # Aggregate common prefixes, and track the keys they come from
            prefix_to_keys = {}
            length_filters = {}
            for key in key_filter:
                search_prefix = self._search_prefix_filter(key)
                length_filter = length_filters.setdefault(
                                    len(search_prefix), set())
                length_filter.add(search_prefix)
                prefix_to_keys.setdefault(search_prefix, []).append(key)

            if (self._node_width in length_filters
                and len(length_filters) == 1):
                # all of the search prefixes match exactly _node_width. This
                # means that everything is an exact match, and we can do a
                # lookup into self._items, rather than iterating over the items
                # dict.
                search_prefixes = length_filters[self._node_width]
                for search_prefix in search_prefixes:
                    try:
                        node = self._items[search_prefix]
                    except KeyError:
                        # We can ignore this one
                        continue
                    node_key_filter = prefix_to_keys[search_prefix]
                    if node.__class__ is tuple:
                        keys[node] = (search_prefix, node_key_filter)
                    else:
                        yield node, node_key_filter
            else:
                # The slow way. We walk every item in self._items, and check to
                # see if there are any matches
                length_filters = length_filters.items()
                for prefix, node in self._items.iteritems():
                    node_key_filter = []
                    for length, length_filter in length_filters:
                        sub_prefix = prefix[:length]
                        if sub_prefix in length_filter:
                            node_key_filter.extend(prefix_to_keys[sub_prefix])
                    if node_key_filter: # this key matched something, yield it
                        if node.__class__ is tuple:
                            keys[node] = (prefix, node_key_filter)
                        else:
                            yield node, node_key_filter
        if keys:
            # Look in the page cache for some more bytes
            found_keys = set()
            for key in keys:
                try:
                    bytes = _page_cache[key]
                except KeyError:
                    continue
                else:
                    node = _deserialise(bytes, key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[key]
                    self._items[prefix] = node
                    found_keys.add(key)
                    yield node, node_key_filter
            for key in found_keys:
                del keys[key]
        if keys:
            # demand load some pages.
            if batch_size is None:
                # Read all the keys in
                batch_size = len(keys)
            key_order = list(keys)
            for batch_start in range(0, len(key_order), batch_size):
                batch = key_order[batch_start:batch_start + batch_size]
                # We have to fully consume the stream so there is no pending
                # I/O, so we buffer the nodes for now.
                stream = store.get_record_stream(batch, 'unordered', True)
                node_and_filters = []
1140
                for record in stream:
1141
                    bytes = record.get_bytes_as('fulltext')
1142
                    node = _deserialise(bytes, record.key,
1143
                        search_key_func=self._search_key_func)
1144
                    prefix, node_key_filter = keys[record.key]
1145
                    node_and_filters.append((node, node_key_filter))
1146
                    self._items[prefix] = node
1147
                    _page_cache.add(record.key, bytes)
1148
                for info in node_and_filters:
1149
                    yield info
1150
1151
    def map(self, store, key, value):
1152
        """Map key to value."""
1153
        if not len(self._items):
3735.2.122 by Ian Clatworthy
don't check_remap on every unmap call in CHKMap.apply_delta()
1154
            raise AssertionError("can't map in an empty InternalNode.")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1155
        search_key = self._search_key(key)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1156
        if self._node_width != len(self._search_prefix) + 1:
1157
            raise AssertionError("node width mismatch: %d is not %d" %
1158
                (self._node_width, len(self._search_prefix) + 1))
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1159
        if not search_key.startswith(self._search_prefix):
1160
            # This key doesn't fit in this index, so we need to split at the
1161
            # point where it would fit, insert self into that internal node,
1162
            # and then map this key into that node.
1163
            new_prefix = self.common_prefix(self._search_prefix,
1164
                                            search_key)
1165
            new_parent = InternalNode(new_prefix,
1166
                search_key_func=self._search_key_func)
1167
            new_parent.set_maximum_size(self._maximum_size)
1168
            new_parent._key_width = self._key_width
1169
            new_parent.add_node(self._search_prefix[:len(new_prefix)+1],
1170
                                self)
1171
            return new_parent.map(store, key, value)
1172
        children = [node for node, _
1173
                          in self._iter_nodes(store, key_filter=[key])]
1174
        if children:
1175
            child = children[0]
1176
        else:
1177
            # new child needed:
1178
            child = self._new_child(search_key, LeafNode)
1179
        old_len = len(child)
1180
        if type(child) is LeafNode:
1181
            old_size = child._current_size()
1182
        else:
1183
            old_size = None
1184
        prefix, node_details = child.map(store, key, value)
1185
        if len(node_details) == 1:
1186
            # child may have shrunk, or might be a new node
1187
            child = node_details[0][1]
1188
            self._len = self._len - old_len + len(child)
1189
            self._items[search_key] = child
1190
            self._key = None
1191
            new_node = self
1192
            if type(child) is LeafNode:
3735.2.123 by Ian Clatworthy
only check for remap if changes are interesting in size
1193
                if old_size is None:
1194
                    # The old node was an InternalNode which means it has now
1195
                    # collapsed, so we need to check if it will chain to a
1196
                    # collapse at this level.
1197
                    trace.mutter("checking remap as InternalNode -> LeafNode")
1198
                    new_node = self._check_remap(store)
1199
                else:
1200
                    # If the LeafNode has shrunk in size, we may want to run
1201
                    # a remap check. Checking for a remap is expensive though
1202
                    # and the frequency of a successful remap is very low.
1203
                    # Shrinkage by small amounts is common, so we only do the
1204
                    # remap check if the new_size is low or the shrinkage
1205
                    # amount is over a configurable limit.
1206
                    new_size = child._current_size()
1207
                    shrinkage = old_size - new_size
1208
                    if (shrinkage > 0 and new_size < _INTERESTING_NEW_SIZE
1209
                        or shrinkage > _INTERESTING_SHRINKAGE_LIMIT):
1210
                        trace.mutter(
1211
                            "checking remap as size shrunk by %d to be %d",
1212
                            shrinkage, new_size)
1213
                        new_node = self._check_remap(store)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1214
            if new_node._search_prefix is None:
1215
                raise AssertionError("_search_prefix should not be None")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1216
            return new_node._search_prefix, [('', new_node)]
1217
        # child has overflowed - create a new intermediate node.
1218
        # XXX: This is where we might want to try and expand our depth
1219
        # to refer to more bytes of every child (which would give us
1220
        # multiple pointers to child nodes, but fewer intermediate nodes)
1221
        child = self._new_child(search_key, InternalNode)
1222
        child._search_prefix = prefix
1223
        for split, node in node_details:
1224
            child.add_node(split, node)
1225
        self._len = self._len - old_len + len(child)
1226
        self._key = None
1227
        return self._search_prefix, [("", self)]
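    # Note added for clarity (not in the original source): map() returns
    # (search_prefix, [(split_prefix, node), ...]).  A single entry means the
    # update was absorbed (possibly collapsing the child); multiple entries
    # mean the child split, and the caller must add each returned node under
    # its split_prefix, exactly as the overflow branch above does via
    # add_node().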
1228
1229
    def _new_child(self, search_key, klass):
1230
        """Create a new child node of type klass."""
1231
        child = klass()
1232
        child.set_maximum_size(self._maximum_size)
1233
        child._key_width = self._key_width
1234
        child._search_key_func = self._search_key_func
1235
        self._items[search_key] = child
1236
        return child
1237
1238
    def serialise(self, store):
1239
        """Serialise the node to store.
1240
1241
        :param store: A VersionedFiles honouring the CHK extensions.
1242
        :return: An iterable of the keys inserted by this operation.
1243
        """
1244
        for node in self._items.itervalues():
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1245
            if type(node) is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1246
                # Never deserialised.
1247
                continue
1248
            if node._key is not None:
1249
                # Never altered
1250
                continue
1251
            for key in node.serialise(store):
1252
                yield key
1253
        lines = ["chknode:\n"]
1254
        lines.append("%d\n" % self._maximum_size)
1255
        lines.append("%d\n" % self._key_width)
1256
        lines.append("%d\n" % self._len)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1257
        if self._search_prefix is None:
1258
            raise AssertionError("_search_prefix should not be None")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1259
        lines.append('%s\n' % (self._search_prefix,))
1260
        prefix_len = len(self._search_prefix)
1261
        for prefix, node in sorted(self._items.items()):
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1262
            if type(node) is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1263
                key = node[0]
1264
            else:
1265
                key = node._key[0]
1266
            serialised = "%s\x00%s\n" % (prefix, key)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1267
            if not serialised.startswith(self._search_prefix):
1268
                raise AssertionError("prefixes mismatch: %s must start with %s"
1269
                    % (serialised, self._search_prefix))
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1270
            lines.append(serialised[prefix_len:])
1271
        sha1, _, _ = store.add_lines((None,), (), lines)
1272
        self._key = ("sha1:" + sha1,)
1273
        _page_cache.add(self._key, ''.join(lines))
1274
        yield self._key
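    # Illustrative example (not part of the original file): the "chknode:"
    # record written above is line based.  For a node with maximum_size 4096,
    # key_width 1, 6 keys below it and search prefix 'a', it starts with
    #   chknode:\n4096\n1\n6\na\n
    # followed by one line per child of the form prefix + '\x00' + child key,
    # with the common search prefix stripped from the front, e.g.
    #   b\x00sha1:1234...\n   for the child stored under prefix 'ab'.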
1275
1276
    def _search_key(self, key):
1277
        """Return the serialised key for key in this node."""
1278
        # search keys are fixed width. All will be self._node_width wide, so we
1279
        # pad as necessary.
1280
        return (self._search_key_func(key) + '\x00'*self._node_width)[:self._node_width]
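    # Illustrative example (not in the original source): with _node_width == 4,
    # a short serialised key such as 'AB' is NUL padded and then truncated to
    # the node width:
    #   ('AB' + '\x00' * 4)[:4] == 'AB\x00\x00'
    # so every search key looked up in this node has the same fixed length.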
1281
1282
    def _search_prefix_filter(self, key):
1283
        """Serialise key for use as a prefix filter in iteritems."""
1284
        return self._search_key_func(key)[:self._node_width]
1285
1286
    def _split(self, offset):
1287
        """Split this node into smaller nodes starting at offset.
1288
1289
        :param offset: The offset to start the new child nodes at.
1290
        :return: An iterable of (prefix, node) tuples. prefix is a byte
1291
            prefix for reaching node.
1292
        """
1293
        if offset >= self._node_width:
1294
            for node in self._items.values():
1295
                for result in node._split(offset):
1296
                    yield result
1297
            return
1298
        for key, node in self._items.items():
1299
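            # Note added for clarity (not in the original source): splitting
            # at an offset narrower than this node's width is not implemented;
            # this loop is a placeholder and currently yields nothing.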
            pass
1300
1301
    def refs(self):
1302
        """Return the references to other CHK's held by this node."""
1303
        if self._key is None:
1304
            raise AssertionError("unserialised nodes have no refs.")
1305
        refs = []
1306
        for value in self._items.itervalues():
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1307
            if type(value) is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1308
                refs.append(value)
1309
            else:
1310
                refs.append(value.key())
1311
        return refs
1312
1313
    def _compute_search_prefix(self, extra_key=None):
1314
        """Return the unique key prefix for this node.
1315
1316
        :return: A bytestring of the longest search key prefix that is
1317
            unique within this node.
1318
        """
1319
        self._search_prefix = self.common_prefix_for_keys(self._items)
1320
        return self._search_prefix
1321
3735.2.122 by Ian Clatworthy
don't check_remap on every unmap call in CHKMap.apply_delta()
1322
    def unmap(self, store, key, check_remap=True):
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1323
        """Remove key from this node and it's children."""
1324
        if not len(self._items):
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1325
            raise AssertionError("can't unmap in an empty InternalNode.")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1326
        children = [node for node, _
1327
                          in self._iter_nodes(store, key_filter=[key])]
1328
        if children:
1329
            child = children[0]
1330
        else:
1331
            raise KeyError(key)
1332
        self._len -= 1
1333
        unmapped = child.unmap(store, key)
1334
        self._key = None
1335
        search_key = self._search_key(key)
1336
        if len(unmapped) == 0:
1337
            # All child nodes are gone, remove the child:
1338
            del self._items[search_key]
1339
            unmapped = None
1340
        else:
1341
            # Stash the returned node
1342
            self._items[search_key] = unmapped
1343
        if len(self._items) == 1:
1344
            # this node is no longer needed:
1345
            return self._items.values()[0]
1346
        if type(unmapped) is InternalNode:
1347
            return self
3735.2.122 by Ian Clatworthy
don't check_remap on every unmap call in CHKMap.apply_delta()
1348
        if check_remap:
1349
            return self._check_remap(store)
1350
        else:
1351
            return self
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1352
1353
    def _check_remap(self, store):
1354
        """Check if all keys contained by children fit in a single LeafNode.
1355
1356
        :param store: A store to use for reading more nodes
1357
        :return: Either self, or a new LeafNode which should replace self.
1358
        """
1359
        # Logic for how we determine when we need to rebuild
1360
        # 1) Implicitly, unmap() is removing a key, which means that the child
1361
        #    nodes are going to shrink to some extent.
1362
        # 2) If all children are LeafNodes, it is possible that they could be
1363
        #    combined into a single LeafNode, which can then completely replace
1364
        #    this internal node with a single LeafNode
1365
        # 3) If *one* child is an InternalNode, we assume it has already done
1366
        #    all the work to determine that its children cannot collapse, and
1367
        #    we can then assume that those nodes *plus* the current nodes don't
1368
        #    have a chance of collapsing either.
1369
        #    So a very cheap check is to just say if 'unmapped' is an
1370
        #    InternalNode, we don't have to check further.
1371
1372
        # TODO: Another alternative is to check the total size of all known
1373
        #       LeafNodes. If there is some formula we can use to determine the
1374
        #       final size without actually having to read in any more
1375
        #       children, it would be nice to have. However, we have to be
1376
        #       careful with stuff like nodes that pull out the common prefix
1377
        #       of each key, as adding a new key can change the common prefix
1378
        #       and cause size changes greater than the length of one key.
1379
        #       So for now, we just add everything to a new Leaf until it
1380
        #       splits, as we know that will give the right answer
1381
        new_leaf = LeafNode(search_key_func=self._search_key_func)
1382
        new_leaf.set_maximum_size(self._maximum_size)
1383
        new_leaf._key_width = self._key_width
1384
        # A batch_size of 16 was chosen because:
1385
        #   a) In testing, a 4k page held about 14 leaf nodes. So if we have more than 16
1386
        #      leaf nodes we are unlikely to hold them in a single new leaf
1387
        #      node. This still allows for 1 round trip
1388
        #   b) With 16-way fan out, we can still do a single round trip
1389
        #   c) With 255-way fan out, we don't want to read all 255 and destroy
1390
        #      the page cache, just to determine that we really don't need it.
1391
        for node, _ in self._iter_nodes(store, batch_size=16):
1392
            if type(node) is InternalNode:
1393
                # Without looking at any leaf nodes, we are sure
1394
                return self
1395
            for key, value in node._items.iteritems():
1396
                if new_leaf._map_no_split(key, value):
1397
                    return self
3735.2.123 by Ian Clatworthy
only check for remap if changes are interesting in size
1398
        trace.mutter("remap generated a new LeafNode")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1399
        return new_leaf
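    # Conceptual illustration (added for clarity, not in the original code):
    # if this internal node only references two small leaves, e.g. one leaf
    # holding ('apple',) -> 'x' and another holding ('beet',) -> 'y', both
    # items fit in one new LeafNode without it splitting, so the new LeafNode
    # replaces this InternalNode.  As soon as _map_no_split() reports a split,
    # or any child turns out to be an InternalNode, self is kept instead.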
1400
1401
1402
def _deserialise(bytes, key, search_key_func):
1403
    """Helper for repositorydetails - convert bytes to a node."""
1404
    if bytes.startswith("chkleaf:\n"):
1405
        node = LeafNode.deserialise(bytes, key, search_key_func=search_key_func)
1406
    elif bytes.startswith("chknode:\n"):
1407
        node = InternalNode.deserialise(bytes, key,
1408
            search_key_func=search_key_func)
1409
    else:
1410
        raise AssertionError("Unknown node type.")
1411
    return node
1412
1413
4476.1.38 by John Arbash Meinel
Rename InterestingNodeIterator => CHKMapDifference, update tests.
1414
class CHKMapDifference(object):
1415
    """Iterate the stored pages and key,value pairs for (new - old).
1416
1417
    This class provides a generator over the stored CHK pages and the
1418
    (key, value) pairs that are in any of the new maps and not in any of the
1419
    old maps.
1420
1421
    Note that it may yield chk pages that are common (especially root nodes),
1422
    but it won't yield (key,value) pairs that are common.
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1423
    """
1424
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1425
    def __init__(self, store, new_root_keys, old_root_keys,
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1426
                 search_key_func, pb=None):
1427
        self._store = store
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1428
        self._new_root_keys = new_root_keys
1429
        self._old_root_keys = old_root_keys
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1430
        self._pb = pb
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1431
        # All uninteresting chks that we have seen. By the time they are added
1432
        # here, they should be either fully ignored, or queued up for
1433
        # processing
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1434
        self._all_old_chks = set(self._old_root_keys)
1435
        # All items that we have seen from the old_root_keys
1436
        self._all_old_items = set()
4476.1.32 by John Arbash Meinel
A few more updates.
1437
        # These are interesting items which were either read, or already in the
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1438
        # interesting queue (so we don't need to walk them again)
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1439
        self._processed_new_refs = set()
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1440
        self._search_key_func = search_key_func
1441
4476.1.33 by John Arbash Meinel
Simpify the code a lot by ignoring the heapq stuff.
1442
        # The uninteresting and interesting nodes to be searched
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1443
        self._old_queue = []
1444
        self._new_queue = []
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1445
        # Holds the (key, value) items found when processing the root nodes,
1446
        # waiting for the uninteresting nodes to be walked
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1447
        self._new_item_queue = []
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1448
        self._state = None
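        # Summary of the data flow (comment added for clarity, not in the
        # original source): _read_all_roots() yields the new root records and
        # fills the queues above; _process_next_old() then drains _old_queue
        # so that _all_old_chks/_all_old_items cover everything reachable from
        # the old roots; finally _flush_new_queue() yields only the records
        # and (key, value) items that are not reachable from any old root.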
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1449
1450
    def _read_nodes_from_store(self, keys):
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1451
        # We chose not to use _page_cache, because we think in terms of records
1452
        # to be yielded. Also, we expect to touch each page only 1 time during
1453
        # this code. (We may want to evaluate saving the raw bytes into the
1454
        # page cache, which would allow a working tree update after the fetch
1455
        # to not have to read the bytes again.)
4476.1.12 by John Arbash Meinel
Start testing the new class.
1456
        stream = self._store.get_record_stream(keys, 'unordered', True)
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1457
        for record in stream:
1458
            if self._pb is not None:
1459
                self._pb.tick()
1460
            if record.storage_kind == 'absent':
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1461
                raise errors.NoSuchRevision(self._store, record.key)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1462
            bytes = record.get_bytes_as('fulltext')
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1463
            node = _deserialise(bytes, record.key,
1464
                                search_key_func=self._search_key_func)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1465
            if type(node) is InternalNode:
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1466
                # Note we don't have to do node.refs() because we know that
1467
                # there are no children that have been pushed into this node
1468
                prefix_refs = node._items.items()
1469
                items = []
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1470
            else:
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1471
                prefix_refs = []
1472
                items = node._items.items()
1473
            yield record, node, prefix_refs, items
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1474
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1475
    def _read_old_roots(self):
1476
        old_chks_to_enqueue = []
1477
        all_old_chks = self._all_old_chks
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1478
        for record, node, prefix_refs, items in \
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1479
                self._read_nodes_from_store(self._old_root_keys):
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1480
            # Uninteresting node
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1481
            prefix_refs = [p_r for p_r in prefix_refs
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1482
                                if p_r[1] not in all_old_chks]
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1483
            new_refs = [p_r[1] for p_r in prefix_refs]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1484
            all_old_chks.update(new_refs)
1485
            self._all_old_items.update(items)
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1486
            # Queue up the uninteresting references
1487
            # Don't actually put them in the 'to-read' queue until we have
1488
            # finished checking the interesting references
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1489
            old_chks_to_enqueue.extend(prefix_refs)
1490
        return old_chks_to_enqueue
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1491
4476.1.40 by John Arbash Meinel
cleanup indentation.
1492
    def _enqueue_old(self, new_prefixes, old_chks_to_enqueue):
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1493
        # At this point, we have read all the uninteresting and interesting
1494
        # items, so we can queue up the uninteresting stuff, knowing that we've
1495
        # handled the interesting ones
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1496
        for prefix, ref in old_chks_to_enqueue:
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1497
            not_interesting = True
1498
            for i in xrange(len(prefix), 0, -1):
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1499
                if prefix[:i] in new_prefixes:
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1500
                    not_interesting = False
1501
                    break
1502
            if not_interesting:
1503
                # This prefix is not part of the remaining 'interesting set'
1504
                continue
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1505
            self._old_queue.append(ref)
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1506
1507
    def _read_all_roots(self):
1508
        """Read the root pages.
1509
1510
        This is structured as a generator, so that the root records can be
1511
        yielded up to whoever needs them without any buffering.
1512
        """
1513
        # This is the bootstrap phase
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1514
        if not self._old_root_keys:
1515
            # With no old_root_keys we can just shortcut and be ready
1516
            # for _flush_new_queue
1517
            self._new_queue = list(self._new_root_keys)
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1518
            return
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1519
        old_chks_to_enqueue = self._read_old_roots()
4476.1.12 by John Arbash Meinel
Start testing the new class.
1520
        # filter out any root keys that are already known to be uninteresting
4476.1.40 by John Arbash Meinel
cleanup indentation.
1521
        new_keys = set(self._new_root_keys).difference(self._all_old_chks)
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1522
        # These are prefixes that are present in new_keys that we are
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1523
        # planning to yield
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1524
        new_prefixes = set()
4476.1.18 by John Arbash Meinel
Tracked it down.
1525
        # We are about to yield all of these, so we don't want them getting
1526
        # added a second time
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1527
        processed_new_refs = self._processed_new_refs
1528
        processed_new_refs.update(new_keys)
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1529
        for record, node, prefix_refs, items in \
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1530
                self._read_nodes_from_store(new_keys):
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1531
            # At this level, we now know all the uninteresting references
4476.1.35 by John Arbash Meinel
Change some of the inner loop workings into list comprehensions.
1532
            # So we filter and queue up whatever is remaining
1533
            prefix_refs = [p_r for p_r in prefix_refs
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1534
                           if p_r[1] not in self._all_old_chks
1535
                              and p_r[1] not in processed_new_refs]
4476.1.35 by John Arbash Meinel
Change some of the inner loop workings into list comprehensions.
1536
            refs = [p_r[1] for p_r in prefix_refs]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1537
            new_prefixes.update([p_r[0] for p_r in prefix_refs])
1538
            self._new_queue.extend(refs)
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1539
            # TODO: We can potentially get multiple items here, however the
1540
            #       current design allows for this, as callers will do the work
1541
            #       to make the results unique. We might profile whether we
1542
            #       gain anything by ensuring unique return values for items
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1543
            new_items = [item for item in items
4476.1.40 by John Arbash Meinel
cleanup indentation.
1544
                               if item not in self._all_old_items]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1545
            self._new_item_queue.extend(new_items)
1546
            new_prefixes.update([self._search_key_func(item[0])
4476.1.40 by John Arbash Meinel
cleanup indentation.
1547
                                 for item in new_items])
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1548
            processed_new_refs.update(refs)
4476.1.13 by John Arbash Meinel
Test that _read_all_roots does what is expected
1549
            yield record
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1550
        # For new_prefixes we have the full length prefixes queued up.
4476.1.35 by John Arbash Meinel
Change some of the inner loop workings into list comprehensions.
1551
        # However, we also need possible prefixes. (If we have a known ref to
1552
        # 'ab', then we also need to include 'a'.) So expand the
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1553
        # new_prefixes to include all shorter prefixes
1554
        for prefix in list(new_prefixes):
4476.1.40 by John Arbash Meinel
cleanup indentation.
1555
            new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
1556
        self._enqueue_old(new_prefixes, old_chks_to_enqueue)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1557
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1558
    def _flush_new_queue(self):
4476.1.27 by John Arbash Meinel
Rewrite of _flush_interesting_queue
1559
        # No need to maintain the heap invariant anymore, just pull things out
1560
        # and process them
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1561
        refs = set(self._new_queue)
1562
        self._new_queue = []
4476.1.31 by John Arbash Meinel
streamline the _flush_interesting_queue a bit.
1563
        # First pass, flush all interesting items and convert to using direct refs
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1564
        all_old_chks = self._all_old_chks
1565
        processed_new_refs = self._processed_new_refs
1566
        all_old_items = self._all_old_items
1567
        new_items = [item for item in self._new_item_queue
4476.1.40 by John Arbash Meinel
cleanup indentation.
1568
                           if item not in all_old_items]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1569
        self._new_item_queue = []
1570
        if new_items:
1571
            yield None, new_items
1572
        refs = refs.difference(all_old_chks)
4476.1.31 by John Arbash Meinel
streamline the _flush_interesting_queue a bit.
1573
        while refs:
1574
            next_refs = set()
1575
            next_refs_update = next_refs.update
1576
            # Inlining _read_nodes_from_store improves 'bzr branch bzr.dev'
1577
            # from 1m54s to 1m51s. Consider it.
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1578
            for record, _, p_refs, items in self._read_nodes_from_store(refs):
4476.1.27 by John Arbash Meinel
Rewrite of _flush_interesting_queue
1579
                items = [item for item in items
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1580
                         if item not in all_old_items]
4476.1.27 by John Arbash Meinel
Rewrite of _flush_interesting_queue
1581
                yield record, items
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1582
                next_refs_update([p_r[1] for p_r in p_refs])
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1583
            next_refs = next_refs.difference(all_old_chks)
1584
            next_refs = next_refs.difference(processed_new_refs)
1585
            processed_new_refs.update(next_refs)
4476.1.31 by John Arbash Meinel
streamline the _flush_interesting_queue a bit.
1586
            refs = next_refs
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1587
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1588
    def _process_next_old(self):
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1589
        # Since we don't filter uninteresting nodes any further than during
1590
        # _read_all_roots, process the whole queue in a single pass.
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1591
        refs = self._old_queue
1592
        self._old_queue = []
1593
        all_old_chks = self._all_old_chks
4476.1.32 by John Arbash Meinel
A few more updates.
1594
        for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1595
            self._all_old_items.update(items)
1596
            refs = [r for _,r in prefix_refs if r not in all_old_chks]
1597
            self._old_queue.extend(refs)
1598
            all_old_chks.update(refs)
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1599
1600
    def _process_queues(self):
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1601
        while self._old_queue:
1602
            self._process_next_old()
1603
        return self._flush_new_queue()
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1604
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1605
    def process(self):
1606
        for record in self._read_all_roots():
1607
            yield record, []
1608
        for record, items in self._process_queues():
1609
            yield record, items
1610
4476.1.25 by John Arbash Meinel
A bit more testing.
1611
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1612
def iter_interesting_nodes(store, interesting_root_keys,
1613
                           uninteresting_root_keys, pb=None):
1614
    """Given root keys, find interesting nodes.
1615
1616
    Evaluate nodes referenced by interesting_root_keys. Ones that are also
1617
    referenced from uninteresting_root_keys are not considered interesting.
1618
1619
    :param interesting_root_keys: keys which should be part of the
1620
        "interesting" nodes (which will be yielded)
1621
    :param uninteresting_root_keys: keys which should be filtered out of the
1622
        result set.
1623
    :return: Yield
1624
        (interesting record, {interesting key:values})
1625
    """
4476.1.38 by John Arbash Meinel
Rename InterestingNodeIterator => CHKMapDifference, update tests.
1626
    iterator = CHKMapDifference(store, interesting_root_keys,
1627
                                uninteresting_root_keys,
1628
                                search_key_func=store._search_key_func,
1629
                                pb=pb)
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1630
    return iterator.process()
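# Illustrative usage sketch (not from the original source); 'chk_store' and
# the root keys are hypothetical, and the store is assumed to expose a
# _search_key_func as the real CHK stores do:
#   for record, items in iter_interesting_nodes(chk_store,
#                                                [new_root_key],
#                                                [old_root_key]):
#       if record is not None:
#           pass  # a CHK page referenced by the new maps (roots may be common)
#       for key, value in items:
#           pass  # a (key, value) pair present in the new maps but not the old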
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1631
1632
1633
try:
1634
    from bzrlib._chk_map_pyx import (
1635
        _search_key_16,
1636
        _search_key_255,
1637
        _deserialise_leaf_node,
1638
        _deserialise_internal_node,
1639
        )
1640
except ImportError:
1641
    from bzrlib._chk_map_py import (
1642
        _search_key_16,
1643
        _search_key_255,
1644
        _deserialise_leaf_node,
1645
        _deserialise_internal_node,
1646
        )
1647
search_key_registry.register('hash-16-way', _search_key_16)
1648
search_key_registry.register('hash-255-way', _search_key_255)