
To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Persistent maps from tuple_of_strings->string using CHK stores.

Overview and current status:

The CHKMap class implements a dict from tuple_of_strings->string by using a trie
with internal nodes of 8-bit fan out; The key tuples are mapped to strings by
joining them by \x00, and \x00 padding shorter keys out to the length of the
longest key. Leaf nodes are packed as densely as possible, and internal nodes
are all an additional 8-bits wide leading to a sparse upper tree.

Updates to a CHKMap are done preferentially via the apply_delta method, to
allow optimisation of the update operation; but individual map/unmap calls are
possible and supported. All changes via map/unmap are buffered in memory until
the _save method is called to force serialisation of the tree. apply_delta
performs a _save implicitly.

TODO:
-----

Densely packed upper nodes.

"""
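# Editor's note (illustrative, not from the original source): the docstring's
# key-to-search-string mapping can be pictured with a small hedged example.
# Assuming the default 'plain' search key function, a key tuple is joined with
# NUL bytes, and shorter keys are padded out with NULs when compared against
# longer ones:
#
#   ('file-id', 'revision-id')  ->  'file-id\x00revision-id'
#   ('file-id',)                ->  'file-id'   (padded with '\x00' up to the
#                                                length of the longest key)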
import heapq

from bzrlib import lazy_import
lazy_import.lazy_import(globals(), """
from bzrlib import versionedfile
""")
from bzrlib import (
    lru_cache,
    osutils,
    registry,
    trace,
    )

# approx 4MB
# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
# We are caching bytes so len(value) is perfectly accurate
_page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)

def clear_cache():
    _page_cache.clear()

# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20
# If we delete more than this many nodes applying a delta, we check for a remap
_INTERESTING_DELETES_LIMIT = 5


def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)


search_key_registry = registry.Registry()
search_key_registry.register('plain', _search_key_plain)

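# Editor's note (illustrative sketch, not part of the original module): the
# registry above lets a format look up a search-key transform by name. A
# hypothetical registration of an alternative function mirrors 'plain':
#
#   def _search_key_example(key):        # hypothetical helper, illustration only
#       return '\x00'.join(key).upper()  # any bytes-producing transform works
#
#   search_key_registry.register('example', _search_key_example)
#   search_key_func = search_key_registry.get('plain')   # -> _search_key_plain
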
class CHKMap(object):
    """A persistent map from string to string backed by a CHK store."""

    def __init__(self, store, root_key, search_key_func=None):
        """Create a CHKMap object.

        :param store: The store the CHKMap is stored in.
        :param root_key: The root key of the map. None to create an empty
            CHKMap.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        """
        self._store = store
        if search_key_func is None:
            search_key_func = _search_key_plain
        self._search_key_func = search_key_func
        if root_key is None:
            self._root_node = LeafNode(search_key_func=search_key_func)
        else:
            self._root_node = self._node_key(root_key)

    def apply_delta(self, delta):
        """Apply a delta to the map.

        :param delta: An iterable of old_key, new_key, new_value tuples.
            If new_key is not None, then new_key->new_value is inserted
            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        delete_count = 0
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                delete_count += 1
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if delete_count > _INTERESTING_DELETES_LIMIT:
            trace.mutter("checking remap as %d deletions", delete_count)
            self._check_remap()
        return self._save()

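    # Editor's note (illustrative, hedged): a delta is just an iterable of
    # (old_key, new_key, new_value) tuples, applied as unmaps then maps. A
    # hypothetical update that renames one entry and adds another:
    #
    #   delta = [
    #       (('old-file-id',), ('new-file-id',), 'serialised inventory entry'),
    #       (None, ('added-file-id',), 'another serialised entry'),
    #   ]
    #   new_root_key = chkmap.apply_delta(delta)   # also performs _save()
    #
    # The names and values here are invented for illustration; only the tuple
    # shape and the returned root key follow from the code above.
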
    def _ensure_root(self):
        """Ensure that the root node is an object not a key."""
        if type(self._root_node) is tuple:
            # Demand-load the root
            self._root_node = self._get_node(self._root_node)

    def _get_node(self, node):
        """Get a node.

        Note that this does not update the _items dict in objects containing a
        reference to this node. As such it does not prevent subsequent IO being
        performed.

        :param node: A tuple key or node object.
        :return: A node object.
        """
        if type(node) is tuple:
            bytes = self._read_bytes(node)
            return _deserialise(bytes, node,
                search_key_func=self._search_key_func)
        else:
            return node

    def _read_bytes(self, key):
        try:
            return _page_cache[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _page_cache[key] = bytes
            return bytes

    def _dump_tree(self, include_keys=False):
        """Return the tree in a string representation."""
        self._ensure_root()
        res = self._dump_tree_node(self._root_node, prefix='', indent='',
                                   include_keys=include_keys)
        res.append('') # Give a trailing '\n'
        return '\n'.join(res)

    def _dump_tree_node(self, node, prefix, indent, include_keys=True):
        """For this node and all children, generate a string representation."""
        result = []
        if not include_keys:
            key_str = ''
        else:
            node_key = node.key()
            if node_key is not None:
                key_str = ' %s' % (node_key[0],)
            else:
                key_str = ' None'
        result.append('%s%r %s%s' % (indent, prefix, node.__class__.__name__,
                                     key_str))
        if type(node) is InternalNode:
            # Trigger all child nodes to get loaded
            list(node._iter_nodes(self._store))
            for prefix, sub in sorted(node._items.iteritems()):
                result.extend(self._dump_tree_node(sub, prefix, indent + '  ',
                                                   include_keys=include_keys))
        else:
            for key, value in sorted(node._items.iteritems()):
                # Don't use prefix nor indent here to line up when used in
                # tests in conjunction with assertEqualDiff
                result.append('      %r %r' % (key, value))
        return result

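    # Editor's note (illustrative, hedged): _dump_tree produces one line per
    # node ("%s%r %s%s" above) followed by the leaf items. For a small map that
    # has split once, the output might look roughly like:
    #
    #   '' InternalNode
    #     'a' LeafNode
    #         ('aaa',) 'foo'
    #     'b' LeafNode
    #         ('bbb',) 'bar'
    #
    # The prefixes and values are invented; only the nesting and the
    # "prefix ClassName" / "key value" layout follow from _dump_tree_node.
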
    @classmethod
    def from_dict(klass, store, initial_value, maximum_size=0, key_width=1,
        search_key_func=None):
        """Create a CHKMap in store with initial_value as the content.

        :param store: The store to record initial_value in, a VersionedFiles
            object with 1-tuple keys supporting CHK key generation.
        :param initial_value: A dict to store in store. Its keys and values
            must be bytestrings.
        :param maximum_size: The maximum_size rule to apply to nodes. This
            determines the size at which no new data is added to a single node.
        :param key_width: The number of elements in each key_tuple being stored
            in this map.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        :return: The root chk of the resulting CHKMap.
        """
        root_key = klass._create_directly(store, initial_value,
            maximum_size=maximum_size, key_width=key_width,
            search_key_func=search_key_func)
        return root_key

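    # Editor's note (illustrative, hedged): from_dict is the usual way to build
    # a new map. A hypothetical call, with made-up keys and values:
    #
    #   root_key = CHKMap.from_dict(store,
    #       {('file-id-1',): 'entry one', ('file-id-2',): 'entry two'},
    #       maximum_size=4096)
    #   chkmap = CHKMap(store, root_key)
    #
    # Only the signature and the returned root chk come from the code above;
    # the store and the contents are assumptions for the example.
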
    @classmethod
    def _create_via_map(klass, store, initial_value, maximum_size=0,
                        key_width=1, search_key_func=None):
        result = klass(store, None, search_key_func=search_key_func)
        result._root_node.set_maximum_size(maximum_size)
        result._root_node._key_width = key_width
        delta = []
        for key, value in initial_value.items():
            delta.append((None, key, value))
        root_key = result.apply_delta(delta)
        return root_key

    @classmethod
    def _create_directly(klass, store, initial_value, maximum_size=0,
                         key_width=1, search_key_func=None):
        node = LeafNode(search_key_func=search_key_func)
        node.set_maximum_size(maximum_size)
        node._key_width = key_width
        node._items = dict(initial_value)
        node._raw_size = sum([node._key_value_len(key, value)
                              for key, value in initial_value.iteritems()])
        node._len = len(node._items)
        node._compute_search_prefix()
        node._compute_serialised_prefix()
        if (node._len > 1
            and maximum_size
            and node._current_size() > maximum_size):
            prefix, node_details = node._split(store)
            if len(node_details) == 1:
                raise AssertionError('Failed to split using node._split')
            node = InternalNode(prefix, search_key_func=search_key_func)
            node.set_maximum_size(maximum_size)
            node._key_width = key_width
            for split, subnode in node_details:
                node.add_node(split, subnode)
        keys = list(node.serialise(store))
        return keys[-1]

    def iter_changes(self, basis):
        """Iterate over the changes between basis and self.

        :return: An iterator of tuples: (key, old_value, new_value). Old_value
            is None for keys only in self; new_value is None for keys only in
            basis.
        """
        # Overview:
        # Read both trees in lexicographic, highest-first order.
        # Any identical nodes we skip
        # Any unique prefixes we output immediately.
        # values in a leaf node are treated as single-value nodes in the tree
        # which allows them to be not-special-cased. We know to output them
        # because their value is a string, not a key(tuple) or node.
        #
        # corner cases to beware of when considering this function:
        # *) common references are at different heights.
        #    consider two trees:
        #    {'a': LeafNode={'aaa':'foo', 'aab':'bar'}, 'b': LeafNode={'b'}}
        #    {'a': InternalNode={'aa':LeafNode={'aaa':'foo', 'aab':'bar'},
        #                        'ab':LeafNode={'ab':'bar'}}
        #     'b': LeafNode={'b'}}
        #    the node with aaa/aab will only be encountered in the second tree
        #    after reading the 'a' subtree, but it is encountered in the first
        #    tree immediately. Variations on this may have read internal nodes
        #    like this.  we want to cut the entire pending subtree when we
        #    realise we have a common node.  For this we use a list of keys -
        #    the path to a node - and check the entire path is clean as we
        #    process each item.
        if self._node_key(self._root_node) == self._node_key(basis._root_node):
            return
        self._ensure_root()
        basis._ensure_root()
        excluded_keys = set()
        self_node = self._root_node
        basis_node = basis._root_node
        # A heap, each element is prefix, node(tuple/NodeObject/string),
        # key_path (a list of tuples, tail-sharing down the tree.)
        self_pending = []
        basis_pending = []
        def process_node(node, path, a_map, pending):
            # take a node and expand it
            node = a_map._get_node(node)
            if type(node) == LeafNode:
                path = (node._key, path)
                for key, value in node._items.items():
                    # For a LeafNode, the key is a serialized_key, rather than
                    # a search_key, but the heap is using search_keys
                    search_key = node._search_key_func(key)
                    heapq.heappush(pending, (search_key, key, value, path))
            else:
                # type(node) == InternalNode
                path = (node._key, path)
                for prefix, child in node._items.items():
                    heapq.heappush(pending, (prefix, None, child, path))
        def process_common_internal_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for prefix, child in self_items - basis_items:
                heapq.heappush(self_pending, (prefix, None, child, path))
            path = (basis_node._key, None)
            for prefix, child in basis_items - self_items:
                heapq.heappush(basis_pending, (prefix, None, child, path))
        def process_common_leaf_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for key, value in self_items - basis_items:
                prefix = self._search_key_func(key)
                heapq.heappush(self_pending, (prefix, key, value, path))
            path = (basis_node._key, None)
            for key, value in basis_items - self_items:
                prefix = basis._search_key_func(key)
                heapq.heappush(basis_pending, (prefix, key, value, path))
        def process_common_prefix_nodes(self_node, self_path,
                                        basis_node, basis_path):
            # Would it be more efficient if we could request both at the same
            # time?
            self_node = self._get_node(self_node)
            basis_node = basis._get_node(basis_node)
            if (type(self_node) == InternalNode
                and type(basis_node) == InternalNode):
                # Matching internal nodes
                process_common_internal_nodes(self_node, basis_node)
            elif (type(self_node) == LeafNode
                  and type(basis_node) == LeafNode):
                process_common_leaf_nodes(self_node, basis_node)
            else:
                process_node(self_node, self_path, self, self_pending)
                process_node(basis_node, basis_path, basis, basis_pending)
        process_common_prefix_nodes(self_node, None, basis_node, None)
        self_seen = set()
        basis_seen = set()
        excluded_keys = set()
        def check_excluded(key_path):
            # Note that this is N^2, it depends on us trimming trees
            # aggressively to not become slow.
            # A better implementation would probably have a reverse map
            # back to the children of a node, and jump straight to it when
            # a common node is detected, then proceed to remove the already
            # pending children. bzrlib.graph has a searcher module with a
            # similar problem.
            while key_path is not None:
                key, key_path = key_path
                if key in excluded_keys:
                    return True
            return False

        loop_counter = 0
        while self_pending or basis_pending:
            loop_counter += 1
            if not self_pending:
                # self is exhausted: output remainder of basis
                for prefix, key, node, path in basis_pending:
                    if check_excluded(path):
                        continue
                    node = basis._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(basis._store):
                            yield (key, value, None)
                return
            elif not basis_pending:
                # basis is exhausted: output remainder of self.
                for prefix, key, node, path in self_pending:
                    if check_excluded(path):
                        continue
                    node = self._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(self._store):
                            yield (key, None, value)
                return
            else:
                # XXX: future optimisation - yield the smaller items
                # immediately rather than pushing everything on/off the
                # heaps. Applies to both internal nodes and leafnodes.
                if self_pending[0][0] < basis_pending[0][0]:
                    # expand self
                    prefix, key, node, path = heapq.heappop(self_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        process_node(node, path, self, self_pending)
                        continue
                elif self_pending[0][0] > basis_pending[0][0]:
                    # expand basis
                    prefix, key, node, path = heapq.heappop(basis_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        process_node(node, path, basis, basis_pending)
                        continue
                else:
                    # common prefix: possibly expand both
                    if self_pending[0][1] is None:
                        # process next self
                        read_self = True
                    else:
                        read_self = False
                    if basis_pending[0][1] is None:
                        # process next basis
                        read_basis = True
                    else:
                        read_basis = False
                    if not read_self and not read_basis:
                        # compare a common value
                        self_details = heapq.heappop(self_pending)
                        basis_details = heapq.heappop(basis_pending)
                        if self_details[2] != basis_details[2]:
                            yield (self_details[1],
                                basis_details[2], self_details[2])
                        continue
                    # At least one side wasn't a simple value
                    if (self._node_key(self_pending[0][2]) ==
                        self._node_key(basis_pending[0][2])):
                        # Identical pointers, skip (and don't bother adding to
                        # excluded, it won't turn up again.)
                        heapq.heappop(self_pending)
                        heapq.heappop(basis_pending)
                        continue
                    # Now we need to expand this node before we can continue
                    if read_self and read_basis:
                        # Both sides start with the same prefix, so process
                        # them in parallel
                        self_prefix, _, self_node, self_path = heapq.heappop(
                            self_pending)
                        basis_prefix, _, basis_node, basis_path = heapq.heappop(
                            basis_pending)
                        if self_prefix != basis_prefix:
                            raise AssertionError(
                                '%r != %r' % (self_prefix, basis_prefix))
                        process_common_prefix_nodes(
                            self_node, self_path,
                            basis_node, basis_path)
                        continue
                    if read_self:
                        prefix, key, node, path = heapq.heappop(self_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, self, self_pending)
                    if read_basis:
                        prefix, key, node, path = heapq.heappop(basis_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, basis, basis_pending)
        # print loop_counter

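    # Editor's note (illustrative, hedged): iter_changes yields
    # (key, old_value, new_value) tuples. Comparing a hypothetical new map
    # against a basis map might produce something like:
    #
    #   (('added-id',), None, 'new entry')           # only in self
    #   (('removed-id',), 'old entry', None)         # only in basis
    #   (('changed-id',), 'old entry', 'new entry')  # present in both, changed
    #
    # The keys and values are made up; the tuple shape and the None conventions
    # come from the docstring above.
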
    def iteritems(self, key_filter=None):
        """Iterate over the entire CHKMap's contents."""
        self._ensure_root()
        return self._root_node.iteritems(self._store, key_filter=key_filter)

    def key(self):
        """Return the key for this map."""
        if type(self._root_node) is tuple:
            return self._root_node
        else:
            return self._root_node._key

    def __len__(self):
        self._ensure_root()
        return len(self._root_node)

    def map(self, key, value):
        """Map a key tuple to value."""
        # Need a root object.
        self._ensure_root()
        prefix, node_details = self._root_node.map(self._store, key, value)
        if len(node_details) == 1:
            self._root_node = node_details[0][1]
        else:
            self._root_node = InternalNode(prefix,
                                search_key_func=self._search_key_func)
            self._root_node.set_maximum_size(node_details[0][1].maximum_size)
            self._root_node._key_width = node_details[0][1]._key_width
            for split, node in node_details:
                self._root_node.add_node(split, node)

    def _node_key(self, node):
        """Get the key for a node whether it's a tuple or node."""
        if type(node) is tuple:
            return node
        else:
            return node._key

    def unmap(self, key, check_remap=True):
        """remove key from the map."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            unmapped = self._root_node.unmap(self._store, key,
                check_remap=check_remap)
        else:
            unmapped = self._root_node.unmap(self._store, key)
        self._root_node = unmapped

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        :return: The key of the root node.
        """
        if type(self._root_node) is tuple:
            # Already saved.
            return self._root_node
        keys = list(self._root_node.serialise(self._store))
        return keys[-1]


class Node(object):
    """Base class defining the protocol for CHK Map nodes.

    :ivar _raw_size: The total size of the serialized key:value data, before
        adding the header bytes, and without prefix compression.
    """

    def __init__(self, key_width=1):
        """Create a node.

        :param key_width: The width of keys for this node.
        """
        self._key = None
        # Current number of elements
        self._len = 0
        self._maximum_size = 0
        self._key_width = key_width
        # current size in bytes
        self._raw_size = 0
        # The pointers/values this node has - meaning defined by child classes.
        self._items = {}
        # The common search prefix
        self._search_prefix = None

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return '%s(key:%s len:%s size:%s max:%s prefix:%s items:%s)' % (
            self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, items_str)

    def key(self):
        return self._key

    def __len__(self):
        return self._len

    @property
    def maximum_size(self):
        """What is the upper limit for adding references to a node."""
        return self._maximum_size

    def set_maximum_size(self, new_size):
        """Set the size threshold for nodes.

        :param new_size: The size at which no data is added to a node. 0 for
            unlimited.
        """
        self._maximum_size = new_size

    @classmethod
    def common_prefix(cls, prefix, key):
        """Given 2 strings, return the longest prefix common to both.

        :param prefix: This has been the common prefix for other keys, so it is
            more likely to be the common prefix in this case as well.
        :param key: Another string to compare to
        """
        if key.startswith(prefix):
            return prefix
        pos = -1
        # Is there a better way to do this?
        for pos, (left, right) in enumerate(zip(prefix, key)):
            if left != right:
                pos -= 1
                break
        common = prefix[:pos+1]
        return common

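    # Editor's note (illustrative): a few concrete cases, following directly
    # from the code above:
    #
    #   Node.common_prefix('abc', 'abd')    ->  'ab'
    #   Node.common_prefix('abc', 'abcde')  ->  'abc'  (startswith shortcut)
    #   Node.common_prefix('abc', 'xyz')    ->  ''
    #   Node.common_prefix('', 'abc')       ->  ''     (empty keys supported)
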
    @classmethod
    def common_prefix_for_keys(cls, keys):
        """Given a list of keys, find their common prefix.

        :param keys: An iterable of strings.
        :return: The longest common prefix of all keys.
        """
        common_prefix = None
        for key in keys:
            if common_prefix is None:
                common_prefix = key
                continue
            common_prefix = cls.common_prefix(common_prefix, key)
            if not common_prefix:
                # if common_prefix is the empty string, then we know it won't
                # change further
                return ''
        return common_prefix


# Singleton indicating we have not computed _search_prefix yet
_unknown = object()


class LeafNode(Node):
    """A node containing actual key:value pairs.

    :ivar _items: A dict of key->value items. The key is in tuple form.
    :ivar _size: The number of bytes that would be used by serializing all of
        the key/value pairs.
    """

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        self._serialise_key = '\x00'.join
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return \
            '%s(key:%s len:%s size:%s max:%s prefix:%s keywidth:%s items:%s)' \
            % (self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, self._key_width, items_str)

    def _current_size(self):
        """Answer the current serialised size of this node.

        This differs from self._raw_size in that it includes the bytes used for
        the header.
        """
        if self._common_serialised_prefix is None:
            bytes_for_items = 0
            prefix_len = 0
        else:
            # We will store a single string with the common prefix
            # And then that common prefix will not be stored in any of the
            # entry lines
            prefix_len = len(self._common_serialised_prefix)
            bytes_for_items = (self._raw_size - (prefix_len * self._len))
        return (9 # 'chkleaf:\n'
            + len(str(self._maximum_size)) + 1
            + len(str(self._key_width)) + 1
            + len(str(self._len)) + 1
            + prefix_len + 1
            + bytes_for_items)

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes, with key key, into a LeafNode.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        """
        return _deserialise_leaf_node(bytes, key,
                                      search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        """Iterate over items in the node.

        :param key_filter: A filter to apply to the node. It should be a
            list/set/dict or similar repeatedly iterable container.
        """
        if key_filter is not None:
            # Adjust the filter - short elements go to a prefix filter. All
            # other items are looked up directly.
            # XXX: perhaps defaultdict? Profiling<rinse and repeat>
            filters = {}
            for key in key_filter:
                if len(key) == self._key_width:
                    # This filter is meant to match exactly one key, yield it
                    # if we have it.
                    try:
                        yield key, self._items[key]
                    except KeyError:
                        # This key is not present in this map, continue
                        pass
                else:
                    # Short items, we need to match based on a prefix
                    length_filter = filters.setdefault(len(key), set())
                    length_filter.add(key)
            if filters:
                filters = filters.items()
                for item in self._items.iteritems():
                    for length, length_filter in filters:
                        if item[0][:length] in length_filter:
                            yield item
                            break
        else:
            for item in self._items.iteritems():
                yield item

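    # Editor's note (illustrative, hedged): key_filter entries that are as wide
    # as self._key_width are exact lookups; shorter tuples act as prefix
    # filters. With made-up two-element keys (key_width == 2):
    #
    #   list(leaf.iteritems(store, key_filter=[('file-id', 'rev-id')]))
    #       # exact match: at most one item
    #   list(leaf.iteritems(store, key_filter=[('file-id',)]))
    #       # prefix match: every item whose key starts with ('file-id',)
    #
    # The key names are invented; the matching rules follow from the code above.
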
    def _key_value_len(self, key, value):
        # TODO: Should probably be done without actually joining the key, but
        #       then that can be done via the C extension
        return (len(self._serialise_key(key)) + 1
                + len(str(value.count('\n'))) + 1
                + len(value) + 1)

    def _search_key(self, key):
        return self._search_key_func(key)

    def _map_no_split(self, key, value):
        """Map a key to a value.

        This assumes either the key does not already exist, or you have already
        removed its size and length from self.

        :return: True if adding this node should cause us to split.
        """
        self._items[key] = value
        self._raw_size += self._key_value_len(key, value)
        self._len += 1
        serialised_key = self._serialise_key(key)
        if self._common_serialised_prefix is None:
            self._common_serialised_prefix = serialised_key
        else:
            self._common_serialised_prefix = self.common_prefix(
                self._common_serialised_prefix, serialised_key)
        search_key = self._search_key(key)
        if self._search_prefix is _unknown:
            self._compute_search_prefix()
        if self._search_prefix is None:
            self._search_prefix = search_key
        else:
            self._search_prefix = self.common_prefix(
                self._search_prefix, search_key)
        if (self._len > 1
            and self._maximum_size
            and self._current_size() > self._maximum_size):
            # Check to see if all of the search_keys for this node are
            # identical. We allow the node to grow under that circumstance
            # (we could track this as common state, but it is infrequent)
            if (search_key != self._search_prefix
                or not self._are_search_keys_identical()):
                return True
        return False

    def _split(self, store):
        """We have overflowed.

        Split this node into multiple LeafNodes, return it up the stack so that
        the next layer creates a new InternalNode and references the new nodes.

        :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
        """
        if self._search_prefix is _unknown:
            raise AssertionError('Search prefix must be known')
        common_prefix = self._search_prefix
        split_at = len(common_prefix) + 1
        result = {}
        for key, value in self._items.iteritems():
            search_key = self._search_key(key)
            prefix = search_key[:split_at]
            # TODO: Generally only 1 key can be exactly the right length,
            #       which means we can only have 1 key in the node pointed
            #       at by the 'prefix\0' key. We might want to consider
            #       folding it into the containing InternalNode rather than
            #       having a fixed length-1 node.
            #       Note this is probably not true for hash keys, as they
            #       may get a '\00' node anywhere, but won't have keys of
            #       different lengths.
            if len(prefix) < split_at:
                prefix += '\x00'*(split_at - len(prefix))
            if prefix not in result:
                node = LeafNode(search_key_func=self._search_key_func)
                node.set_maximum_size(self._maximum_size)
                node._key_width = self._key_width
                result[prefix] = node
            else:
                node = result[prefix]
            sub_prefix, node_details = node.map(store, key, value)
            if len(node_details) > 1:
                if prefix != sub_prefix:
                    # This node has been split and is now found via a different
                    # path
                    result.pop(prefix)
                new_node = InternalNode(sub_prefix,
                    search_key_func=self._search_key_func)
                new_node.set_maximum_size(self._maximum_size)
                new_node._key_width = self._key_width
                for split, node in node_details:
                    new_node.add_node(split, node)
                result[prefix] = new_node
        return common_prefix, result.items()

    def map(self, store, key, value):
        """Map key to value."""
        if key in self._items:
            self._raw_size -= self._key_value_len(key, self._items[key])
            self._len -= 1
        self._key = None
        if self._map_no_split(key, value):
            return self._split(store)
        else:
            if self._search_prefix is _unknown:
                raise AssertionError('%r must be known' % self._search_prefix)
            return self._search_prefix, [("", self)]

    def serialise(self, store):
        """Serialise the LeafNode to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        lines = ["chkleaf:\n"]
        lines.append("%d\n" % self._maximum_size)
        lines.append("%d\n" % self._key_width)
        lines.append("%d\n" % self._len)
        if self._common_serialised_prefix is None:
            lines.append('\n')
            if len(self._items) != 0:
                raise AssertionError('If _common_serialised_prefix is None'
                    ' we should have no items')
        else:
            lines.append('%s\n' % (self._common_serialised_prefix,))
            prefix_len = len(self._common_serialised_prefix)
        for key, value in sorted(self._items.items()):
            # Always add a final newline
            value_lines = osutils.chunks_to_lines([value + '\n'])
            serialized = "%s\x00%s\n" % (self._serialise_key(key),
                                         len(value_lines))
            if not serialized.startswith(self._common_serialised_prefix):
                raise AssertionError('We thought the common prefix was %r'
                    ' but entry %r does not have it in common'
                    % (self._common_serialised_prefix, serialized))
            lines.append(serialized[prefix_len:])
            lines.extend(value_lines)
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = ("sha1:" + sha1,)
        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _page_cache.add(self._key, bytes)
        return [self._key]

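    # Editor's note (illustrative, hedged): putting the serialise() steps above
    # together, a leaf holding one entry might serialise to something like:
    #
    #   chkleaf:
    #   4096            <- maximum_size
    #   1               <- key_width
    #   1               <- len
    #   file-id         <- common serialised prefix
    #   \x001           <- rest of "key\x00num_value_lines" after the prefix
    #   entry bytes     <- the value, newline terminated
    #
    # The sizes and the 'file-id' key are invented for the example; the header
    # order and the prefix-stripped entry lines follow from the code above.
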
    def refs(self):
        """Return the references to other CHK's held by this node."""
        return []

    def _compute_search_prefix(self):
        """Determine the common search prefix for all keys in this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        search_keys = [self._search_key_func(key) for key in self._items]
        self._search_prefix = self.common_prefix_for_keys(search_keys)
        return self._search_prefix

    def _are_search_keys_identical(self):
        """Check to see if the search keys for all entries are the same.

        When using a hash as the search_key it is possible for non-identical
        keys to collide. If that happens enough, we may try to overflow a
        LeafNode, but as all are collisions, we must not split.
        """
        common_search_key = None
        for key in self._items:
            search_key = self._search_key(key)
            if common_search_key is None:
                common_search_key = search_key
            elif search_key != common_search_key:
                return False
        return True

    def _compute_serialised_prefix(self):
        """Determine the common prefix for serialised keys in this node.

        :return: A bytestring of the longest serialised key prefix that is
            unique within this node.
        """
        serialised_keys = [self._serialise_key(key) for key in self._items]
        self._common_serialised_prefix = self.common_prefix_for_keys(
            serialised_keys)
        return self._common_serialised_prefix

    def unmap(self, store, key):
        """Unmap key from the node."""
        try:
            self._raw_size -= self._key_value_len(key, self._items[key])
        except KeyError:
            trace.mutter("key %s not found in %r", key, self._items)
            raise
        self._len -= 1
        del self._items[key]
        self._key = None
        # Recompute from scratch
        self._compute_search_prefix()
        self._compute_serialised_prefix()
        return self


class InternalNode(Node):
    """A node that contains references to other nodes.

    An InternalNode is responsible for mapping search key prefixes to child
    nodes.

    :ivar _items: serialised_key => node dictionary. node may be a tuple,
        LeafNode or InternalNode.
    """

    def __init__(self, prefix='', search_key_func=None):
        Node.__init__(self)
        # The size of an internalnode with default values and no children.
        # How many octets wide the key prefixes within this node are.
        self._node_width = 0
        self._search_prefix = prefix
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def add_node(self, prefix, node):
        """Add a child node with prefix prefix, and node node.

        :param prefix: The search key prefix for node.
        :param node: The node being added.
        """
        if self._search_prefix is None:
            raise AssertionError("_search_prefix should not be None")
        if not prefix.startswith(self._search_prefix):
            raise AssertionError("prefixes mismatch: %s must start with %s"
                % (prefix,self._search_prefix))
        if len(prefix) != len(self._search_prefix) + 1:
            raise AssertionError("prefix wrong length: len(%s) is not %d" %
                (prefix, len(self._search_prefix) + 1))
        self._len += len(node)
        if not len(self._items):
            self._node_width = len(prefix)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        self._items[prefix] = node
        self._key = None

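    # Editor's note (illustrative, hedged): add_node enforces that every child
    # prefix is exactly one byte longer than this node's own search prefix, so
    # the children of an InternalNode whose _search_prefix is 'ab' would be
    # keyed 'aba', 'abb', 'ab\x00', and so on. The example prefixes are
    # invented; the width rule is the assertion checked above.
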
    def _current_size(self):
970
        """Answer the current serialised size of this node."""
971
        return (self._raw_size + len(str(self._len)) + len(str(self._key_width)) +
972
            len(str(self._maximum_size)))
973
974
    @classmethod
975
    def deserialise(klass, bytes, key, search_key_func=None):
976
        """Deserialise bytes to an InternalNode, with key key.
977
978
        :param bytes: The bytes of the node.
979
        :param key: The key that the serialised node has.
980
        :return: An InternalNode instance.
981
        """
982
        return _deserialise_internal_node(bytes, key,
983
                                          search_key_func=search_key_func)
984
985
    def iteritems(self, store, key_filter=None):
986
        for node, node_filter in self._iter_nodes(store, key_filter=key_filter):
987
            for item in node.iteritems(store, key_filter=node_filter):
988
                yield item
989
990
    def _iter_nodes(self, store, key_filter=None, batch_size=None):
991
        """Iterate over node objects which match key_filter.
992
993
        :param store: A store to use for accessing content.
994
        :param key_filter: A key filter to filter nodes. Only nodes that might
995
            contain a key in key_filter will be returned.
996
        :param batch_size: If not None, then we will return the nodes that had
997
            to be read using get_record_stream in batches, rather than reading
998
            them all at once.
999
        :return: An iterable of nodes. This function does not have to be fully
1000
            consumed.  (There will be no pending I/O when items are being returned.)
1001
        """
1002
        # Map from chk key ('sha1:...',) to (prefix, key_filter)
1003
        # prefix is the key in self._items to use, key_filter is the key_filter
1004
        # entries that would match this node
1005
        keys = {}
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1006
        shortcut = False
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1007
        if key_filter is None:
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1008
            # yielding all nodes, yield whatever we have, and queue up a read
1009
            # for whatever we are missing
1010
            shortcut = True
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1011
            for prefix, node in self._items.iteritems():
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1012
                if node.__class__ is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1013
                    keys[node] = (prefix, None)
1014
                else:
1015
                    yield node, None
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1016
        elif len(key_filter) == 1:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1017
            # Technically, this path could also be handled by the first check
1018
            # in 'self._node_width' in length_filters. However, we can handle
1019
            # this case without spending any time building up the
1020
            # prefix_to_keys, etc state.
1021
1022
            # This is a bit ugly, but TIMEIT showed it to be by far the fastest
1023
            # 0.626us   list(key_filter)[0]
1024
            #       is a func() for list(), 2 mallocs, and a getitem
1025
            # 0.489us   [k for k in key_filter][0]
1026
            #       still has the mallocs, avoids the func() call
1027
            # 0.350us   iter(key_filter).next()
1028
            #       has a func() call, and mallocs an iterator
1029
            # 0.125us   for key in key_filter: pass
1030
            #       no func() overhead, might malloc an iterator
1031
            # 0.105us   for key in key_filter: break
1032
            #       no func() overhead, might malloc an iterator, probably
1033
            #       avoids checking an 'else' clause as part of the for
1034
            for key in key_filter:
1035
                break
1036
            search_prefix = self._search_prefix_filter(key)
1037
            if len(search_prefix) == self._node_width:
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1038
                # This item will match exactly, so just do a dict lookup, and
1039
                # see what we can return
1040
                shortcut = True
1041
                try:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1042
                    node = self._items[search_prefix]
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1043
                except KeyError:
1044
                    # A given key can only match 1 child node, if it isn't
1045
                    # there, then we can just return nothing
1046
                    return
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1047
                if node.__class__ is tuple:
1048
                    keys[node] = (search_prefix, [key])
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1049
                else:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1050
                    # This is loaded, and the only thing that can match,
1051
                    # return
1052
                    yield node, [key]
1053
                    return
4413.4.1 by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key.
1054
        if not shortcut:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1055
            # First, convert all keys into a list of search prefixes
1056
            # Aggregate common prefixes, and track the keys they come from
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1057
            prefix_to_keys = {}
1058
            length_filters = {}
1059
            for key in key_filter:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1060
                search_prefix = self._search_prefix_filter(key)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1061
                length_filter = length_filters.setdefault(
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1062
                                    len(search_prefix), set())
1063
                length_filter.add(search_prefix)
1064
                prefix_to_keys.setdefault(search_prefix, []).append(key)
1065
1066
            if (self._node_width in length_filters
1067
                and len(length_filters) == 1):
1068
                # all of the search prefixes match exactly _node_width. This
1069
                # means that everything is an exact match, and we can do a
1070
                # lookup into self._items, rather than iterating over the items
1071
                # dict.
1072
                search_prefixes = length_filters[self._node_width]
1073
                for search_prefix in search_prefixes:
1074
                    try:
1075
                        node = self._items[search_prefix]
1076
                    except KeyError:
1077
                        # We can ignore this one
1078
                        continue
1079
                    node_key_filter = prefix_to_keys[search_prefix]
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1080
                    if node.__class__ is tuple:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1081
                        keys[node] = (search_prefix, node_key_filter)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1082
                    else:
1083
                        yield node, node_key_filter
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1084
            else:
1085
                # The slow way. We walk every item in self._items, and check to
1086
                # see if there are any matches
1087
                length_filters = length_filters.items()
1088
                for prefix, node in self._items.iteritems():
1089
                    node_key_filter = []
1090
                    for length, length_filter in length_filters:
1091
                        sub_prefix = prefix[:length]
1092
                        if sub_prefix in length_filter:
1093
                            node_key_filter.extend(prefix_to_keys[sub_prefix])
1094
                    if node_key_filter: # this key matched something, yield it
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1095
                        if node.__class__ is tuple:
4413.4.2 by John Arbash Meinel
Rewrite the shortcuts.
1096
                            keys[node] = (prefix, node_key_filter)
1097
                        else:
1098
                            yield node, node_key_filter
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1099
        if keys:
1100
            # Look in the page cache for some more bytes
1101
            found_keys = set()
1102
            for key in keys:
1103
                try:
1104
                    bytes = _page_cache[key]
1105
                except KeyError:
1106
                    continue
1107
                else:
1108
                    node = _deserialise(bytes, key,
1109
                        search_key_func=self._search_key_func)
1110
                    prefix, node_key_filter = keys[key]
1111
                    self._items[prefix] = node
1112
                    found_keys.add(key)
1113
                    yield node, node_key_filter
1114
            for key in found_keys:
1115
                del keys[key]
1116
        if keys:
1117
            # demand load some pages.
1118
            if batch_size is None:
1119
                # Read all the keys in
1120
                batch_size = len(keys)
1121
            key_order = list(keys)
1122
            for batch_start in range(0, len(key_order), batch_size):
1123
                batch = key_order[batch_start:batch_start + batch_size]
1124
                # We have to fully consume the stream so there is no pending
1125
                # I/O, so we buffer the nodes for now.
1126
                stream = store.get_record_stream(batch, 'unordered', True)
1127
                node_and_filters = []
1128
                for record in stream:
1129
                    bytes = record.get_bytes_as('fulltext')
1130
                    node = _deserialise(bytes, record.key,
1131
                        search_key_func=self._search_key_func)
1132
                    prefix, node_key_filter = keys[record.key]
1133
                    node_and_filters.append((node, node_key_filter))
1134
                    self._items[prefix] = node
1135
                    _page_cache.add(record.key, bytes)
1136
                for info in node_and_filters:
1137
                    yield info
1138
1139
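    # Editor's note: the slow path above first groups the caller's key_filter
    # by serialised search prefix before matching children.  A standalone
    # sketch of that grouping, using hypothetical prefixes (not part of the
    # original module):
    #
    #   prefix_to_keys = {}
    #   length_filters = {}
    #   for key, search_prefix in [(('foo',), 'ab'), (('bar',), 'a')]:
    #       length_filters.setdefault(len(search_prefix), set()).add(search_prefix)
    #       prefix_to_keys.setdefault(search_prefix, []).append(key)
    #   # length_filters == {1: set(['a']), 2: set(['ab'])}
    #   # prefix_to_keys == {'a': [('bar',)], 'ab': [('foo',)]}
    #
    # A child stored under prefix 'abc' then matches both filters, because
    # 'abc'[:2] == 'ab' and 'abc'[:1] == 'a', so its node_key_filter ends up
    # containing both ('foo',) and ('bar',).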
    def map(self, store, key, value):
1140
        """Map key to value."""
1141
        if not len(self._items):
3735.2.122 by Ian Clatworthy
don't check_remap on every unmap call in CHKMap.apply_delta()
1142
            raise AssertionError("can't map in an empty InternalNode.")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1143
        search_key = self._search_key(key)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1144
        if self._node_width != len(self._search_prefix) + 1:
1145
            raise AssertionError("node width mismatch: %d is not %d" %
1146
                (self._node_width, len(self._search_prefix) + 1))
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1147
        if not search_key.startswith(self._search_prefix):
1148
            # This key doesn't fit in this index, so we need to split at the
1149
            # point where it would fit, insert self into that internal node,
1150
            # and then map this key into that node.
1151
            new_prefix = self.common_prefix(self._search_prefix,
1152
                                            search_key)
1153
            new_parent = InternalNode(new_prefix,
1154
                search_key_func=self._search_key_func)
1155
            new_parent.set_maximum_size(self._maximum_size)
1156
            new_parent._key_width = self._key_width
1157
            new_parent.add_node(self._search_prefix[:len(new_prefix)+1],
1158
                                self)
1159
            return new_parent.map(store, key, value)
1160
        children = [node for node, _
1161
                          in self._iter_nodes(store, key_filter=[key])]
1162
        if children:
1163
            child = children[0]
1164
        else:
1165
            # new child needed:
1166
            child = self._new_child(search_key, LeafNode)
1167
        old_len = len(child)
1168
        if type(child) is LeafNode:
1169
            old_size = child._current_size()
1170
        else:
1171
            old_size = None
1172
        prefix, node_details = child.map(store, key, value)
1173
        if len(node_details) == 1:
1174
            # child may have shrunk, or might be a new node
1175
            child = node_details[0][1]
1176
            self._len = self._len - old_len + len(child)
1177
            self._items[search_key] = child
1178
            self._key = None
1179
            new_node = self
1180
            if type(child) is LeafNode:
3735.2.123 by Ian Clatworthy
only check for remap if changes are interesting in size
1181
                if old_size is None:
1182
                    # The old node was an InternalNode which means it has now
1183
                    # collapsed, so we need to check if it will chain to a
1184
                    # collapse at this level.
1185
                    trace.mutter("checking remap as InternalNode -> LeafNode")
1186
                    new_node = self._check_remap(store)
1187
                else:
1188
                    # If the LeafNode has shrunk in size, we may want to run
1189
                    # a remap check. Checking for a remap is expensive though
1190
                    # and the frequency of a successful remap is very low.
1191
                    # Shrinkage by small amounts is common, so we only do the
1192
                    # remap check if the new_size is low or the shrinkage
1193
                    # amount is over a configurable limit.
1194
                    new_size = child._current_size()
1195
                    shrinkage = old_size - new_size
1196
                    if (shrinkage > 0 and new_size < _INTERESTING_NEW_SIZE
1197
                        or shrinkage > _INTERESTING_SHRINKAGE_LIMIT):
1198
                        trace.mutter(
1199
                            "checking remap as size shrunk by %d to be %d",
1200
                            shrinkage, new_size)
1201
                        new_node = self._check_remap(store)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1202
            if new_node._search_prefix is None:
1203
                raise AssertionError("_search_prefix should not be None")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1204
            return new_node._search_prefix, [('', new_node)]
1205
        # child has overflowed - create a new intermediate node.
1206
        # XXX: This is where we might want to try to expand our depth
1207
        # to refer to more bytes of every child (which would give us
1208
        # multiple pointers to child nodes, but fewer intermediate nodes)
1209
        child = self._new_child(search_key, InternalNode)
1210
        child._search_prefix = prefix
1211
        for split, node in node_details:
1212
            child.add_node(split, node)
1213
        self._len = self._len - old_len + len(child)
1214
        self._key = None
1215
        return self._search_prefix, [("", self)]
1216
1217
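    # Editor's note: when a new search key does not start with this node's
    # _search_prefix, map() above pushes a new parent InternalNode keyed on
    # the common prefix of the two.  A simplified stand-in for the
    # common_prefix computation used there (illustrative only):
    #
    #   def _common_prefix(a, b):
    #       pos = 0
    #       while pos < len(a) and pos < len(b) and a[pos] == b[pos]:
    #           pos += 1
    #       return a[:pos]
    #
    #   _common_prefix('abcf', 'abde')  # -> 'ab'
    #
    # So a node rooted at 'abc' that is asked to map a key serialising to
    # 'abd...' gains a new parent at 'ab', with self registered under 'abc'
    # and the new key mapped under 'abd'.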
    def _new_child(self, search_key, klass):
1218
        """Create a new child node of type klass."""
1219
        child = klass()
1220
        child.set_maximum_size(self._maximum_size)
1221
        child._key_width = self._key_width
1222
        child._search_key_func = self._search_key_func
1223
        self._items[search_key] = child
1224
        return child
1225
1226
    def serialise(self, store):
1227
        """Serialise the node to store.
1228
1229
        :param store: A VersionedFiles honouring the CHK extensions.
1230
        :return: An iterable of the keys inserted by this operation.
1231
        """
1232
        for node in self._items.itervalues():
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1233
            if type(node) is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1234
                # Never deserialised.
1235
                continue
1236
            if node._key is not None:
1237
                # Never altered
1238
                continue
1239
            for key in node.serialise(store):
1240
                yield key
1241
        lines = ["chknode:\n"]
1242
        lines.append("%d\n" % self._maximum_size)
1243
        lines.append("%d\n" % self._key_width)
1244
        lines.append("%d\n" % self._len)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1245
        if self._search_prefix is None:
1246
            raise AssertionError("_search_prefix should not be None")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1247
        lines.append('%s\n' % (self._search_prefix,))
1248
        prefix_len = len(self._search_prefix)
1249
        for prefix, node in sorted(self._items.items()):
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1250
            if type(node) is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1251
                key = node[0]
1252
            else:
1253
                key = node._key[0]
1254
            serialised = "%s\x00%s\n" % (prefix, key)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1255
            if not serialised.startswith(self._search_prefix):
1256
                raise AssertionError("prefixes mismatch: %s must start with %s"
1257
                    % (serialised, self._search_prefix))
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1258
            lines.append(serialised[prefix_len:])
1259
        sha1, _, _ = store.add_lines((None,), (), lines)
1260
        self._key = ("sha1:" + sha1,)
1261
        _page_cache.add(self._key, ''.join(lines))
1262
        yield self._key
1263
1264
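    # Editor's note: an illustration of the record layout produced by
    # serialise() above.  The sizes, prefix and sha1 digests here are made
    # up.  An internal node with _maximum_size=4096, _key_width=1, _len=63,
    # _search_prefix='ab' and children stored under 'aba' and 'abb' would be
    # written as:
    #
    #   chknode:
    #   4096
    #   1
    #   63
    #   ab
    #   a\x00sha1:1111...
    #   b\x00sha1:2222...
    #
    # i.e. one line per child containing the child's prefix with the shared
    # _search_prefix stripped, a NUL byte, and the child's CHK key.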
    def _search_key(self, key):
1265
        """Return the serialised key for key in this node."""
1266
        # search keys are fixed width. All will be self._node_width wide, so we
1267
        # pad as necessary.
1268
        return (self._search_key_func(key) + '\x00'*self._node_width)[:self._node_width]
1269
1270
    def _search_prefix_filter(self, key):
1271
        """Serialise key for use as a prefix filter in iteritems."""
1272
        return self._search_key_func(key)[:self._node_width]
1273
1274
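    # Editor's note: the difference between the two helpers above, shown with
    # made-up values.  With _node_width == 4 and a search key function that
    # returns 'ab' for some key:
    #
    #   _search_key(key)            == 'ab\x00\x00'   (padded to full width)
    #   _search_prefix_filter(key)  == 'ab'           (truncated only)
    #
    # Exact child lookups therefore always use fixed-width strings, while
    # key filters may legitimately be shorter than _node_width.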
    def _split(self, offset):
1275
        """Split this node into smaller nodes starting at offset.
1276
1277
        :param offset: The offset to start the new child nodes at.
1278
        :return: An iterable of (prefix, node) tuples. prefix is a byte
1279
            prefix for reaching node.
1280
        """
1281
        if offset >= self._node_width:
1282
            for node in self._items.values():
1283
                for result in node._split(offset):
1284
                    yield result
1285
            return
1286
        for key, node in self._items.items():
1287
            pass
1288
1289
    def refs(self):
1290
        """Return the references to other CHK's held by this node."""
1291
        if self._key is None:
1292
            raise AssertionError("unserialised nodes have no refs.")
1293
        refs = []
1294
        for value in self._items.itervalues():
4413.4.4 by John Arbash Meinel
Fix some type() == tuple to be 'type() is tuple' or '.__class__ is tuple'
1295
            if type(value) is tuple:
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1296
                refs.append(value)
1297
            else:
1298
                refs.append(value.key())
1299
        return refs
1300
1301
    def _compute_search_prefix(self, extra_key=None):
1302
        """Return the unique key prefix for this node.
1303
1304
        :return: A bytestring of the longest search key prefix that is
1305
            unique within this node.
1306
        """
1307
        self._search_prefix = self.common_prefix_for_keys(self._items)
1308
        return self._search_prefix
1309
3735.2.122 by Ian Clatworthy
don't check_remap on every unmap call in CHKMap.apply_delta()
1310
    def unmap(self, store, key, check_remap=True):
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1311
        """Remove key from this node and it's children."""
1312
        if not len(self._items):
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1313
            raise AssertionError("can't unmap in an empty InternalNode.")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1314
        children = [node for node, _
1315
                          in self._iter_nodes(store, key_filter=[key])]
1316
        if children:
1317
            child = children[0]
1318
        else:
1319
            raise KeyError(key)
1320
        self._len -= 1
1321
        unmapped = child.unmap(store, key)
1322
        self._key = None
1323
        search_key = self._search_key(key)
1324
        if len(unmapped) == 0:
1325
            # All child nodes are gone, remove the child:
1326
            del self._items[search_key]
1327
            unmapped = None
1328
        else:
1329
            # Stash the returned node
1330
            self._items[search_key] = unmapped
1331
        if len(self._items) == 1:
1332
            # this node is no longer needed:
1333
            return self._items.values()[0]
1334
        if type(unmapped) is InternalNode:
1335
            return self
3735.2.122 by Ian Clatworthy
don't check_remap on every unmap call in CHKMap.apply_delta()
1336
        if check_remap:
1337
            return self._check_remap(store)
1338
        else:
1339
            return self
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1340
1341
    def _check_remap(self, store):
1342
        """Check if all keys contained by children fit in a single LeafNode.
1343
1344
        :param store: A store to use for reading more nodes
1345
        :return: Either self, or a new LeafNode which should replace self.
1346
        """
1347
        # Logic for how we determine when we need to rebuild
1348
        # 1) Implicitly, unmap() has just removed a key, which means that the
1349
        #    child nodes have shrunk to some extent.
1350
        # 2) If all children are LeafNodes, it is possible that they could be
1351
        #    combined into a single LeafNode, which can then completely
1352
        #    replace this internal node
1353
        # 3) If *one* child is an InternalNode, we assume it has already done
1354
        #    all the work to determine that its children cannot collapse, and
1355
        #    we can then assume that those nodes *plus* the current nodes don't
1356
        #    have a chance of collapsing either.
1357
        #    So a very cheap check is: if 'unmapped' is an
1358
        #    InternalNode, we don't have to check further.
1359
1360
        # TODO: Another alternative is to check the total size of all known
1361
        #       LeafNodes. If there is some formula we can use to determine the
1362
        #       final size without actually having to read in any more
1363
        #       children, it would be nice to have. However, we have to be
1364
        #       careful with stuff like nodes that pull out the common prefix
1365
        #       of each key, as adding a new key can change the common prefix
1366
        #       and cause size changes greater than the length of one key.
1367
        #       So for now, we just add everything to a new Leaf until it
1368
        #       splits, as we know that will give the right answer
1369
        new_leaf = LeafNode(search_key_func=self._search_key_func)
1370
        new_leaf.set_maximum_size(self._maximum_size)
1371
        new_leaf._key_width = self._key_width
1372
        # A batch_size of 16 was chosen because:
1373
        #   a) In testing, a 4k page held 14 times. So if we have more than 16
1374
        #      leaf nodes we are unlikely to hold them in a single new leaf
1375
        #      node. This still allows for 1 round trip
1376
        #   b) With 16-way fan out, we can still do a single round trip
1377
        #   c) With 255-way fan out, we don't want to read all 255 and destroy
1378
        #      the page cache, just to determine that we really don't need it.
1379
        for node, _ in self._iter_nodes(store, batch_size=16):
1380
            if type(node) is InternalNode:
1381
                # Without looking at any leaf nodes, we are sure we cannot collapse
1382
                return self
1383
            for key, value in node._items.iteritems():
1384
                if new_leaf._map_no_split(key, value):
1385
                    return self
3735.2.123 by Ian Clatworthy
only check for remap if changes are interesting in size
1386
        trace.mutter("remap generated a new LeafNode")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1387
        return new_leaf
1388
1389
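# Editor's note: the collapse test in InternalNode._check_remap above can be
# summarised by the following sketch (the function name is hypothetical, not
# part of this module): pour every child's items into one trial LeafNode and
# give up as soon as it would have to split.
#
#   def _would_collapse(children, trial_leaf):
#       for child in children:
#           if type(child) is InternalNode:
#               return False       # an InternalNode child: cannot collapse
#           for key, value in child._items.iteritems():
#               if trial_leaf._map_no_split(key, value):
#                   return False   # the trial leaf would split: too big
#       return True                # everything fits in a single LeafNode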
1390
def _deserialise(bytes, key, search_key_func):
1391
    """Helper for repositorydetails - convert bytes to a node."""
1392
    if bytes.startswith("chkleaf:\n"):
1393
        node = LeafNode.deserialise(bytes, key, search_key_func=search_key_func)
1394
    elif bytes.startswith("chknode:\n"):
1395
        node = InternalNode.deserialise(bytes, key,
1396
            search_key_func=search_key_func)
1397
    else:
1398
        raise AssertionError("Unknown node type.")
1399
    return node
1400
1401
4476.1.38 by John Arbash Meinel
Rename InterestingNodeIterator => CHKMapDifference, update tests.
1402
class CHKMapDifference(object):
1403
    """Iterate the stored pages and key,value pairs for (new - old).
1404
1405
    This class provides a generator over the stored CHK pages and the
1406
    (key, value) pairs that are in any of the new maps and not in any of the
1407
    old maps.
1408
1409
    Note that it may yield chk pages that are common (especially root nodes),
1410
    but it won't yield (key,value) pairs that are common.
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1411
    """
1412
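    # Editor's note: an illustrative view of the "(new - old)" semantics
    # described above, with made-up maps:
    #
    #   old maps hold  {('a',): '1', ('b',): '2'}
    #   new maps hold  {('a',): '1', ('b',): '3', ('c',): '4'}
    #
    #   => yielded items: (('b',), '3') and (('c',), '4')
    #   => (('a',), '1') is never yielded because it appears in an old map,
    #      although common *pages* (especially root nodes) may still be
    #      yielded as records.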
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1413
    def __init__(self, store, new_root_keys, old_root_keys,
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1414
                 search_key_func, pb=None):
1415
        self._store = store
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1416
        self._new_root_keys = new_root_keys
1417
        self._old_root_keys = old_root_keys
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1418
        self._pb = pb
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1419
        # All uninteresting chks that we have seen. By the time they are added
1420
        # here, they should be either fully ignored, or queued up for
1421
        # processing
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1422
        self._all_old_chks = set(self._old_root_keys)
1423
        # All items that we have seen from the old_root_keys
1424
        self._all_old_items = set()
4476.1.32 by John Arbash Meinel
A few more updates.
1425
        # These are interesting items which were either read, or already in the
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1426
        # interesting queue (so we don't need to walk them again)
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1427
        self._processed_new_refs = set()
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1428
        self._search_key_func = search_key_func
1429
4476.1.33 by John Arbash Meinel
Simpify the code a lot by ignoring the heapq stuff.
1430
        # The uninteresting and interesting nodes to be searched
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1431
        self._old_queue = []
1432
        self._new_queue = []
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1433
        # Holds the (key, value) items found when processing the root nodes,
1434
        # waiting for the uninteresting nodes to be walked
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1435
        self._new_item_queue = []
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1436
        self._state = None
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1437
1438
    def _read_nodes_from_store(self, keys):
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1439
        # We chose not to use _page_cache, because we think in terms of records
1440
        # to be yielded. Also, we expect to touch each page only 1 time during
1441
        # this code. (We may want to evaluate saving the raw bytes into the
1442
        # page cache, which would allow a working tree update after the fetch
1443
        # to not have to read the bytes again.)
4476.1.12 by John Arbash Meinel
Start testing the new class.
1444
        stream = self._store.get_record_stream(keys, 'unordered', True)
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1445
        for record in stream:
1446
            if self._pb is not None:
1447
                self._pb.tick()
1448
            if record.storage_kind == 'absent':
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1449
                raise errors.NoSuchRevision(self._store, record.key)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1450
            bytes = record.get_bytes_as('fulltext')
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1451
            node = _deserialise(bytes, record.key,
1452
                                search_key_func=self._search_key_func)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1453
            if type(node) is InternalNode:
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1454
                # Note we don't have to do node.refs() because we know that
1455
                # there are no children that have been pushed into this node
1456
                prefix_refs = node._items.items()
1457
                items = []
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1458
            else:
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1459
                prefix_refs = []
1460
                items = node._items.items()
1461
            yield record, node, prefix_refs, items
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1462
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1463
    def _read_old_roots(self):
1464
        old_chks_to_enqueue = []
1465
        all_old_chks = self._all_old_chks
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1466
        for record, node, prefix_refs, items in \
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1467
                self._read_nodes_from_store(self._old_root_keys):
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1468
            # Uninteresting node
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1469
            prefix_refs = [p_r for p_r in prefix_refs
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1470
                                if p_r[1] not in all_old_chks]
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1471
            new_refs = [p_r[1] for p_r in prefix_refs]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1472
            all_old_chks.update(new_refs)
1473
            self._all_old_items.update(items)
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1474
            # Queue up the uninteresting references
1475
            # Don't actually put them in the 'to-read' queue until we have
1476
            # finished checking the interesting references
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1477
            old_chks_to_enqueue.extend(prefix_refs)
1478
        return old_chks_to_enqueue
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1479
4476.1.40 by John Arbash Meinel
cleanup indentation.
1480
    def _enqueue_old(self, new_prefixes, old_chks_to_enqueue):
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1481
        # At this point, we have read all the uninteresting and interesting
1482
        # items, so we can queue up the uninteresting stuff, knowing that we've
1483
        # handled the interesting ones
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1484
        for prefix, ref in old_chks_to_enqueue:
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1485
            not_interesting = True
1486
            for i in xrange(len(prefix), 0, -1):
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1487
                if prefix[:i] in new_prefixes:
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1488
                    not_interesting = False
1489
                    break
1490
            if not_interesting:
1491
                # This prefix is not part of the remaining 'interesting set'
1492
                continue
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1493
            self._old_queue.append(ref)
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1494
1495
    def _read_all_roots(self):
1496
        """Read the root pages.
1497
1498
        This is structured as a generator, so that the root records can be
1499
        yielded up to whoever needs them without any buffering.
1500
        """
1501
        # This is the bootstrap phase
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1502
        if not self._old_root_keys:
1503
            # With no old_root_keys we can just shortcut and be ready
1504
            # for _flush_new_queue
1505
            self._new_queue = list(self._new_root_keys)
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1506
            return
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1507
        old_chks_to_enqueue = self._read_old_roots()
4476.1.12 by John Arbash Meinel
Start testing the new class.
1508
        # filter out any root keys that are already known to be uninteresting
4476.1.40 by John Arbash Meinel
cleanup indentation.
1509
        new_keys = set(self._new_root_keys).difference(self._all_old_chks)
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1510
        # These are prefixes that are present in new_keys that we are
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1511
        # planning to yield
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1512
        new_prefixes = set()
4476.1.18 by John Arbash Meinel
Tracked it down.
1513
        # We are about to yield all of these, so we don't want them getting
1514
        # added a second time
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1515
        processed_new_refs = self._processed_new_refs
1516
        processed_new_refs.update(new_keys)
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1517
        for record, node, prefix_refs, items in \
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1518
                self._read_nodes_from_store(new_keys):
4476.1.5 by John Arbash Meinel
Start working on a new InterestingNodeIterator class.
1519
            # At this level, we now know all the uninteresting references,
4476.1.35 by John Arbash Meinel
Change some of the inner loop workings into list comprehensions.
1520
            # so we filter and queue up whatever is remaining
1521
            prefix_refs = [p_r for p_r in prefix_refs
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1522
                           if p_r[1] not in self._all_old_chks
1523
                              and p_r[1] not in processed_new_refs]
4476.1.35 by John Arbash Meinel
Change some of the inner loop workings into list comprehensions.
1524
            refs = [p_r[1] for p_r in prefix_refs]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1525
            new_prefixes.update([p_r[0] for p_r in prefix_refs])
1526
            self._new_queue.extend(refs)
4476.1.34 by John Arbash Meinel
Major rework, simplify what is put into the queues.
1527
            # TODO: We can potentially get multiple items here, however the
1528
            #       current design allows for this, as callers will do the work
1529
            #       to make the results unique. We might profile whether we
1530
            #       gain anything by ensuring unique return values for items
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1531
            new_items = [item for item in items
4476.1.40 by John Arbash Meinel
cleanup indentation.
1532
                               if item not in self._all_old_items]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1533
            self._new_item_queue.extend(new_items)
1534
            new_prefixes.update([self._search_key_func(item[0])
4476.1.40 by John Arbash Meinel
cleanup indentation.
1535
                                 for item in new_items])
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1536
            processed_new_refs.update(refs)
4476.1.13 by John Arbash Meinel
Test that _read_all_roots does what is expected
1537
            yield record
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1538
        # For new_prefixes we have the full length prefixes queued up.
4476.1.35 by John Arbash Meinel
Change some of the inner loop workings into list comprehensions.
1539
        # However, we also need possible prefixes. (If we have a known ref to
1540
        # 'ab', then we also need to include 'a'.) So expand the
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1541
        # new_prefixes to include all shorter prefixes
1542
        for prefix in list(new_prefixes):
4476.1.40 by John Arbash Meinel
cleanup indentation.
1543
            new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
1544
        self._enqueue_old(new_prefixes, old_chks_to_enqueue)
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1545
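    # Editor's note: a small worked example of the prefix handling above,
    # using hypothetical prefixes.  If the new side references children under
    # the full-width prefixes 'abq' and 'abr', the expansion loop grows
    # new_prefixes to include every shorter prefix as well:
    #
    #   new_prefixes = set(['abq', 'abr'])
    #   for prefix in list(new_prefixes):
    #       new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
    #   # new_prefixes == set(['a', 'ab', 'abq', 'abr'])
    #
    # _enqueue_old() then keeps an old reference queued under prefix 'abc'
    # (its prefix 'ab' is in new_prefixes, so the two sides may overlap) but
    # drops one queued under 'xy' (no prefix of 'xy' appears on the new
    # side).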
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1546
    def _flush_new_queue(self):
4476.1.27 by John Arbash Meinel
Rewrite of _flush_interesting_queue
1547
        # No need to maintain the heap invariant anymore, just pull things out
1548
        # and process them
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1549
        refs = set(self._new_queue)
1550
        self._new_queue = []
4476.1.31 by John Arbash Meinel
streamline the _flush_interesting_queue a bit.
1551
        # First pass: flush all interesting items and convert to using direct refs
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1552
        all_old_chks = self._all_old_chks
1553
        processed_new_refs = self._processed_new_refs
1554
        all_old_items = self._all_old_items
1555
        new_items = [item for item in self._new_item_queue
4476.1.40 by John Arbash Meinel
cleanup indentation.
1556
                           if item not in all_old_items]
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1557
        self._new_item_queue = []
1558
        if new_items:
1559
            yield None, new_items
1560
        refs = refs.difference(all_old_chks)
4476.1.31 by John Arbash Meinel
streamline the _flush_interesting_queue a bit.
1561
        while refs:
1562
            next_refs = set()
1563
            next_refs_update = next_refs.update
1564
            # Inlining _read_nodes_from_store improves 'bzr branch bzr.dev'
1565
            # from 1m54s to 1m51s. Consider it.
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1566
            for record, _, p_refs, items in self._read_nodes_from_store(refs):
4476.1.27 by John Arbash Meinel
Rewrite of _flush_interesting_queue
1567
                items = [item for item in items
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1568
                         if item not in all_old_items]
4476.1.27 by John Arbash Meinel
Rewrite of _flush_interesting_queue
1569
                yield record, items
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1570
                next_refs_update([p_r[1] for p_r in p_refs])
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1571
            next_refs = next_refs.difference(all_old_chks)
1572
            next_refs = next_refs.difference(processed_new_refs)
1573
            processed_new_refs.update(next_refs)
4476.1.31 by John Arbash Meinel
streamline the _flush_interesting_queue a bit.
1574
            refs = next_refs
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1575
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1576
    def _process_next_old(self):
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1577
        # Since we don't filter uninteresting any further than during
1578
        # _read_all_roots, process the whole queue in a single pass.
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1579
        refs = self._old_queue
1580
        self._old_queue = []
1581
        all_old_chks = self._all_old_chks
4476.1.32 by John Arbash Meinel
A few more updates.
1582
        for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1583
            self._all_old_items.update(items)
1584
            refs = [r for _,r in prefix_refs if r not in all_old_chks]
1585
            self._old_queue.extend(refs)
1586
            all_old_chks.update(refs)
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1587
1588
    def _process_queues(self):
4476.1.39 by John Arbash Meinel
Rename interesting => new, uninteresting => old
1589
        while self._old_queue:
1590
            self._process_next_old()
1591
        return self._flush_new_queue()
4476.1.17 by John Arbash Meinel
Start running all of the iter_interesting_nodes tests
1592
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1593
    def process(self):
1594
        for record in self._read_all_roots():
1595
            yield record, []
1596
        for record, items in self._process_queues():
1597
            yield record, items
1598
4476.1.25 by John Arbash Meinel
A bit more testing.
1599
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1600
def iter_interesting_nodes(store, interesting_root_keys,
1601
                           uninteresting_root_keys, pb=None):
1602
    """Given root keys, find interesting nodes.
1603
1604
    Evaluate nodes referenced by interesting_root_keys. Ones that are also
1605
    referenced from uninteresting_root_keys are not considered interesting.
1606
1607
    :param interesting_root_keys: keys which should be part of the
1608
        "interesting" nodes (which will be yielded)
1609
    :param uninteresting_root_keys: keys which should be filtered out of the
1610
        result set.
1611
    :return: Yields
1612
        (interesting record, {interesting key:values})
1613
    """
4476.1.38 by John Arbash Meinel
Rename InterestingNodeIterator => CHKMapDifference, update tests.
1614
    iterator = CHKMapDifference(store, interesting_root_keys,
1615
                                uninteresting_root_keys,
1616
                                search_key_func=store._search_key_func,
1617
                                pb=pb)
4476.1.37 by John Arbash Meinel
Some small code cleanup passes
1618
    return iterator.process()
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1619
1620
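# Editor's note: a minimal usage sketch for iter_interesting_nodes(), not part
# of the original module.  ``store`` is assumed to be a CHK-capable
# VersionedFiles store and the root keys are assumed to come from existing
# CHKMaps; the helper name is hypothetical.
def _example_new_items(store, new_root_keys, old_root_keys):
    """Return a dict of every (key, value) reachable only via new_root_keys."""
    result = {}
    for record, items in iter_interesting_nodes(store, new_root_keys,
                                                old_root_keys):
        # record is a stored page that may need copying; it can be None when
        # only buffered root items are being flushed.
        for key, value in items:
            result[key] = value
    return result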
1621
try:
1622
    from bzrlib._chk_map_pyx import (
1623
        _search_key_16,
1624
        _search_key_255,
1625
        _deserialise_leaf_node,
1626
        _deserialise_internal_node,
1627
        )
1628
except ImportError:
1629
    from bzrlib._chk_map_py import (
1630
        _search_key_16,
1631
        _search_key_255,
1632
        _deserialise_leaf_node,
1633
        _deserialise_internal_node,
1634
        )
1635
search_key_registry.register('hash-16-way', _search_key_16)
1636
search_key_registry.register('hash-255-way', _search_key_255)