# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from bisect import bisect_right
from copy import deepcopy
import math
import tempfile
import zlib

from bzrlib import (
    chunk_writer,
    debug,
    errors,
    index,
    lru_cache,
    osutils,
    trace,
    )
from bzrlib.index import _OPTION_NODE_REFS, _OPTION_KEY_ELEMENTS, _OPTION_LEN
from bzrlib.transport import get_transport

_BTSIGNATURE = "B+Tree Graph Index 2\n"
_OPTION_ROW_LENGTHS = "row_lengths="
_LEAF_FLAG = "type=leaf\n"
_INTERNAL_FLAG = "type=internal\n"
_INTERNAL_OFFSET = "offset="

_RESERVED_HEADER_BYTES = 120
_PAGE_SIZE = 4096

# 4K per page: 4MB - 1000 entries
_NODE_CACHE_SIZE = 1000


class _BuilderRow(object):
    """The stored state accumulated while writing out a row in the index.

    :ivar spool: A temporary file used to accumulate nodes for this row
        in the index.
    :ivar nodes: The count of nodes emitted so far.
    """

    def __init__(self):
        """Create a _BuilderRow."""
        self.nodes = 0
        self.spool = tempfile.TemporaryFile()
        self.writer = None

    def finish_node(self, pad=True):
        byte_lines, _, padding = self.writer.finish()
        if self.nodes == 0:
            # reserve space for the header in the first page of this row
            self.spool.write("\x00" * _RESERVED_HEADER_BYTES)
        skipped_bytes = 0
        if not pad and padding:
            del byte_lines[-1]
            skipped_bytes = padding
        self.spool.writelines(byte_lines)
        remainder = (self.spool.tell() + skipped_bytes) % _PAGE_SIZE
        if remainder != 0:
            raise AssertionError("incorrect node length: %d, %d"
                                 % (self.spool.tell(), remainder))
        self.nodes += 1
        self.writer = None


class _InternalBuilderRow(_BuilderRow):
    """The stored state accumulated while writing out internal rows."""

    def finish_node(self, pad=True):
        if not pad:
            raise AssertionError("Must pad internal nodes only.")
        _BuilderRow.finish_node(self)


class _LeafBuilderRow(_BuilderRow):
    """The stored state accumulated while writing out leaf rows."""


class BTreeBuilder(index.GraphIndexBuilder):
    """A Builder for B+Tree based Graph indices.

    The resulting graph has the structure:

    _SIGNATURE OPTIONS NODES
    _SIGNATURE := 'B+Tree Graph Index 2' NEWLINE
    OPTIONS := REF_LISTS KEY_ELEMENTS LENGTH
    REF_LISTS := 'node_ref_lists=' DIGITS NEWLINE
    KEY_ELEMENTS := 'key_elements=' DIGITS NEWLINE
    LENGTH := 'len=' DIGITS NEWLINE
    ROW_LENGTHS := 'row_lengths=' DIGITS (COMMA DIGITS)* NEWLINE
    NODES := NODE_COMPRESSED*
    NODE_COMPRESSED := COMPRESSED_BYTES{4096}
    NODE_RAW := INTERNAL | LEAF
    INTERNAL := INTERNAL_FLAG POINTERS
    LEAF := LEAF_FLAG ROWS
    KEY_ELEMENT := Not-whitespace-utf8
    KEY := KEY_ELEMENT (NULL KEY_ELEMENT)*
    ABSENT := 'a'
    ROW := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
    REFERENCES := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
    REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
    REFERENCE := KEY
    VALUE := no-newline-no-null-bytes
    """
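    # A minimal usage sketch (the key and value contents below are invented
    # for illustration and are not part of this module's documented API):
    #
    #   builder = BTreeBuilder(reference_lists=1, key_elements=1)
    #   builder.add_node(('key1',), 'value1', ([],))
    #   builder.add_node(('key2',), 'value2', ([('key1',)],))
    #   temp_file = builder.finish()  # file handle to the serialised B+Tree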

    def __init__(self, reference_lists=0, key_elements=1, spill_at=100000):
        """See GraphIndexBuilder.__init__.

        :param spill_at: Optional parameter controlling the maximum number
            of nodes that BTreeBuilder will hold in memory.
        """
        index.GraphIndexBuilder.__init__(self, reference_lists=reference_lists,
            key_elements=key_elements)
        self._spill_at = spill_at
        self._backing_indices = []
        # A map of {key: (node_refs, value)}
        self._nodes = {}
        # Indicate it hasn't been built yet
        self._nodes_by_key = None
        self._optimize_for_size = False

    def add_node(self, key, value, references=()):
        """Add a node to the index.

        If adding the node causes the builder to reach its spill_at threshold,
        disk spilling will be triggered.

        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \0 or \n.
        """
        # we don't care about absent_references
        node_refs, _ = self._check_key_ref_value(key, references, value)
        if key in self._nodes:
            raise errors.BadIndexDuplicateKey(key, self)
        self._nodes[key] = (node_refs, value)
        self._keys.add(key)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)
        if len(self._keys) < self._spill_at:
            return
        self._spill_mem_keys_to_disk()

    def _spill_mem_keys_to_disk(self):
        """Write the in memory keys down to disk to cap memory consumption.

        If we already have some keys written to disk, we will combine them so
        as to preserve the sorted order. The algorithm for combining uses
        powers of two. So on the first spill, write all mem nodes into a
        single index. On the second spill, combine the mem nodes with the nodes
        on disk to create a 2x sized disk index and get rid of the first index.
        On the third spill, create a single new disk index, which will contain
        the mem nodes, and preserve the existing 2x sized index. On the fourth,
        combine mem with the first and second indexes, creating a new one of
        size 4x. On the fifth create a single new one, etc.
        """
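        # Illustration of the combining pattern (sizes in spill units; None
        # marks a slot whose contents were folded into a later index):
        #   spill 1: _backing_indices = [1]
        #   spill 2: _backing_indices = [None, 2]
        #   spill 3: _backing_indices = [1, 2]
        #   spill 4: _backing_indices = [None, None, 4]
        #   spill 5: _backing_indices = [1, None, 4]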
        iterators_to_combine = [self._iter_mem_nodes()]
        pos = -1
        for pos, backing in enumerate(self._backing_indices):
            if backing is None:
                pos -= 1
                break
            iterators_to_combine.append(backing.iter_all_entries())
        backing_pos = pos + 1
        new_backing_file, size = \
            self._write_nodes(self._iter_smallest(iterators_to_combine))
        dir_path, base_name = osutils.split(new_backing_file.name)
        # Note: The transport here isn't strictly needed, because we will use
        # direct access to the new_backing._file object
        new_backing = BTreeGraphIndex(get_transport(dir_path),
                                      base_name, size)
        # GC will clean up the file
        new_backing._file = new_backing_file
        if len(self._backing_indices) == backing_pos:
            self._backing_indices.append(None)
        self._backing_indices[backing_pos] = new_backing
        for pos in range(backing_pos):
            self._backing_indices[pos] = None
        self._keys = set()
        self._nodes = {}
        self._nodes_by_key = None

    def add_nodes(self, nodes):
        """Add nodes to the index.

        :param nodes: An iterable of (key, value, node_refs) entries to add.
        """
        if self.reference_lists:
            for (key, value, node_refs) in nodes:
                self.add_node(key, value, node_refs)
        else:
            for (key, value) in nodes:
                self.add_node(key, value)

    def _iter_mem_nodes(self):
        """Iterate over the nodes held in memory."""
        nodes = self._nodes
        if self.reference_lists:
            for key in sorted(nodes):
                references, value = nodes[key]
                yield self, key, value, references
        else:
            for key in sorted(nodes):
                references, value = nodes[key]
                yield self, key, value

    def _iter_smallest(self, iterators_to_combine):
        """Merge the sorted entry streams, yielding entries in key order."""
        if len(iterators_to_combine) == 1:
            for value in iterators_to_combine[0]:
                yield value
            return
        current_values = []
        for iterator in iterators_to_combine:
            try:
                current_values.append(iterator.next())
            except StopIteration:
                current_values.append(None)
        last = None
        while True:
            # Decorate candidates with the value to allow 2.4's min to be used.
            candidates = [(item[1][1], item) for item
                          in enumerate(current_values) if item[1] is not None]
            if not len(candidates):
                return
            selected = min(candidates)
            # undecorate back to (pos, node)
            selected = selected[1]
            if last == selected[1][1]:
                raise errors.BadIndexDuplicateKey(last, self)
            last = selected[1][1]
            # Yield, with self as the index
            yield (self,) + selected[1][1:]
            pos = selected[0]
            try:
                current_values[pos] = iterators_to_combine[pos].next()
            except StopIteration:
                current_values[pos] = None

    def _add_key(self, string_key, line, rows):
        """Add a key to the current chunk.

        :param string_key: The key to add.
        :param line: The fully serialised key and value.
        """
        if rows[-1].writer is None:
            # opening a new leaf chunk;
            for pos, internal_row in enumerate(rows[:-1]):
                # flesh out any internal nodes that are needed to
                # preserve the height of the tree
                if internal_row.writer is None:
                    length = _PAGE_SIZE
                    if internal_row.nodes == 0:
                        length -= _RESERVED_HEADER_BYTES # padded
                    internal_row.writer = chunk_writer.ChunkWriter(length, 0,
                        optimize_for_size=self._optimize_for_size)
                    internal_row.writer.write(_INTERNAL_FLAG)
                    internal_row.writer.write(_INTERNAL_OFFSET +
                        str(rows[pos + 1].nodes) + "\n")
            # open a new leaf node
            length = _PAGE_SIZE
            if rows[-1].nodes == 0:
                length -= _RESERVED_HEADER_BYTES # padded
            rows[-1].writer = chunk_writer.ChunkWriter(length,
                optimize_for_size=self._optimize_for_size)
            rows[-1].writer.write(_LEAF_FLAG)
        if rows[-1].writer.write(line):
            # this key did not fit in the node:
            rows[-1].finish_node()
            key_line = string_key + "\n"
            new_row = True
            for row in reversed(rows[:-1]):
                # Mark the start of the next node in the node above. If it
                # doesn't fit then propagate upwards until we find one that
                # does fit.
                if row.writer.write(key_line):
                    row.finish_node()
                else:
                    # We've found a node that can handle the pointer.
                    new_row = False
                    break
            if new_row:
                # If we reached the current root without being able to mark the
                # division point, then we need a new root:
                if 'index' in debug.debug_flags:
                    trace.mutter('Inserting new global row.')
                new_row = _InternalBuilderRow()
                reserved_bytes = 0
                rows.insert(0, new_row)
                # This node will be padded, hence subtract the reserved
                # header bytes from the writable length
                new_row.writer = chunk_writer.ChunkWriter(
                    _PAGE_SIZE - _RESERVED_HEADER_BYTES,
                    reserved_bytes,
                    optimize_for_size=self._optimize_for_size)
                new_row.writer.write(_INTERNAL_FLAG)
                new_row.writer.write(_INTERNAL_OFFSET +
                    str(rows[1].nodes - 1) + "\n")
                new_row.writer.write(key_line)
            self._add_key(string_key, line, rows)

    def _write_nodes(self, node_iterator):
        """Write node_iterator out as a B+Tree.

        :param node_iterator: An iterator of sorted nodes. Each node should
            match the output given by iter_all_entries.
        :return: A file handle for a temporary file containing a B+Tree for
            the nodes.
        """
        # The index rows - rows[0] is the root, rows[1] is the layer under it
        # etc.
        rows = []
        # forward sorted by key. In future we may consider topological sorting,
        # at the cost of table scans for direct lookup, or a second index for
        # direct lookup
        key_count = 0
        # A stack with the number of nodes of each size. 0 is the root node
        # and must always be 1 (if there are any nodes in the tree).
        self.row_lengths = []
        # Loop over all nodes adding them to the bottom row
        # (rows[-1]). When we finish a chunk in a row,
        # propagate the key that didn't fit (comes after the chunk) to the
        # row above, transitively.
        for node in node_iterator:
            if key_count == 0:
                # First key triggers the first row
                rows.append(_LeafBuilderRow())
            key_count += 1
            string_key, line = _btree_serializer._flatten_node(node,
                self.reference_lists)
            self._add_key(string_key, line, rows)
        for row in reversed(rows):
            pad = (type(row) != _LeafBuilderRow)
            row.finish_node(pad=pad)
        result = tempfile.NamedTemporaryFile()
        lines = [_BTSIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        lines.append(_OPTION_LEN + str(key_count) + '\n')
        row_lengths = [row.nodes for row in rows]
        lines.append(_OPTION_ROW_LENGTHS + ','.join(map(str, row_lengths)) + '\n')
        result.writelines(lines)
        position = sum(map(len, lines))
        if position > _RESERVED_HEADER_BYTES:
            raise AssertionError("Could not fit the header in the"
                                 " reserved space: %d > %d"
                                 % (position, _RESERVED_HEADER_BYTES))
        # write the rows out:
        for row in rows:
            reserved = _RESERVED_HEADER_BYTES # reserved space for first node
            row.spool.flush()
            row.spool.seek(0)
            # copy nodes to the finalised file.
            # Special case the first node as it may be prefixed
            node = row.spool.read(_PAGE_SIZE)
            result.write(node[reserved:])
            result.write("\x00" * (reserved - position))
            position = 0 # Only the root row actually has an offset
            copied_len = osutils.pumpfile(row.spool, result)
            if copied_len != (row.nodes - 1) * _PAGE_SIZE:
                if type(row) != _LeafBuilderRow:
                    raise AssertionError("Incorrect amount of data copied"
                                         " expected: %d, got: %d"
                                         % ((row.nodes - 1) * _PAGE_SIZE,
                                            copied_len))
        result.flush()
        size = result.tell()
        result.seek(0)
        return result, size

    def finish(self):
        """Finalise the index.

        :return: A file handle for a temporary file containing the nodes added
            to the index.
        """
        return self._write_nodes(self.iter_all_entries())[0]

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        :return: An iterable of (index, key, reference_lists, value). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        # Doing serial rather than ordered would be faster; but this shouldn't
        # be getting called routinely anyway.
        iterators = [self._iter_mem_nodes()]
        for backing in self._backing_indices:
            if backing is not None:
                iterators.append(backing.iter_all_entries())
        if len(iterators) == 1:
            return iterators[0]
        return self._iter_smallest(iterators)

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        keys = set(keys)
        local_keys = keys.intersection(self._keys)
        if self.reference_lists:
            for key in local_keys:
                node = self._nodes[key]
                yield self, key, node[1], node[0]
        else:
            for key in local_keys:
                node = self._nodes[key]
                yield self, key, node[1]
        # Find things that are in backing indices that have not been handled
        # yet.
        if not self._backing_indices:
            return # We won't find anything there either
        # Remove all of the keys that we found locally
        keys.difference_update(local_keys)
        for backing in self._backing_indices:
            if backing is None:
                continue
            found_keys = set()
            for node in backing.iter_entries(keys):
                found_keys.add(node[1])
                yield (self,) + node[1:]
            keys.difference_update(found_keys)

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        # XXX: Too much duplication with the GraphIndex class; consider finding
        # a good place to pull out the actual common logic.
        keys = set(keys)
        if not keys:
            return
        for backing in self._backing_indices:
            if backing is None:
                continue
            for node in backing.iter_entries_prefix(keys):
                yield (self,) + node[1:]
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                try:
                    node = self._nodes[key]
                except KeyError:
                    continue
                if self.reference_lists:
                    yield self, key, node[1], node[0]
                else:
                    yield self, key, node[1]
            return
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = self._get_nodes_by_key()
            elements = list(key)
            # find the subdict to return
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements.pop(0)
            except KeyError:
                # a non-existent lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            yield (self, ) + value
            else:
                yield (self, ) + key_dict

    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
            if self.reference_lists:
                for key, (references, value) in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value, references
            else:
                for key, (references, value) in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For InMemoryGraphIndex the estimate is exact.
        """
        return len(self._keys) + sum(backing.key_count() for backing in
            self._backing_indices if backing is not None)

    def validate(self):
        """In memory indexes have no known corruption at the moment."""


class _LeafNode(object):
    """A leaf node for a serialised B+Tree index."""

    def __init__(self, bytes, key_length, ref_list_length):
        """Parse bytes to create a leaf node object."""
        # splitlines mangles the \r delimiters.. don't use it.
        self.keys = dict(_btree_serializer._parse_leaf_lines(bytes,
            key_length, ref_list_length))


class _InternalNode(object):
    """An internal node for a serialised B+Tree index."""

    def __init__(self, bytes):
        """Parse bytes to create an internal node object."""
        # splitlines mangles the \r delimiters.. don't use it.
        self.keys = self._parse_lines(bytes.split('\n'))

    def _parse_lines(self, lines):
        nodes = []
        # lines[0] is the node type flag; lines[1] is 'offset=<digits>'
        self.offset = int(lines[1][7:])
        for line in lines[2:]:
            if line == '':
                break
            nodes.append(tuple(line.split('\0')))
        return nodes


class BTreeGraphIndex(object):
    """Access to nodes via the standard GraphIndex interface for B+Trees.

    Individual nodes are held in a LRU cache. This holds the root node in
    memory except when very large walks are done.
    """

    def __init__(self, transport, name, size):
        """Create a B+Tree index object on the index name.

        :param transport: The transport to read data for the index from.
        :param name: The file name of the index on transport.
        :param size: Optional size of the index in bytes. This allows
            compatibility with the GraphIndex API, as well as ensuring that
            the initial read (to read the root node header) can be done
            without over-reading even on empty indices, and on small indices
            allows single-IO to read the entire index.
        """
        self._transport = transport
        self._name = name
        self._size = size
        self._file = None
        self._recommended_pages = self._compute_recommended_pages()
        self._root_node = None
        # Default max size is 100,000 leaf values
        self._leaf_value_cache = None # lru_cache.LRUCache(100*1000)
        self._leaf_node_cache = lru_cache.LRUCache(_NODE_CACHE_SIZE)
        self._internal_node_cache = lru_cache.LRUCache()
        self._key_count = None
        self._row_lengths = None
        self._row_offsets = None # Start of each row, [-1] is the end

    def __eq__(self, other):
        """Equal when self and other were created with the same parameters."""
        return (
            type(self) == type(other) and
            self._transport == other._transport and
            self._name == other._name and
            self._size == other._size)

    def __ne__(self, other):
        return not self.__eq__(other)

    def _get_and_cache_nodes(self, nodes):
        """Read nodes and cache them in the lru.

        The nodes list supplied is sorted and then read from disk, each node
        being inserted into the _node_cache.

        Note: Asking for more nodes than the _node_cache can contain will
        result in some of the results being immediately discarded; to prevent
        this, an assertion is raised if more nodes are asked for than are
        cachable.

        :return: A dict of {node_pos: node}
        """
        found = {}
        start_of_leaves = None
        for node_pos, node in self._read_nodes(sorted(nodes)):
            if node_pos == 0: # Special case
                self._root_node = node
            else:
                if start_of_leaves is None:
                    start_of_leaves = self._row_offsets[-2]
                if node_pos < start_of_leaves:
                    self._internal_node_cache.add(node_pos, node)
                else:
                    self._leaf_node_cache.add(node_pos, node)
            found[node_pos] = node
        return found

    def _compute_recommended_pages(self):
        """Convert transport's recommended_page_size into btree pages.

        recommended_page_size is in bytes, we want to know how many _PAGE_SIZE
        pages fit in that length.
        """
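        # For example, a 64kB recommended read over 4kB pages yields
        # ceil(65536 / 4096) = 16 pages per request.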
        recommended_read = self._transport.recommended_page_size()
        recommended_pages = int(math.ceil(recommended_read /
                                          float(_PAGE_SIZE)))
        return recommended_pages

    def _compute_total_pages_in_index(self):
        """How many pages are in the index.

        If we have read the header we will use the value stored there.
        Otherwise it will be computed based on the length of the index.
        """
        if self._size is None:
            raise AssertionError('_compute_total_pages_in_index should not be'
                                 ' called when self._size is None')
        if self._root_node is not None:
            # This is the number of pages as defined by the header
            return self._row_offsets[-1]
        # This is the number of pages as defined by the size of the index. They
        # should be identical.
        total_pages = int(math.ceil(self._size / float(_PAGE_SIZE)))
        return total_pages

    def _expand_offsets(self, offsets):
        """Find extra pages to download.

        The idea is that we always want to make big-enough requests (like 64kB
        for http), so that we don't waste round trips. So given the entries
        that we already have cached and the new pages being downloaded figure
        out what other pages we might want to read.

        See also doc/developers/btree_index_prefetch.txt for more details.

        :param offsets: The offsets to be read
        :return: A list of offsets to download
        """
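        # Example policy outcomes, per the checks below: a request already at
        # or over _recommended_pages is returned unchanged; if everything
        # still unread fits within the recommended read, all unread pages are
        # requested instead; otherwise neighbors are added layer by layer.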
        if 'index' in debug.debug_flags:
            trace.mutter('expanding: %s\toffsets: %s', self._name, offsets)

        if len(offsets) >= self._recommended_pages:
            # Don't add more, we are already requesting more than enough
            if 'index' in debug.debug_flags:
                trace.mutter('  not expanding large request (%s >= %s)',
                             len(offsets), self._recommended_pages)
            return offsets
        if self._size is None:
            # Don't try anything, because we don't know where the file ends
            if 'index' in debug.debug_flags:
                trace.mutter('  not expanding without knowing index size')
            return offsets
        total_pages = self._compute_total_pages_in_index()
        cached_offsets = self._get_offsets_to_cached_pages()
        # If reading recommended_pages would read the rest of the index, just
        # read it all
        if total_pages - len(cached_offsets) <= self._recommended_pages:
            # Read whatever is left
            if cached_offsets:
                expanded = [x for x in xrange(total_pages)
                            if x not in cached_offsets]
            else:
                expanded = range(total_pages)
            if 'index' in debug.debug_flags:
                trace.mutter('  reading all unread pages: %s', expanded)
            return expanded

        if self._root_node is None:
            # ATM on the first read of the root node of a large index, we don't
            # bother pre-reading any other pages. This is because the
            # likelihood of actually reading interesting pages is very low.
            # See doc/developers/btree_index_prefetch.txt for a discussion, and
            # a possible implementation when we are guessing that the second
            # layer index is small
            final_offsets = offsets
        else:
            tree_depth = len(self._row_lengths)
            if len(cached_offsets) < tree_depth and len(offsets) == 1:
                # We haven't read enough to justify expansion
                # If we are only going to read the root node, and 1 leaf node,
                # then it isn't worth expanding our request. Once we've read at
                # least 2 nodes, then we are probably doing a search, and we
                # start expanding our requests.
                if 'index' in debug.debug_flags:
                    trace.mutter('  not expanding on first reads')
                return offsets
            final_offsets = self._expand_to_neighbors(offsets, cached_offsets,
                                                      total_pages)

        final_offsets = sorted(final_offsets)
        if 'index' in debug.debug_flags:
            trace.mutter('expanded: %s', final_offsets)
        return final_offsets

    def _expand_to_neighbors(self, offsets, cached_offsets, total_pages):
        """Expand requests to neighbors until we have enough pages.

        This is called from _expand_offsets after policy has determined that we
        want to expand.
        We only want to expand requests within a given layer. We cheat a little
        bit and assume all requests will be in the same layer. This is true
        given the current design, but if it changes this algorithm may perform
        oddly.

        :param offsets: requested offsets
        :param cached_offsets: offsets for pages we currently have cached
        :return: A set() of offsets after expansion
        """
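        # For example (illustrative numbers): expanding {20} with 5
        # recommended pages may grow to {18, 19, 20, 21, 22}, stopping early
        # at the layer boundaries or at pages that are already cached.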
        final_offsets = set(offsets)
        first = None
        new_tips = set(final_offsets)
        while len(final_offsets) < self._recommended_pages and new_tips:
            next_tips = set()
            for pos in new_tips:
                if first is None:
                    first, end = self._find_layer_first_and_end(pos)
                previous = pos - 1
                if (previous > 0
                    and previous not in cached_offsets
                    and previous not in final_offsets
                    and previous >= first):
                    next_tips.add(previous)
                after = pos + 1
                if (after < total_pages
                    and after not in cached_offsets
                    and after not in final_offsets
                    and after < end):
                    next_tips.add(after)
                # This would keep us from going bigger than
                # recommended_pages by only expanding the first offsets.
                # However, if we are making a 'wide' request, it is
                # reasonable to expand all points equally.
                # if len(final_offsets) > recommended_pages:
                #     break
            final_offsets.update(next_tips)
            new_tips = next_tips
        return final_offsets

    def _find_layer_first_and_end(self, offset):
        """Find the start/stop nodes for the layer corresponding to offset.

        :return: (first, end)
            first is the first node in this layer
            end is the first node of the next layer
        """
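        # For example, with _row_offsets = [0, 1, 11, 111] an offset of 5
        # falls in the second layer, so this returns (1, 11).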
        first = end = 0
        for roffset in self._row_offsets:
            first = end
            end = roffset
            if offset < roffset:
                break
        return first, end

    def _get_offsets_to_cached_pages(self):
        """Determine what nodes we already have cached."""
        cached_offsets = set(self._internal_node_cache.keys())
        cached_offsets.update(self._leaf_node_cache.keys())
        if self._root_node is not None:
            cached_offsets.add(0)
        return cached_offsets

    def _get_root_node(self):
        if self._root_node is None:
            # We may not have a root node yet
            self._get_internal_nodes([0])
        return self._root_node

    def _get_nodes(self, cache, node_indexes):
        found = {}
        needed = []
        for idx in node_indexes:
            if idx == 0 and self._root_node is not None:
                found[0] = self._root_node
                continue
            try:
                found[idx] = cache[idx]
            except KeyError:
                needed.append(idx)
        if not needed:
            return found
        needed = self._expand_offsets(needed)
        found.update(self._get_and_cache_nodes(needed))
        return found

    def _get_internal_nodes(self, node_indexes):
        """Get a node, from cache or disk.

        After getting it, the node will be cached.
        """
        return self._get_nodes(self._internal_node_cache, node_indexes)

    def _cache_leaf_values(self, nodes):
        """Cache directly from key => value, skipping the btree."""
        if self._leaf_value_cache is not None:
            for node in nodes.itervalues():
                for key, value in node.keys.iteritems():
                    if key in self._leaf_value_cache:
                        # Don't add the rest of the keys, we've seen this node
                        # before.
                        break
                    self._leaf_value_cache[key] = value

    def _get_leaf_nodes(self, node_indexes):
        """Get a bunch of nodes, from cache or disk."""
        found = self._get_nodes(self._leaf_node_cache, node_indexes)
        self._cache_leaf_values(found)
        return found

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        :return: An iterable of (index, key, value) or (index, key, value, reference_lists).
            The former tuple is used when there are no reference lists in the
            index, making the API compatible with simple key:value index types.
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        if not self.key_count():
            return
        start_of_leaves = self._row_offsets[-2]
        end_of_leaves = self._row_offsets[-1]
        needed_offsets = range(start_of_leaves, end_of_leaves)
        if needed_offsets == [0]:
            # Special case when we only have a root node, as we have already
            # read everything
            nodes = [(0, self._root_node)]
        else:
            nodes = self._read_nodes(needed_offsets)
        # We iterate strictly in-order so that we can use this function
        # for spilling index builds to disk.
        if self.node_ref_lists:
            for _, node in nodes:
                for key, (value, refs) in sorted(node.keys.items()):
                    yield (self, key, value, refs)
        else:
            for _, node in nodes:
                for key, (value, refs) in sorted(node.keys.items()):
                    yield (self, key, value)

    @staticmethod
    def _multi_bisect_right(in_keys, fixed_keys):
        """Find the positions where each 'in_key' would fit in fixed_keys.

        This is equivalent to doing "bisect_right" on each in_key into
        fixed_keys.

        :param in_keys: A sorted list of keys to match with fixed_keys
        :param fixed_keys: A sorted list of keys to match against
        :return: A list of (integer position, [key list]) tuples.
        """
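        # For example, bisecting in_keys [1, 4, 9] into fixed_keys [3, 7]
        # returns [(0, [1]), (1, [4]), (2, [9])].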
        if not in_keys:
            return []
        if not fixed_keys:
            # no pointers in the fixed_keys list, which means everything must
            # fall to the left.
            return [(0, in_keys)]

        # TODO: Iterating both lists will generally take M + N steps
        #       Bisecting each key will generally take M * log2 N steps.
        #       If we had an efficient way to compare, we could pick the method
        #       based on which has the fewer number of steps.
        #       There is also the argument that bisect_right is a compiled
        #       function, so there is even more to be gained.
        # iter_steps = len(in_keys) + len(fixed_keys)
        # bisect_steps = len(in_keys) * math.log(len(fixed_keys), 2)
        if len(in_keys) == 1: # Bisect will always be faster for M = 1
            return [(bisect_right(fixed_keys, in_keys[0]), in_keys)]
        # elif bisect_steps < iter_steps:
        #     offsets = {}
        #     for key in in_keys:
        #         offsets.setdefault(bisect_right(fixed_keys, key),
        #                            []).append(key)
        #     return [(o, offsets[o]) for o in sorted(offsets)]
        in_keys_iter = iter(in_keys)
        fixed_keys_iter = enumerate(fixed_keys)
        cur_in_key = in_keys_iter.next()
        cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next()

        class InputDone(Exception): pass
        class FixedDone(Exception): pass

        output = []
        cur_out = []

        # TODO: Another possibility is that rather than iterating on each side,
        #       we could use a combination of bisecting and iterating. For
        #       example, while cur_in_key < fixed_key, bisect to find its
        #       point, then iterate all matching keys, then bisect (restricted
        #       to only the remainder) for the next one, etc.
        try:
            while True:
                if cur_in_key < cur_fixed_key:
                    cur_keys = []
                    cur_out = (cur_fixed_offset, cur_keys)
                    output.append(cur_out)
                    while cur_in_key < cur_fixed_key:
                        cur_keys.append(cur_in_key)
                        try:
                            cur_in_key = in_keys_iter.next()
                        except StopIteration:
                            raise InputDone
                # At this point cur_in_key must be >= cur_fixed_key
                # step the cur_fixed_key until we pass the cur key, or walk off
                # the end
                while cur_in_key >= cur_fixed_key:
                    try:
                        cur_fixed_offset, cur_fixed_key = fixed_keys_iter.next()
                    except StopIteration:
                        raise FixedDone
        except InputDone:
            # We consumed all of the input, nothing more to do
            pass
        except FixedDone:
            # There was some input left, but we consumed all of fixed, so we
            # have to add one more for the tail
            cur_keys = [cur_in_key]
            cur_keys.extend(in_keys_iter)
            cur_out = (len(fixed_keys), cur_keys)
            output.append(cur_out)

        return output

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys supplied. No additional keys will be returned, and every
            key supplied that is in the index will be returned.
        """
        # 6 seconds spent in miss_torture using the sorted() line.
        # Even with out of order disk IO it seems faster not to sort it when
        # large queries are being made.
        # However, now that we are doing multi-way bisecting, we need the keys
        # in sorted order anyway. We could change the multi-way code to not
        # require sorted order. (For example, it bisects for the first node,
        # does an in-order search until a key comes before the current point,
        # which it then bisects for, etc.)
        keys = frozenset(keys)
        if not keys:
            return

        if not self.key_count():
            return

        needed_keys = []
        if self._leaf_value_cache is None:
            needed_keys = keys
        else:
            for key in keys:
                value = self._leaf_value_cache.get(key, None)
                if value is not None:
                    # This key is in the leaf value cache; yield it without
                    # touching the btree.
                    value, refs = value
                    if self.node_ref_lists:
                        yield (self, key, value, refs)
                    else:
                        yield (self, key, value)
                else:
                    needed_keys.append(key)

        needed_keys = sorted(needed_keys)

        nodes_and_keys = [(0, needed_keys)]

        for row_pos, next_row_start in enumerate(self._row_offsets[1:-1]):
            node_indexes = [idx for idx, s_keys in nodes_and_keys]
            nodes = self._get_internal_nodes(node_indexes)

            next_nodes_and_keys = []
            for node_index, sub_keys in nodes_and_keys:
                node = nodes[node_index]
                positions = self._multi_bisect_right(sub_keys, node.keys)
                node_offset = next_row_start + node.offset
                next_nodes_and_keys.extend([(node_offset + pos, s_keys)
                                            for pos, s_keys in positions])
            nodes_and_keys = next_nodes_and_keys
        # We should now be at the _LeafNodes
        node_indexes = [idx for idx, s_keys in nodes_and_keys]

        # TODO: We may *not* want to always read all the nodes in one
        #       big go. Consider setting a max size on this.

        nodes = self._get_leaf_nodes(node_indexes)
        for node_index, sub_keys in nodes_and_keys:
            if not sub_keys:
                continue
            node = nodes[node_index]
            for next_sub_key in sub_keys:
                if next_sub_key in node.keys:
                    value, refs = node.keys[next_sub_key]
                    if self.node_ref_lists:
                        yield (self, next_sub_key, value, refs)
                    else:
                        yield (self, next_sub_key, value)

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        WARNING: Note that this method currently causes a full index parse
        unconditionally (which is reasonably appropriate as it is a means for
        thunking many small indices into one larger one and still supplies
        iter_all_entries at the thunk layer).

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        keys = sorted(set(keys))
        if not keys:
            return
        # Load if needed to check key lengths
        if self._key_count is None:
            self._get_root_node()
        # TODO: only access nodes that can satisfy the prefixes we are looking
        # for. For now, to meet API usage (as this function is not used by
        # current bzrlib) just suck the entire index and iterate in memory.
        nodes = {}
        nodes_by_key = None
        if self.node_ref_lists:
            if self._key_length == 1:
                for _1, key, value, refs in self.iter_all_entries():
                    nodes[key] = value, refs
            else:
                nodes_by_key = {}
                for _1, key, value, refs in self.iter_all_entries():
                    key_value = key, value, refs
                    # For a key of (foo, bar, baz) create
                    # _nodes_by_key[foo][bar][baz] = key_value
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key_value
        else:
            if self._key_length == 1:
                for _1, key, value in self.iter_all_entries():
                    nodes[key] = value
            else:
                nodes_by_key = {}
                for _1, key, value in self.iter_all_entries():
                    key_value = key, value
                    # For a key of (foo, bar, baz) create
                    # _nodes_by_key[foo][bar][baz] = key_value
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key_value
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                try:
                    if self.node_ref_lists:
                        value, node_refs = nodes[key]
                        yield self, key, value, node_refs
                    else:
                        yield self, key, nodes[key]
                except KeyError:
                    pass
            return
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict whose contents should be returned.
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements.pop(0)
            except KeyError:
                # a non-existent lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            # each value is the key:value:node refs tuple
                            # ready to yield.
                            yield (self, ) + value
            else:
                # the last thing looked up was a terminal element
                yield (self, ) + key_dict

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For BTreeGraphIndex the estimate is exact as it is contained in the
        header.
        """
        if self._key_count is None:
            self._get_root_node()
        return self._key_count

    def _compute_row_offsets(self):
        """Fill out the _row_offsets attribute based on _row_lengths."""
        offsets = []
        row_offset = 0
        for row in self._row_lengths:
            offsets.append(row_offset)
            row_offset += row
        offsets.append(row_offset)
        self._row_offsets = offsets

    def _parse_header_from_bytes(self, bytes):
        """Parse the header from a region of bytes.

        :param bytes: The data to parse.
        :return: An offset, data tuple such as readv yields, for the unparsed
            data. (which may be of length 0).
        """
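        # Per the grammar in BTreeBuilder's docstring, a header looks like
        # (values illustrative):
        #   B+Tree Graph Index 2\n
        #   node_ref_lists=1\n
        #   key_elements=2\n
        #   len=100\n
        #   row_lengths=1,10\n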
        signature = bytes[0:len(self._signature())]
        if not signature == self._signature():
            raise errors.BadIndexFormatSignature(self._name, BTreeGraphIndex)
        lines = bytes[len(self._signature()):].splitlines()
        options_line = lines[0]
        if not options_line.startswith(_OPTION_NODE_REFS):
            raise errors.BadIndexOptions(self)
        try:
            self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[1]
        if not options_line.startswith(_OPTION_KEY_ELEMENTS):
            raise errors.BadIndexOptions(self)
        try:
            self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[2]
        if not options_line.startswith(_OPTION_LEN):
            raise errors.BadIndexOptions(self)
        try:
            self._key_count = int(options_line[len(_OPTION_LEN):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[3]
        if not options_line.startswith(_OPTION_ROW_LENGTHS):
            raise errors.BadIndexOptions(self)
        try:
            self._row_lengths = map(int, [length for length in
                options_line[len(_OPTION_ROW_LENGTHS):].split(',')
                if len(length)])
        except ValueError:
            raise errors.BadIndexOptions(self)
        self._compute_row_offsets()

        # calculate the bytes we have processed
        header_end = (len(signature) + sum(map(len, lines[0:4])) + 4)
        return header_end, bytes[header_end:]

    def _read_nodes(self, nodes):
        """Read some nodes from disk into the LRU cache.

        This performs a readv to get the node data into memory, and parses each
        node, then yields it to the caller. The nodes are requested in the
        supplied order. If possible doing sort() on the list before requesting
        a read may improve performance.

        :param nodes: The nodes to read. 0 - first node, 1 - second node etc.
        """
        bytes = None
        ranges = []
        for index in nodes:
            offset = index * _PAGE_SIZE
            size = _PAGE_SIZE
            if index == 0:
                # Root node - special case
                if self._size:
                    size = min(_PAGE_SIZE, self._size)
                else:
                    # The only case where we don't know the size, is for very
                    # small indexes. So we read the whole thing
                    bytes = self._transport.get_bytes(self._name)
                    self._size = len(bytes)
                    ranges.append((0, len(bytes)))
                    break
            else:
                if offset > self._size:
                    raise AssertionError('tried to read past the end'
                                         ' of the file %s > %s'
                                         % (offset, self._size))
                size = min(size, self._size - offset)
            ranges.append((offset, size))
        if not ranges:
            return
        if bytes is not None:
            data_ranges = [(offset, bytes[offset:offset+_PAGE_SIZE])
                           for offset in xrange(0, len(bytes), _PAGE_SIZE)]
        elif self._file is None:
            data_ranges = self._transport.readv(self._name, ranges)
        else:
            data_ranges = []
            for offset, size in ranges:
                self._file.seek(offset)
                data_ranges.append((offset, self._file.read(size)))
        for offset, data in data_ranges:
            if offset == 0:
                # extract the header
                offset, data = self._parse_header_from_bytes(data)
                if len(data) == 0:
                    continue
            bytes = zlib.decompress(data)
            if bytes.startswith(_LEAF_FLAG):
                node = _LeafNode(bytes, self._key_length, self.node_ref_lists)
            elif bytes.startswith(_INTERNAL_FLAG):
                node = _InternalNode(bytes)
            else:
                raise AssertionError("Unknown node type for %r" % bytes)
            yield offset / _PAGE_SIZE, node

    def _signature(self):
        """The file signature for this index type."""
        return _BTSIGNATURE

    def validate(self):
        """Validate that everything in the index can be accessed."""
        # just read and parse every node.
        self._get_root_node()
        if len(self._row_lengths) > 1:
            start_node = self._row_offsets[1]
        else:
            # We shouldn't be reading anything anyway
            start_node = 1
        node_end = self._row_offsets[-1]
        for node in self._read_nodes(range(start_node, node_end)):
            pass


try:
    from bzrlib import _btree_serializer_c as _btree_serializer
except ImportError:
    from bzrlib import _btree_serializer_py as _btree_serializer