# Copyright (C) 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from __future__ import absolute_import

from ..lazy_import import lazy_import
lazy_import(globals(), """
from breezy.bzr import (
from breezy.i18n import gettext

from .btree_index import BTreeBuilder
from ..lru_cache import LRUSizeCache
from ..sixish import (
from .versionedfile import (
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFilesWithFallbacks,

# Minimum number of uncompressed bytes to try to fetch at once when retrieving
# groupcompress blocks.

# osutils.sha_string(b'')
_null_sha1 = b'da39a3ee5e6b4b0d3255bfef95601890afd80709'


def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    :return: A sorted-list of keys
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    for key, value in viewitems(parent_map):
        if isinstance(key, bytes) or len(key) == 1:
            per_prefix_map[prefix][key] = value
            per_prefix_map[prefix] = {key: value}

    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(tsort.topo_sort(per_prefix_map[prefix])))


class DecompressCorruption(errors.BzrError):

    _fmt = "Corruption while decompressing repository file%(orig_error)s"

    def __init__(self, orig_error=None):
        if orig_error is not None:
            self.orig_error = ", %s" % (orig_error,)
        errors.BzrError.__init__(self)


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32 * 1024
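

# Illustrative sketch (not part of the original module): how the 32kB window
# guarantee is typically used. Asking a zlib.decompressobj() for num_bytes
# plus _ZLIB_DECOMP_WINDOW of output is enough to ensure at least num_bytes of
# plain content come back, assuming that much content exists in the stream.
def _example_partial_decompress(z_content, num_bytes):
    import zlib
    decompressor = zlib.decompressobj()
    # max_length caps how much decompressed data this call may return; the
    # rest stays queued in decompressor.unconsumed_tail for later calls.
    content = decompressor.decompress(z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
    return content, decompressor.unconsumed_tail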


class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)

    # Group Compress Block v1 Zlib
    GCB_HEADER = b'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = b'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content_chunks = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content_chunks = None

        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        if self._content_length is None:
            raise AssertionError('self._content_length should never be None')
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = b''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            # We join self._z_content_chunks here, because if we are
            # decompressing, then it is *very* likely that we have a single
            if self._z_content_chunks is None:
                raise AssertionError('No content to decompress')
            z_content = b''.join(self._z_content_chunks)
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes * 4 > self._content_length * 3:
                    # If we are requesting more than 3/4ths of the content,
                    # just extract the whole thing in a single pass
                    num_bytes = self._content_length
                    self._content = zlib.decompress(z_content)
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
                    if not self._z_content_decompressor.unconsumed_tail:
                        self._z_content_decompressor = None
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressors
        # Do we have enough bytes already?
        if len(self._content) >= num_bytes:
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if not remaining_decomp:
            raise AssertionError('Nothing left to decompress')
        needed_bytes = num_bytes - len(self._content)
        # We always set max_size to 32kB over the minimum needed, so that
        # zlib will give us as much as we really want.
        # TODO: If this isn't good enough, we could make a loop here,
        #       that keeps expanding the request until we get enough
        self._content += self._z_content_decompressor.decompress(
            remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
        if len(self._content) < num_bytes:
            raise AssertionError('%d bytes wanted, only %d available'
                                 % (num_bytes, len(self._content)))
        if not self._z_content_decompressor.unconsumed_tail:
            # The stream is finished
            self._z_content_decompressor = None

    def _parse_bytes(self, data, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = data.index(b'\n', pos, pos + 14)
        self._z_content_length = int(data[pos:pos2])
        pos2 = data.index(b'\n', pos, pos + 14)
        self._content_length = int(data[pos:pos2])
        if len(data) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(data), pos, self._z_content_length))
        self._z_content_chunks = (data[pos:],)

    def _z_content(self):
        """Return z_content_chunks as a simple string.

        Meant only to be used by the test suite.
        if self._z_content_chunks is not None:
            return b''.join(self._z_content_chunks)

    def from_bytes(cls, bytes):
        if header not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        if header == cls.GCB_HEADER:
            out._compressor_name = 'zlib'
        elif header == cls.GCB_LZ_HEADER:
            out._compressor_name = 'lzma'
            raise ValueError('unknown compressor: %r' % (header,))
        out._parse_bytes(bytes, 6)

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        if start == end == 0:
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start:start + 1]
            raise ValueError('Unknown content control code: %s'
        content_len, len_len = decode_base128_int(
            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                             ' %s != %s' % (end, content_start + content_len))
            return [self._content[content_start:end]]
        # Must be type delta as checked above
        return [apply_delta_to_source(self._content, content_start, end)]
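
    # Illustrative example (not in the original source): the variable-length
    # base128 integer that follows the b'f'/b'd' type byte stores 7 bits per
    # byte, least significant group first, with the high bit set on every byte
    # except the last. A content length of 300, for instance, encodes to two
    # bytes:
    #
    #   >>> (300 & 0x7F) | 0x80, 300 >> 7
    #   (172, 2)
    #
    # so a fulltext record starts b'f' + b'\xac\x02' + <300 bytes of text>,
    # which is why at most 5 bytes are ever needed for lengths up to 2**32.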

    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        # If we have lots of short lines, it may be more efficient to join
        # the content ahead of time. If the content is <10MiB, we don't really
        # care about the extra memory consumption, so we can just pack it and
        # be done. However, timing showed 18s => 17.9s for repacking 1k revs of
        # mysql, which is below the noise margin
        self._content_length = length
        self._content_chunks = content_chunks
        self._z_content_chunks = None

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content_chunks = None

    def _create_z_content_from_chunks(self, chunks):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        # Peak in this point is 1 fulltext, 1 compressed text, + zlib overhead
        # (measured peak is maybe 30MB over the above...)
        compressed_chunks = list(map(compressor.compress, chunks))
        compressed_chunks.append(compressor.flush())
        # Ignore empty chunks
        self._z_content_chunks = [c for c in compressed_chunks if c]
        self._z_content_length = sum(map(len, self._z_content_chunks))

    def _create_z_content(self):
        if self._z_content_chunks is not None:
        if self._content_chunks is not None:
            chunks = self._content_chunks
            chunks = (self._content,)
        self._create_z_content_from_chunks(chunks)

        """Create the byte stream as a series of 'chunks'"""
        self._create_z_content()
        header = self.GCB_HEADER
        chunks = [b'%s%d\n%d\n'
                  % (header, self._z_content_length, self._content_length),
        chunks.extend(self._z_content_chunks)
        total_len = sum(map(len, chunks))
        return total_len, chunks

        """Encode the information into a byte stream."""
        total_len, chunks = self.to_chunks()
        return b''.join(chunks)
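
    # Illustrative layout (not in the original source): a serialised block is
    # just the 6-byte header, the two base-10 length lines, then the zlib
    # stream, e.g. for content compressing from 100 bytes down to 58:
    #
    #   b'gcb1z\n' + b'58\n' + b'100\n' + <58 bytes of zlib data>
    #
    # from_bytes() above recognises the header and _parse_bytes() reads the
    # two integer lines starting at offset 6 before keeping the remainder as
    # the compressed content.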

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits, choose whether you
            want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
        self._ensure_content()
        while pos < self._content_length:
            kind = self._content[pos:pos + 1]
            if kind not in (b'f', b'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                self._content[pos:pos + 5])
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == b'f':  # Fulltext
                    text = self._content[pos:pos + content_len]
                    result.append((b'f', content_len, text))
                    result.append((b'f', content_len))
            elif kind == b'd':  # Delta
                delta_content = self._content[pos:pos + content_len]
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append((b'd', content_len, decomp_len, delta_info))
                while delta_pos < content_len:
                    c = indexbytes(delta_content, delta_pos)
                        delta_pos) = decode_copy_instruction(delta_content, c,
                            text = self._content[offset:offset + length]
                            delta_info.append((b'c', offset, length, text))
                            delta_info.append((b'c', offset, length))
                        measured_len += length
                        txt = delta_content[delta_pos:delta_pos + c]
                        delta_info.append((b'i', c, txt))
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
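
    # Illustrative note (not in the original source): inside a b'd' record the
    # delta stream begins with the base128 length of the expanded text, then a
    # sequence of instructions. A byte with the high bit set (c & 0x80) starts
    # a copy instruction whose following bytes give an offset/length back into
    # earlier block content; a byte without the high bit means "insert the
    # next c literal bytes". _dump() above walks exactly this structure.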


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager wrapping the
            GroupCompressBlock that holds this record
        :param start: Offset of the first byte for this record in the
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        self.parents = parents
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        self._manager = manager
            self.storage_kind = 'groupcompress-block'
            self.storage_kind = 'groupcompress-block-ref'

        return '%s(%s, first=%s)' % (self.__class__.__name__,
                                     self.key, self._first)

    def _extract_bytes(self):
        # Grab and cache the raw bytes for this entry
        # and break the ref-cycle with _manager since we don't need it
            self._manager._prepare_for_extract()
        except zlib.error as value:
            raise DecompressCorruption("zlib: " + str(value))
        block = self._manager._block
        self._chunks = block.extract(self.key, self._start, self._end)
        # There are code paths that first extract as fulltext, and then
        # extract as storage_kind (smart fetch). So we don't break the
        # refcycle here, but instead in manager.get_record_stream()

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            # wire bytes, something...
            return self._manager._wire_bytes()
        if storage_kind in ('fulltext', 'chunked', 'lines'):
            if self._chunks is None:
                self._extract_bytes()
            if storage_kind == 'fulltext':
                return b''.join(self._chunks)
            elif storage_kind == 'chunked':
                return osutils.chunks_to_lines(self._chunks)
        raise errors.UnavailableRepresentation(self.key, storage_kind,

    def iter_bytes_as(self, storage_kind):
        if self._chunks is None:
            self._extract_bytes()
        if storage_kind == 'chunked':
            return iter(self._chunks)
        elif storage_kind == 'lines':
            return iter(osutils.chunks_to_lines(self._chunks))
        raise errors.UnavailableRepresentation(self.key, storage_kind,


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    _max_cut_fraction = 0.75  # We allow a block to be trimmed to 75% of
                              # current size, and still be considered
    _full_block_size = 4 * 1024 * 1024
    _full_mixed_block_size = 2 * 1024 * 1024
    _full_enough_block_size = 3 * 1024 * 1024  # size at which we won't repack
    _full_enough_mixed_block_size = 2 * 768 * 1024  # 1.5MB

    def __init__(self, block, get_compressor_settings=None):
        # We need to preserve the ordering
        self._get_settings = get_compressor_settings
        self._compressor_settings = None

    def _get_compressor_settings(self):
        if self._compressor_settings is not None:
            return self._compressor_settings
        if self._get_settings is not None:
            settings = self._get_settings()
            vf = GroupCompressVersionedFiles
            settings = vf._DEFAULT_COMPRESSOR_SETTINGS
        self._compressor_settings = settings
        return self._compressor_settings

    def add_factory(self, key, parents, start, end):
        if not self._factories:
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
                                            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        # as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = self._make_group_compressor()
        old_length = self._block._content_length
        for factory in self._factories:
            chunks = factory.get_bytes_as('chunked')
            chunks_len = factory.size
            if chunks_len is None:
                chunks_len = sum(map(len, chunks))
            (found_sha1, start_point, end_point,
             type) = compressor.compress(
                factory.key, chunks, chunks_len, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        # TODO: If the content would have expanded, then we would want to
        #       handle a case where we need to split the block.
        #       Now that we have a user-tweakable option
        #       (max_bytes_to_index), it is possible that one person set it
        #       to a very low value, causing poor compression.
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_action(self):
        """Check to see if our block should be repacked."""
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            if last_byte_used < factory._end:
                last_byte_used = factory._end
        # If we are using more than half of the bytes from the block, we have
        # nothing else to check
        if total_bytes_used * 2 >= self._block._content_length:
            return None, last_byte_used, total_bytes_used
        # We are using less than 50% of the content. Is the content we are
        # using at the beginning of the block? If so, we can just trim the
        # tail, rather than rebuilding from scratch.
        if total_bytes_used * 2 > last_byte_used:
            return 'trim', last_byte_used, total_bytes_used
        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        return 'rebuild', last_byte_used, total_bytes_used
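
    # Worked example (illustrative, not in the original source): for a block
    # whose _content_length is 4MB, if the factories reference 2MB or more the
    # method returns (None, ...) and the block is left alone. If they
    # reference only 1MB that all sits within the first 1.5MB of the block it
    # returns 'trim' (2 * 1MB > 1.5MB last_byte_used); if that same 1MB is
    # scattered up to the 3MB mark it returns 'rebuild' instead.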

    def check_is_well_utilized(self):
        """Is the current block considered 'well utilized'?

        This heuristic asks if the current block considers itself to be a fully
        developed group, rather than just a loose collection of data.
        if len(self._factories) == 1:
            # A block of length 1 could be improved by combining with other
            # groups - don't look deeper. Even larger than max size groups
            # could compress well with adjacent versions of the same thing.
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        block_size = self._block._content_length
        if total_bytes_used < block_size * self._max_cut_fraction:
            # This block wants to trim itself small enough that we want to
            # consider it under-utilized.
        # TODO: This code is meant to be the twin of _insert_record_stream's
        #       'start_new_block' logic. It would probably be better to factor
        #       out that logic into a shared location, so that it stays
        # We currently assume a block is properly utilized whenever it is >75%
        # of the size of a 'full' block. In normal operation, a block is
        # considered full when it hits 4MB of same-file content. So any block
        # >3MB is 'full enough'.
        # The only time this isn't true is when a given block has large-object
        # content. (a single file >4MB, etc.)
        # Under these circumstances, we allow a block to grow to
        # 2 x largest_content. Which means that if a given block had a large
        # object, it may actually be under-utilized. However, given that this
        # is 'pack-on-the-fly' it is probably reasonable to not repack large
        # content blobs on-the-fly. Note that because we return False for all
        # 1-item blobs, we will repack them; we may wish to reevaluate our
        # treatment of large object blobs in the future.
        if block_size >= self._full_enough_block_size:
        # If a block is <3MB, it still may be considered 'full' if it contains
        # mixed content. The current rule is 2MB of mixed content is considered
        # full. So check to see if this block contains mixed content, and
        # set the threshold appropriately.
        for factory in self._factories:
            prefix = factory.key[:-1]
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                # Mixed content, check the size appropriately
                if block_size >= self._full_enough_mixed_block_size:
        # The content failed both the mixed check and the single-content check
        # so obviously it is not fully utilized
        # TODO: there is one other constraint that isn't being checked
        #       namely, that the entries in the block are in the appropriate
        #       order. For example, you could insert the entries in exactly
        #       reverse groupcompress order, and we would think that is ok.
        #       (all the right objects are in one group, and it is fully
        #       utilized, etc.) For now, we assume that case is rare,
        #       especially since we should always fetch in 'groupcompress'

    def _check_rebuild_block(self):
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
            self._trim_block(last_byte_used)
        elif action == 'rebuild':
            self._rebuild_block()
            raise ValueError('unknown rebuild action: %r' % (action,))

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        lines = [b'groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier. The list of
        # keys, and the start offset, the length
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        for factory in self._factories:
            key_bytes = b'\x00'.join(factory.key)
            parents = factory.parents
                parent_bytes = b'None:'
                parent_bytes = b'\t'.join(b'\x00'.join(key) for key in parents)
            record_header = b'%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = b''.join(header_lines)
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        z_header_bytes_len = len(z_header_bytes)
        block_bytes_len, block_chunks = self._block.to_chunks()
        lines.append(b'%d\n%d\n%d\n' % (
            z_header_bytes_len, header_bytes_len, block_bytes_len))
        lines.append(z_header_bytes)
        lines.extend(block_chunks)
        del z_header_bytes, block_chunks
        # TODO: This is a point where we will double the memory consumption. To
        #       avoid this, we probably have to switch to a 'chunked' api
        return b''.join(lines)
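
    # Illustrative layout (not in the original source) of the stream built
    # above, which from_bytes() below undoes:
    #
    #   groupcompress-block\n
    #   <len of compressed header>\n<len of header>\n<len of gc block>\n
    #   <zlib-compressed header: key\nparents\nstart\nend\n per factory>
    #   <the GroupCompressBlock bytes from to_chunks()>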

    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way. At a minimum this creates 2 copies of the
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split(b'\n', 4)
        if storage_kind != b'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        # So now we have a valid GCB, we just need to parse the factories that
        header_lines = header.split(b'\n')
        last = header_lines.pop()
            raise ValueError('header lines did not end with a trailing'
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not an even multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        for start in range(0, len(header_lines), 4):
            key = tuple(header_lines[start].split(b'\x00'))
            parents_line = header_lines[start + 1]
            if parents_line == b'None:':
                parents = tuple([tuple(segment.split(b'\x00'))
                                 for segment in parents_line.split(b'\t')
            start_offset = int(header_lines[start + 2])
            end_offset = int(header_lines[start + 3])
            result.add_factory(key, parents, start_offset, end_offset)


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()


class _CommonGroupCompressor(object):

    def __init__(self, settings=None):
        """Create a GroupCompressor."""
        self.labels_deltas = {}
        self._delta_index = None  # Set by the children
        self._block = GroupCompressBlock()
        self._settings = settings

    def compress(self, key, chunks, length, expected_sha, nostore_sha=None,
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is b'None' it is replaced with the sha1 of the text -
        :param chunks: Chunks of bytes to be compressed
        :param length: Length of chunks
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        if length == 0:  # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = osutils.sha_strings(chunks)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
            key = key[:-1] + (b'sha1:' + sha1,)
        start, end, type = self._compress(key, chunks, length, length / 2, soft)
        return sha1, start, end, type

    def _compress(self, key, chunks, input_len, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.
        :param chunks: The chunks of bytes to be compressed
        :param input_len: The length of the chunks
        :param max_delta_size: The size above which we issue a fulltext instead
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.
        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over chunks and the sha1.
        (start_byte, start_chunk, end_byte,
         end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = b''.join(delta_chunks)
        kind = stored_bytes[:1]
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 % (len(stored_bytes), data_len))
            data = [stored_bytes[offset + 1:]]
                raise ValueError('Unknown content kind, bytes claim %s' % kind)
            # XXX: This is inefficient at best
            source = b''.join(self.chunks[:start_chunk])
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 % (len(stored_bytes), data_len))
            data = [apply_delta(source, stored_bytes[offset + 1:])]
        data_sha1 = osutils.sha_strings(data)
        return data, data_sha1

        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used
        self._block.set_chunked_content(self.chunks, self.endpoint)
        self._delta_index = None

        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]

        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)


class PythonGroupCompressor(_CommonGroupCompressor):

    def __init__(self, settings=None):
        """Create a GroupCompressor.

        Used only if the pyrex version is not available.
        super(PythonGroupCompressor, self).__init__(settings)
        self._delta_index = LinesDeltaIndex([])
        # The actual content is managed by LinesDeltaIndex
        self.chunks = self._delta_index.lines

    def _compress(self, key, chunks, input_len, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        new_lines = osutils.chunks_to_lines(chunks)
        out_lines, index_lines = self._delta_index.make_delta(
            new_lines, bytes_length=input_len, soft=soft)
        delta_length = sum(map(len, out_lines))
        if delta_length > max_delta_size:
            # The delta is longer than the fulltext, insert a fulltext
            out_lines = [b'f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
            # this is a worthy delta, output it
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        start = self.endpoint
        chunk_start = len(self.chunks)
        self._last = (chunk_start, self.endpoint)
        self._delta_index.extend_lines(out_lines, index_lines)
        self.endpoint = self._delta_index.endpoint
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        return start, self.endpoint, type


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because of having a similar
    task. However some key differences apply:

    * there is no junk, we want a minimal edit not a human readable diff.
    * we don't filter very common lines (because we don't know where a good
      range will start, and after the first text we want to be emitting minimal
    * we chain the left side, not the right side
    * we incrementally update the adjacency matrix as new lines are provided.
    * we look for matches in all of the left side, so the routine which does
      the analogous task of find_longest_match does not need to filter on the

    def __init__(self, settings=None):
        super(PyrexGroupCompressor, self).__init__(settings)
        max_bytes_to_index = self._settings.get('max_bytes_to_index', 0)
        self._delta_index = DeltaIndex(max_bytes_to_index=max_bytes_to_index)

    def _compress(self, key, chunks, input_len, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (the delta record itself will store the
        # total length for the expanded record)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint'
                                 ' somehow the DeltaIndex got out of sync with'
                                 ' the output lines')
        bytes = b''.join(chunks)
        delta = self._delta_index.make_delta(bytes, max_delta_size)
            enc_length = encode_base128_int(input_len)
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = [b'f', enc_length] + chunks
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = [b'd', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if not self._delta_index._source_offset == self.endpoint:
            raise AssertionError('the delta index is out of sync'
                                 ' with the output lines %s != %s'
                                 % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint


def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    def factory(transport):
        graph_index = BTreeBuilder(reference_lists=ref_length,
                                   key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        index = _GCGraphIndex(graph_index, lambda: True, parents=parents,
                              add_callback=graph_index.add_nodes,
                              inconsistency_fatal=inconsistency_fatal)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()


class _BatchingBlockFetcher(object):
    """Fetch group compress blocks in batches.

    :ivar total_bytes: int of expected number of bytes needed to fetch the
        currently pending batch.

    def __init__(self, gcvf, locations, get_compressor_settings=None):
        self.locations = locations
        self.batch_memos = {}
        self.memos_to_get = []
        self.total_bytes = 0
        self.last_read_memo = None
        self._get_compressor_settings = get_compressor_settings

    def add_key(self, key):
        """Add another key to fetch.

        :return: The estimated number of bytes needed to fetch the batch so
        self.keys.append(key)
        index_memo, _, _, _ = self.locations[key]
        read_memo = index_memo[0:3]
        # Three possibilities for this read_memo:
        #  - it's already part of this batch; or
        #  - it's not yet part of this batch, but is already cached; or
        #  - it's not yet part of this batch and will need to be fetched.
        if read_memo in self.batch_memos:
            # This read memo is already in this batch.
            return self.total_bytes
            cached_block = self.gcvf._group_cache[read_memo]
            # This read memo is new to this batch, and the data isn't cached
            self.batch_memos[read_memo] = None
            self.memos_to_get.append(read_memo)
            byte_length = read_memo[2]
            self.total_bytes += byte_length
            # This read memo is new to this batch, but cached.
            # Keep a reference to the cached block in batch_memos because it's
            # certain that we'll use it when this batch is processed, but
            # there's a risk that it would fall out of _group_cache between now
            self.batch_memos[read_memo] = cached_block
        return self.total_bytes
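
    # Usage sketch (illustrative, not in the original source): callers keep
    # feeding keys until the running byte estimate crosses the batch
    # threshold, then drain the batch, e.g.
    #
    #   >>> if batcher.add_key(key) > BATCH_SIZE:
    #   ...     for factory in batcher.yield_factories():
    #   ...         yield factory
    #
    # which is how _get_remaining_record_stream() drives this class below.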

    def _flush_manager(self):
        if self.manager is not None:
            for factory in self.manager.get_record_stream():
            self.last_read_memo = None

    def yield_factories(self, full_flush=False):
        """Yield factories for keys added since the last yield. They will be
        returned in the order they were added via add_key.

        :param full_flush: by default, some results may not be returned in case
            they can be part of the next batch. If full_flush is True, then
            all results are returned.
        if self.manager is None and not self.keys:
        # Fetch all memos in this batch.
        blocks = self.gcvf._get_blocks(self.memos_to_get)
        # Turn blocks into factories and yield them.
        memos_to_get_stack = list(self.memos_to_get)
        memos_to_get_stack.reverse()
        for key in self.keys:
            index_memo, _, parents, _ = self.locations[key]
            read_memo = index_memo[:3]
            if self.last_read_memo != read_memo:
                # We are starting a new block. If we have a
                # manager, we have found everything that fits for
                # now, so yield records
                for factory in self._flush_manager():
                # Now start a new manager.
                if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
                    # The next block from _get_blocks will be the block we
                    block_read_memo, block = next(blocks)
                    if block_read_memo != read_memo:
                        raise AssertionError(
                            "block_read_memo out of sync with read_memo"
                            " (%r != %r)" % (block_read_memo, read_memo))
                    self.batch_memos[read_memo] = block
                    memos_to_get_stack.pop()
                    block = self.batch_memos[read_memo]
                self.manager = _LazyGroupContentManager(block,
                    get_compressor_settings=self._get_compressor_settings)
                self.last_read_memo = read_memo
            start, end = index_memo[3:5]
            self.manager.add_factory(key, parents, start, end)
        for factory in self._flush_manager():
        self.batch_memos.clear()
        del self.memos_to_get[:]
        self.total_bytes = 0


class GroupCompressVersionedFiles(VersionedFilesWithFallbacks):
    """A group-compress based VersionedFiles implementation."""

    # This controls how the GroupCompress DeltaIndex works. Basically, we
    # compute hash pointers into the source blocks (so hash(text) => text).
    # However each of these references costs some memory in trade against a
    # more accurate match result. For very large files, they either are
    # pre-compressed and change in bulk whenever they change, or change in just
    # local blocks. Either way, 'improved resolution' is not very helpful,
    # versus running out of memory trying to track everything. The default max
    # gives 100% sampling of a 1MB file.
    _DEFAULT_MAX_BYTES_TO_INDEX = 1024 * 1024
    _DEFAULT_COMPRESSOR_SETTINGS = {'max_bytes_to_index':
                                    _DEFAULT_MAX_BYTES_TO_INDEX}

    def __init__(self, index, access, delta=True, _unadded_refs=None,
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        :param _unadded_refs: private parameter, don't use.
        :param _group_cache: private parameter, don't use.
        self._access = access
        if _unadded_refs is None:
        self._unadded_refs = _unadded_refs
        if _group_cache is None:
            _group_cache = LRUSizeCache(max_size=50 * 1024 * 1024)
        self._group_cache = _group_cache
        self._immediate_fallback_vfs = []
        self._max_bytes_to_index = None

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return GroupCompressVersionedFiles(self._index, self._access,
                                           self._delta, _unadded_refs=dict(
                                               self._unadded_refs),
                                           _group_cache=self._group_cache)

    def add_lines(self, key, parents, lines, parent_texts=None,
                  left_matching_blocks=None, nostore_sha=None, random_id=False,
                  check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring. And all
            of them except the last must be terminated with \\n and contain no
            other \\n's. The last line may either contain no \\n's or a single
            terminating \\n. If the lines list does not meet this constraint the
            add routine may error or may succeed - but you will be unable to
            read the data back accurately. (Checking the lines have been split
            correctly is expensive and extremely unlikely to catch bugs so it
            is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations. VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent. The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
        :param check_content: If True, the lines supplied are verified to be
            bytestrings that are correctly formed lines.
        :return: The text sha1, the number of bytes in the text, and an opaque
            representation of the inserted version which can be provided
            back to future add_lines calls in the parent_texts dictionary.
        self._index._check_write_ok()
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
        return self.add_content(
            ChunkedContentFactory(
                key, parents, osutils.sha_strings(lines), lines, chunks_are_lines=True),
            parent_texts, left_matching_blocks, nostore_sha, random_id)

    def add_content(self, factory, parent_texts=None,
                    left_matching_blocks=None, nostore_sha=None,
        """Add a text to the store.

        :param factory: A ContentFactory that can be used to retrieve the key,
            parents and contents.
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations. VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent. The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
        :return: The text sha1, the number of bytes in the text, and an opaque
            representation of the inserted version which can be provided
            back to future add_lines calls in the parent_texts dictionary.
        self._index._check_write_ok()
        parents = factory.parents
        self._check_add(factory.key, random_id)
        # The caller might pass None if there is no graph data, but kndx
        # indexes can't directly store that, so we give them
        # an empty tuple instead.
        # double handling for now. Make it work until then.
        sha1, length = list(self._insert_record_stream(
            [factory], random_id=random_id, nostore_sha=nostore_sha))[0]
        return sha1, length, None

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        self._immediate_fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        ann = annotate.Annotator(self)
        return ann.annotate_flat(key)

    def get_annotator(self):
        return annotate.Annotator(self)

    def check(self, progress_bar=None, keys=None):
        """See VersionedFiles.check()."""
            for record in self.get_record_stream(keys, 'unordered', True):
                for chunk in record.iter_bytes_as('chunked'):
            return self.get_record_stream(keys, 'unordered', True)

    def clear_cache(self):
        """See VersionedFiles.clear_cache()"""
        self._group_cache.clear()
        self._index._graph_index.clear_cache()
        self._index._int_cache.clear()

    def _check_add(self, key, random_id):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if osutils.contains_whitespace(version_id):
                raise errors.InvalidRevisionId(version_id, self)
            self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
        sources = [self._index] + self._immediate_fallback_vfs
        for source in sources:
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results

    def _get_blocks(self, read_memos):
        """Get GroupCompressBlocks for the given read_memos.

        :returns: a series of (read_memo, block) pairs, in the order they were
        for read_memo in read_memos:
                block = self._group_cache[read_memo]
                cached[read_memo] = block
        not_cached_seen = set()
        for read_memo in read_memos:
            if read_memo in cached:
                # Don't fetch what we already have
            if read_memo in not_cached_seen:
                # Don't try to fetch the same data twice
            not_cached.append(read_memo)
            not_cached_seen.add(read_memo)
        raw_records = self._access.get_raw_records(not_cached)
        for read_memo in read_memos:
                yield read_memo, cached[read_memo]
                # Read the block, and cache it.
                zdata = next(raw_records)
                block = GroupCompressBlock.from_bytes(zdata)
                self._group_cache[read_memo] = block
                cached[read_memo] = block
                yield read_memo, block

    def get_missing_compression_parent_keys(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        # GroupCompress cannot currently reference texts that are not in the
        # group, so this is valid for now

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        # keys might be a generator
        orig_keys = list(keys)
        if (not self._index.has_graph
                and ordering in ('topological', 'groupcompress')):
            # Cannot topological order when no graph has been stored.
            # but we allow 'as-requested' or 'unordered'
            ordering = 'unordered'

        remaining_keys = keys
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        orig_keys, ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
            except errors.RetryWithNewPacks as e:
                self._access.reload_or_raise(e)
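
    # Usage sketch (illustrative, not in the original source): a caller that
    # just wants the texts back, without caring about ordering, can do
    #
    #   >>> for record in gcvf.get_record_stream(keys, 'unordered', True):
    #   ...     if record.storage_kind != 'absent':
    #   ...         text = b''.join(record.iter_bytes_as('chunked'))
    #
    # ('gcvf' being a GroupCompressVersionedFiles instance), mirroring what
    # check() and get_sha1s() do elsewhere in this class.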

    def _find_from_fallback(self, missing):
        """Find whatever keys you can from the fallbacks.

        :param missing: A set of missing keys. This set will be mutated as keys
            are found from a fallback_vfs
        :return: (parent_map, key_to_source_map, source_results)
            parent_map  the overall key => parent_keys
            key_to_source_map   a dict from {key: source}
            source_results      a list of (source: keys)
        key_to_source_map = {}
        for source in self._immediate_fallback_vfs:
            source_parents = source.get_parent_map(missing)
            parent_map.update(source_parents)
            source_parents = list(source_parents)
            source_results.append((source, source_parents))
            key_to_source_map.update((key, source) for key in source_parents)
            missing.difference_update(source_parents)
        return parent_map, key_to_source_map, source_results

    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
        """Get the (source, [keys]) list.

        The returned objects should be in the order defined by 'ordering',
        which can weave between different sources.

        :param ordering: Must be one of 'topological' or 'groupcompress'
        :return: List of [(source, [keys])] tuples, such that all keys are in
            the defined order, regardless of source.
        if ordering == 'topological':
            present_keys = tsort.topo_sort(parent_map)
            # ordering == 'groupcompress'
            # XXX: This only optimizes for the target ordering. We may need
            #      to balance that with the time it takes to extract
            #      ordering, by somehow grouping based on
            #      locations[key][0:3]
            present_keys = sort_gc_optimal(parent_map)
        # Now group by source:
        current_source = None
        for key in present_keys:
            source = key_to_source_map.get(key, self)
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)

    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
        current_source = None
        for key in orig_keys:
            if key in locations or key in unadded_keys:
            elif key in key_to_source_map:
                source = key_to_source_map[key]
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)

    def _get_io_ordered_source_keys(self, locations, unadded_keys,
            # This is the group the bytes are stored in, followed by the
            # location in the group
            return locations[key][0]
        # We don't have an ordering for keys in the in-memory object, but
        # let's process the in-memory ones first.
        present_keys = list(unadded_keys)
        present_keys.extend(sorted(locations, key=get_group))
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)

    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
                                     include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
            'as-requested'
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        locations = self._index.get_build_details(keys)
        unadded_keys = set(self._unadded_refs).intersection(keys)
        missing = keys.difference(locations)
        missing.difference_update(unadded_keys)
        (fallback_parent_map, key_to_source_map,
         source_result) = self._find_from_fallback(missing)
        if ordering in ('topological', 'groupcompress'):
            # would be better to not globally sort initially but instead
            # start with one key, recurse to its oldest parent, then grab
            # everything in the same group, etc.
            parent_map = dict((key, details[2]) for key, details in
                              viewitems(locations))
            for key in unadded_keys:
                parent_map[key] = self._unadded_refs[key]
            parent_map.update(fallback_parent_map)
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
                                                        key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(
                orig_keys, locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance
            source_keys = self._get_io_ordered_source_keys(
                locations, unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        # Batch up as many keys as we can until either:
        #  - we encounter an unadded ref, or
        #  - we run out of keys, or
        #  - the total bytes to retrieve for this batch > BATCH_SIZE
        batcher = _BatchingBlockFetcher(self, locations,
            get_compressor_settings=self._get_compressor_settings)
        for source, keys in source_keys:
            if source is self:
                for key in keys:
                    if key in self._unadded_refs:
                        # Flush batch, then yield unadded ref from
                        # self._compressor.
                        for factory in batcher.yield_factories(full_flush=True):
                            yield factory
                        chunks, sha1 = self._compressor.extract(key)
                        parents = self._unadded_refs[key]
                        yield ChunkedContentFactory(key, parents, sha1, chunks)
                        continue
                    if batcher.add_key(key) > BATCH_SIZE:
                        # Ok, this batch is big enough. Yield some results.
                        for factory in batcher.yield_factories():
                            yield factory
            else:
                for factory in batcher.yield_factories(full_flush=True):
                    yield factory
                for record in source.get_record_stream(keys, ordering,
                                                       include_delta_closure):
                    yield record
        for factory in batcher.yield_factories(full_flush=True):
            yield factory
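
    # Illustrative only (sizes are made up, not the module's real constants):
    # if BATCH_SIZE allowed roughly 64KiB per batch and three consecutive keys
    # needed 40KiB, 30KiB and 50KiB of compressed data, the loop above would
    # yield the first batch after the second key (40 + 30 > 64) and start a
    # fresh batch for the third, keeping each readv round-trip bounded.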

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        result = {}
        for record in self.get_record_stream(keys, 'unordered', True):
            if record.sha1 is not None:
                result[record.key] = record.sha1
            else:
                if record.storage_kind != 'absent':
                    result[record.key] = osutils.sha_strings(
                        record.iter_bytes_as('chunked'))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # XXX: Setting random_id=True makes
        # test_insert_record_stream_existing_keys fail for groupcompress and
        # groupcompress-nograph, this needs to be revisited while addressing
        # 'bzr branch' performance issues.
        for _, _ in self._insert_record_stream(stream, random_id=False):
            pass

    def _get_compressor_settings(self):
        if self._max_bytes_to_index is None:
            # TODO: VersionedFiles don't know about their containing
            #       repository, so they don't have much of an idea about their
            #       location. So for now, this is only a global option.
            c = config.GlobalConfig()
            val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
            if val is not None:
                try:
                    val = int(val)
                except ValueError as e:
                    trace.warning('Value for '
                                  '"bzr.groupcompress.max_bytes_to_index"'
                                  ' %r is not an integer'
                                  % (val,))
                    val = None
            if val is None:
                val = self._DEFAULT_MAX_BYTES_TO_INDEX
            self._max_bytes_to_index = val
        return {'max_bytes_to_index': self._max_bytes_to_index}
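
    # Example (illustrative, not taken from this module): the option read above
    # is a plain global option, so it can typically be set in the [DEFAULT]
    # section of the global configuration file, e.g.
    #     bzr.groupcompress.max_bytes_to_index = 4194304
    # Non-integer values are warned about and replaced by the class default
    # (_DEFAULT_MAX_BYTES_TO_INDEX).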

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        This helper function has a different interface than insert_record_stream
        to allow add_lines to be minimal, but still return the needed data.

        :param stream: A stream of records to insert.
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
            raise ExistingContent, rather than committing the new text.
        :param reuse_blocks: If the source is streaming from
            groupcompress-blocks, just insert the blocks as-is, rather than
            expanding the texts and inserting again.
        :return: An iterator over (sha1, length) of the inserted records.
        :seealso insert_record_stream:
        :seealso add_lines:
        """
        adapters = {}

        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = self._make_group_compressor()
        self._unadded_refs = {}
        keys_to_add = []

        def flush():
            bytes_len, chunks = self._compressor.flush().to_chunks()
            self._compressor = self._make_group_compressor()
            # Note: At this point we still have 1 copy of the fulltext (in
            #       record and the var 'bytes'), and this generates 2 copies of
            #       the compressed text (one for bytes, one in chunks)
            # TODO: Figure out how to indicate that we would be happy to free
            #       the fulltext content at this point. Note that sometimes we
            #       will want it later (streaming CHK pages), but most of the
            #       time we won't (everything else)
            index, start, length = self._access.add_raw_record(
                None, bytes_len, chunks)
            nodes = []
            for key, reads, refs in keys_to_add:
                nodes.append((key, b"%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        reuse_this_block = reuse_blocks
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note(gettext('Insert claimed random_id=True,'
                                       ' but then inserted %r two times'),
                               record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                # We only check on the first record (groupcompress-block) not
                # on all of the (groupcompress-block-ref) entries.
                # The reuse_this_block flag is then kept for as long as
                if record.storage_kind == 'groupcompress-block':
                    # Check to see if we really want to re-use this block
                    insert_manager = record._manager
                    reuse_this_block = insert_manager.check_is_well_utilized()
            else:
                reuse_this_block = False
            if reuse_this_block:
                # We still want to reuse this block
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    bytes_len, chunks = record._manager._block.to_chunks()
                    _, start, length = self._access.add_raw_record(
                        None, bytes_len, chunks)
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    if insert_manager is not record._manager:
                        raise AssertionError('insert_manager does not match'
                            ' the current record, we cannot be positive'
                            ' that the appropriate content was inserted.')
                    value = b"%d %d %d %d" % (block_start, block_length,
                                              record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
            try:
                chunks = record.get_bytes_as('chunked')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'chunked'
                adapter = get_adapter(adapter_key)
                chunks = adapter.get_bytes(record, 'chunked')
            chunks_len = record.size
            if chunks_len is None:
                chunks_len = sum(map(len, chunks))
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < chunks_len:
                max_fulltext_len = chunks_len
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(
                record.key, chunks, chunks_len, record.sha1, soft=soft,
                nostore_sha=nostore_sha)
            # delta_ratio = float(chunks_len) / (end_point - start_point)
            # Check if we want to continue to include that text
            if (prefix == max_fulltext_prefix
                    and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4 * 1024 * 1024:
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2 * 1024 * 1024):
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = chunks_len
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(
                    record.key, chunks, chunks_len, record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + (b'sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1, chunks_len
            as_st = static_tuple.StaticTuple.from_sequence
            if record.parents is not None:
                parents = as_st([as_st(p) for p in record.parents])
            else:
                parents = None
            refs = static_tuple.StaticTuple(parents)
            keys_to_add.append(
                (key, b'%d %d' % (start_point, end_point), refs))
        if len(keys_to_add):
            flush()
        self._compressor = None
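
    # Worked example of the block-splitting heuristic above (numbers invented
    # for illustration): if the largest text seen so far is 1.5MiB and has the
    # same prefix (file-id) as the current record, the block keeps growing
    # while the compressed output stays under 2 * 1.5MiB = 3MiB; any block is
    # cut once it passes 4MiB; and switching to a different prefix cuts it
    # once it has passed 2MiB. On a cut, pop_last()/flush() write out the
    # current group and the triggering record is re-compressed into a fresh
    # one.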

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need key, position, length
        for key_idx, record in enumerate(self.get_record_stream(keys,
                                                                'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            if pb is not None:
                pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            for line in record.iter_bytes_as('lines'):
                yield line, key
        if pb is not None:
            pb.update('Walking content', total, total)
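
    # Typical use (sketch; the names here are invented): consume the pairs
    # directly, e.g.
    #     for line, key in vf.iter_lines_added_or_present_in_keys(wanted):
    #         line_owners.setdefault(line, set()).add(key)
    # bearing in mind the notes above: lines arrive in arbitrary order and
    # always carry a b'\n' terminator.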

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._immediate_fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result


class _GCBuildDetails(object):
    """A blob of data about the build details.

    This stores the minimal data, which then allows compatibility with the old
    api, without taking as much memory.
    """

    __slots__ = ('_index', '_group_start', '_group_end', '_basis_end',
                 '_delta_end', '_parents')

    method = 'group'
    compression_parent = None

    def __init__(self, parents, position_info):
        self._parents = parents
        (self._index, self._group_start, self._group_end, self._basis_end,
         self._delta_end) = position_info

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__,
                               self.index_memo, self._parents)

    @property
    def index_memo(self):
        return (self._index, self._group_start, self._group_end,
                self._basis_end, self._delta_end)

    @property
    def record_details(self):
        return static_tuple.StaticTuple(self.method, None)

    def __getitem__(self, offset):
        """Compatibility thunk to act like a tuple."""
        if offset == 0:
            return self.index_memo
        elif offset == 1:
            return self.compression_parent  # Always None
        elif offset == 2:
            return self._parents
        elif offset == 3:
            return self.record_details
        else:
            raise IndexError('offset out of range')

    def __len__(self):
        return 4
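
    # Illustrative (not part of the original class): code written against the
    # old 4-tuple build-details API keeps working because an instance acts
    # like (index_memo, compression_parent, parents, record_details), e.g.
    # details[0] is details.index_memo, details[1] is always None for
    # groupcompress, details[2] is the parents tuple and details[3] is the
    # record_details StaticTuple.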


class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
                 add_callback=None, track_external_parent_refs=False,
                 inconsistency_fatal=True, track_new_keys=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of breezy.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: As keys are added, keep track of the
            keys they reference, so that we can query get_missing_parents(),
            etc.
        :param inconsistency_fatal: When asked to add records that are already
            present, and the details are inconsistent with the existing
            record, raise an exception instead of warning (and skipping the
            record).
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked
        self._inconsistency_fatal = inconsistency_fatal
        # GroupCompress records tend to have the same 'group' start + offset
        # repeated over and over, this creates a surplus of ints
        self._int_cache = {}
        if track_external_parent_refs:
            self._key_dependencies = _KeyRefs(
                track_new_keys=track_new_keys)
        else:
            self._key_dependencies = None

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
            (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.

        changed = False
        keys = {}
        for (key, value, refs) in records:
            if not self._parents:
                if refs:
                    for ref in refs:
                        if ref:
                            raise knit.KnitCorrupt(self,
                                "attempt to add node with parents "
                                "in parentless index.")
                    refs = ()
                    changed = True
            keys[key] = (value, refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                # Sometimes these are passed as a list rather than a tuple
                node_refs = static_tuple.as_tuples(node_refs)
                passed = static_tuple.as_tuples(keys[key])
                if node_refs != passed[1]:
                    details = '%s %s %s' % (key, (value, node_refs), passed)
                    if self._inconsistency_fatal:
                        raise knit.KnitCorrupt(self, "inconsistent details"
                                               " in add_records: %s" %
                                               details)
                    else:
                        trace.warning("inconsistent details in skipped"
                                      " record: %s", details)
                del keys[key]
                changed = True
        if changed:
            result = []
            if self._parents:
                for key, (value, node_refs) in viewitems(keys):
                    result.append((key, value, node_refs))
            else:
                for key, (value, node_refs) in viewitems(keys):
                    result.append((key, value))
            records = result
        key_dependencies = self._key_dependencies
        if key_dependencies is not None:
            if self._parents:
                for key, value, refs in records:
                    parents = refs[0]
                    key_dependencies.add_references(key, parents)
            else:
                for key, value, refs in records:
                    key_dependencies.add_key(key)
        self._add_callback(records)
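
    # Illustrative sketch of the expected input (the keys and value here are
    # invented): each record is a (key, value, refs) tuple, e.g.
    #     index.add_records(
    #         [((b'file-id', b'rev-id'),
    #           b'0 4096 128 256',            # parsed by _node_to_position
    #           (((b'file-id', b'parent-rev'),),))])
    # For a parentless index the refs must be empty, otherwise KnitCorrupt is
    # raised as above.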

    def _check_read(self):
        """Raise an exception if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise an exception if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        Note: Callers are responsible for checking that the index is locked
        before calling this method.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise errors.RevisionNotPresent(missing_keys.pop(), self)

    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry"""
        return self._graph_index.find_ancestry(keys, 0)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # Copied from _KnitGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())

    def get_build_details(self, keys):
        """Get the various build details for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).

            * index_memo: opaque structure to pass to read_records to extract
              the raw data
            * compression_parent: Content that this record is built upon, may
              be None
            * parents: Logical parents of this node
            * record_details: extra information about the content which needs
              to be passed to Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = None
            else:
                parents = entry[3][0]
            details = _GCBuildDetails(parents, self._node_to_position(entry))
            result[key] = details
        return result

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2].split(b' ')
        # It would be nice not to read the entire gzip.
        # start and stop are put into _int_cache because they are very common.
        # They define the 'group' that an entry is in, and many groups can have
        # thousands of objects.
        # Branching Launchpad, for example, saves ~600k integers, at 12 bytes
        # each, or about 7MB. Note that it might be even more when you consider
        # how PyInt is allocated in separate slabs. And you can't return a slab
        # to the OS if even 1 int on it is in use. Note though that Python uses
        # a LIFO when re-using PyInt slots, which might cause more
        # fragmentation.
        start = int(bits[0])
        start = self._int_cache.setdefault(start, start)
        stop = int(bits[1])
        stop = self._int_cache.setdefault(stop, stop)
        basis_end = int(bits[2])
        delta_end = int(bits[3])
        # We can't use StaticTuple here, because node[0] is a BTreeGraphIndex
        # instance.
        return (node[0], start, stop, basis_end, delta_end)
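
    # Example (hypothetical value): an index value of b'0 4096 128 256' parses
    # to (node_index, 0, 4096, 128, 256), where 0 and 4096 locate the group's
    # raw record in the pack (as stored by add_raw_record) and 128/256 delimit
    # this record's portion within the group (exposed as basis_end/delta_end).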

    def scan_unvalidated_index(self, graph_index):
        """Inform this _GCGraphIndex that there is an unvalidated index.

        This allows this _GCGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid. It also allows _GCGraphIndex to track any new keys.

        :param graph_index: A GraphIndex
        """
        key_dependencies = self._key_dependencies
        if key_dependencies is None:
            return
        for node in graph_index.iter_all_entries():
            # Add parent refs from graph_index (and discard parent refs
            # that the graph_index has).
            key_dependencies.add_references(node[1], node[3][0])


from ._groupcompress_py import (
    apply_delta,
    apply_delta_to_source,
    encode_base128_int,
    decode_base128_int,
    decode_copy_instruction,
    LinesDeltaIndex,
    )
try:
    from ._groupcompress_pyx import (
        apply_delta,
        apply_delta_to_source,
        DeltaIndex,
        encode_base128_int,
        decode_base128_int,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError as e:
    osutils.failed_to_load_extension(e)
    GroupCompressor = PythonGroupCompressor