"""Core compression logic for compressing streams of related files."""

from __future__ import absolute_import

from ..lazy_import import lazy_import
lazy_import(globals(), """
from breezy.bzr import (
from breezy.i18n import gettext
from .btree_index import BTreeBuilder
from ..lru_cache import LRUSizeCache
from ..sixish import (
from .versionedfile import (
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFilesWithFallbacks,

# Minimum number of uncompressed bytes to try fetch at once when retrieving
# groupcompress blocks.

# osutils.sha_string(b'')
_null_sha1 = b'da39a3ee5e6b4b0d3255bfef95601890afd80709'

def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(tsort.topo_sort(per_prefix_map[prefix])))
    return present_keys
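
# Illustrative note (not in the original source): given keys such as
# (b'file-a', b'rev1'), (b'file-a', b'rev2') and (b'file-b', b'rev1'), the
# result is grouped by prefix (all b'file-a' keys, then all b'file-b' keys),
# and within each prefix the reversed topo_sort puts newer revisions before
# the revisions they descend from, which is the ordering groupcompress
# compresses best.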

class DecompressCorruption(errors.BzrError):

    _fmt = "Corruption while decompressing repository file%(orig_error)s"

    def __init__(self, orig_error=None):
        if orig_error is not None:
            self.orig_error = ", %s" % (orig_error,)
        else:
            self.orig_error = ""
        errors.BzrError.__init__(self)

# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32 * 1024

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = b''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            # We join self._z_content_chunks here, because if we are
            # decompressing, then it is *very* likely that we have a single
            # chunk
            if self._z_content_chunks is None:
                raise AssertionError('No content to decompress')
            z_content = b''.join(self._z_content_chunks)
            if z_content == b'':
                self._content = b''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes * 4 > self._content_length * 3:
                    # If we are requesting more than 3/4ths of the content,
                    # just extract the whole thing in a single pass
                    num_bytes = self._content_length
                    self._content = zlib.decompress(z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
                    if not self._z_content_decompressor.unconsumed_tail:
                        self._z_content_decompressor = None
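                    # Keeping the decompressor around while it still has an
                    # unconsumed tail lets a later _ensure_content() call
                    # continue decompressing from where this one stopped,
                    # instead of restarting from the beginning of the block.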
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = data.index(b'\n', pos, pos + 14)
        self._z_content_length = int(data[pos:pos2])
        pos2 = data.index(b'\n', pos, pos + 14)
        self._content_length = int(data[pos:pos2])
        if len(data) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(data), pos, self._z_content_length))
        self._z_content_chunks = (data[pos:],)
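        # Layout sketch of a serialised block, as parsed above (the header
        # value is an assumption drawn from GCB_KNOWN_HEADERS; the lengths
        # are ASCII decimals):
        #   6-byte header (e.g. b'gcb1z\n' for zlib)
        #   <compressed-length>\n
        #   <uncompressed-length>\n
        #   <compressed content bytes>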

    @property
    def _z_content(self):
        """Return z_content_chunks as a simple string.

        Meant only to be used by the test suite.
        """
        if self._z_content_chunks is not None:
            return b''.join(self._z_content_chunks)

    def from_bytes(cls, bytes):
        if header not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        if header == cls.GCB_HEADER:
            out._compressor_name = 'zlib'
        elif header == cls.GCB_LZ_HEADER:
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (header,))
        out._parse_bytes(bytes, 6)
        :return: The bytes for the content
        """
        if start == end == 0:
            return b''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start:start + 1]
        if c == b'f':
            type = 'fulltext'
        else:
            if c != b'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                             ' %s != %s' % (end, content_start + content_len))
        if c == b'f':
            return self._content[content_start:end]
        # Must be type delta as checked above
        return apply_delta_to_source(self._content, content_start, end)
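
    # Record layout used by extract() above: a one-byte kind (b'f' for a
    # fulltext, b'd' for a delta against earlier content in the same block),
    # a base128-encoded content length, then the record payload itself.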

    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        self._content_length = length
        self._content_chunks = content_chunks
        self._content = None
        self._z_content_chunks = None

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content_chunks = None

    def _create_z_content_from_chunks(self, chunks):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        # Peak memory at this point is 1 fulltext, 1 compressed text, + zlib
        # overhead (measured peak is maybe 30MB over the above...)
        compressed_chunks = list(map(compressor.compress, chunks))
        compressed_chunks.append(compressor.flush())
        # Ignore empty chunks
        self._z_content_chunks = [c for c in compressed_chunks if c]
        self._z_content_length = sum(map(len, self._z_content_chunks))

    def _create_z_content(self):
        if self._z_content_chunks is not None:
            return
        if self._content_chunks is not None:
            chunks = self._content_chunks
        else:
            chunks = (self._content,)
        self._create_z_content_from_chunks(chunks)

    def to_chunks(self):
        """Create the byte stream as a series of 'chunks'"""
        self._create_z_content()
        header = self.GCB_HEADER
        chunks = [b'%s%d\n%d\n'
                  % (header, self._z_content_length, self._content_length),
                  ]
        chunks.extend(self._z_content_chunks)
        total_len = sum(map(len, chunks))
        return total_len, chunks

    def to_bytes(self):
        """Encode the information into a byte stream."""
        total_len, chunks = self.to_chunks()
        return b''.join(chunks)
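
    # A minimal round-trip sketch (illustrative only; it just chains the
    # methods defined above and is not part of the original source):
    #   block = GroupCompressBlock()
    #   block.set_content(b'some text')
    #   data = block.to_bytes()
    #   restored = GroupCompressBlock.from_bytes(data)
    #   assert restored._content_length == len(b'some text')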

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        while pos < self._content_length:
            kind = self._content[pos:pos + 1]
            if kind not in (b'f', b'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                self._content[pos:pos + 5])
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == b'f':  # Fulltext
                if include_text:
                    text = self._content[pos:pos + content_len]
                    result.append((b'f', content_len, text))
                else:
                    result.append((b'f', content_len))
            elif kind == b'd':  # Delta
                delta_content = self._content[pos:pos + content_len]
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append((b'd', content_len, decomp_len, delta_info))
                while delta_pos < content_len:
                    c = indexbytes(delta_content, delta_pos)
                    if c & 0x80:  # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        if include_text:
                            text = self._content[offset:offset + length]
                            delta_info.append((b'c', offset, length, text))
                        else:
                            delta_info.append((b'c', offset, length))
                        measured_len += length
                    else:  # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos + c]
                        delta_info.append((b'i', c, txt))
                        measured_len += c
                if delta_pos != content_len:

class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    _max_cut_fraction = 0.75  # We allow a block to be trimmed to 75% of
                              # current size, and still be considered
    _full_block_size = 4 * 1024 * 1024
    _full_mixed_block_size = 2 * 1024 * 1024
    _full_enough_block_size = 3 * 1024 * 1024  # size at which we won't repack
    _full_enough_mixed_block_size = 2 * 768 * 1024  # 1.5MB

    def __init__(self, block, get_compressor_settings=None):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0
        self._get_settings = get_compressor_settings
        self._compressor_settings = None

    def _get_compressor_settings(self):
        if self._compressor_settings is not None:
            return self._compressor_settings
        settings = None
        if self._get_settings is not None:
            settings = self._get_settings()
        if settings is None:
            vf = GroupCompressVersionedFiles
            settings = vf._DEFAULT_COMPRESSOR_SETTINGS
        self._compressor_settings = settings
        return self._compressor_settings

    def add_factory(self, key, parents, start, end):
        if not self._factories:
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = b'\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = b'None:'
            else:
                parent_bytes = b'\t'.join(b'\x00'.join(key) for key in parents)
            record_header = b'%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = b''.join(header_lines)
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        z_header_bytes_len = len(z_header_bytes)
        block_bytes_len, block_chunks = self._block.to_chunks()
        lines.append(b'%d\n%d\n%d\n' % (
            z_header_bytes_len, header_bytes_len, block_bytes_len))
        lines.append(z_header_bytes)
        lines.extend(block_chunks)
        del z_header_bytes, block_chunks
        # TODO: This is a point where we will double the memory consumption. To
        #       avoid this, we probably have to switch to a 'chunked' api
        return b''.join(lines)
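
    # Rough sketch of the wire form built above and parsed by from_bytes()
    # below (inferred from this code, not an official format document):
    #   b'groupcompress-block\n'                      (storage kind)
    #   <z_header_len>\n<header_len>\n<block_len>\n   (ASCII decimal lengths)
    #   <zlib-compressed header: 4 lines per record - key, parents, start, end>
    #   <the serialised GroupCompressBlock itself>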

    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way. At a minimum this creates 2 copies of the
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split(b'\n', 4)
        if storage_kind != b'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
        block = GroupCompressBlock.from_bytes(block_bytes)
        result = cls(block)
        for start in range(0, len(header_lines), 4):
            key = tuple(header_lines[start].split(b'\x00'))
            parents_line = header_lines[start + 1]
            if parents_line == b'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split(b'\x00'))
                                 for segment in parents_line.split(b'\t')
                                 if segment])
            start_offset = int(header_lines[start + 2])
            end_offset = int(header_lines[start + 3])
            result.add_factory(key, parents, start_offset, end_offset)

class _CommonGroupCompressor(object):

    def __init__(self, settings=None):
        """Create a GroupCompressor."""
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None  # Set by the children
        self._block = GroupCompressBlock()
        if settings is None:
            self._settings = {}
        else:
            self._settings = settings

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is b'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte,
         end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = b''.join(delta_chunks)
        kind = stored_bytes[:1]
        if kind == b'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            data = stored_bytes[offset + 1:]
        else:
            if kind != b'd':
                raise ValueError('Unknown content kind, bytes claim %s' % kind)
            # XXX: This is inefficient at best
            source = b''.join(self.chunks[:start_chunk])
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            data = apply_delta(source, stored_bytes[offset + 1:])
        data_sha1 = osutils.sha_string(data)
        return data, data_sha1
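
    # Each stored record is either b'f' (a fulltext) or b'd' (a delta whose
    # source is everything appended to the group before it), followed by a
    # base128-encoded length and then the payload; that is why extract()
    # can rebuild a text from self.chunks alone.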

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used
        """
        self._block.set_chunked_content(self.chunks, self.endpoint)
        self.chunks = None
        self._delta_index = None
    It contains code very similar to SequenceMatcher because of having a similar
    task. However some key differences apply:

    * there is no junk, we want a minimal edit not a human readable diff.
    * we don't filter very common lines (because we don't know where a good
      range will start, and after the first text we want to be emitting minimal
    * we chain the left side, not the right side
    * we incrementally update the adjacency matrix as new lines are provided.
    * we look for matches in all of the left side, so the routine which does
      the analogous task of find_longest_match does not need to filter on the

    def __init__(self, settings=None):
        super(PyrexGroupCompressor, self).__init__(settings)
        max_bytes_to_index = self._settings.get('max_bytes_to_index', 0)
        self._delta_index = DeltaIndex(max_bytes_to_index=max_bytes_to_index)
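        # max_bytes_to_index bounds how much of each newly added source text
        # the DeltaIndex will index for delta matching; the default normally
        # comes from GroupCompressVersionedFiles._DEFAULT_COMPRESSOR_SETTINGS
        # further down in this file.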

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint'
                                 ' somehow the DeltaIndex got out of sync with'
                                 ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if (delta is None):
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = [b'f', enc_length, bytes]
        else:
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = [b'd', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        graph_index = BTreeBuilder(reference_lists=ref_length,
                                   key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        index = _GCGraphIndex(graph_index, lambda: True, parents=parents,
                              add_callback=graph_index.add_nodes,
                              inconsistency_fatal=inconsistency_fatal)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        self.total_bytes = 0


class GroupCompressVersionedFiles(VersionedFilesWithFallbacks):
    """A group-compress based VersionedFiles implementation."""

    # This controls how the GroupCompress DeltaIndex works. Basically, we
    # compute hash pointers into the source blocks (so hash(text) => text).
    # However each of these references costs some memory in trade against a
    # more accurate match result. For very large files, they either are
    # pre-compressed and change in bulk whenever they change, or change in just
    # local blocks. Either way, 'improved resolution' is not very helpful,
    # versus running out of memory trying to track everything. The default max
    # gives 100% sampling of a 1MB file.
    _DEFAULT_MAX_BYTES_TO_INDEX = 1024 * 1024
    _DEFAULT_COMPRESSOR_SETTINGS = {'max_bytes_to_index':
                                    _DEFAULT_MAX_BYTES_TO_INDEX}

    def __init__(self, index, access, delta=True, _unadded_refs=None,
                 _group_cache=None):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        :param _unadded_refs: private parameter, don't use.
        :param _group_cache: private parameter, don't use.
        """
        self._index = index
        self._access = access
        if _unadded_refs is None:
            _unadded_refs = {}
        self._unadded_refs = _unadded_refs
        if _group_cache is None:
            _group_cache = LRUSizeCache(max_size=50 * 1024 * 1024)
        self._group_cache = _group_cache
        self._immediate_fallback_vfs = []
        self._max_bytes_to_index = None

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return GroupCompressVersionedFiles(self._index, self._access,
                                           self._delta, _unadded_refs=dict(
                                               self._unadded_refs),
                                           _group_cache=self._group_cache)

    def add_lines(self, key, parents, lines, parent_texts=None,
                  left_matching_blocks=None, nostore_sha=None, random_id=False,
                  check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring. And all
            of them except the last must be terminated with \\n and contain no
            other \\n's. The last line may either contain no \\n's or a single
            terminating \\n. If the lines list does not meet this constraint
            the add routine may error or may succeed - but you will be unable
            to read the data back accurately. (Checking the lines have been
            split correctly is expensive and extremely unlikely to catch bugs
            so it is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            back to future add_lines calls in the parent_texts dictionary.
        """
        self._index._check_write_ok()
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
        return self.add_chunks(
            key, parents, iter(lines), parent_texts, left_matching_blocks,
            nostore_sha, random_id)
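
    # Illustrative use of add_lines() (key and content invented for the
    # example): vf.add_lines((b'file-id', b'rev-id'), [], [b'line one\n'])
    # returns a (sha1, total_length, None) tuple, as add_chunks() below shows.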

    def add_chunks(self, key, parents, chunk_iter, parent_texts=None,
                   left_matching_blocks=None, nostore_sha=None, random_id=False):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param chunk_iter: An iterator over chunks. Chunks
            don't need to be file lines; the only requirement is that they
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations. VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent. The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
        :return: The text sha1, the number of bytes in the text, and an opaque
            representation of the inserted version which can be provided
            back to future add_lines calls in the parent_texts dictionary.
        """
        self._index._check_write_ok()
        self._check_add(key, random_id)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        # TODO(jelmer): problematic for big files: let's not keep the list of
        chunks = list(chunk_iter)
        record = ChunkedContentFactory(key, parents, None, chunks)
        sha1 = list(self._insert_record_stream(
            [record], random_id=random_id, nostore_sha=nostore_sha))[0]
        return sha1, sum(map(len, chunks)), None

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._immediate_fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.
                key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(orig_keys,
                locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance
            source_keys = self._get_io_ordered_source_keys(locations,
                unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        # Batch up as many keys as we can until either:
        #  - we encounter an unadded ref, or
        #  - we run out of keys, or
        #  - the total bytes to retrieve for this batch > BATCH_SIZE
        batcher = _BatchingBlockFetcher(self, locations,
            get_compressor_settings=self._get_compressor_settings)
        for source, keys in source_keys:
            if source is self:
                for key in keys:
        for _ in self._insert_record_stream(stream, random_id=False):
            pass

    def _get_compressor_settings(self):
        if self._max_bytes_to_index is None:
            # TODO: VersionedFiles don't know about their containing
            #       repository, so they don't have much of an idea about their
            #       location. So for now, this is only a global option.
            c = config.GlobalConfig()
            val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
                except ValueError as e:
                    trace.warning('Value for '
                                  '"bzr.groupcompress.max_bytes_to_index"'
                                  ' %r is not an integer'
                val = self._DEFAULT_MAX_BYTES_TO_INDEX
            self._max_bytes_to_index = val
        return {'max_bytes_to_index': self._max_bytes_to_index}
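
    # The option read above can be set in the user's global configuration,
    # for example (value illustrative):
    #   bzr.groupcompress.max_bytes_to_index = 2097152
    # Non-integer values are warned about and the default
    # _DEFAULT_MAX_BYTES_TO_INDEX is used instead.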

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        # This will go up to fulltexts for gc to gc fetching, which isn't
        self._compressor = self._make_group_compressor()
        self._unadded_refs = {}
        keys_to_add = []
            bytes_len, chunks = self._compressor.flush().to_chunks()
            self._compressor = self._make_group_compressor()
            # Note: At this point we still have 1 copy of the fulltext (in
            #       record and the var 'bytes'), and this generates 2 copies of
            #       the compressed text (one for bytes, one in chunks)
            # TODO: Push 'chunks' down into the _access api, so that we don't
            #       have to double compressed memory here
            # TODO: Figure out how to indicate that we would be happy to free
            #       the fulltext content at this point. Note that sometimes we
            #       will want it later (streaming CHK pages), but most of the
            #       time we won't (everything else)
            data = b''.join(chunks)
            index, start, length = self._access.add_raw_records(
                [(None, len(data))], data)[0]
            for key, reads, refs in keys_to_add:
                nodes.append((key, b"%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
                            raise AssertionError('No insert_manager set')
                        if insert_manager is not record._manager:
                            raise AssertionError('insert_manager does not match'
                                ' the current record, we cannot be positive'
                                ' that the appropriate content was inserted.'
                                )
                        value = b"%d %d %d %d" % (block_start, block_length,
                                                  record._start, record._end)
                        nodes = [(record.key, value, (record.parents,))]
                        # TODO: Consider buffering up many nodes to be added, not
                        #       sure how much overhead this has, but we're seeing
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._immediate_fallback_vfs
        for source in sources:
            result.update(source.keys())

class _GCBuildDetails(object):
    """A blob of data about the build details.

    This stores the minimal data, which then allows compatibility with the old
    api, without taking as much memory.
    """

    __slots__ = ('_index', '_group_start', '_group_end', '_basis_end',
                 '_delta_end', '_parents')

    compression_parent = None

    def __init__(self, parents, position_info):
        self._parents = parents
        (self._index, self._group_start, self._group_end, self._basis_end,
         self._delta_end) = position_info

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__,
                               self.index_memo, self._parents)

    @property
    def index_memo(self):
        return (self._index, self._group_start, self._group_end,
                self._basis_end, self._delta_end)

    @property
    def record_details(self):
        return static_tuple.StaticTuple(self.method, None)

    def __getitem__(self, offset):
        """Compatibility thunk to act like a tuple."""
        if offset == 0:
            return self.index_memo
        elif offset == 1:
            return self.compression_parent  # Always None
        elif offset == 2:
            return self._parents
        elif offset == 3:
            return self.record_details
        else:
            raise IndexError('offset out of range')
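
    # __getitem__ mirrors the old build_details tuple layout
    # (index_memo, compression_parent, parents, record_details), so callers
    # that still index into the result keep working; this mapping is inferred
    # from the returns above.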

class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
                 add_callback=None, track_external_parent_refs=False,
                 inconsistency_fatal=True, track_new_keys=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of breezy.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
        :param parents: If True, record knits parents, if not do not record