"""Core compression logic for compressing streams of related files."""

from __future__ import absolute_import

from .lazy_import import lazy_import
lazy_import(globals(), """
from breezy import (
    graph as _mod_graph,
    )
from breezy.repofmt import pack_repo
from breezy.i18n import gettext
""")

from .btree_index import BTreeBuilder
from .lru_cache import LRUSizeCache
from .versionedfile import (
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFilesWithFallbacks,
    )
# Minimum number of uncompressed bytes to try fetch at once when retrieving
# groupcompress blocks.

# osutils.sha_string('')
_null_sha1 = b'da39a3ee5e6b4b0d3255bfef95601890afd80709'
def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
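    # For example, with keys of the form (file_id, revision_id) this yields
    # all texts of one file grouped together, each group in roughly
    # newest-to-oldest order, e.g.:
    #   (b'f1', b'rev3'), (b'f1', b'rev1'), (b'f2', b'rev3'), (b'f2', b'rev2')
    # so texts that are likely to delta well end up next to each other.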
    per_prefix_map = {}
    for key, value in viewitems(parent_map):
        if isinstance(key, bytes) or len(key) == 1:
    # Group Compress Block v1 Zlib
    GCB_HEADER = b'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = b'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)
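    # A serialised block therefore looks like (see to_chunks/from_bytes):
    #   <header>              b'gcb1z\n' for zlib, b'gcb1l\n' for lzma
    #   <z_content_length>\n  decimal length of the compressed payload
    #   <content_length>\n    decimal length of the uncompressed content
    #   <z_content bytes>     the compressed payload itself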
    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content_chunks = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = b''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            # We join self._z_content_chunks here, because if we are
            # decompressing, then it is *very* likely that we have a single
            # large chunk anyway.
            if self._z_content_chunks is None:
                raise AssertionError('No content to decompress')
            z_content = b''.join(self._z_content_chunks)
            if z_content == b'':
                self._content = b''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes * 4 > self._content_length * 3:
                    # If we are requesting more that 3/4ths of the content,
                    # just extract the whole thing in a single pass
                    num_bytes = self._content_length
                    self._content = zlib.decompress(z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
                    if not self._z_content_decompressor.unconsumed_tail:
                        self._z_content_decompressor = None
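                    # (zlib detail: decompress(data, max_length) stops once
                    # max_length bytes of output have been produced; any
                    # unread input stays in unconsumed_tail so a later
                    # _ensure_content() call can resume where this one
                    # stopped instead of starting over.)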
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = data.index(b'\n', pos, pos + 14)
        self._z_content_length = int(data[pos:pos2])
        pos = pos2 + 1
        pos2 = data.index(b'\n', pos, pos + 14)
        self._content_length = int(data[pos:pos2])
        pos = pos2 + 1
        if len(data) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(data), pos, self._z_content_length))
        self._z_content_chunks = (data[pos:],)

    @property
    def _z_content(self):
        """Return z_content_chunks as a simple string.

        Meant only to be used by the test suite.
        """
        if self._z_content_chunks is not None:
            return b''.join(self._z_content_chunks)
    def from_bytes(cls, bytes):

        :return: The bytes for the content
        """
        if start == end == 0:
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
            type = 'fulltext'
                raise ValueError('Unknown content control code: %s'
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                             ' %s != %s' % (end, content_start + content_len))
            return self._content[content_start:end]
        # Must be type delta as checked above
        return apply_delta_to_source(self._content, content_start, end)
    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        self._content_length = length
        self._content_chunks = content_chunks
        self._content = None
        self._z_content_chunks = None
    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content_chunks = None

    def _create_z_content_from_chunks(self, chunks):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        # Peak in this point is 1 fulltext, 1 compressed text, + zlib overhead
        # (measured peak is maybe 30MB over the above...)
        compressed_chunks = list(map(compressor.compress, chunks))
        compressed_chunks.append(compressor.flush())
        # Ignore empty chunks
        self._z_content_chunks = [c for c in compressed_chunks if c]
        self._z_content_length = sum(map(len, self._z_content_chunks))
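        # (zlib's compressobj may legitimately return b'' from compress()
        # while it is buffering input internally; the filtering above drops
        # those empty chunks, and flush() emits whatever is still pending.)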
    def _create_z_content(self):
        if self._z_content_chunks is not None:
            return
        if self._content_chunks is not None:
            chunks = self._content_chunks
        else:
            chunks = (self._content,)
        self._create_z_content_from_chunks(chunks)

    def to_chunks(self):
        """Create the byte stream as a series of 'chunks'"""
        self._create_z_content()
        header = self.GCB_HEADER
        chunks = [b'%s%d\n%d\n'
                  % (header, self._z_content_length, self._content_length),
                  ]
        chunks.extend(self._z_content_chunks)
        total_len = sum(map(len, chunks))
        return total_len, chunks
    def to_bytes(self):
        """Encode the information into a byte stream."""
        total_len, chunks = self.to_chunks()
        return b''.join(chunks)
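    # Illustrative round trip using the methods above:
    #   block = GroupCompressBlock()
    #   block.set_content(b'some text')
    #   data = block.to_bytes()
    #   block2 = GroupCompressBlock.from_bytes(data)
    # from_bytes() checks the header against GCB_KNOWN_HEADERS and re-reads
    # the two length fields that to_chunks() wrote.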
    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.
        """

        while pos < self._content_length:
            kind = self._content[pos]
            if kind not in (b'f', b'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                self._content[pos:pos + 5])
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == b'f': # Fulltext
                    text = self._content[pos:pos+content_len]
                    result.append((b'f', content_len, text))
                    result.append((b'f', content_len))
            elif kind == b'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append((b'd', content_len, decomp_len, delta_info))
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                            text = self._content[offset:offset+length]
                            delta_info.append((b'c', offset, length, text))
                            delta_info.append((b'c', offset, length))
                        measured_len += length
                            txt = delta_content[delta_pos:delta_pos+c]
                        delta_info.append((b'i', c, txt))
                        measured_len += c
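                    # (Each pass decodes one delta instruction: a copy entry
                    # (b'c', offset, length) reuses bytes already present in
                    # the source text, an insert entry (b'i', num_bytes, text)
                    # carries literal bytes; measured_len sums the size of the
                    # text those instructions would reconstruct.)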
                if delta_pos != content_len:
            # wire bytes, something...
            return self._manager._wire_bytes()
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                try:
                    self._manager._prepare_for_extract()
                except zlib.error as value:
                    raise errors.DecompressCorruption("zlib: " + str(value))
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
    _full_enough_block_size = 3*1024*1024 # size at which we won't repack
    _full_enough_mixed_block_size = 2*768*1024 # 1.5MB

    def __init__(self, block, get_compressor_settings=None):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0
        self._get_settings = get_compressor_settings
        self._compressor_settings = None

    def _get_compressor_settings(self):
        if self._compressor_settings is not None:
            return self._compressor_settings
        settings = None
        if self._get_settings is not None:
            settings = self._get_settings()
        if settings is None:
            vf = GroupCompressVersionedFiles
            settings = vf._DEFAULT_COMPRESSOR_SETTINGS
        self._compressor_settings = settings
        return self._compressor_settings
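    # (The settings are supplied as a callable and resolved lazily above, so
    # the 'max_bytes_to_index' configuration only needs to be read once a
    # compressor is actually built, e.g. when _rebuild_block() repacks.)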
    def add_factory(self, key, parents, start, end):
        if not self._factories:

        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())
    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = self._make_group_compressor()
        tstart = time.time()
        old_length = self._block._content_length

        # block? It seems hard to come up with a method that it would
        # expand, since we do full compression again. Perhaps based on a
        # request that ends up poorly ordered?
        # TODO: If the content would have expanded, then we would want to
        #       handle a case where we need to split the block.
        #       Now that we have a user-tweakable option
        #       (max_bytes_to_index), it is possible that one person set it
        #       to a very low value, causing poor compression.
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
        z_header_bytes = zlib.compress(header_bytes)
        z_header_bytes_len = len(z_header_bytes)
        block_bytes_len, block_chunks = self._block.to_chunks()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       block_bytes_len))
        lines.append(z_header_bytes)
        lines.extend(block_chunks)
        del z_header_bytes, block_chunks
        # TODO: This is a point where we will double the memory consumption. To
        #       avoid this, we probably have to switch to a 'chunked' api
        return ''.join(lines)
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way. At a minimum this creates 2 copies of the
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        if sha1 == nostore_sha:
            raise errors.ExistingContent()
        if key[-1] is None:
            # GZ 2017-06-10: Seems perverse to have to encode here.
            sha1 = sha1.encode('ascii')
            key = key[:-1] + (b'sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == b'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
        After calling this, the compressor should no longer be used
        """
        self._block.set_chunked_content(self.chunks, self.endpoint)
        self.chunks = None
        self._delta_index = None
class PythonGroupCompressor(_CommonGroupCompressor):

    def __init__(self, settings=None):
        """Create a GroupCompressor.

        Used only if the pyrex version is not available.
        """
        super(PythonGroupCompressor, self).__init__(settings)
        self._delta_index = LinesDeltaIndex([])
        # The actual content is managed by LinesDeltaIndex
        self.chunks = self._delta_index.lines
        if delta_length > max_delta_size:
            # The delta is longer than the fulltext, insert a fulltext
            type = 'fulltext'
            out_lines = [b'f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
        else:
            # this is a worthy delta, output it
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        # Before insertion
    It contains code very similar to SequenceMatcher because of having a similar
    task. However some key differences apply:

    * there is no junk, we want a minimal edit not a human readable diff.
    * we don't filter very common lines (because we don't know where a good
      range will start, and after the first text we want to be emitting minimal
    * we chain the left side, not the right side
    * we incrementally update the adjacency matrix as new lines are provided.
    * we look for matches in all of the left side, so the routine which does
      the analogous task of find_longest_match does not need to filter on the
    def __init__(self, settings=None):
        super(PyrexGroupCompressor, self).__init__(settings)
        max_bytes_to_index = self._settings.get('max_bytes_to_index', 0)
        self._delta_index = DeltaIndex(max_bytes_to_index=max_bytes_to_index)

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""

            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = [b'f', enc_length, bytes]

            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = [b'd', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
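        # (Either branch produces the same record shape: a one-byte marker,
        # b'f' or b'd', then the base128-encoded payload length, then the
        # payload; len_mini_header above is therefore 1 + len(enc_length).)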
        index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
                              add_callback=graph_index.add_nodes,
                              inconsistency_fatal=inconsistency_fatal)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
class _BatchingBlockFetcher(object):
    """Fetch group compress blocks in batches.

    :ivar total_bytes: int of expected number of bytes needed to fetch the
        currently pending batch.
    """

    def __init__(self, gcvf, locations, get_compressor_settings=None):
        self.gcvf = gcvf
        self.locations = locations
                    memos_to_get_stack.pop()
                else:
                    block = self.batch_memos[read_memo]
                self.manager = _LazyGroupContentManager(block,
                    get_compressor_settings=self._get_compressor_settings)
                self.last_read_memo = read_memo
            start, end = index_memo[3:5]
            self.manager.add_factory(key, parents, start, end)

        self.total_bytes = 0
class GroupCompressVersionedFiles(VersionedFilesWithFallbacks):
    """A group-compress based VersionedFiles implementation."""

    # This controls how the GroupCompress DeltaIndex works. Basically, we
    # compute hash pointers into the source blocks (so hash(text) => text).
    # However each of these references costs some memory in trade against a
    # more accurate match result. For very large files, they either are
    # pre-compressed and change in bulk whenever they change, or change in just
    # local blocks. Either way, 'improved resolution' is not very helpful,
    # versus running out of memory trying to track everything. The default max
    # gives 100% sampling of a 1MB file.
    _DEFAULT_MAX_BYTES_TO_INDEX = 1024 * 1024
    _DEFAULT_COMPRESSOR_SETTINGS = {'max_bytes_to_index':
                                        _DEFAULT_MAX_BYTES_TO_INDEX}
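    # (Illustration: with the 1MB default every byte of a 1MB text can be
    # indexed for delta matching; larger texts are indexed more sparsely,
    # trading delta quality for a bounded amount of memory.)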
    def __init__(self, index, access, delta=True, _unadded_refs=None,
                 _group_cache=None):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        :param _unadded_refs: private parameter, don't use.
        :param _group_cache: private parameter, don't use.
        """
        self._index = index
        self._access = access
        self._delta = delta
        if _unadded_refs is None:
            _unadded_refs = {}
        self._unadded_refs = _unadded_refs
        if _group_cache is None:
            _group_cache = LRUSizeCache(max_size=50*1024*1024)
        self._group_cache = _group_cache
        self._immediate_fallback_vfs = []
        self._max_bytes_to_index = None

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return GroupCompressVersionedFiles(self._index, self._access,
            self._delta, _unadded_refs=dict(self._unadded_refs),
            _group_cache=self._group_cache)
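    # (Passing _group_cache through means the clone returned by
    # without_fallbacks() keeps sharing the cache of already-decompressed
    # blocks instead of building a new one.)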
    def add_lines(self, key, parents, lines, parent_texts=None,
                  left_matching_blocks=None, nostore_sha=None, random_id=False,

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring. And all
            of them except the last must be terminated with \\n and contain no
            other \\n's. The last line may either contain no \\n's or a single
            terminating \\n. If the lines list does not meet this constraint
            the add routine may error or may succeed - but you will be unable
            to read the data back accurately. (Checking the lines have been
            split correctly is expensive and extremely unlikely to catch bugs
            so it is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        The returned objects should be in the order defined by 'ordering',
        which can weave between different sources.

        :param ordering: Must be one of 'topological' or 'groupcompress'
        :return: List of [(source, [keys])] tuples, such that all keys are in
            the defined order, regardless of source.
        if ordering == 'topological':
            present_keys = tsort.topo_sort(parent_map)
        else:
            # ordering == 'groupcompress'
            # XXX: This only optimizes for the target ordering. We may need

            def get_group(key):
                # This is the group the bytes are stored in, followed by the
                # location in the group
                return locations[key][0]
            # We don't have an ordering for keys in the in-memory object, but
            # lets process the in-memory ones first.
            present_keys = list(unadded_keys)
            present_keys.extend(sorted(locations, key=get_group))
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)
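        # (Sorting by get_group keeps keys that live in the same compressed
        # block adjacent, so each block only needs to be fetched and
        # decompressed once while streaming.)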
        # - we encounter an unadded ref, or
        # - we run out of keys, or
        # - the total bytes to retrieve for this batch > BATCH_SIZE
        batcher = _BatchingBlockFetcher(self, locations,
            get_compressor_settings=self._get_compressor_settings)
        for source, keys in source_keys:
            if source is self:
                for key in keys:

        for _ in self._insert_record_stream(stream, random_id=False):
    def _get_compressor_settings(self):
        if self._max_bytes_to_index is None:
            # TODO: VersionedFiles don't know about their containing
            #       repository, so they don't have much of an idea about their
            #       location. So for now, this is only a global option.
            c = config.GlobalConfig()
            val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
            if val is not None:
                try:
                    val = int(val)
                except ValueError as e:
                    trace.warning('Value for '
                                  '"bzr.groupcompress.max_bytes_to_index"'
                                  ' %r is not an integer'
                                  % (val,))
                    val = None
            if val is None:
                val = self._DEFAULT_MAX_BYTES_TO_INDEX
            self._max_bytes_to_index = val
        return {'max_bytes_to_index': self._max_bytes_to_index}

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())
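    # (The limit can be raised per user via the global configuration option
    # 'bzr.groupcompress.max_bytes_to_index'; a value that does not parse as
    # an integer is warned about and falls back to
    # _DEFAULT_MAX_BYTES_TO_INDEX.)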
    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        # This will go up to fulltexts for gc to gc fetching, which isn't
        self._compressor = self._make_group_compressor()
        self._unadded_refs = {}
        keys_to_add = []

        def flush():
            bytes_len, chunks = self._compressor.flush().to_chunks()
            self._compressor = self._make_group_compressor()
            # Note: At this point we still have 1 copy of the fulltext (in
            #       record and the var 'bytes'), and this generates 2 copies of
            #       the compressed text (one for bytes, one in chunks)
            # TODO: Push 'chunks' down into the _access api, so that we don't
            #       have to double compressed memory here
            # TODO: Figure out how to indicate that we would be happy to free
            #       the fulltext content at this point. Note that sometimes we
            #       will want it later (streaming CHK pages), but most of the
            #       time we won't (everything else)
            data = b''.join(chunks)
            index, start, length = self._access.add_raw_records(
                [(None, len(data))], data)[0]
            nodes = []
            for key, reads, refs in keys_to_add:
                nodes.append((key, b"%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
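        # (flush() writes the current group as one compressed block via
        # _access.add_raw_records() and then records a "<start> <length>
        # <reads>" index entry for every text that was added to it.)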
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note(gettext('Insert claimed random_id=True,'
                               ' but then inserted %r two times'), record.key)
                inserted_keys.add(record.key)
            if reuse_blocks:
                            ' the current record, we cannot be positive'
                            ' that the appropriate content was inserted.')
                    value = b"%d %d %d %d" % (block_start, block_length,
                                              record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._immediate_fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
class _GCBuildDetails(object):
    """A blob of data about the build details.

    This stores the minimal data, which then allows compatibility with the old
    api, without taking as much memory.
    """

    __slots__ = ('_index', '_group_start', '_group_end', '_basis_end',
                 '_delta_end', '_parents')

    method = 'group'
    compression_parent = None

    def __init__(self, parents, position_info):
        self._parents = parents
        (self._index, self._group_start, self._group_end, self._basis_end,
         self._delta_end) = position_info

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__,
                               self.index_memo, self._parents)

    @property
    def index_memo(self):
        return (self._index, self._group_start, self._group_end,
                self._basis_end, self._delta_end)

    @property
    def record_details(self):
        return static_tuple.StaticTuple(self.method, None)

    def __getitem__(self, offset):
        """Compatibility thunk to act like a tuple."""
        if offset == 0:
            return self.index_memo
        elif offset == 1:
            return self.compression_parent # Always None
        elif offset == 2:
            return self._parents
        elif offset == 3:
            return self.record_details
        else:
            raise IndexError('offset out of range')
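    # (An instance can thus stand in for the old 4-tuple
    # (index_memo, compression_parent, parents, record_details) returned by
    # get_build_details, while __slots__ keeps the per-key memory cost low.)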
class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

                 inconsistency_fatal=True, track_new_keys=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of breezy.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
        :param parents: If True, record knits parents, if not do not record
        if self._parents:
            for key, (value, node_refs) in viewitems(keys):
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in viewitems(keys):
                result.append((key, value))
        records = result
        key_dependencies = self._key_dependencies
        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).

            * index_memo: opaque structure to pass to read_records to extract
              the raw data
            * compression_parent: Content that this record is built upon, may
              be None
            * parents: Logical parents of this node
            * record_details: extra information about the content which needs
              to be passed to Factory.parse_record
        """
        self._check_read()
        # each, or about 7MB. Note that it might be even more when you consider
        # how PyInt is allocated in separate slabs. And you can't return a slab
        # to the OS if even 1 int on it is in use. Note though that Python uses
        # a LIFO when re-using PyInt slots, which might cause more
        # fragmentation.
        start = int(bits[0])
        start = self._int_cache.setdefault(start, start)
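        # (setdefault() interns the offset: every record that starts at the
        # same position in a group ends up sharing one int object rather than
        # allocating a fresh one per index entry.)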