# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    ChunkedContentFactory,
    FulltextContentFactory,

# Minimum number of uncompressed bytes to try fetch at once when retrieving
# groupcompress blocks.

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
56
def sort_gc_optimal(parent_map):
57
"""Sort and group the keys in parent_map into groupcompress order.
59
groupcompress is defined (currently) as reverse-topological order, grouped
62
:return: A sorted-list of keys
64
# groupcompress ordering is approximately reverse topological,
65
# properly grouped by file-id.
67
for key, value in parent_map.iteritems():
68
if isinstance(key, str) or len(key) == 1:
73
per_prefix_map[prefix][key] = value
75
per_prefix_map[prefix] = {key: value}
78
for prefix in sorted(per_prefix_map):
79
present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
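# Illustrative example (not part of the original module): for a parent_map
# such as
#   {('f1', 'rev2'): (('f1', 'rev1'),), ('f1', 'rev1'): (), ('f2', 'rev1'): ()}
# the keys are grouped by their 'f1'/'f2' prefix and each group is emitted
# newest-first, giving roughly
#   [('f1', 'rev2'), ('f1', 'rev1'), ('f2', 'rev1')]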
83
# The max zlib window size is 32kB, so if we set 'max_size' output of the
84
# decompressor to the requested bytes + 32kB, then we should guarantee
85
# num_bytes coming out.
86
_ZLIB_DECOMP_WINDOW = 32*1024
88
class GroupCompressBlock(object):
89
"""An object which maintains the internal structure of the compressed data.
91
This tracks the meta info (start of text, length, type, etc.)
94
# Group Compress Block v1 Zlib
95
GCB_HEADER = 'gcb1z\n'
96
# Group Compress Block v1 Lzma
97
GCB_LZ_HEADER = 'gcb1l\n'
98
GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)
101
# map by key? or just order in file?
102
self._compressor_name = None
103
self._z_content = None
104
self._z_content_decompressor = None
105
self._z_content_length = None
106
self._content_length = None
108
self._content_chunks = None
111
# This is the maximum number of bytes this object will reference if
112
# everything is decompressed. However, if we decompress less than
113
# everything... (this would cause some problems for LRUSizeCache)
114
return self._content_length + self._z_content_length
116
def _ensure_content(self, num_bytes=None):
117
"""Make sure that content has been expanded enough.
119
:param num_bytes: Ensure that we have extracted at least num_bytes of
120
content. If None, consume everything
122
if self._content_length is None:
123
raise AssertionError('self._content_length should never be None')
124
if num_bytes is None:
125
num_bytes = self._content_length
126
elif (self._content_length is not None
127
and num_bytes > self._content_length):
128
raise AssertionError(
129
'requested num_bytes (%d) > content length (%d)'
130
% (num_bytes, self._content_length))
131
# Expand the content if required
132
if self._content is None:
133
if self._content_chunks is not None:
134
self._content = ''.join(self._content_chunks)
135
self._content_chunks = None
136
if self._content is None:
137
if self._z_content is None:
138
raise AssertionError('No content to decompress')
139
if self._z_content == '':
141
elif self._compressor_name == 'lzma':
142
# We don't do partial lzma decomp yet
143
self._content = pylzma.decompress(self._z_content)
144
elif self._compressor_name == 'zlib':
145
# Start a zlib decompressor
146
if num_bytes * 4 > self._content_length * 3:
147
# If we are requesting more than 3/4ths of the content,
148
# just extract the whole thing in a single pass
149
num_bytes = self._content_length
150
self._content = zlib.decompress(self._z_content)
152
self._z_content_decompressor = zlib.decompressobj()
153
# Seed the decompressor with the uncompressed bytes, so
154
# that the rest of the code is simplified
155
self._content = self._z_content_decompressor.decompress(
156
self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
157
if not self._z_content_decompressor.unconsumed_tail:
158
self._z_content_decompressor = None
160
raise AssertionError('Unknown compressor: %r'
161
% self._compressor_name)
162
# Any bytes remaining to be decompressed will be in the decompressors
165
# Do we have enough bytes already?
166
if len(self._content) >= num_bytes:
168
# If we got this far, and don't have a decompressor, something is wrong
169
if self._z_content_decompressor is None:
170
raise AssertionError(
171
'No decompressor to decompress %d bytes' % num_bytes)
172
remaining_decomp = self._z_content_decompressor.unconsumed_tail
173
if not remaining_decomp:
174
raise AssertionError('Nothing left to decompress')
175
needed_bytes = num_bytes - len(self._content)
176
# We always set max_size to 32kB over the minimum needed, so that
177
# zlib will give us as much as we really want.
178
# TODO: If this isn't good enough, we could make a loop here,
179
# that keeps expanding the request until we get enough
180
self._content += self._z_content_decompressor.decompress(
181
remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
182
if len(self._content) < num_bytes:
183
raise AssertionError('%d bytes wanted, only %d available'
184
% (num_bytes, len(self._content)))
185
if not self._z_content_decompressor.unconsumed_tail:
186
# The stream is finished
187
self._z_content_decompressor = None
189
def _parse_bytes(self, bytes, pos):
190
"""Read the various lengths from the header.
192
This also populates the various 'compressed' buffers.
194
:return: The position in bytes just after the last newline
196
# At present, we have 2 integers for the compressed and uncompressed
197
# content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
198
# checking too far, cap the search to 14 bytes.
199
pos2 = bytes.index('\n', pos, pos + 14)
200
self._z_content_length = int(bytes[pos:pos2])
202
pos2 = bytes.index('\n', pos, pos + 14)
203
self._content_length = int(bytes[pos:pos2])
205
if len(bytes) != (pos + self._z_content_length):
206
# XXX: Define some GCCorrupt error ?
207
raise AssertionError('Invalid bytes: (%d) != %d + %d' %
208
(len(bytes), pos, self._z_content_length))
209
self._z_content = bytes[pos:]
212
def from_bytes(cls, bytes):
214
if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
215
raise ValueError('bytes did not start with any of %r'
216
% (cls.GCB_KNOWN_HEADERS,))
217
# XXX: why not test the whole header?
219
out._compressor_name = 'zlib'
220
elif bytes[4] == 'l':
221
out._compressor_name = 'lzma'
223
raise ValueError('unknown compressor: %r' % (bytes,))
224
out._parse_bytes(bytes, 6)
227
def extract(self, key, start, end, sha1=None):
228
"""Extract the text for a specific key.
230
:param key: The label used for this content
231
:param sha1: TODO (should we validate only when sha1 is supplied?)
232
:return: The bytes for the content
234
if start == end == 0:
236
self._ensure_content(end)
237
# The bytes are 'f' or 'd' for the type, then a variable-length
238
# base128 integer for the content size, then the actual content
239
# We know that the variable-length integer won't be longer than 5
240
# bytes (it takes 5 bytes to encode 2^32)
241
c = self._content[start]
246
raise ValueError('Unknown content control code: %s'
249
content_len, len_len = decode_base128_int(
250
self._content[start + 1:start + 6])
251
content_start = start + 1 + len_len
252
if end != content_start + content_len:
253
raise ValueError('end != len according to field header'
254
' %s != %s' % (end, content_start + content_len))
256
bytes = self._content[content_start:end]
258
bytes = apply_delta_to_source(self._content, content_start, end)
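# Illustrative note (derived from the comments above and the compressors
# below): each record in the expanded content is a one-byte kind marker
# followed by a base128 length and then the payload, e.g. for a fulltext
#   'f' + encode_base128_int(len(text)) + text
# and for a delta
#   'd' + encode_base128_int(len(delta)) + delta
# so extract() only needs decode_base128_int() to find where the payload
# starts within [start:end].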
261
def set_chunked_content(self, content_chunks, length):
262
"""Set the content of this block to the given chunks."""
263
# If we have lots of short lines, it may be more efficient to join
264
# the content ahead of time. If the content is <10MiB, we don't really
265
# care about the extra memory consumption, so we can just pack it and
266
# be done. However, timing showed 18s => 17.9s for repacking 1k revs of
267
# mysql, which is below the noise margin
268
self._content_length = length
269
self._content_chunks = content_chunks
271
self._z_content = None
273
def set_content(self, content):
274
"""Set the content of this block."""
275
self._content_length = len(content)
276
self._content = content
277
self._z_content = None
279
def _create_z_content_using_lzma(self):
280
if self._content_chunks is not None:
281
self._content = ''.join(self._content_chunks)
282
self._content_chunks = None
283
if self._content is None:
284
raise AssertionError('Nothing to compress')
285
self._z_content = pylzma.compress(self._content)
286
self._z_content_length = len(self._z_content)
288
def _create_z_content_from_chunks(self):
289
compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
290
compressed_chunks = map(compressor.compress, self._content_chunks)
291
compressed_chunks.append(compressor.flush())
292
self._z_content = ''.join(compressed_chunks)
293
self._z_content_length = len(self._z_content)
295
def _create_z_content(self):
296
if self._z_content is not None:
299
self._create_z_content_using_lzma()
301
if self._content_chunks is not None:
302
self._create_z_content_from_chunks()
304
self._z_content = zlib.compress(self._content)
305
self._z_content_length = len(self._z_content)
308
"""Encode the information into a byte stream."""
309
self._create_z_content()
311
header = self.GCB_LZ_HEADER
313
header = self.GCB_HEADER
315
'%d\n%d\n' % (self._z_content_length, self._content_length),
318
return ''.join(chunks)
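# Illustrative note (derived from to_bytes/_parse_bytes above): a serialised
# block therefore looks like
#   'gcb1z\n' (or 'gcb1l\n' for lzma)
#   '<z_content_length>\n'
#   '<content_length>\n'
#   <z_content bytes>
# and GroupCompressBlock.from_bytes() on that string round-trips back to an
# equivalent block.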
320
def _dump(self, include_text=False):
321
"""Take this block, and spit out a human-readable structure.
323
:param include_text: Inserts also include text bits; choose whether you
want this displayed in the dump or not.
325
:return: A dump of the given block. The layout is something like:
326
[('f', length), ('d', delta_length, text_length, [delta_info])]
327
delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
330
self._ensure_content()
333
while pos < self._content_length:
334
kind = self._content[pos]
336
if kind not in ('f', 'd'):
337
raise ValueError('invalid kind character: %r' % (kind,))
338
content_len, len_len = decode_base128_int(
339
self._content[pos:pos + 5])
341
if content_len + pos > self._content_length:
342
raise ValueError('invalid content_len %d for record @ pos %d'
343
% (content_len, pos - len_len - 1))
344
if kind == 'f': # Fulltext
346
text = self._content[pos:pos+content_len]
347
result.append(('f', content_len, text))
349
result.append(('f', content_len))
350
elif kind == 'd': # Delta
351
delta_content = self._content[pos:pos+content_len]
353
# The first entry in a delta is the decompressed length
354
decomp_len, delta_pos = decode_base128_int(delta_content)
355
result.append(('d', content_len, decomp_len, delta_info))
357
while delta_pos < content_len:
358
c = ord(delta_content[delta_pos])
362
delta_pos) = decode_copy_instruction(delta_content, c,
365
text = self._content[offset:offset+length]
366
delta_info.append(('c', offset, length, text))
368
delta_info.append(('c', offset, length))
369
measured_len += length
372
txt = delta_content[delta_pos:delta_pos+c]
375
delta_info.append(('i', c, txt))
378
if delta_pos != content_len:
379
raise ValueError('Delta consumed a bad number of bytes:'
380
' %d != %d' % (delta_pos, content_len))
381
if measured_len != decomp_len:
382
raise ValueError('Delta claimed fulltext was %d bytes, but'
383
' extraction resulted in %d bytes'
384
% (decomp_len, measured_len))
389
class _LazyGroupCompressFactory(object):
390
"""Yield content from a GroupCompressBlock on demand."""
392
def __init__(self, key, parents, manager, start, end, first):
393
"""Create a _LazyGroupCompressFactory
395
:param key: The key of just this record
396
:param parents: The parents of this key (possibly None)
397
:param manager: The _LazyGroupContentManager that holds the GroupCompressBlock
398
:param start: Offset of the first byte for this record in the
400
:param end: Offset of the byte just after the end of this record
401
(ie, bytes = content[start:end])
402
:param first: Is this the first Factory for the given block?
405
self.parents = parents
407
# Note: This attribute coupled with Manager._factories creates a
408
# reference cycle. Perhaps we would rather use a weakref(), or
409
# find an appropriate time to release the ref. After the first
410
# get_bytes_as call? After Manager.get_record_stream() returns
412
self._manager = manager
414
self.storage_kind = 'groupcompress-block'
416
self.storage_kind = 'groupcompress-block-ref'
422
return '%s(%s, first=%s)' % (self.__class__.__name__,
423
self.key, self._first)
425
def get_bytes_as(self, storage_kind):
426
if storage_kind == self.storage_kind:
428
# wire bytes, something...
429
return self._manager._wire_bytes()
432
if storage_kind in ('fulltext', 'chunked'):
433
if self._bytes is None:
434
# Grab and cache the raw bytes for this entry
435
# and break the ref-cycle with _manager since we don't need it
437
self._manager._prepare_for_extract()
438
block = self._manager._block
439
self._bytes = block.extract(self.key, self._start, self._end)
440
# There are code paths that first extract as fulltext, and then
441
# extract as storage_kind (smart fetch). So we don't break the
442
# refcycle here, but instead in manager.get_record_stream()
443
if storage_kind == 'fulltext':
447
raise errors.UnavailableRepresentation(self.key, storage_kind,
451
class _LazyGroupContentManager(object):
452
"""This manages a group of _LazyGroupCompressFactory objects."""
454
_max_cut_fraction = 0.75 # We allow a block to be trimmed to 75% of
455
# current size, and still be considered
457
_full_block_size = 4*1024*1024
458
_full_mixed_block_size = 2*1024*1024
459
_full_enough_block_size = 3*1024*1024 # size at which we won't repack
460
_full_enough_mixed_block_size = 2*768*1024 # 1.5MB
462
def __init__(self, block):
464
# We need to preserve the ordering
468
def add_factory(self, key, parents, start, end):
469
if not self._factories:
473
# Note that this creates a reference cycle....
474
factory = _LazyGroupCompressFactory(key, parents, self,
475
start, end, first=first)
476
# max() works here, but as a function call, doing a compare seems to be
477
# significantly faster, timeit says 250ms for max() and 100ms for the
479
if end > self._last_byte:
480
self._last_byte = end
481
self._factories.append(factory)
483
def get_record_stream(self):
484
"""Get a record for all keys added so far."""
485
for factory in self._factories:
487
# Break the ref-cycle
488
factory._bytes = None
489
factory._manager = None
490
# TODO: Consider setting self._factories = None after the above loop,
491
# as it will break the reference cycle
493
def _trim_block(self, last_byte):
494
"""Create a new GroupCompressBlock, with just some of the content."""
495
# None of the factories need to be adjusted, because the content is
# located in an identical place; only the unreferenced trailing bytes
# are stripped.
498
trace.mutter('stripping trailing bytes from groupcompress block'
499
' %d => %d', self._block._content_length, last_byte)
500
new_block = GroupCompressBlock()
501
self._block._ensure_content(last_byte)
502
new_block.set_content(self._block._content[:last_byte])
503
self._block = new_block
505
def _rebuild_block(self):
506
"""Create a new GroupCompressBlock with only the referenced texts."""
507
compressor = GroupCompressor()
509
old_length = self._block._content_length
511
for factory in self._factories:
512
bytes = factory.get_bytes_as('fulltext')
513
(found_sha1, start_point, end_point,
514
type) = compressor.compress(factory.key, bytes, factory.sha1)
515
# Now update this factory with the new offsets, etc
516
factory.sha1 = found_sha1
517
factory._start = start_point
518
factory._end = end_point
519
self._last_byte = end_point
520
new_block = compressor.flush()
521
# TODO: Should we check that new_block really *is* smaller than the old
# block? It seems hard to come up with a case in which it would expand,
# since we do full compression again. Perhaps based on a request that
# ends up poorly ordered?
525
delta = time.time() - tstart
526
self._block = new_block
527
trace.mutter('creating new compressed block on-the-fly in %.3fs'
528
' %d bytes => %d bytes', delta, old_length,
529
self._block._content_length)
531
def _prepare_for_extract(self):
532
"""A _LazyGroupCompressFactory is about to extract to fulltext."""
533
# We expect that if one child is going to fulltext, all will be. This
# helps prevent all of them from extracting a small amount at a time,
# which in itself isn't terribly expensive, but resizing 2MB of content
# 32kB at a time (self._block._content) is a little expensive.
537
self._block._ensure_content(self._last_byte)
539
def _check_rebuild_action(self):
540
"""Check to see if our block should be repacked."""
543
for factory in self._factories:
544
total_bytes_used += factory._end - factory._start
545
if last_byte_used < factory._end:
546
last_byte_used = factory._end
547
# If we are using more than half of the bytes from the block, we have
548
# nothing else to check
549
if total_bytes_used * 2 >= self._block._content_length:
550
return None, last_byte_used, total_bytes_used
551
# We are using less than 50% of the content. Is the content we are
552
# using at the beginning of the block? If so, we can just trim the
553
# tail, rather than rebuilding from scratch.
554
if total_bytes_used * 2 > last_byte_used:
555
return 'trim', last_byte_used, total_bytes_used
557
# We are using a small amount of the data, and it isn't just packed
558
# nicely at the front, so rebuild the content.
559
# Note: This would be *nicer* as a strip-data-from-group, rather than
560
# building it up again from scratch
561
# It might be reasonable to consider the fulltext sizes for
562
# different bits when deciding this, too. As you may have a small
563
# fulltext, and a trivial delta, and you are just trading around
564
# for another fulltext. If we do a simple 'prune' you may end up
565
# expanding many deltas into fulltexts, as well.
566
# If we build a cheap enough 'strip', then we could try a strip,
567
# if that expands the content, we then rebuild.
568
return 'rebuild', last_byte_used, total_bytes_used
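# Illustrative example (not part of the original module): for a 4MB block
# of which the factories reference only 1MB, we fall through the first
# check (1MB * 2 < 4MB). If that 1MB all sits within the first 1.5MB of
# the block (last_byte_used < 2MB) we return 'trim'; if it is spread
# across the whole block we return 'rebuild'. Blocks with >= 50% of their
# bytes referenced return (None, ...) and are left alone.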
570
def check_is_well_utilized(self):
571
"""Is the current block considered 'well utilized'?
573
This heuristic asks if the current block considers itself to be a fully
574
developed group, rather than just a loose collection of data.
576
if len(self._factories) == 1:
577
# A block of length 1 could be improved by combining with other
# groups - don't look deeper. Even groups larger than the max size
# could compress well with adjacent versions of the same thing.
581
action, last_byte_used, total_bytes_used = self._check_rebuild_action()
582
block_size = self._block._content_length
583
if total_bytes_used < block_size * self._max_cut_fraction:
584
# This block wants to trim itself small enough that we want to
585
# consider it under-utilized.
587
# TODO: This code is meant to be the twin of _insert_record_stream's
588
# 'start_new_block' logic. It would probably be better to factor
589
# out that logic into a shared location, so that it stays
591
# We currently assume a block is properly utilized whenever it is >75%
592
# of the size of a 'full' block. In normal operation, a block is
593
# considered full when it hits 4MB of same-file content. So any block
594
# >3MB is 'full enough'.
595
# The only time this isn't true is when a given block has large-object
596
# content. (a single file >4MB, etc.)
597
# Under these circumstances, we allow a block to grow to
598
# 2 x largest_content. Which means that if a given block had a large
599
# object, it may actually be under-utilized. However, given that this
600
# is 'pack-on-the-fly' it is probably reasonable to not repack large
601
# content blobs on-the-fly. Note that because we return False for all
602
# 1-item blobs, we will repack them; we may wish to reevaluate our
603
# treatment of large object blobs in the future.
604
if block_size >= self._full_enough_block_size:
606
# If a block is <3MB, it still may be considered 'full' if it contains
607
# mixed content. The current rule is 2MB of mixed content is considered
608
# full. So check to see if this block contains mixed content, and
609
# set the threshold appropriately.
611
for factory in self._factories:
612
prefix = factory.key[:-1]
613
if common_prefix is None:
614
common_prefix = prefix
615
elif prefix != common_prefix:
616
# Mixed content, check the size appropriately
617
if block_size >= self._full_enough_mixed_block_size:
620
# The content failed both the mixed check and the single-content check
621
# so obviously it is not fully utilized
622
# TODO: there is one other constraint that isn't being checked
623
# namely, that the entries in the block are in the appropriate
624
# order. For example, you could insert the entries in exactly
625
# reverse groupcompress order, and we would think that is ok.
626
# (all the right objects are in one group, and it is fully
627
# utilized, etc.) For now, we assume that case is rare,
628
# especially since we should always fetch in 'groupcompress'
632
def _check_rebuild_block(self):
633
action, last_byte_used, total_bytes_used = self._check_rebuild_action()
637
self._trim_block(last_byte_used)
638
elif action == 'rebuild':
639
self._rebuild_block()
641
raise ValueError('unknown rebuild action: %r' % (action,))
643
def _wire_bytes(self):
644
"""Return a byte stream suitable for transmitting over the wire."""
645
self._check_rebuild_block()
646
# The outer block starts with:
647
# 'groupcompress-block\n'
648
# <length of compressed key info>\n
649
# <length of uncompressed info>\n
650
# <length of gc block>\n
653
lines = ['groupcompress-block\n']
654
# The minimal info we need is the key, the start offset, and the
655
# parents. The length and type are encoded in the record itself.
656
# However, passing in the other bits makes it easier. The list of
657
# keys, and the start offset, the length
659
# 1 line with parents, '' for ()
660
# 1 line for start offset
661
# 1 line for end byte
663
for factory in self._factories:
664
key_bytes = '\x00'.join(factory.key)
665
parents = factory.parents
667
parent_bytes = 'None:'
669
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
670
record_header = '%s\n%s\n%d\n%d\n' % (
671
key_bytes, parent_bytes, factory._start, factory._end)
672
header_lines.append(record_header)
673
# TODO: Can we break the refcycle at this point and set
674
# factory._manager = None?
675
header_bytes = ''.join(header_lines)
677
header_bytes_len = len(header_bytes)
678
z_header_bytes = zlib.compress(header_bytes)
680
z_header_bytes_len = len(z_header_bytes)
681
block_bytes = self._block.to_bytes()
682
lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
684
lines.append(z_header_bytes)
685
lines.append(block_bytes)
686
del z_header_bytes, block_bytes
687
return ''.join(lines)
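# Illustrative note (derived from the comments and code above): the wire
# form produced here is
#   'groupcompress-block\n'
#   '<len of compressed header>\n<len of uncompressed header>\n<len of block>\n'
#   <zlib-compressed header: one 'key\nparents\nstart\nend\n' group per
#    record, key elements joined by NUL, parent keys joined by TAB, and
#    'None:' when there are no parents>
#   <the serialised GroupCompressBlock itself>
# which is what from_bytes() below parses apart.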
690
def from_bytes(cls, bytes):
691
# TODO: This does extra string copying, probably better to do it a
693
(storage_kind, z_header_len, header_len,
694
block_len, rest) = bytes.split('\n', 4)
696
if storage_kind != 'groupcompress-block':
697
raise ValueError('Unknown storage kind: %s' % (storage_kind,))
698
z_header_len = int(z_header_len)
699
if len(rest) < z_header_len:
700
raise ValueError('Compressed header len shorter than all bytes')
701
z_header = rest[:z_header_len]
702
header_len = int(header_len)
703
header = zlib.decompress(z_header)
704
if len(header) != header_len:
705
raise ValueError('invalid length for decompressed bytes')
707
block_len = int(block_len)
708
if len(rest) != z_header_len + block_len:
709
raise ValueError('Invalid length for block')
710
block_bytes = rest[z_header_len:]
712
# So now we have a valid GCB, we just need to parse the factories that
714
header_lines = header.split('\n')
716
last = header_lines.pop()
718
raise ValueError('header lines did not end with a trailing'
720
if len(header_lines) % 4 != 0:
721
raise ValueError('The header was not an even multiple of 4 lines')
722
block = GroupCompressBlock.from_bytes(block_bytes)
725
for start in xrange(0, len(header_lines), 4):
727
key = tuple(header_lines[start].split('\x00'))
728
parents_line = header_lines[start+1]
729
if parents_line == 'None:':
732
parents = tuple([tuple(segment.split('\x00'))
733
for segment in parents_line.split('\t')
735
start_offset = int(header_lines[start+2])
736
end_offset = int(header_lines[start+3])
737
result.add_factory(key, parents, start_offset, end_offset)
741
def network_block_to_records(storage_kind, bytes, line_end):
742
if storage_kind != 'groupcompress-block':
743
raise ValueError('Unknown storage kind: %s' % (storage_kind,))
744
manager = _LazyGroupContentManager.from_bytes(bytes)
745
return manager.get_record_stream()
748
class _CommonGroupCompressor(object):
751
"""Create a GroupCompressor."""
756
self.labels_deltas = {}
757
self._delta_index = None # Set by the children
758
self._block = GroupCompressBlock()
760
def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
761
"""Compress lines with label key.
763
:param key: A key tuple. It is stored in the output
764
for identification of the text during decompression. If the last
765
element is 'None' it is replaced with the sha1 of the text -
767
:param bytes: The bytes to be compressed
768
:param expected_sha: If non-None, the sha the lines are believed to
769
have. During compression the sha is calculated; a mismatch will
771
:param nostore_sha: If the computed sha1 sum matches, we will raise
772
ExistingContent rather than adding the text.
773
:param soft: Do a 'soft' compression. This means that we require larger
774
ranges to match to be considered for a copy command.
776
:return: The sha1 of lines, the start and end offsets in the delta, and
777
the type ('fulltext' or 'delta').
779
:seealso VersionedFiles.add_lines:
781
if not bytes: # empty, like a dir entry, etc
782
if nostore_sha == _null_sha1:
783
raise errors.ExistingContent()
784
return _null_sha1, 0, 0, 'fulltext'
785
# we assume someone knew what they were doing when they passed it in
786
if expected_sha is not None:
789
sha1 = osutils.sha_string(bytes)
790
if nostore_sha is not None:
791
if sha1 == nostore_sha:
792
raise errors.ExistingContent()
794
key = key[:-1] + ('sha1:' + sha1,)
796
start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
797
return sha1, start, end, type
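# Illustrative usage sketch (hypothetical keys and texts, not part of the
# original module): feeding related texts through one compressor lets later
# texts be stored as deltas against earlier ones:
#   c = GroupCompressor()
#   sha1, start, end, kind = c.compress(('file-id', 'rev-1'), text_1, None)
#   sha1, start, end, kind = c.compress(('file-id', 'rev-2'), text_2, None)
#   block = c.flush()    # a GroupCompressBlock holding both texts
# 'kind' is 'fulltext' or 'delta' depending on whether the delta stayed
# under half the size of the new text.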
799
def _compress(self, key, bytes, max_delta_size, soft=False):
800
"""Compress lines with label key.
802
:param key: A key tuple. It is stored in the output for identification
803
of the text during decompression.
805
:param bytes: The bytes to be compressed
807
:param max_delta_size: The size above which we issue a fulltext instead
810
:param soft: Do a 'soft' compression. This means that we require larger
811
ranges to match to be considered for a copy command.
813
:return: The sha1 of lines, the start and end offsets in the delta, and
814
the type ('fulltext' or 'delta').
816
raise NotImplementedError(self._compress)
818
def extract(self, key):
819
"""Extract a key previously added to the compressor.
821
:param key: The key to extract.
822
:return: An iterable over bytes and the sha1.
824
(start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
825
delta_chunks = self.chunks[start_chunk:end_chunk]
826
stored_bytes = ''.join(delta_chunks)
827
if stored_bytes[0] == 'f':
828
fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
829
data_len = fulltext_len + 1 + offset
830
if data_len != len(stored_bytes):
831
raise ValueError('Index claimed fulltext len, but stored bytes'
833
% (len(stored_bytes), data_len))
834
bytes = stored_bytes[offset + 1:]
836
# XXX: This is inefficient at best
837
source = ''.join(self.chunks[:start_chunk])
838
if stored_bytes[0] != 'd':
839
raise ValueError('Unknown content kind, bytes claim %s'
840
% (stored_bytes[0],))
841
delta_len, offset = decode_base128_int(stored_bytes[1:10])
842
data_len = delta_len + 1 + offset
843
if data_len != len(stored_bytes):
844
raise ValueError('Index claimed delta len, but stored bytes'
846
% (len(stored_bytes), data_len))
847
bytes = apply_delta(source, stored_bytes[offset + 1:])
848
bytes_sha1 = osutils.sha_string(bytes)
849
return bytes, bytes_sha1
852
"""Finish this group, creating a formatted stream.
854
After calling this, the compressor should no longer be used
856
# TODO: this causes us to 'bloat' to 2x the size of content in the
857
# group. This has an impact on 'commit' of large objects.
858
# One possibility is to use self._content_chunks, and be lazy and
859
# only fill out self._content as a full string when we actually
860
# need it. That would at least drop the peak memory consumption
861
# for 'commit' down to ~1x the size of the largest file, at a
862
# cost of increased complexity within this code. 2x is still <<
863
# 3x the size of the largest file, so we are doing ok.
864
self._block.set_chunked_content(self.chunks, self.endpoint)
866
self._delta_index = None
870
"""Call this if you want to 'revoke' the last compression.
872
After this, the data structures will be rolled back, but you cannot do
875
self._delta_index = None
876
del self.chunks[self._last[0]:]
877
self.endpoint = self._last[1]
881
"""Return the overall compression ratio."""
882
return float(self.input_bytes) / float(self.endpoint)
885
class PythonGroupCompressor(_CommonGroupCompressor):
888
"""Create a GroupCompressor.
890
Used only if the pyrex version is not available.
892
super(PythonGroupCompressor, self).__init__()
893
self._delta_index = LinesDeltaIndex([])
894
# The actual content is managed by LinesDeltaIndex
895
self.chunks = self._delta_index.lines
897
def _compress(self, key, bytes, max_delta_size, soft=False):
898
"""see _CommonGroupCompressor._compress"""
899
input_len = len(bytes)
900
new_lines = osutils.split_lines(bytes)
901
out_lines, index_lines = self._delta_index.make_delta(
902
new_lines, bytes_length=input_len, soft=soft)
903
delta_length = sum(map(len, out_lines))
904
if delta_length > max_delta_size:
905
# The delta is longer than the fulltext, insert a fulltext
907
out_lines = ['f', encode_base128_int(input_len)]
908
out_lines.extend(new_lines)
909
index_lines = [False, False]
910
index_lines.extend([True] * len(new_lines))
912
# this is a worthy delta, output it
915
# Update the delta_length to include those two encoded integers
916
out_lines[1] = encode_base128_int(delta_length)
918
start = self.endpoint
919
chunk_start = len(self.chunks)
920
self._last = (chunk_start, self.endpoint)
921
self._delta_index.extend_lines(out_lines, index_lines)
922
self.endpoint = self._delta_index.endpoint
923
self.input_bytes += input_len
924
chunk_end = len(self.chunks)
925
self.labels_deltas[key] = (start, chunk_start,
926
self.endpoint, chunk_end)
927
return start, self.endpoint, type
930
class PyrexGroupCompressor(_CommonGroupCompressor):
931
"""Produce a serialised group of compressed texts.
933
It contains code very similar to SequenceMatcher because it has a similar
task. However, some key differences apply:
935
- there is no junk, we want a minimal edit not a human readable diff.
936
- we don't filter very common lines (because we don't know where a good
937
range will start, and after the first text we want to be emitting minimal
939
- we chain the left side, not the right side
940
- we incrementally update the adjacency matrix as new lines are provided.
941
- we look for matches in all of the left side, so the routine which does
942
the analogous task of find_longest_match does not need to filter on the
947
super(PyrexGroupCompressor, self).__init__()
948
self._delta_index = DeltaIndex()
950
def _compress(self, key, bytes, max_delta_size, soft=False):
951
"""see _CommonGroupCompressor._compress"""
952
input_len = len(bytes)
953
# By having action/label/sha1/len, we can parse the group if the index
954
# was ever destroyed, we have the key in 'label', we know the final
955
# bytes are valid from sha1, and we know where to find the end of this
956
# record because of 'len'. (the delta record itself will store the
957
# total length for the expanded record)
958
# 'len: %d\n' costs approximately 1% increase in total data
959
# Having the labels at all costs us 9-10% increase, 38% increase for
960
# inventory pages, and 5.8% increase for text pages
961
# new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
962
if self._delta_index._source_offset != self.endpoint:
963
raise AssertionError('_source_offset != endpoint'
964
' somehow the DeltaIndex got out of sync with'
966
delta = self._delta_index.make_delta(bytes, max_delta_size)
969
enc_length = encode_base128_int(len(bytes))
970
len_mini_header = 1 + len(enc_length)
971
self._delta_index.add_source(bytes, len_mini_header)
972
new_chunks = ['f', enc_length, bytes]
975
enc_length = encode_base128_int(len(delta))
976
len_mini_header = 1 + len(enc_length)
977
new_chunks = ['d', enc_length, delta]
978
self._delta_index.add_delta_source(delta, len_mini_header)
980
start = self.endpoint
981
chunk_start = len(self.chunks)
982
# Now output these bytes
983
self._output_chunks(new_chunks)
984
self.input_bytes += input_len
985
chunk_end = len(self.chunks)
986
self.labels_deltas[key] = (start, chunk_start,
987
self.endpoint, chunk_end)
988
if not self._delta_index._source_offset == self.endpoint:
989
raise AssertionError('the delta index is out of sync'
990
'with the output lines %s != %s'
991
% (self._delta_index._source_offset, self.endpoint))
992
return start, self.endpoint, type
994
def _output_chunks(self, new_chunks):
995
"""Output some chunks.
997
:param new_chunks: The chunks to output.
999
self._last = (len(self.chunks), self.endpoint)
1000
endpoint = self.endpoint
1001
self.chunks.extend(new_chunks)
1002
endpoint += sum(map(len, new_chunks))
1003
self.endpoint = endpoint
1006
def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
1007
"""Create a factory for creating a pack based groupcompress.
1009
This is only functional enough to run interface tests; it doesn't try to
provide a full pack environment.
1012
:param graph: Store a graph.
1013
:param delta: Delta compress contents.
1014
:param keylength: How long keys should be.
1016
def factory(transport):
1021
graph_index = BTreeBuilder(reference_lists=ref_length,
1022
key_elements=keylength)
1023
stream = transport.open_write_stream('newpack')
1024
writer = pack.ContainerWriter(stream.write)
1026
index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
1027
add_callback=graph_index.add_nodes,
1028
inconsistency_fatal=inconsistency_fatal)
1029
access = knit._DirectPackAccess({})
1030
access.set_writer(writer, graph_index, (transport, 'newpack'))
1031
result = GroupCompressVersionedFiles(index, access, delta)
1032
result.stream = stream
1033
result.writer = writer
1038
def cleanup_pack_group(versioned_files):
1039
versioned_files.writer.end()
1040
versioned_files.stream.close()
1043
class _BatchingBlockFetcher(object):
1044
"""Fetch group compress blocks in batches.
1046
:ivar total_bytes: int of expected number of bytes needed to fetch the
1047
currently pending batch.
1050
def __init__(self, gcvf, locations):
1052
self.locations = locations
1054
self.batch_memos = {}
1055
self.memos_to_get = []
1056
self.total_bytes = 0
1057
self.last_read_memo = None
1060
def add_key(self, key):
1061
"""Add another to key to fetch.
1063
:return: The estimated number of bytes needed to fetch the batch so
1066
self.keys.append(key)
1067
index_memo, _, _, _ = self.locations[key]
1068
read_memo = index_memo[0:3]
1069
# Three possibilities for this read_memo:
1070
# - it's already part of this batch; or
1071
# - it's not yet part of this batch, but is already cached; or
1072
# - it's not yet part of this batch and will need to be fetched.
1073
if read_memo in self.batch_memos:
1074
# This read memo is already in this batch.
1075
return self.total_bytes
1077
cached_block = self.gcvf._group_cache[read_memo]
1079
# This read memo is new to this batch, and the data isn't cached
1081
self.batch_memos[read_memo] = None
1082
self.memos_to_get.append(read_memo)
1083
byte_length = read_memo[2]
1084
self.total_bytes += byte_length
1086
# This read memo is new to this batch, but cached.
1087
# Keep a reference to the cached block in batch_memos because it's
1088
# certain that we'll use it when this batch is processed, but
1089
# there's a risk that it would fall out of _group_cache between now
1091
self.batch_memos[read_memo] = cached_block
1092
return self.total_bytes
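# Illustrative note (derived from the code above): a read_memo is just
# index_memo[0:3] -- enough to identify and fetch one raw compressed
# block -- while index_memo[3:5] (used later in yield_factories) gives
# the start/end offsets of this particular record inside that block.
# Keys whose records live in the same block therefore share a read_memo
# and add nothing further to total_bytes.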
1094
def _flush_manager(self):
1095
if self.manager is not None:
1096
for factory in self.manager.get_record_stream():
1099
self.last_read_memo = None
1101
def yield_factories(self, full_flush=False):
1102
"""Yield factories for keys added since the last yield. They will be
1103
returned in the order they were added via add_key.
1105
:param full_flush: by default, some results may not be returned in case
1106
they can be part of the next batch. If full_flush is True, then
1107
all results are returned.
1109
if self.manager is None and not self.keys:
1111
# Fetch all memos in this batch.
1112
blocks = self.gcvf._get_blocks(self.memos_to_get)
1113
# Turn blocks into factories and yield them.
1114
memos_to_get_stack = list(self.memos_to_get)
1115
memos_to_get_stack.reverse()
1116
for key in self.keys:
1117
index_memo, _, parents, _ = self.locations[key]
1118
read_memo = index_memo[:3]
1119
if self.last_read_memo != read_memo:
1120
# We are starting a new block. If we have a
1121
# manager, we have found everything that fits for
1122
# now, so yield records
1123
for factory in self._flush_manager():
1125
# Now start a new manager.
1126
if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
1127
# The next block from _get_blocks will be the block we
1129
block_read_memo, block = blocks.next()
1130
if block_read_memo != read_memo:
1131
raise AssertionError(
1132
"block_read_memo out of sync with read_memo"
1133
"(%r != %r)" % (block_read_memo, read_memo))
1134
self.batch_memos[read_memo] = block
1135
memos_to_get_stack.pop()
1137
block = self.batch_memos[read_memo]
1138
self.manager = _LazyGroupContentManager(block)
1139
self.last_read_memo = read_memo
1140
start, end = index_memo[3:5]
1141
self.manager.add_factory(key, parents, start, end)
1143
for factory in self._flush_manager():
1146
self.batch_memos.clear()
1147
del self.memos_to_get[:]
1148
self.total_bytes = 0
1151
class GroupCompressVersionedFiles(VersionedFiles):
1152
"""A group-compress based VersionedFiles implementation."""
1154
def __init__(self, index, access, delta=True, _unadded_refs=None):
1155
"""Create a GroupCompressVersionedFiles object.
1157
:param index: The index object storing access and graph data.
1158
:param access: The access object storing raw data.
1159
:param delta: Whether to delta compress or just entropy compress.
1160
:param _unadded_refs: private parameter, don't use.
1163
self._access = access
1165
if _unadded_refs is None:
1167
self._unadded_refs = _unadded_refs
1168
self._group_cache = LRUSizeCache(max_size=50*1024*1024)
1169
self._fallback_vfs = []
1171
def without_fallbacks(self):
1172
"""Return a clone of this object without any fallbacks configured."""
1173
return GroupCompressVersionedFiles(self._index, self._access,
1174
self._delta, _unadded_refs=dict(self._unadded_refs))
1176
def add_lines(self, key, parents, lines, parent_texts=None,
1177
left_matching_blocks=None, nostore_sha=None, random_id=False,
1178
check_content=True):
1179
"""Add a text to the store.
1181
:param key: The key tuple of the text to add.
1182
:param parents: The parents key tuples of the text to add.
1183
:param lines: A list of lines. Each line must be a bytestring. And all
1184
of them except the last must be terminated with \n and contain no
1185
other \n's. The last line may either contain no \n's or a single
1186
terminating \n. If the lines list does not meet this constraint, the add
1187
routine may error or may succeed - but you will be unable to read
1188
the data back accurately. (Checking the lines have been split
1189
correctly is expensive and extremely unlikely to catch bugs so it
1190
is not done at runtime unless check_content is True.)
1191
:param parent_texts: An optional dictionary containing the opaque
1192
representations of some or all of the parents of version_id to
1193
allow delta optimisations. VERY IMPORTANT: the texts must be those
1194
returned by add_lines or data corruption can be caused.
1195
:param left_matching_blocks: a hint about which areas are common
1196
between the text and its left-hand-parent. The format is
1197
the SequenceMatcher.get_matching_blocks format.
1198
:param nostore_sha: Raise ExistingContent and do not add the lines to
1199
the versioned file if the digest of the lines matches this.
1200
:param random_id: If True a random id has been selected rather than
1201
an id determined by some deterministic process such as a converter
1202
from a foreign VCS. When True the backend may choose not to check
1203
for uniqueness of the resulting key within the versioned file, so
1204
this should only be done when the result is expected to be unique
1206
:param check_content: If True, the lines supplied are verified to be
1207
bytestrings that are correctly formed lines.
1208
:return: The text sha1, the number of bytes in the text, and an opaque
1209
representation of the inserted version which can be provided
1210
back to future add_lines calls in the parent_texts dictionary.
1212
self._index._check_write_ok()
1213
self._check_add(key, lines, random_id, check_content)
1215
# The caller might pass None if there is no graph data, but kndx
1216
# indexes can't directly store that, so we give them
1217
# an empty tuple instead.
1219
# double handling for now. Make it work until then.
1220
length = sum(map(len, lines))
1221
record = ChunkedContentFactory(key, parents, None, lines)
1222
sha1 = list(self._insert_record_stream([record], random_id=random_id,
1223
nostore_sha=nostore_sha))[0]
1224
return sha1, length, None
1226
def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
1227
"""See VersionedFiles._add_text()."""
1228
self._index._check_write_ok()
1229
self._check_add(key, None, random_id, check_content=False)
1230
if text.__class__ is not str:
1231
raise errors.BzrBadParameterUnicode("text")
1233
# The caller might pass None if there is no graph data, but kndx
1234
# indexes can't directly store that, so we give them
1235
# an empty tuple instead.
1237
# double handling for now. Make it work until then.
1239
record = FulltextContentFactory(key, parents, None, text)
1240
sha1 = list(self._insert_record_stream([record], random_id=random_id,
1241
nostore_sha=nostore_sha))[0]
1242
return sha1, length, None
1244
def add_fallback_versioned_files(self, a_versioned_files):
1245
"""Add a source of texts for texts not present in this knit.
1247
:param a_versioned_files: A VersionedFiles object.
1249
self._fallback_vfs.append(a_versioned_files)
1251
def annotate(self, key):
1252
"""See VersionedFiles.annotate."""
1253
ann = annotate.Annotator(self)
1254
return ann.annotate_flat(key)
1256
def get_annotator(self):
1257
return annotate.Annotator(self)
1259
def check(self, progress_bar=None, keys=None):
1260
"""See VersionedFiles.check()."""
1263
for record in self.get_record_stream(keys, 'unordered', True):
1264
record.get_bytes_as('fulltext')
1266
return self.get_record_stream(keys, 'unordered', True)
1268
def clear_cache(self):
1269
"""See VersionedFiles.clear_cache()"""
1270
self._group_cache.clear()
1272
def _check_add(self, key, lines, random_id, check_content):
1273
"""check that version_id and lines are safe to add."""
1274
version_id = key[-1]
1275
if version_id is not None:
1276
if osutils.contains_whitespace(version_id):
1277
raise errors.InvalidRevisionId(version_id, self)
1278
self.check_not_reserved_id(version_id)
1279
# TODO: If random_id==False and the key is already present, we should
1280
# probably check that the existing content is identical to what is
1281
# being inserted, and otherwise raise an exception. This would make
1282
# the bundle code simpler.
1284
self._check_lines_not_unicode(lines)
1285
self._check_lines_are_lines(lines)
1287
def get_known_graph_ancestry(self, keys):
1288
"""Get a KnownGraph instance with the ancestry of keys."""
1289
# Note that this is identical to
1290
# KnitVersionedFiles.get_known_graph_ancestry, but they don't share
1292
parent_map, missing_keys = self._index.find_ancestry(keys)
1293
for fallback in self._fallback_vfs:
1294
if not missing_keys:
1296
(f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
1298
parent_map.update(f_parent_map)
1299
missing_keys = f_missing_keys
1300
kg = _mod_graph.KnownGraph(parent_map)
1303
def get_parent_map(self, keys):
1304
"""Get a map of the graph parents of keys.
1306
:param keys: The keys to look up parents for.
1307
:return: A mapping from keys to parents. Absent keys are absent from
1310
return self._get_parent_map_with_sources(keys)[0]
1312
def _get_parent_map_with_sources(self, keys):
1313
"""Get a map of the parents of keys.
1315
:param keys: The keys to look up parents for.
1316
:return: A tuple. The first element is a mapping from keys to parents.
1317
Absent keys are absent from the mapping. The second element is a
1318
list with the locations each key was found in. The first element
1319
is the in-this-knit parents, the second the first fallback source,
1323
sources = [self._index] + self._fallback_vfs
1326
for source in sources:
1329
new_result = source.get_parent_map(missing)
1330
source_results.append(new_result)
1331
result.update(new_result)
1332
missing.difference_update(set(new_result))
1333
return result, source_results
1335
def _get_blocks(self, read_memos):
1336
"""Get GroupCompressBlocks for the given read_memos.
1338
:returns: a series of (read_memo, block) pairs, in the order they were
1342
for read_memo in read_memos:
1344
block = self._group_cache[read_memo]
1348
cached[read_memo] = block
1350
not_cached_seen = set()
1351
for read_memo in read_memos:
1352
if read_memo in cached:
1353
# Don't fetch what we already have
1355
if read_memo in not_cached_seen:
1356
# Don't try to fetch the same data twice
1358
not_cached.append(read_memo)
1359
not_cached_seen.add(read_memo)
1360
raw_records = self._access.get_raw_records(not_cached)
1361
for read_memo in read_memos:
1363
yield read_memo, cached[read_memo]
1365
# Read the block, and cache it.
1366
zdata = raw_records.next()
1367
block = GroupCompressBlock.from_bytes(zdata)
1368
self._group_cache[read_memo] = block
1369
cached[read_memo] = block
1370
yield read_memo, block
1372
def get_missing_compression_parent_keys(self):
1373
"""Return the keys of missing compression parents.
1375
Missing compression parents occur when a record stream was missing
1376
basis texts, or an index was scanned that had missing basis texts.
1378
# GroupCompress cannot currently reference texts that are not in the
1379
# group, so this is valid for now
1382
def get_record_stream(self, keys, ordering, include_delta_closure):
1383
"""Get a stream of records for keys.
1385
:param keys: The keys to include.
1386
:param ordering: Either 'unordered' or 'topological'. A topologically
1387
sorted stream has compression parents strictly before their
1389
:param include_delta_closure: If True then the closure across any
1390
compression parents will be included (in the opaque data).
1391
:return: An iterator of ContentFactory objects, each of which is only
1392
valid until the iterator is advanced.
1394
# keys might be a generator
1395
orig_keys = list(keys)
1399
if (not self._index.has_graph
1400
and ordering in ('topological', 'groupcompress')):
1401
# Cannot topological order when no graph has been stored.
1402
# but we allow 'as-requested' or 'unordered'
1403
ordering = 'unordered'
1405
remaining_keys = keys
1408
keys = set(remaining_keys)
1409
for content_factory in self._get_remaining_record_stream(keys,
1410
orig_keys, ordering, include_delta_closure):
1411
remaining_keys.discard(content_factory.key)
1412
yield content_factory
1414
except errors.RetryWithNewPacks, e:
1415
self._access.reload_or_raise(e)
1417
def _find_from_fallback(self, missing):
1418
"""Find whatever keys you can from the fallbacks.
1420
:param missing: A set of missing keys. This set will be mutated as keys
1421
are found from a fallback_vfs
1422
:return: (parent_map, key_to_source_map, source_results)
1423
parent_map the overall key => parent_keys
1424
key_to_source_map a dict from {key: source}
1425
source_results a list of (source: keys)
1428
key_to_source_map = {}
1430
for source in self._fallback_vfs:
1433
source_parents = source.get_parent_map(missing)
1434
parent_map.update(source_parents)
1435
source_parents = list(source_parents)
1436
source_results.append((source, source_parents))
1437
key_to_source_map.update((key, source) for key in source_parents)
1438
missing.difference_update(source_parents)
1439
return parent_map, key_to_source_map, source_results
1441
def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
1442
"""Get the (source, [keys]) list.
1444
The returned objects should be in the order defined by 'ordering',
1445
which can weave between different sources.
1446
:param ordering: Must be one of 'topological' or 'groupcompress'
1447
:return: List of [(source, [keys])] tuples, such that all keys are in
1448
the defined order, regardless of source.
1450
if ordering == 'topological':
1451
present_keys = topo_sort(parent_map)
1453
# ordering == 'groupcompress'
1454
# XXX: This only optimizes for the target ordering. We may need
1455
# to balance that with the time it takes to extract
1456
# ordering, by somehow grouping based on
1457
# locations[key][0:3]
1458
present_keys = sort_gc_optimal(parent_map)
1459
# Now group by source:
1461
current_source = None
1462
for key in present_keys:
1463
source = key_to_source_map.get(key, self)
1464
if source is not current_source:
1465
source_keys.append((source, []))
1466
current_source = source
1467
source_keys[-1][1].append(key)
1470
def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
1473
current_source = None
1474
for key in orig_keys:
1475
if key in locations or key in unadded_keys:
1477
elif key in key_to_source_map:
1478
source = key_to_source_map[key]
1481
if source is not current_source:
1482
source_keys.append((source, []))
1483
current_source = source
1484
source_keys[-1][1].append(key)
1487
def _get_io_ordered_source_keys(self, locations, unadded_keys,
1490
# This is the group the bytes are stored in, followed by the
1491
# location in the group
1492
return locations[key][0]
1493
present_keys = sorted(locations.iterkeys(), key=get_group)
1494
# We don't have an ordering for keys in the in-memory object, but
1495
# lets process the in-memory ones first.
1496
present_keys = list(unadded_keys) + present_keys
1497
# Now grab all of the ones from other sources
1498
source_keys = [(self, present_keys)]
1499
source_keys.extend(source_result)
1502
def _get_remaining_record_stream(self, keys, orig_keys, ordering,
1503
include_delta_closure):
1504
"""Get a stream of records for keys.
1506
:param keys: The keys to include.
1507
:param ordering: one of 'unordered', 'topological', 'groupcompress' or
1509
:param include_delta_closure: If True then the closure across any
1510
compression parents will be included (in the opaque data).
1511
:return: An iterator of ContentFactory objects, each of which is only
1512
valid until the iterator is advanced.
1515
locations = self._index.get_build_details(keys)
1516
unadded_keys = set(self._unadded_refs).intersection(keys)
1517
missing = keys.difference(locations)
1518
missing.difference_update(unadded_keys)
1519
(fallback_parent_map, key_to_source_map,
1520
source_result) = self._find_from_fallback(missing)
1521
if ordering in ('topological', 'groupcompress'):
1522
# would be better to not globally sort initially but instead
1523
# start with one key, recurse to its oldest parent, then grab
1524
# everything in the same group, etc.
1525
parent_map = dict((key, details[2]) for key, details in
1526
locations.iteritems())
1527
for key in unadded_keys:
1528
parent_map[key] = self._unadded_refs[key]
1529
parent_map.update(fallback_parent_map)
1530
source_keys = self._get_ordered_source_keys(ordering, parent_map,
1532
elif ordering == 'as-requested':
1533
source_keys = self._get_as_requested_source_keys(orig_keys,
1534
locations, unadded_keys, key_to_source_map)
1536
# We want to yield the keys in a semi-optimal (read-wise) ordering.
1537
# Otherwise we thrash the _group_cache and destroy performance
1538
source_keys = self._get_io_ordered_source_keys(locations,
1539
unadded_keys, source_result)
1541
yield AbsentContentFactory(key)
1542
# Batch up as many keys as we can until either:
1543
# - we encounter an unadded ref, or
1544
# - we run out of keys, or
1545
# - the total bytes to retrieve for this batch > BATCH_SIZE
1546
batcher = _BatchingBlockFetcher(self, locations)
1547
for source, keys in source_keys:
1550
if key in self._unadded_refs:
1551
# Flush batch, then yield unadded ref from
1553
for factory in batcher.yield_factories(full_flush=True):
1555
bytes, sha1 = self._compressor.extract(key)
1556
parents = self._unadded_refs[key]
1557
yield FulltextContentFactory(key, parents, sha1, bytes)
1559
if batcher.add_key(key) > BATCH_SIZE:
1560
# Ok, this batch is big enough. Yield some results.
1561
for factory in batcher.yield_factories():
1564
for factory in batcher.yield_factories(full_flush=True):
1566
for record in source.get_record_stream(keys, ordering,
1567
include_delta_closure):
1569
for factory in batcher.yield_factories(full_flush=True):
1572
def get_sha1s(self, keys):
1573
"""See VersionedFiles.get_sha1s()."""
1575
for record in self.get_record_stream(keys, 'unordered', True):
1576
if record.sha1 is not None:
1577
result[record.key] = record.sha1
1579
if record.storage_kind != 'absent':
1580
result[record.key] = osutils.sha_string(
1581
record.get_bytes_as('fulltext'))
1584
def insert_record_stream(self, stream):
1585
"""Insert a record stream into this container.
1587
:param stream: A stream of records to insert.
1589
:seealso VersionedFiles.get_record_stream:
1591
# XXX: Setting random_id=True makes
1592
# test_insert_record_stream_existing_keys fail for groupcompress and
1593
# groupcompress-nograph, this needs to be revisited while addressing
1594
# 'bzr branch' performance issues.
1595
for _ in self._insert_record_stream(stream, random_id=False):
1598
def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
1600
"""Internal core to insert a record stream into this container.
1602
This helper function has a different interface than insert_record_stream
1603
to allow add_lines to be minimal, but still return the needed data.
1605
:param stream: A stream of records to insert.
1606
:param nostore_sha: If the sha1 of a given text matches nostore_sha,
1607
raise ExistingContent, rather than committing the new text.
1608
:param reuse_blocks: If the source is streaming from
1609
groupcompress-blocks, just insert the blocks as-is, rather than
1610
expanding the texts and inserting again.
1611
:return: An iterator over the sha1 of the inserted records.
1612
:seealso insert_record_stream:
1616
def get_adapter(adapter_key):
1618
return adapters[adapter_key]
1620
adapter_factory = adapter_registry.get(adapter_key)
1621
adapter = adapter_factory(self)
1622
adapters[adapter_key] = adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = GroupCompressor()
        self._unadded_refs = {}
        keys_to_add = []
        def flush():
            bytes = self._compressor.flush().to_bytes()
            index, start, length = self._access.add_raw_records(
                [(None, len(bytes))], bytes)[0]
            nodes = []
            for key, reads, refs in keys_to_add:
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
            self._compressor = GroupCompressor()
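        # Illustrative note, not from the original source: each index value
        # built by flush() has the form '<start> <length> <reads>', where
        # <reads> is itself '<start_point> <end_point>' inside the expanded
        # block, so a hypothetical entry reads '0 4096 10 250'.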

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        reuse_this_block = reuse_blocks
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note('Insert claimed random_id=True,'
                               ' but then inserted %r two times', record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                # We only check on the first record (groupcompress-block) not
                # on all of the (groupcompress-block-ref) entries.
                # The reuse_this_block flag is then kept for as long as the
                # (groupcompress-block-ref) records keep coming from that block.
                if record.storage_kind == 'groupcompress-block':
                    # Check to see if we really want to re-use this block
                    insert_manager = record._manager
                    reuse_this_block = insert_manager.check_is_well_utilized()
            else:
                reuse_this_block = False
            if reuse_this_block:
                # We still want to reuse this block
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    bytes = record._manager._block.to_bytes()
                    _, start, length = self._access.add_raw_records(
                        [(None, len(bytes))], bytes)[0]
                    del bytes
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    if insert_manager is not record._manager:
                        raise AssertionError('insert_manager does not match'
                            ' the current record, we cannot be positive'
                            ' that the appropriate content was inserted.'
                            )
                    value = "%d %d %d %d" % (block_start, block_length,
                                             record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
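                    # Illustrative note, not from the original source: a value
                    # such as '0 4096 10 250' records that this key's text is
                    # bytes 10..250 of the expanded form of the raw block
                    # stored at offset 0 with compressed length 4096.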
            try:
                bytes = record.get_bytes_as('fulltext')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                bytes = adapter.get_bytes(record)
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < len(bytes):
                max_fulltext_len = len(bytes)
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(record.key,
                                               bytes, record.sha1, soft=soft,
                                               nostore_sha=nostore_sha)
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
            # Check if we want to continue to include that text
            if (prefix == max_fulltext_prefix
                and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4*1024*1024:
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2*1024*1024):
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
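            # Illustrative note, not from the original source: with these
            # thresholds, a group keeps growing while it stays under twice the
            # largest fulltext seen for the current prefix.  After a 3MB
            # fulltext, for example, the block may grow to ~6MB, but crossing
            # a file-id boundary past 2MB, or hitting 4MB in any case, starts
            # a fresh block.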
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = len(bytes)
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(record.key, bytes,
                                                   record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + ('sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1
            keys_to_add.append((key, '%d %d' % (start_point, end_point),
                (record.parents,)))
        if len(keys_to_add):
            flush()
        self._compressor = None
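        # Illustrative note, not from the original source: a key such as
        # ('file-id', None) is rewritten above to a content-addressed form
        # like ('file-id', 'sha1:da39a3ee...'), so callers that do not supply
        # a revision id still get a stable key back.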

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need key, position, length
        for key_idx, record in enumerate(self.get_record_stream(keys,
            'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            if pb is not None:
                pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
            for line in lines:
                yield line, key
        if pb is not None:
            pb.update('Walking content', total, total)

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result
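    # Illustrative usage, not from the original source (hypothetical key):
    #   >>> for line, key in vf.iter_lines_added_or_present_in_keys(
    #   ...         [('file-id', 'rev-1')]):
    #   ...     process(line)
    # Every yielded line keeps its '\n' terminator, and a given line may be
    # attributed to any key whose text contains it, not the key that
    # introduced it.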


class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
                 add_callback=None, track_external_parent_refs=False,
                 inconsistency_fatal=True, track_new_keys=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param parents: If True, record knit parents; if not, do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: As keys are added, keep track of the
            keys they reference, so that we can query get_missing_parents(),
            etc.
        :param inconsistency_fatal: When asked to add records that are already
            present, and the details are inconsistent with the existing
            record, raise an exception instead of warning (and skipping the
            record).
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked
        self._inconsistency_fatal = inconsistency_fatal
        if track_external_parent_refs:
            self._key_dependencies = knit._KeyRefs(
                track_new_keys=track_new_keys)
        else:
            self._key_dependencies = None

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
            (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.

        changed = False
        keys = {}
        for (key, value, refs) in records:
            if not self._parents:
                if refs:
                    for ref in refs:
                        if ref:
                            raise errors.KnitCorrupt(self,
                                "attempt to add node with parents "
                                "in parentless index.")
                    refs = ()
                    changed = True
            keys[key] = (value, refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if node_refs != keys[key][1]:
                    details = '%s %s %s' % (key, (value, node_refs), keys[key])
                    if self._inconsistency_fatal:
                        raise errors.KnitCorrupt(self, "inconsistent details"
                                                 " in add_records: %s" %
                                                 details)
                    else:
                        trace.warning("inconsistent details in skipped"
                                      " record: %s", details)
                del keys[key]
                changed = True
        if changed:
            result = []
            if self._parents:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value, node_refs))
            else:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value))
            records = result
        key_dependencies = self._key_dependencies
        if key_dependencies is not None:
            if self._parents:
                for key, value, refs in records:
                    parents = refs[0]
                    key_dependencies.add_references(key, parents)
            else:
                for key, value, refs in records:
                    key_dependencies.add_key(key)
        self._add_callback(records)
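    # Illustrative record shape, not from the original source (hypothetical
    # values): each element of `records` arrives as a (key, value, refs)
    # tuple, e.g.
    #   (('file-id', 'rev-1'), '0 4096 10 250', ((('file-id', 'rev-0'),),))
    # where the value string encodes the block memo and the byte range of
    # this text within the expanded block.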

    def _check_read(self):
        """Raise an exception if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise an exception if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        Note: Callers are responsible for checking that the index is locked
        before calling this method.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise errors.RevisionNotPresent(missing_keys.pop(), self)

    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry"""
        return self._graph_index.find_ancestry(keys, 0)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result
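    # Illustrative result shape, not from the original source (hypothetical
    # keys):
    #   >>> index.get_parent_map([('file-id', 'rev-2')])
    #   {('file-id', 'rev-2'): (('file-id', 'rev-1'),)}
    # A parentless index maps each present key to None instead.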

    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # Copied from _KnitGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())

    def get_build_details(self, keys):
        """Get the various build details for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = None
            else:
                parents = entry[3][0]
            method = 'group'
            result[key] = (self._node_to_position(entry),
                           None, parents, (method, None))
        return result

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2].split(' ')
        # It would be nice not to read the entire gzip.
        start = int(bits[0])
        stop = int(bits[1])
        basis_end = int(bits[2])
        delta_end = int(bits[3])
        return node[0], start, stop, basis_end, delta_end
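    # Illustrative decoding, not from the original source (hypothetical
    # value): an index value of '0 4096 10 250' yields
    #   (index, 0, 4096, 10, 250)
    # i.e. the raw block lives at offset 0 with stored length 4096, and this
    # text occupies bytes 10..250 of the expanded block.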

    def scan_unvalidated_index(self, graph_index):
        """Inform this _GCGraphIndex that there is an unvalidated index.

        This allows this _GCGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid.  It also allows _GCGraphIndex to track any new keys.

        :param graph_index: A GraphIndex
        """
        key_dependencies = self._key_dependencies
        if key_dependencies is None:
            return
        for node in graph_index.iter_all_entries():
            # Add parent refs from graph_index (and discard parent refs
            # that the graph_index has).
            key_dependencies.add_references(node[1], node[3][0])


from bzrlib._groupcompress_py import (
    apply_delta,
    apply_delta_to_source,
    encode_base128_int,
    decode_base128_int,
    decode_copy_instruction,
    LinesDeltaIndex,
    )
try:
    from bzrlib._groupcompress_pyx import (
        apply_delta,
        apply_delta_to_source,
        DeltaIndex,
        encode_base128_int,
        decode_base128_int,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError, e:
    osutils.failed_to_load_extension(e)
    GroupCompressor = PythonGroupCompressor
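# Note (illustrative, not from the original source): this trailing
# import-and-fallback is the usual bzrlib pattern for optional compiled
# extensions.  The rest of the module only ever refers to the GroupCompressor
# name, so the Pyrex and pure-Python implementations stay interchangeable.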