-from bzrlib.osutils import sha_string
-from bzrlib.tests.test__groupcompress import compiled_groupcompress_feature

-def load_tests(standard_tests, module, loader):
-    """Parameterize tests for all versions of groupcompress."""
-    to_adapt, result = tests.split_suite_by_condition(
-        standard_tests, tests.condition_isinstance(TestAllGroupCompressors))
+from ..osutils import sha_string
+from .test__groupcompress import compiled_groupcompress_feature
+from .scenarios import load_tests_apply_scenarios

+def group_compress_implementation_scenarios():
     scenarios = [
         ('python', {'compressor': groupcompress.PythonGroupCompressor}),
         ]
     if compiled_groupcompress_feature.available():
         scenarios.append(('C',
             {'compressor': groupcompress.PyrexGroupCompressor}))
-    return tests.multiply_tests(to_adapt, scenarios, result)
+    return scenarios

+load_tests = load_tests_apply_scenarios

 class TestGroupCompressor(tests.TestCase):
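Note: with load_tests_apply_scenarios, the parameterization that the removed load_tests() did via tests.multiply_tests now comes from a scenarios attribute on the test class itself. A minimal sketch of how the list built by group_compress_implementation_scenarios() is expected to be consumed, assuming the module-level names from this file; the exact base class and surrounding code are illustrative, not copied from this diff:

    class TestAllGroupCompressors(tests.TestCase):
        # 'scenarios' is the hook load_tests_apply_scenarios looks for: each
        # (name, dict) pair yields one parameterised copy of the class, with
        # the dict entries applied as attributes, so self.compressor ends up
        # being either PythonGroupCompressor or PyrexGroupCompressor.
        scenarios = group_compress_implementation_scenarios()
        compressor = None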
...

         for key in sorted(key_to_text):
             compressor.compress(key, key_to_text[key], None)
         locs = dict((key, (start, end)) for key, (start, _, end, _)
-                    in compressor.labels_deltas.iteritems())
+                    in compressor.labels_deltas.items())
         block = compressor.flush()
         raw_bytes = block.to_bytes()
         # Go through from_bytes(to_bytes()) so that we start with a compressed
...

         self.assertEqual(z_content, block._z_content)
         self.assertEqual(content, block._content)

+    def test_to_chunks(self):
+        content_chunks = ['this is some content\n',
+                          'this content will be compressed\n']
+        content_len = sum(map(len, content_chunks))
+        content = ''.join(content_chunks)
+        gcb = groupcompress.GroupCompressBlock()
+        gcb.set_chunked_content(content_chunks, content_len)
+        total_len, block_chunks = gcb.to_chunks()
+        block_bytes = ''.join(block_chunks)
+        self.assertEqual(gcb._z_content_length, len(gcb._z_content))
+        self.assertEqual(total_len, len(block_bytes))
+        self.assertEqual(gcb._content_length, content_len)
+        expected_header =('gcb1z\n' # group compress block v1 zlib
+                          '%d\n' # Length of compressed content
+                          '%d\n' # Length of uncompressed content
+                          ) % (gcb._z_content_length, gcb._content_length)
+        # The first chunk should be the header chunk. It is small, fixed size,
+        # and there is no compelling reason to split it up
+        self.assertEqual(expected_header, block_chunks[0])
+        self.assertStartsWith(block_bytes, expected_header)
+        remaining_bytes = block_bytes[len(expected_header):]
+        raw_bytes = zlib.decompress(remaining_bytes)
+        self.assertEqual(content, raw_bytes)

     def test_to_bytes(self):
         content = ('this is some content\n'
                    'this content will be compressed\n')
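Note: test_to_chunks pins down the serialized framing of a group compress block: a 'gcb1z' magic line, the compressed length, the uncompressed length, then the zlib stream. As a reference point, a self-contained sketch of reading that framing back; the helper name is hypothetical and not part of this diff, and the test itself still works on native strings at this stage of the port:

    import zlib

    def parse_gcb1z(block_bytes):
        # Split the three header lines asserted above, then decompress the
        # remaining zlib payload and cross-check the recorded lengths.
        magic, z_len, c_len, rest = block_bytes.split(b'\n', 3)
        if magic != b'gcb1z':  # group compress block v1, zlib
            raise ValueError('not a gcb1z block')
        content = zlib.decompress(rest[:int(z_len)])
        if len(content) != int(c_len):
            raise ValueError('uncompressed length mismatch')
        return content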
...

                     'as-requested', False)]
         self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)

+    def test_get_record_stream_max_bytes_to_index_default(self):
+        vf = self.make_test_vf(True, dir='source')
+        vf.add_lines(('a',), (), ['lines\n'])
+        record = next(vf.get_record_stream([('a',)], 'unordered', True))
+        self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS,
+                         record._manager._get_compressor_settings())

+    def test_get_record_stream_accesses_compressor_settings(self):
+        vf = self.make_test_vf(True, dir='source')
+        vf.add_lines(('a',), (), ['lines\n'])
+        vf._max_bytes_to_index = 1234
+        record = next(vf.get_record_stream([('a',)], 'unordered', True))
+        self.assertEqual(dict(max_bytes_to_index=1234),
+                         record._manager._get_compressor_settings())

     def test_insert_record_stream_reuses_blocks(self):
         vf = self.make_test_vf(True, dir='source')
         def grouped_stream(revision_ids, first_parents=()):

...

         self.assertEqual(0, len(vf._group_cache))

+class TestGroupCompressConfig(tests.TestCaseWithTransport):

+    def make_test_vf(self):
+        t = self.get_transport('.')
+        factory = groupcompress.make_pack_factory(graph=True,
+            delta=False, keylength=1, inconsistency_fatal=True)
+        vf = factory(t)
+        self.addCleanup(groupcompress.cleanup_pack_group, vf)
+        return vf

+    def test_max_bytes_to_index_default(self):
+        vf = self.make_test_vf()
+        gc = vf._make_group_compressor()
+        self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+                         vf._max_bytes_to_index)
+        if isinstance(gc, groupcompress.PyrexGroupCompressor):
+            self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+                             gc._delta_index._max_bytes_to_index)

+    def test_max_bytes_to_index_in_config(self):
+        c = config.GlobalConfig()
+        c.set_user_option('bzr.groupcompress.max_bytes_to_index', '10000')
+        vf = self.make_test_vf()
+        gc = vf._make_group_compressor()
+        self.assertEqual(10000, vf._max_bytes_to_index)
+        if isinstance(gc, groupcompress.PyrexGroupCompressor):
+            self.assertEqual(10000, gc._delta_index._max_bytes_to_index)

+    def test_max_bytes_to_index_bad_config(self):
+        c = config.GlobalConfig()
+        c.set_user_option('bzr.groupcompress.max_bytes_to_index', 'boogah')
+        vf = self.make_test_vf()
+        # TODO: This is triggering a warning, we might want to trap and make
+        # sure it is readable.
+        gc = vf._make_group_compressor()
+        self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+                         vf._max_bytes_to_index)
+        if isinstance(gc, groupcompress.PyrexGroupCompressor):
+            self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
+                             gc._delta_index._max_bytes_to_index)
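Note: taken together, these three tests specify how bzr.groupcompress.max_bytes_to_index is resolved: the option is stored as a string in the global config, parsed to an int, and anything unparseable falls back to _DEFAULT_MAX_BYTES_TO_INDEX (with a warning, per the TODO above). A sketch of that resolution, written against the behaviour the tests assert rather than the implementation being landed; the helper name is hypothetical:

    from breezy import config

    def resolve_max_bytes_to_index(default):
        # Mirror the fallback behaviour pinned down by
        # test_max_bytes_to_index_in_config / _bad_config.
        raw = config.GlobalConfig().get_user_option(
            'bzr.groupcompress.max_bytes_to_index')
        try:
            return int(raw)
        except (TypeError, ValueError):
            return default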

 class StubGCVF(object):
     def __init__(self, canned_get_blocks=None):
...

         for key in sorted(key_to_text):
             compressor.compress(key, key_to_text[key], None)
         locs = dict((key, (start, end)) for key, (start, _, end, _)
-                    in compressor.labels_deltas.iteritems())
+                    in compressor.labels_deltas.items())
         block = compressor.flush()
         raw_bytes = block.to_bytes()
         return locs, groupcompress.GroupCompressBlock.from_bytes(raw_bytes)
...

             self.assertEqual(self._texts[record.key],
                              record.get_bytes_as('fulltext'))

+    def test_manager_default_compressor_settings(self):
+        locations, old_block = self.make_block(self._texts)
+        manager = groupcompress._LazyGroupContentManager(old_block)
+        gcvf = groupcompress.GroupCompressVersionedFiles
+        # It doesn't greedily evaluate _max_bytes_to_index
+        self.assertIs(None, manager._compressor_settings)
+        self.assertEqual(gcvf._DEFAULT_COMPRESSOR_SETTINGS,
+                         manager._get_compressor_settings())

+    def test_manager_custom_compressor_settings(self):
+        locations, old_block = self.make_block(self._texts)
+        called = []
+        def compressor_settings():
+            called.append('called')
+            return (10,)
+        manager = groupcompress._LazyGroupContentManager(old_block,
+            get_compressor_settings=compressor_settings)
+        gcvf = groupcompress.GroupCompressVersionedFiles
+        # It doesn't greedily evaluate compressor_settings
+        self.assertIs(None, manager._compressor_settings)
+        self.assertEqual((10,), manager._get_compressor_settings())
+        self.assertEqual((10,), manager._get_compressor_settings())
+        self.assertEqual((10,), manager._compressor_settings)
+        # Only called 1 time
+        self.assertEqual(['called'], called)

+    def test__rebuild_handles_compressor_settings(self):
+        if not isinstance(groupcompress.GroupCompressor,
+                          groupcompress.PyrexGroupCompressor):
+            raise tests.TestNotApplicable('pure-python compressor'
+                ' does not handle compressor_settings')
+        locations, old_block = self.make_block(self._texts)
+        manager = groupcompress._LazyGroupContentManager(old_block,
+            get_compressor_settings=lambda: dict(max_bytes_to_index=32))
+        gc = manager._make_group_compressor()
+        self.assertEqual(32, gc._delta_index._max_bytes_to_index)
+        self.add_key_to_manager(('key3',), locations, old_block, manager)
+        self.add_key_to_manager(('key4',), locations, old_block, manager)
+        action, last_byte, total_bytes = manager._check_rebuild_action()
+        self.assertEqual('rebuild', action)
+        manager._rebuild_block()
+        new_block = manager._block
+        self.assertIsNot(old_block, new_block)
+        # Because of the new max_bytes_to_index, we do a poor job of
+        # rebuilding. This is a side-effect of the change, but at least it does
+        # show the setting had an effect.
+        self.assertTrue(old_block._content_length < new_block._content_length)

     def test_check_is_well_utilized_all_keys(self):
         block, manager = self.make_block_and_full_manager(self._texts)
         self.assertFalse(manager.check_is_well_utilized())
...

         self.add_key_to_manager(('key4',), locations, block, manager)
         self.assertTrue(manager.check_is_well_utilized())
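Note: the new Test_GCBuildDetails tests below fully specify the tuple facade the class has to provide: it must act like the 4-tuple (index_memo, compression_parent, parents, record_details), with compression_parent always None and record_details always ('group', None). Reconstructed purely from those assertions, a minimal stand-in showing the behaviour under test; this is a sketch, not the implementation being landed:

    class _GCBuildDetailsSketch(object):
        """Tuple-compatible view of a group-compress build detail."""

        __slots__ = ('parents', 'index_memo')

        def __init__(self, parents, index_memo):
            self.parents = parents
            self.index_memo = index_memo

        def __len__(self):
            return 4

        def __getitem__(self, offset):
            # (index_memo, compression_parent, parents, record_details)
            return (self.index_memo, None, self.parents,
                    ('group', None))[offset]

        def __repr__(self):
            return '%s(%s, %s)' % (self.__class__.__name__,
                                   self.index_memo, self.parents)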

+class Test_GCBuildDetails(tests.TestCase):

+    def test_acts_like_tuple(self):
+        # _GCBuildDetails inlines some of the data that used to be spread out
+        # across a bunch of tuples
+        bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
+            ('INDEX', 10, 20, 0, 5))
+        self.assertEqual(4, len(bd))
+        self.assertEqual(('INDEX', 10, 20, 0, 5), bd[0])
+        self.assertEqual(None, bd[1]) # Compression Parent is always None
+        self.assertEqual((('parent1',), ('parent2',)), bd[2])
+        self.assertEqual(('group', None), bd[3]) # Record details

+    def test__repr__(self):
+        bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
+            ('INDEX', 10, 20, 0, 5))
+        self.assertEqual("_GCBuildDetails(('INDEX', 10, 20, 0, 5),"
+                         " (('parent1',), ('parent2',)))",