from ..osutils import sha_string
from .test__groupcompress import compiled_groupcompress_feature
from .scenarios import load_tests_apply_scenarios


def group_compress_implementation_scenarios():
    scenarios = [
        ('python', {'compressor': groupcompress.PythonGroupCompressor}),
        ]
    if compiled_groupcompress_feature.available():
        scenarios.append(('C',
            {'compressor': groupcompress.PyrexGroupCompressor}))
    return scenarios
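
# load_tests_apply_scenarios multiplies every test class that defines a
# 'scenarios' attribute; TestAllGroupCompressors is expected to use
# group_compress_implementation_scenarios() so that each of its tests runs
# against the Python compressor and, when available, the C compressor.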
# Parameterize tests for all versions of groupcompress.
load_tests = load_tests_apply_scenarios


class TestGroupCompressor(tests.TestCase):
# ...
        for key in sorted(key_to_text):
            compressor.compress(key, key_to_text[key], None)
        locs = dict((key, (start, end)) for key, (start, _, end, _)
                    in compressor.labels_deltas.items())
        block = compressor.flush()
        raw_bytes = block.to_bytes()
        # Go through from_bytes(to_bytes()) so that we start with a compressed
        # content object
# ...
        self.assertEqual(z_content, block._z_content)
        self.assertEqual(content, block._content)
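
    # to_chunks() should return the total serialized length plus the chunks
    # themselves: a small fixed 'gcb1z' header (format marker, compressed
    # length, uncompressed length) followed by the zlib-compressed content.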

    def test_to_chunks(self):
        content_chunks = ['this is some content\n',
                          'this content will be compressed\n']
        content_len = sum(map(len, content_chunks))
        content = ''.join(content_chunks)
        gcb = groupcompress.GroupCompressBlock()
        gcb.set_chunked_content(content_chunks, content_len)
        total_len, block_chunks = gcb.to_chunks()
        block_bytes = ''.join(block_chunks)
        self.assertEqual(gcb._z_content_length, len(gcb._z_content))
        self.assertEqual(total_len, len(block_bytes))
        self.assertEqual(gcb._content_length, content_len)
        expected_header = ('gcb1z\n' # group compress block v1 zlib
                           '%d\n' # Length of compressed content
                           '%d\n' # Length of uncompressed content
                           ) % (gcb._z_content_length, gcb._content_length)
        # The first chunk should be the header chunk. It is small, fixed size,
        # and there is no compelling reason to split it up
        self.assertEqual(expected_header, block_chunks[0])
        self.assertStartsWith(block_bytes, expected_header)
        remaining_bytes = block_bytes[len(expected_header):]
        raw_bytes = zlib.decompress(remaining_bytes)
        self.assertEqual(content, raw_bytes)
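
    # to_bytes() is the older single-string form; it presumably yields the
    # same header-plus-compressed-payload as to_chunks(), joined together.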

    def test_to_bytes(self):
        content = ('this is some content\n'
                   'this content will be compressed\n')
# ...
                    'as-requested', False)]
        self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)
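
    # The next two tests verify that a GroupCompressVersionedFiles' idea of
    # max_bytes_to_index reaches the lazy content manager behind a record
    # stream: by default the manager reports _DEFAULT_COMPRESSOR_SETTINGS, and
    # overriding vf._max_bytes_to_index shows up in the manager's settings.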

    def test_get_record_stream_max_bytes_to_index_default(self):
        vf = self.make_test_vf(True, dir='source')
        vf.add_lines(('a',), (), ['lines\n'])
        vf.writer.end()
        record = next(vf.get_record_stream([('a',)], 'unordered', True))
        self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS,
                         record._manager._get_compressor_settings())

    def test_get_record_stream_accesses_compressor_settings(self):
        vf = self.make_test_vf(True, dir='source')
        vf.add_lines(('a',), (), ['lines\n'])
        vf.writer.end()
        vf._max_bytes_to_index = 1234
        record = next(vf.get_record_stream([('a',)], 'unordered', True))
        self.assertEqual(dict(max_bytes_to_index=1234),
                         record._manager._get_compressor_settings())

    def test_insert_record_stream_reuses_blocks(self):
        vf = self.make_test_vf(True, dir='source')
        def grouped_stream(revision_ids, first_parents=()):
# ...
        self.assertEqual(0, len(vf._group_cache))
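

# These tests cover the 'bzr.groupcompress.max_bytes_to_index' option: a value
# set in GlobalConfig should end up as vf._max_bytes_to_index (and, for the
# Pyrex compressor, on the delta index), while an unparseable value falls back
# to _DEFAULT_MAX_BYTES_TO_INDEX.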
class TestGroupCompressConfig(tests.TestCaseWithTransport):

    def make_test_vf(self):
        t = self.get_transport('.')
        factory = groupcompress.make_pack_factory(graph=True,
            delta=False, keylength=1, inconsistency_fatal=True)
        vf = factory(t)
        self.addCleanup(groupcompress.cleanup_pack_group, vf)
        return vf

    def test_max_bytes_to_index_default(self):
        vf = self.make_test_vf()
        gc = vf._make_group_compressor()
        self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
                         vf._max_bytes_to_index)
        if isinstance(gc, groupcompress.PyrexGroupCompressor):
            self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
                             gc._delta_index._max_bytes_to_index)

    def test_max_bytes_to_index_in_config(self):
        c = config.GlobalConfig()
        c.set_user_option('bzr.groupcompress.max_bytes_to_index', '10000')
        vf = self.make_test_vf()
        gc = vf._make_group_compressor()
        self.assertEqual(10000, vf._max_bytes_to_index)
        if isinstance(gc, groupcompress.PyrexGroupCompressor):
            self.assertEqual(10000, gc._delta_index._max_bytes_to_index)

    def test_max_bytes_to_index_bad_config(self):
        c = config.GlobalConfig()
        c.set_user_option('bzr.groupcompress.max_bytes_to_index', 'boogah')
        vf = self.make_test_vf()
        # TODO: This is triggering a warning, we might want to trap and make
        #       sure it is readable.
        gc = vf._make_group_compressor()
        self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
                         vf._max_bytes_to_index)
        if isinstance(gc, groupcompress.PyrexGroupCompressor):
            self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX,
                             gc._delta_index._max_bytes_to_index)
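

# A minimal stand-in for a GroupCompressVersionedFiles object; the
# canned_get_blocks argument presumably lets a test hand back pre-built blocks
# instead of reading them from a real pack.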
class StubGCVF(object):
    def __init__(self, canned_get_blocks=None):
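
# ...

        # Build a block for these tests: compress each text in sorted-key
        # order, capture the (start, end) offsets recorded in labels_deltas,
        # then round-trip the result through to_bytes()/from_bytes() so the
        # tests start from a freshly deserialized, compressed block.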
        for key in sorted(key_to_text):
            compressor.compress(key, key_to_text[key], None)
        locs = dict((key, (start, end)) for key, (start, _, end, _)
                    in compressor.labels_deltas.items())
        block = compressor.flush()
        raw_bytes = block.to_bytes()
        return locs, groupcompress.GroupCompressBlock.from_bytes(raw_bytes)
# ...
            self.assertEqual(self._texts[record.key],
                             record.get_bytes_as('fulltext'))
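
    # The following tests check that _LazyGroupContentManager resolves its
    # compressor settings lazily: the get_compressor_settings callback is only
    # invoked (once) when the settings are first needed, and without a
    # callback the manager falls back to
    # GroupCompressVersionedFiles._DEFAULT_COMPRESSOR_SETTINGS.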

    def test_manager_default_compressor_settings(self):
        locations, old_block = self.make_block(self._texts)
        manager = groupcompress._LazyGroupContentManager(old_block)
        gcvf = groupcompress.GroupCompressVersionedFiles
        # It doesn't greedily evaluate _max_bytes_to_index
        self.assertIs(None, manager._compressor_settings)
        self.assertEqual(gcvf._DEFAULT_COMPRESSOR_SETTINGS,
                         manager._get_compressor_settings())

    def test_manager_custom_compressor_settings(self):
        locations, old_block = self.make_block(self._texts)
        called = []
        def compressor_settings():
            called.append('called')
            return (10,)
        manager = groupcompress._LazyGroupContentManager(old_block,
            get_compressor_settings=compressor_settings)
        gcvf = groupcompress.GroupCompressVersionedFiles
        # It doesn't greedily evaluate compressor_settings
        self.assertIs(None, manager._compressor_settings)
        self.assertEqual((10,), manager._get_compressor_settings())
        self.assertEqual((10,), manager._get_compressor_settings())
        self.assertEqual((10,), manager._compressor_settings)
        # Only called 1 time
        self.assertEqual(['called'], called)

    def test__rebuild_handles_compressor_settings(self):
        if not issubclass(groupcompress.GroupCompressor,
                          groupcompress.PyrexGroupCompressor):
            raise tests.TestNotApplicable('pure-python compressor'
                ' does not handle compressor_settings')
        locations, old_block = self.make_block(self._texts)
        manager = groupcompress._LazyGroupContentManager(old_block,
            get_compressor_settings=lambda: dict(max_bytes_to_index=32))
        gc = manager._make_group_compressor()
        self.assertEqual(32, gc._delta_index._max_bytes_to_index)
        self.add_key_to_manager(('key3',), locations, old_block, manager)
        self.add_key_to_manager(('key4',), locations, old_block, manager)
        action, last_byte, total_bytes = manager._check_rebuild_action()
        self.assertEqual('rebuild', action)
        manager._rebuild_block()
        new_block = manager._block
        self.assertIsNot(old_block, new_block)
        # Because of the new max_bytes_to_index, we do a poor job of
        # rebuilding. This is a side-effect of the change, but at least it does
        # show the setting had an effect.
        self.assertTrue(old_block._content_length < new_block._content_length)

    def test_check_is_well_utilized_all_keys(self):
        block, manager = self.make_block_and_full_manager(self._texts)
        self.assertFalse(manager.check_is_well_utilized())
# ...
        self.add_key_to_manager(('key4',), locations, block, manager)
        self.assertTrue(manager.check_is_well_utilized())
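

# _GCBuildDetails packs an index memo, parents and record details into a
# single object while still behaving like the 4-tuple that get_build_details
# callers expect (presumably via __len__ and __getitem__); the tests below pin
# down that tuple-like behaviour and the repr format.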
class Test_GCBuildDetails(tests.TestCase):

    def test_acts_like_tuple(self):
        # _GCBuildDetails inlines some of the data that used to be spread out
        # across a bunch of tuples
        bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
            ('INDEX', 10, 20, 0, 5))
        self.assertEqual(4, len(bd))
        self.assertEqual(('INDEX', 10, 20, 0, 5), bd[0])
        self.assertEqual(None, bd[1]) # Compression Parent is always None
        self.assertEqual((('parent1',), ('parent2',)), bd[2])
        self.assertEqual(('group', None), bd[3]) # Record details

    def test__repr__(self):
        bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
            ('INDEX', 10, 20, 0, 5))
        self.assertEqual("_GCBuildDetails(('INDEX', 10, 20, 0, 5),"
                         " (('parent1',), ('parent2',)))",
                         repr(bd))