@@ -1 +1 @@
-# Copyright (C) 2006-2010 Canonical Ltd
+# Copyright (C) 2006-2011 Canonical Ltd
@@ -3 +3 @@
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -17 +17 @@
 """Tests for Knit data structure"""
@@ -19 @@
-from cStringIO import StringIO
@@ -33 +33 @@
-from bzrlib.errors import (
-    RevisionAlreadyPresent,
+from ..errors import (
@@ -39 +37 @@
-from bzrlib.index import *
-from bzrlib.knit import (
+from ..bzr.index import *
+from ..bzr.knit import (
     AnnotatedKnitContent,
@@ -44 +41 @@
     KnitVersionedFiles,
@@ -46 +43 @@
     _VFContentMapGenerator,
@@ -53 +49 @@
-from bzrlib.repofmt import pack_repo
-from bzrlib.tests import (
+from ..patiencediff import PatienceSequenceMatcher
@@ +54 @@
+from ..sixish import (
@@ -58 +59 @@
     TestCaseWithMemoryTransport,
     TestCaseWithTransport,
@@ -62 +63 @@
-from bzrlib.transport import get_transport
-from bzrlib.transport.memory import MemoryTransport
-from bzrlib.tuned_gzip import GzipFile
-from bzrlib.versionedfile import (
+from ..bzr.versionedfile import (
     AbsentContentFactory,
@@ -68 +66 @@
     network_bytes_to_kind_and_offset,
     RecordingVersionedFilesDecorator,
@@ -73 +74 @@
-compiled_knit_feature = tests.ModuleAvailableFeature(
-    'bzrlib._knit_load_data_pyx')
+compiled_knit_feature = features.ModuleAvailableFeature(
+    'breezy.bzr._knit_load_data_pyx')
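
The import hunks above follow the bzrlib-to-breezy rename: top-level `bzrlib.*` imports become relative `..` imports, and knit- and pack-format modules move under the `breezy.bzr` package. The Python 2-only `cStringIO.StringIO` import is dropped, since knit data is raw bytes and later hunks switch to a bytes-capable stream. A minimal sketch of the replacement, assuming `breezy.sixish.BytesIO` is simply `io.BytesIO` on Python 3:

    from io import BytesIO

    # Knit records are byte strings, so the buffer must hold bytes, not text.
    fp = BytesIO(b"version rev-id-1 2 deadbeef\n")
    assert fp.readline() == b"version rev-id-1 2 deadbeef\n"
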
@@ -77 +78 @@
 class KnitContentTestsMixin(object):
@@ -106 +107 @@
         line_delta = source_content.line_delta(target_content)
         delta_blocks = list(KnitContent.get_line_delta_blocks(line_delta,
             source_lines, target_lines))
-        matcher = KnitSequenceMatcher(None, source_lines, target_lines)
-        matcher_blocks = list(list(matcher.get_matching_blocks()))
+        matcher = PatienceSequenceMatcher(None, source_lines, target_lines)
+        matcher_blocks = list(matcher.get_matching_blocks())
         self.assertEqual(matcher_blocks, delta_blocks)
@@ -113 +114 @@
     def test_get_line_delta_blocks(self):
@@ -206 +207 @@
         content1 = self._make_content([("", "a"), ("", "b")])
         content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
         it = content1.line_delta_iter(content2)
-        self.assertEqual(it.next(), (1, 2, 2, ["a", "c"]))
-        self.assertRaises(StopIteration, it.next)
+        self.assertEqual(next(it), (1, 2, 2, ["a", "c"]))
+        self.assertRaises(StopIteration, next, it)
@@ -213 +214 @@
 class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin):
@@ -233 +234 @@
         content1 = self._make_content([("", "a"), ("", "b")])
         content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
         it = content1.line_delta_iter(content2)
-        self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")]))
-        self.assertRaises(StopIteration, it.next)
+        self.assertEqual(next(it), (1, 2, 2, [("", "a"), ("", "c")]))
+        self.assertRaises(StopIteration, next, it)
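
Both `line_delta_iter` hunks replace the Python 2-only `it.next()` with the builtin `next()`, which works on both versions and also lets `assertRaises` exercise an exhausted iterator without a bound method. The pattern in isolation:

    it = iter([(1, 2, 2, ["a", "c"])])
    assert next(it) == (1, 2, 2, ["a", "c"])  # was it.next() on Python 2
    try:
        next(it)  # a second call raises StopIteration
    except StopIteration:
        pass
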
@@ -240 +241 @@
 class MockTransport(object):
@@ -249 +250 @@
         if self.file_lines is None:
             raise NoSuchFile(filename)
@@ -252 +253 @@
-            return StringIO("\n".join(self.file_lines))
+            return BytesIO(b"\n".join(self.file_lines))
@@ -254 +255 @@
     def readv(self, relpath, offsets):
         fp = self.get(relpath)
@@ -333 +334 @@
         transport.append_bytes(packname, bytes)
         writer = pack.ContainerWriter(write_data)
@@ -336 +337 @@
-        access = _DirectPackAccess({})
+        access = pack_repo._DirectPackAccess({})
         access.set_writer(writer, index, (transport, packname))
         return access, writer
@@ +350 @@
+    def test_pack_collection_pack_retries(self):
+        """An explicit pack of a pack collection succeeds even when a
+        concurrent pack happens.
@@ +354 @@
+        builder = self.make_branch_builder('.')
+        builder.start_series()
+        builder.build_snapshot('rev-1', None, [
+            ('add', ('', 'root-id', 'directory', None)),
+            ('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
@@ +360 @@
+        builder.build_snapshot('rev-2', ['rev-1'], [
+            ('modify', ('file-id', 'content\nrev 2\n')),
@@ +363 @@
+        builder.build_snapshot('rev-3', ['rev-2'], [
+            ('modify', ('file-id', 'content\nrev 3\n')),
@@ +366 @@
+        self.addCleanup(builder.finish_series)
+        b = builder.get_branch()
+        self.addCleanup(b.lock_write().unlock)
@@ +370 @@
+        collection = repo._pack_collection
+        # Concurrently repack the repo.
+        reopened_repo = repo.controldir.open_repository()
@@ -349 +377 @@
     def make_vf_for_retrying(self):
         """Create 3 packs and a reload function.
@@ -378 +406 @@
         collection = repo._pack_collection
         collection.ensure_loaded()
         orig_packs = collection.packs
-        packer = pack_repo.Packer(collection, orig_packs, '.testpack')
+        packer = knitpack_repo.KnitPacker(collection, orig_packs, '.testpack')
         new_pack = packer.pack()
         # forget about the new pack
         collection.reset()
@@ -422 +450 @@
             raise _TestException('foobar')
-        except _TestException, e:
+        except _TestException as e:
             retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
                                                  exc_info=sys.exc_info())
+            # GZ 2010-08-10: Cycle with exc_info affects 3 tests
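
`except _TestException, e:` is a syntax error on Python 3; the `as e` form parses on Python 2.6+ and 3 alike. A standalone sketch of the pattern the test uses, with `RuntimeError` standing in for `_TestException` and a comment noting the reference cycle that the added `# GZ 2010-08-10` remark is about:

    import sys

    try:
        raise RuntimeError('foobar')
    except RuntimeError as e:  # 'except RuntimeError, e:' fails on Python 3
        exc_info = sys.exc_info()  # keeps the traceback (and creates a cycle)
    assert exc_info[0] is RuntimeError
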
@@ -428 +457 @@
     def test_read_from_several_packs(self):
@@ -437 +466 @@
         memos.extend(access.add_raw_records([('key', 5)], 'alpha'))
@@ -439 +468 @@
         transport = self.get_transport()
-        access = _DirectPackAccess({"FOO":(transport, 'packfile'),
+        access = pack_repo._DirectPackAccess({"FOO":(transport, 'packfile'),
             "FOOBAR":(transport, 'pack2'),
             "BAZ":(transport, 'pack3')})
         self.assertEqual(['1234567890', '12345', 'alpha'],
@@ -454 +483 @@
     def test_set_writer(self):
         """The writer should be settable post construction."""
-        access = _DirectPackAccess({})
+        access = pack_repo._DirectPackAccess({})
         transport = self.get_transport()
         packname = 'packfile'
@@ -471 +500 @@
         transport = self.get_transport()
         reload_called, reload_func = self.make_reload_func()
         # Note that the index key has changed from 'foo' to 'bar'
-        access = _DirectPackAccess({'bar':(transport, 'packname')},
+        access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')},
             reload_func=reload_func)
         e = self.assertListRaises(errors.RetryWithNewPacks,
             access.get_raw_records, memos)
@@ -486 +515 @@
         memos = self.make_pack_file()
         transport = self.get_transport()
         # Note that the index key has changed from 'foo' to 'bar'
-        access = _DirectPackAccess({'bar':(transport, 'packname')})
+        access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')})
         e = self.assertListRaises(KeyError, access.get_raw_records, memos)
@@ -492 +521 @@
     def test_missing_file_raises_retry(self):
@@ -494 +523 @@
         transport = self.get_transport()
         reload_called, reload_func = self.make_reload_func()
         # Note that the 'filename' has been changed to 'different-packname'
-        access = _DirectPackAccess({'foo':(transport, 'different-packname')},
-            reload_func=reload_func)
+        access = pack_repo._DirectPackAccess(
+            {'foo':(transport, 'different-packname')},
+            reload_func=reload_func)
         e = self.assertListRaises(errors.RetryWithNewPacks,
             access.get_raw_records, memos)
         # The file has gone missing, so we assume we need to reload
@@ -509 +539 @@
         memos = self.make_pack_file()
         transport = self.get_transport()
         # Note that the 'filename' has been changed to 'different-packname'
-        access = _DirectPackAccess({'foo':(transport, 'different-packname')})
+        access = pack_repo._DirectPackAccess(
+            {'foo': (transport, 'different-packname')})
         e = self.assertListRaises(errors.NoSuchFile,
             access.get_raw_records, memos)
@@ -519 +550 @@
         failing_transport = MockReadvFailingTransport(
             [transport.get_bytes('packname')])
         reload_called, reload_func = self.make_reload_func()
-        access = _DirectPackAccess({'foo':(failing_transport, 'packname')},
-            reload_func=reload_func)
+        access = pack_repo._DirectPackAccess(
+            {'foo': (failing_transport, 'packname')},
+            reload_func=reload_func)
         # Asking for a single record will not trigger the Mock failure
         self.assertEqual(['1234567890'],
             list(access.get_raw_records(memos[:1])))
@@ -542 +574 @@
         failing_transport = MockReadvFailingTransport(
             [transport.get_bytes('packname')])
         reload_called, reload_func = self.make_reload_func()
-        access = _DirectPackAccess({'foo':(failing_transport, 'packname')})
+        access = pack_repo._DirectPackAccess(
+            {'foo':(failing_transport, 'packname')})
         # Asking for a single record will not trigger the Mock failure
         self.assertEqual(['1234567890'],
             list(access.get_raw_records(memos[:1])))
@@ -553 +586 @@
             access.get_raw_records, memos)
@@ -555 +588 @@
     def test_reload_or_raise_no_reload(self):
-        access = _DirectPackAccess({}, reload_func=None)
+        access = pack_repo._DirectPackAccess({}, reload_func=None)
         retry_exc = self.make_retry_exception()
         # Without a reload_func, we will just re-raise the original exception
         self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
@@ -561 +594 @@
     def test_reload_or_raise_reload_changed(self):
         reload_called, reload_func = self.make_reload_func(return_val=True)
-        access = _DirectPackAccess({}, reload_func=reload_func)
+        access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
         retry_exc = self.make_retry_exception()
         access.reload_or_raise(retry_exc)
         self.assertEqual([1], reload_called)
@@ -571 +604 @@
     def test_reload_or_raise_reload_no_change(self):
         reload_called, reload_func = self.make_reload_func(return_val=False)
-        access = _DirectPackAccess({}, reload_func=reload_func)
+        access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
         retry_exc = self.make_retry_exception()
         # If reload_occurred is False, then we consider it an error to have
         # reload_func() return False (no changes).
@@ -595 +628 @@
             self.fail('Annotation was not identical with reloading.')
         # Now delete the packs-in-use, which should trigger another reload, but
         # this time we just raise an exception because we can't recover
-        for trans, name in vf._access._indices.itervalues():
+        for trans, name in vf._access._indices.values():
             trans.delete(name)
         self.assertRaises(errors.NoSuchFile, vf.annotate, key)
         self.assertEqual([2, 1, 1], reload_counter)
@@ -608 +641 @@
         self.assertEqual([1, 1, 0], reload_counter)
         # Now delete the packs-in-use, which should trigger another reload, but
         # this time we just raise an exception because we can't recover
-        for trans, name in vf._access._indices.itervalues():
+        for trans, name in vf._access._indices.values():
             trans.delete(name)
         self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
         self.assertEqual([2, 1, 1], reload_counter)
@@ -617 +650 @@
         vf, reload_counter = self.make_vf_for_retrying()
         keys = [('rev-1',), ('rev-2',), ('rev-3',)]
         record_stream = vf.get_record_stream(keys, 'topological', False)
-        record = record_stream.next()
+        record = next(record_stream)
         self.assertEqual(('rev-1',), record.key)
         self.assertEqual([0, 0, 0], reload_counter)
-        record = record_stream.next()
+        record = next(record_stream)
         self.assertEqual(('rev-2',), record.key)
         self.assertEqual([1, 1, 0], reload_counter)
-        record = record_stream.next()
+        record = next(record_stream)
         self.assertEqual(('rev-3',), record.key)
         self.assertEqual([1, 1, 0], reload_counter)
         # Now delete all pack files, and see that we raise the right error
-        for trans, name in vf._access._indices.itervalues():
+        for trans, name in vf._access._indices.values():
             trans.delete(name)
         self.assertListRaises(errors.NoSuchFile,
             vf.get_record_stream, keys, 'topological', False)
@@ -651 +684 @@
         self.assertEqual(plain_lines, reload_lines)
         self.assertEqual(21, len(plain_lines))
         # Now delete all pack files, and see that we raise the right error
-        for trans, name in vf._access._indices.itervalues():
+        for trans, name in vf._access._indices.values():
             trans.delete(name)
         self.assertListRaises(errors.NoSuchFile,
             vf.iter_lines_added_or_present_in_keys, keys)
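
Each of the loops above swaps `dict.itervalues()` for `dict.values()`: the `iter*` methods were removed in Python 3, where `values()` already returns a cheap view rather than a list. The same change in miniature:

    indices = {'idx': ('transport', 'packfile')}
    for trans, name in indices.values():  # .itervalues() does not exist on Python 3
        assert (trans, name) == ('transport', 'packfile')
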
@@ -708 +741 @@
     def make_multiple_records(self):
         """Create the content for multiple records."""
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\nbar\n')
@@ -712 +745 @@
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
@@ -717 +750 @@
         record_1 = (0, len(gz_txt), sha1sum)
         total_txt.append(gz_txt)
-        sha1sum = osutils.sha('baz\n').hexdigest()
+        sha1sum = osutils.sha_string('baz\n')
         gz_txt = self.create_gz_content('version rev-id-2 1 %s\n'
@@ -726 +759 @@
         return total_txt, record_1, record_2
@@ -728 +761 @@
     def test_valid_knit_data(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\nbar\n')
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
@@ -765 +798 @@
     def test_not_enough_lines(self):
-        sha1sum = osutils.sha('foo\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\n')
         # record says 2 lines data says 1
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
@@ -781 +814 @@
         self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
@@ -783 +816 @@
     def test_too_many_lines(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\nbar\n')
         # record says 1 lines data says 2
         gz_txt = self.create_gz_content('version rev-id-1 1 %s\n'
@@ -800 +833 @@
         self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
@@ -802 +835 @@
     def test_mismatched_version_id(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\nbar\n')
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
@@ -819 +852 @@
             knit._read_records_iter_raw(records))
@@ -821 +854 @@
     def test_uncompressed_data(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\nbar\n')
         txt = ('version rev-id-1 2 %s\n'
@@ -839 +872 @@
             knit._read_records_iter_raw(records))
@@ -841 +874 @@
     def test_corrupted_data(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = osutils.sha_string('foo\nbar\n')
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
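
The recurring `osutils.sha('...').hexdigest()` to `osutils.sha_string('...')` change folds the digest call into a helper. Assuming `sha_string` simply SHA-1s its argument and returns the hex digest, it is equivalent to this sketch:

    import hashlib

    def sha_string(s):
        # assumed behaviour of breezy.osutils.sha_string
        return hashlib.sha1(s).hexdigest()

    assert sha_string(b'foo\nbar\n') == hashlib.sha1(b'foo\nbar\n').hexdigest()
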
@@ -863 +896 @@
     def get_knit_index(self, transport, name, mode):
         mapper = ConstantMapper(name)
-        from bzrlib._knit_load_data_py import _load_data_py
+        from ..bzr._knit_load_data_py import _load_data_py
         self.overrideAttr(knit, '_load_data', _load_data_py)
         allow_writes = lambda: 'w' in mode
         return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
@@ -872 +905 @@
         index = self.get_knit_index(transport, "filename", "w")
@@ -874 +907 @@
         call = transport.calls.pop(0)
-        # call[1][1] is a StringIO - we can't test it by simple equality.
+        # call[1][1] is a BytesIO - we can't test it by simple equality.
         self.assertEqual('put_file_non_atomic', call[0])
         self.assertEqual('filename.kndx', call[1][0])
         # With no history, _KndxIndex writes a new index:
@@ -915 +948 @@
         index = self.get_knit_index(transport, "filename", "r")
         self.assertEqual(1, len(index.keys()))
-        self.assertEqual(set([("version",)]), index.keys())
+        self.assertEqual({("version",)}, index.keys())
@@ -919 +952 @@
     def test_read_corrupted_header(self):
         transport = MockTransport(['not a bzr knit index header\n'])
@@ -960 +993 @@
         index.add_records([
             ((utf8_revision_id,), ["option"], ((utf8_revision_id,), 0, 1), [])])
         call = transport.calls.pop(0)
-        # call[1][1] is a StringIO - we can't test it by simple equality.
+        # call[1][1] is a BytesIO - we can't test it by simple equality.
         self.assertEqual('put_file_non_atomic', call[0])
         self.assertEqual('filename.kndx', call[1][0])
         # With no history, _KndxIndex writes a new index:
@@ -979 +1012 @@
         index.add_records([
             (("version",), ["option"], (("version",), 0, 1), [(utf8_revision_id,)])])
         call = transport.calls.pop(0)
-        # call[1][1] is a StringIO - we can't test it by simple equality.
+        # call[1][1] is a BytesIO - we can't test it by simple equality.
         self.assertEqual('put_file_non_atomic', call[0])
         self.assertEqual('filename.kndx', call[1][0])
         # With no history, _KndxIndex writes a new index:
@@ -997 +1030 @@
         self.assertEqual(set(), index.keys())
@@ -999 +1032 @@
         index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
-        self.assertEqual(set([("a",)]), index.keys())
+        self.assertEqual({("a",)}, index.keys())
@@ -1002 +1035 @@
         index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
-        self.assertEqual(set([("a",)]), index.keys())
+        self.assertEqual({("a",)}, index.keys())
@@ -1005 +1038 @@
         index.add_records([(("b",), ["option"], (("b",), 0, 1), [])])
-        self.assertEqual(set([("a",), ("b",)]), index.keys())
+        self.assertEqual({("a",), ("b",)}, index.keys())
@@ -1008 +1041 @@
     def add_a_b(self, index, random_id=None):
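
The assertions above rewrite `set([...])` calls as set literals, available since Python 2.7: `{("a",)}` builds the same one-element set as `set([("a",)])` without the throwaway list, for example:

    assert {("a",)} == set([("a",)])
    assert {("a",), ("b",)} == set([("a",), ("b",)])
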
@@ -1034 +1067 @@
         self.add_a_b(index)
         call = transport.calls.pop(0)
-        # call[1][1] is a StringIO - we can't test it by simple equality.
+        # call[1][1] is a BytesIO - we can't test it by simple equality.
         self.assertEqual('put_file_non_atomic', call[0])
         self.assertEqual('filename.kndx', call[1][0])
         # With no history, _KndxIndex writes a new index:
@@ -1073 +1106 @@
         self.assertEqual(_KndxIndex.HEADER, call[1][1].getvalue())
         self.assertEqual({'create_parent_dir': True}, call[2])
         call = transport.calls.pop(0)
-        # call[1][1] is a StringIO - we can't test it by simple equality.
+        # call[1][1] is a BytesIO - we can't test it by simple equality.
         self.assertEqual('put_file_non_atomic', call[0])
         self.assertEqual('filename.kndx', call[1][0])
         # With no history, _KndxIndex writes a new index:
@@ -1165 +1198 @@
         index = self.get_knit_index(transport, 'filename', 'r')
@@ -1167 +1200 @@
             self.assertRaises(errors.KnitCorrupt, index.keys)
-        except TypeError, e:
+        except TypeError as e:
             if (str(e) == ('exceptions must be strings, classes, or instances,'
-                           ' not exceptions.IndexError')
-                and sys.version_info[0:2] >= (2,5)):
+                           ' not exceptions.IndexError')):
                 self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                   ' raising new style exceptions with python'
@@ -1185 +1217 @@
         index = self.get_knit_index(transport, 'filename', 'r')
@@ -1187 +1219 @@
             self.assertRaises(errors.KnitCorrupt, index.keys)
-        except TypeError, e:
+        except TypeError as e:
             if (str(e) == ('exceptions must be strings, classes, or instances,'
-                           ' not exceptions.ValueError')
-                and sys.version_info[0:2] >= (2,5)):
+                           ' not exceptions.ValueError')):
                 self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                   ' raising new style exceptions with python'
@@ -1205 +1236 @@
         index = self.get_knit_index(transport, 'filename', 'r')
@@ -1207 +1238 @@
             self.assertRaises(errors.KnitCorrupt, index.keys)
-        except TypeError, e:
+        except TypeError as e:
             if (str(e) == ('exceptions must be strings, classes, or instances,'
-                           ' not exceptions.ValueError')
-                and sys.version_info[0:2] >= (2,5)):
+                           ' not exceptions.ValueError')):
                 self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                   ' raising new style exceptions with python'
@@ -1223 +1253 @@
         index = self.get_knit_index(transport, 'filename', 'r')
@@ -1225 +1255 @@
             self.assertRaises(errors.KnitCorrupt, index.keys)
-        except TypeError, e:
+        except TypeError as e:
             if (str(e) == ('exceptions must be strings, classes, or instances,'
-                           ' not exceptions.ValueError')
-                and sys.version_info[0:2] >= (2,5)):
+                           ' not exceptions.ValueError')):
                 self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                   ' raising new style exceptions with python'
@@ -1241 +1270 @@
         index = self.get_knit_index(transport, 'filename', 'r')
@@ -1243 +1272 @@
             self.assertRaises(errors.KnitCorrupt, index.keys)
-        except TypeError, e:
+        except TypeError as e:
             if (str(e) == ('exceptions must be strings, classes, or instances,'
-                           ' not exceptions.ValueError')
-                and sys.version_info[0:2] >= (2,5)):
+                           ' not exceptions.ValueError')):
                 self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                   ' raising new style exceptions with python'
@@ -1267 +1295 @@
             "b option 10 10 0", # This line isn't terminated, ignored
@@ -1269 +1297 @@
         index = self.get_knit_index(transport, "filename", "r")
-        self.assertEqual(set([('a',)]), index.keys())
+        self.assertEqual({('a',)}, index.keys())
@@ -1272 +1300 @@
     def test_skip_incomplete_record(self):
         # A line with bogus data should just be skipped
@@ -1278 +1306 @@
             "c option 20 10 0 :", # Properly terminated, and starts with '\n'
@@ -1280 +1308 @@
         index = self.get_knit_index(transport, "filename", "r")
-        self.assertEqual(set([('a',), ('c',)]), index.keys())
+        self.assertEqual({('a',), ('c',)}, index.keys())
@@ -1283 +1311 @@
     def test_trailing_characters(self):
         # A line with bogus data should just be skipped
@@ -1289 +1317 @@
             "c option 20 10 0 :", # Properly terminated, and starts with '\n'
@@ -1291 +1319 @@
         index = self.get_knit_index(transport, "filename", "r")
-        self.assertEqual(set([('a',), ('c',)]), index.keys())
+        self.assertEqual({('a',), ('c',)}, index.keys())
@@ -1295 +1323 @@
 class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):
@@ -1299 +1327 @@
     def get_knit_index(self, transport, name, mode):
         mapper = ConstantMapper(name)
-        from bzrlib._knit_load_data_pyx import _load_data_c
+        from ..bzr._knit_load_data_pyx import _load_data_c
         self.overrideAttr(knit, '_load_data', _load_data_c)
         allow_writes = lambda: mode == 'w'
         return _KndxIndex(transport, mapper, lambda:None,
@@ -1525 +1553 @@
             'a-2 fulltext 0 0 0 :\n'
             'a-3 fulltext 0 0 1 :'
@@ -1528 +1556 @@
-        self.assertEqual(set([('a-3',), ('a-1',), ('a-2',)]), idx.keys())
+        self.assertEqual({('a-3',), ('a-1',), ('a-2',)}, idx.keys())
         self.assertEqual({
             ('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False)),
             ('a-2',): ((('a-2',), 0, 0), None, (('a-1',),), ('fulltext', False)),
@@ -1564 +1592 @@
         # Assert the pre-condition
         def assertA1Only():
-            self.assertEqual(set([('a-1',)]), set(idx.keys()))
+            self.assertEqual({('a-1',)}, set(idx.keys()))
             self.assertEqual(
                 {('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False))},
                 idx.get_build_details([('a-1',)]))
@@ -1579 +1607 @@
         # could leave an empty .kndx file, which bzr would later claim was a
         # corrupted file since the header was not present. In reality, the file
         # just wasn't created, so it should be ignored.
-        t = get_transport('.')
+        t = transport.get_transport_from_path('.')
         t.put_bytes('test.kndx', '')
@@ -1585 +1613 @@
         knit = self.make_test_knit()
@@ -1587 +1615 @@
     def test_knit_index_checks_header(self):
-        t = get_transport('.')
+        t = transport.get_transport_from_path('.')
         t.put_bytes('test.kndx', '# not really a knit header\n\n')
         k = self.make_test_knit()
         self.assertRaises(KnitHeaderError, k.keys)
@@ -1639 +1667 @@
     def test_keys(self):
         index = self.two_graph_index()
-        self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
+        self.assertEqual({('tail',), ('tip',), ('parent',), ('separate',)},
             set(index.keys()))
@@ -1644 +1672 @@
     def test_get_position(self):
@@ -1941 +1969 @@
     def test_keys(self):
         index = self.two_graph_index()
-        self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
+        self.assertEqual({('tail',), ('tip',), ('parent',), ('separate',)},
             set(index.keys()))
@@ -1946 +1974 @@
     def test_get_position(self):
@@ -2105 +2133 @@
         self.assertGroupKeysForIo([([f_a], set())],
             [f_a], [], positions)
-        self.assertGroupKeysForIo([([f_a], set([f_a]))],
+        self.assertGroupKeysForIo([([f_a], {f_a})],
             [f_a], [f_a], positions)
         self.assertGroupKeysForIo([([f_a, f_b], set([]))],
             [f_a, f_b], [], positions)
-        self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
+        self.assertGroupKeysForIo([([f_a, f_b], {f_b})],
             [f_a, f_b], [f_b], positions)
         self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
             [f_a, g_a, f_b, g_b], [], positions)
@@ -2211 +2239 @@
         self.assertEqual([(key_basis, 'foo\n'), (key_basis, 'bar\n')], details)
         # Not optimised to date:
         # self.assertEqual([("annotate", key_basis)], basis.calls)
-        self.assertEqual([('get_parent_map', set([key_basis])),
-            ('get_parent_map', set([key_basis])),
+        self.assertEqual([('get_parent_map', {key_basis}),
+            ('get_parent_map', {key_basis}),
             ('get_record_stream', [key_basis], 'topological', True)],
@@ -2238 +2266 @@
         parent_map = test.get_parent_map([key, key_basis, key_missing])
         self.assertEqual({key: (),
             key_basis: ()}, parent_map)
-        self.assertEqual([("get_parent_map", set([key_basis, key_missing]))],
+        self.assertEqual([("get_parent_map", {key_basis, key_missing})],
@@ -2244 +2272 @@
     def test_get_record_stream_unordered_fulltexts(self):
@@ -2275 +2303 @@
         # It's not strictly minimal, but it seems reasonable for now for it to
         # ask which fallbacks have which parents.
         self.assertEqual([
-            ("get_parent_map", set([key_basis, key_missing])),
+            ("get_parent_map", {key_basis, key_missing}),
             ("get_record_stream", [key_basis], 'unordered', True)],
@@ -2315 +2343 @@
-        record = source.get_record_stream([result[0]], 'unordered',
+        record = next(source.get_record_stream([result[0]], 'unordered',
@@ -2317 +2345 @@
         self.assertEqual(record.key, result[0])
         self.assertEqual(record.sha1, result[1])
         # We used to check that the storage kind matched, but actually it
@@ -2324 +2352 @@
         # It's not strictly minimal, but it seems reasonable for now for it to
         # ask which fallbacks have which parents.
         self.assertEqual([
-            ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
+            ("get_parent_map", {key_basis, key_basis_2, key_missing}),
             # topological is requested from the fallback, because that is what
             # was requested at the top level.
             ("get_record_stream", [key_basis_2, key_basis], 'topological', True)],
@@ -2362 +2390 @@
         # It's not strictly minimal, but it seems reasonable for now for it to
         # ask which fallbacks have which parents.
         self.assertEqual([
-            ("get_parent_map", set([key_basis, key_missing])),
+            ("get_parent_map", {key_basis, key_missing}),
             ("get_record_stream", [key_basis], 'unordered', False)],
@@ -2402 +2430 @@
-        record = source.get_record_stream([result[0]], 'unordered',
+        record = next(source.get_record_stream([result[0]], 'unordered',
@@ -2404 +2432 @@
         self.assertEqual(record.key, result[0])
         self.assertEqual(record.sha1, result[1])
         self.assertEqual(record.storage_kind, result[2])
@@ -2408 +2436 @@
         # It's not strictly minimal, but it seems reasonable for now for it to
         # ask which fallbacks have which parents.
         self.assertEqual([
-            ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
+            ("get_parent_map", {key_basis, key_basis_2, key_missing}),
             ("get_record_stream", [key_basis_2, key_basis], 'topological', False)],
@@ -2419 +2447 @@
         key_basis = ('bar',)
         key_missing = ('missing',)
         test.add_lines(key, (), ['foo\n'])
-        key_sha1sum = osutils.sha('foo\n').hexdigest()
+        key_sha1sum = osutils.sha_string('foo\n')
         sha1s = test.get_sha1s([key])
         self.assertEqual({key: key_sha1sum}, sha1s)
         self.assertEqual([], basis.calls)
@@ -2427 +2455 @@
         # directly (rather than via text reconstruction) so that remote servers
         # etc don't have to answer with full content.
         basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
-        basis_sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        basis_sha1sum = osutils.sha_string('foo\nbar\n')
         basis.calls = []
         sha1s = test.get_sha1s([key, key_missing, key_basis])
         self.assertEqual({key: key_sha1sum,
             key_basis: basis_sha1sum}, sha1s)
-        self.assertEqual([("get_sha1s", set([key_basis, key_missing]))],
+        self.assertEqual([("get_sha1s", {key_basis, key_missing})],
@@ -2438 +2466 @@
     def test_insert_record_stream(self):
@@ -2451 +2479 @@
         test.insert_record_stream(stream)
         # XXX: this does somewhat too many calls in making sure of whether it
         # has to recreate the full text.
-        self.assertEqual([("get_parent_map", set([key_basis])),
-            ('get_parent_map', set([key_basis])),
+        self.assertEqual([("get_parent_map", {key_basis}),
+            ('get_parent_map', {key_basis}),
             ('get_record_stream', [key_basis], 'unordered', True)],
@@ -2458 +2486 @@
         self.assertEqual({key_delta:(key_basis,)},
@@ -2471 +2499 @@
         basis.calls = []
         lines = list(test.iter_lines_added_or_present_in_keys([key1]))
         self.assertEqual([("foo\n", key1)], lines)
-        self.assertEqual([("iter_lines_added_or_present_in_keys", set([key1]))],
+        self.assertEqual([("iter_lines_added_or_present_in_keys", {key1})],
@@ -2476 +2504 @@
         # keys in both are not duplicated:
         test.add_lines(key2, (), ["bar\n"])
@@ -2493 +2521 @@
         basis.add_lines(key1, (), [])
         basis.calls = []
         keys = test.keys()
-        self.assertEqual(set([key1]), set(keys))
+        self.assertEqual({key1}, set(keys))
         self.assertEqual([("keys",)], basis.calls)
         # keys in both are not duplicated:
         test.add_lines(key2, (), [])
@@ -2501 +2529 @@
         basis.calls = []
         keys = test.keys()
         self.assertEqual(2, len(keys))
-        self.assertEqual(set([key1, key2]), set(keys))
+        self.assertEqual({key1, key2}, set(keys))
         self.assertEqual([("keys",)], basis.calls)
@@ -2507 +2535 @@
     def test_add_mpdiffs(self):
@@ -2519 +2547 @@
         diffs = source.make_mpdiffs([key_delta])
         test.add_mpdiffs([(key_delta, (key_basis,),
             source.get_sha1s([key_delta])[key_delta], diffs[0])])
-        self.assertEqual([("get_parent_map", set([key_basis])),
+        self.assertEqual([("get_parent_map", {key_basis}),
             ('get_record_stream', [key_basis], 'unordered', True),],
@@ -2525 +2553 @@
         self.assertEqual({key_delta:(key_basis,)},
@@ -2548 +2576 @@
         self.assertEqual(3, len(basis.calls))
         self.assertEqual([
-            ("get_parent_map", set([key_left, key_right])),
-            ("get_parent_map", set([key_left, key_right])),
+            ("get_parent_map", {key_left, key_right}),
+            ("get_parent_map", {key_left, key_right}),
@@ -2553 +2581 @@
             basis.calls[:-1])
         last_call = basis.calls[-1]
         self.assertEqual('get_record_stream', last_call[0])
-        self.assertEqual(set([key_left, key_right]), set(last_call[1]))
+        self.assertEqual({key_left, key_right}, set(last_call[1]))
         self.assertEqual('topological', last_call[2])
         self.assertEqual(True, last_call[3])