# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.

from gzip import GzipFile
from itertools import chain

from .. import (
    errors,
    graph as _mod_graph,
    osutils,
    )
from ..errors import (
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from ..bzr import (
    groupcompress,
    knit as _mod_knit,
    )
from ..bzr.knit import (
    cleanup_pack_knit,
    make_file_factory,
    make_pack_factory,
    )
from ..progress import InstrumentedProgress
from ..sixish import (
    BytesIO,
    )
from . import (
    TestCase,
    TestCaseWithMemoryTransport,
    TestNotApplicable,
    TestSkipped,
    )
from .http_utils import TestCaseWithWebserver
from ..transport.memory import MemoryTransport
from ..bzr import versionedfile as versionedfile
from ..bzr.versionedfile import (
    ChunkedContentFactory,
    ConstantMapper,
    HashEscapedPrefixMapper,
    PrefixMapper,
    VirtualVersionedFiles,
    make_versioned_files_factory,
    )
from ..bzr.weave import (
    WeaveFile,
    )
from ..bzr.weavefile import write_weave
from .scenarios import load_tests_apply_scenarios


load_tests = load_tests_apply_scenarios
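# ``load_tests_apply_scenarios`` multiplies every test in a class below that
# defines a ``scenarios`` attribute (see TestVersionedFiles) across each named
# implementation, so one test body runs against weaves, knits, knits-in-packs
# and groupcompress alike.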


def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges."""


class VersionedFileTestMixIn(object):
    """Tests shared by all VersionedFile implementations.

    Concrete subclasses (such as TestWeave below) supply get_file,
    reopen_file and get_factory.
    """

    def test_adds_with_parent_texts(self):
        f = self.get_file()
        parent_texts = {}
        _, _, parent_texts[b'r0'] = f.add_lines(b'r0', [], [b'a\n', b'b\n'])
        try:
            _, _, parent_texts[b'r1'] = f.add_lines_with_ghosts(b'r1',
                [b'r0', b'ghost'], [b'b\n', b'c\n'], parent_texts=parent_texts)
        except NotImplementedError:
            # if the format doesn't support ghosts, just add normally.
            _, _, parent_texts[b'r1'] = f.add_lines(b'r1',
                [b'r0'], [b'b\n', b'c\n'], parent_texts=parent_texts)
        f.add_lines(b'r2', [b'r1'], [b'c\n', b'd\n'],
                    parent_texts=parent_texts)
        self.assertNotEqual(None, parent_texts[b'r0'])
        self.assertNotEqual(None, parent_texts[b'r1'])

        def verify_file(f):
            versions = f.versions()
            self.assertTrue(b'r0' in versions)
            self.assertTrue(b'r1' in versions)
            self.assertTrue(b'r2' in versions)
            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
            self.assertEqual(f.get_lines(b'r2'), [b'c\n', b'd\n'])
            self.assertEqual(3, f.num_versions())
            origins = f.annotate(b'r1')
            self.assertEqual(origins[0][0], b'r0')
            self.assertEqual(origins[1][0], b'r1')
            origins = f.annotate(b'r2')
            self.assertEqual(origins[0][0], b'r1')
            self.assertEqual(origins[1][0], b'r2')

        verify_file(f)
        f = self.reopen_file()
        verify_file(f)

    def test_add_follows_left_matching_blocks(self):
        """If we change left_matching_blocks, the delta changes."""
        vf = self.get_file()
        if isinstance(vf, WeaveFile):
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
        vf.add_lines(b'1', [], [b'a\n'])
        vf.add_lines(b'2', [b'1'], [b'a\n', b'a\n', b'a\n'],
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'2'))
        vf.add_lines(b'3', [b'1'], [b'a\n', b'a\n', b'a\n'],
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'3'))
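        # Note: left_matching_blocks takes difflib-style (left_start,
        # right_start, length) triples (the last, zero-length triple is the
        # end sentinel), so the two calls above pin different lines of
        # version '1' against the new text and therefore produce different
        # deltas for identical content.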

    def test_inline_newline_throws(self):
        # \n characters are not permitted within lines being added
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterContainsNewline,
                          vf.add_lines, b'a', [], [b'a\n\n'])
        self.assertRaises(
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
            vf.add_lines_with_ghosts, b'a', [], [b'a\n\n'])
        # but inline CR's are allowed
        vf.add_lines(b'a', [], [b'a\r\n'])
        try:
            vf.add_lines_with_ghosts(b'b', [], [b'a\r\n'])
        except NotImplementedError:
            pass

    def test_add_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId,
                          vf.add_lines, b'a:', [], [b'a\n', b'b\n', b'c\n'])

    def test_add_lines_nostoresha(self):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_file()
        empty_text = (b'a', [])
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                              vf.add_lines, version + b"2", [], lines,
                              nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                              version + b"2")
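        # (nostore_sha asks add_lines to raise ExistingContent instead of
        # storing a text whose sha1 already matches, which is exactly what
        # the loop above asserts.)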

    def test_add_lines_with_ghosts_nostoresha(self):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_file()
        empty_text = (b'a', [])
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)

    def test_make_mpdiffs_with_ghosts(self):
        vf = self.get_file('foo')
        try:
            vf.add_lines_with_ghosts(b'text', [b'ghost'], [b'line\n'])
        except NotImplementedError:
            # old Weave formats do not allow ghosts
            return
        self.assertRaises(errors.RevisionNotPresent,
                          vf.make_mpdiffs, [b'ghost'])

    def _setup_for_deltas(self, f):
        self.assertFalse(f.has_version(b'base'))
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        f.add_lines(b'base', [], [b'line\n'])
        f.add_lines(b'noeol', [b'base'], [b'line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        f.add_lines(b'noeolsecond', [b'noeol'], [b'line\n', b'line'])
        # differing last line with parent, both no-eol
        f.add_lines(b'noeolnotshared', [b'noeolsecond'], [b'line\n', b'phone'])
        # add eol following a noneol parent, change content
        f.add_lines(b'eol', [b'noeol'], [b'phone\n'])
        # add eol following a noneol parent, no change content
        f.add_lines(b'eolline', [b'noeol'], [b'line\n'])
        # noeol with no parents:
        f.add_lines(b'noeolbase', [], [b'line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        f.add_lines(b'eolbeforefirstparent', [b'noeolbase', b'noeol'],
                    [b'line'])
        # two identical eol texts
        f.add_lines(b'noeoldup', [b'noeol'], [b'line'])
        next_parent = b'base'
        text_name = b'chain1-'
        text = [b'line\n']
        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = text_name + b'%d' % depth
            text = text + [b'line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        next_parent = b'base'
        text_name = b'chain2-'
        text = [b'line\n']
        for depth in range(26):
            new_version = text_name + b'%d' % depth
            text = text + [b'line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        return sha1s
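        # (The sha1s table above is just the digest of the accumulated text
        # at each depth: depth 0 stores two b'line\n' lines, depth 1 three,
        # and so on, matching what the 26-step chains feed to add_lines.)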

    def test_ancestry(self):
        f = self.get_file()
        self.assertEqual([], f.get_ancestry([]))
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
        f.add_lines(b'r2', [b'r0'], [b'b\n', b'c\n'])
        f.add_lines(b'r3', [b'r2'], [b'b\n', b'c\n'])
        f.add_lines(b'rM', [b'r1', b'r2'], [b'b\n', b'c\n'])
        self.assertEqual([], f.get_ancestry([]))
        versions = f.get_ancestry([b'rM'])
        # there are some possibilities:
        # so we check indexes
        r0 = versions.index(b'r0')
        r1 = versions.index(b'r1')
        r2 = versions.index(b'r2')
        self.assertFalse(b'r3' in versions)
        rM = versions.index(b'rM')
        self.assertTrue(r0 < r1)
        self.assertTrue(r0 < r2)
        self.assertTrue(r1 < rM)
        self.assertTrue(r2 < rM)

        self.assertRaises(RevisionNotPresent,
                          f.get_ancestry, [b'rM', b'rX'])

        self.assertEqual(set(f.get_ancestry(b'rM')),
                         set(f.get_ancestry(b'rM', topo_sorted=False)))

    def test_mutate_after_finish(self):
        self._transaction = 'before'
        f = self.get_file()
        self._transaction = 'after'
        self.assertRaises(errors.OutSideTransaction, f.add_lines, b'', [], [])
        self.assertRaises(errors.OutSideTransaction,
                          f.add_lines_with_ghosts, b'', [], [])

    def test_copy_to(self):
        f = self.get_file()
        f.add_lines(b'0', [], [b'a\n'])
        t = MemoryTransport()
        f.copy_to('foo', t)
        for suffix in self.get_factory().get_suffixes():
            self.assertTrue(t.has('foo' + suffix))

    def test_get_parent_map(self):
        f = self.get_file()
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
        self.assertEqual(
            {b'r0': ()}, f.get_parent_map([b'r0']))
        f.add_lines(b'r1', [b'r0'], [b'a\n', b'b\n'])
        self.assertEqual(
            {b'r1': (b'r0',)}, f.get_parent_map([b'r1']))
        self.assertEqual(
            {b'r0': (),
             b'r1': (b'r0',)},
            f.get_parent_map([b'r0', b'r1']))
        f.add_lines(b'r2', [], [b'a\n', b'b\n'])
        f.add_lines(b'r3', [], [b'a\n', b'b\n'])
        f.add_lines(b'm', [b'r0', b'r1', b'r2', b'r3'], [b'a\n', b'b\n'])
        self.assertEqual(
            {b'm': (b'r0', b'r1', b'r2', b'r3')}, f.get_parent_map([b'm']))
        self.assertEqual({}, f.get_parent_map(b'y'))
        self.assertEqual(
            {b'r0': (),
             b'r1': (b'r0',)},
            f.get_parent_map([b'r0', b'y', b'r1']))
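        # get_parent_map is deliberately ghost-unaware: root versions map to
        # an empty tuple, and unknown keys (b'y' above) are simply left out
        # of the result rather than raising.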

    def test_annotate(self):
        f = self.get_file()
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
        f.add_lines(b'r1', [b'r0'], [b'c\n', b'b\n'])
        origins = f.annotate(b'r1')
        self.assertEqual(origins[0][0], b'r1')
        self.assertEqual(origins[1][0], b'r0')

        self.assertRaises(RevisionNotPresent,
                          f.annotate, b'foo')

    def test_detection(self):
        # Test weaves detect corruption.

    def test_iter_lines_added_or_present_in_versions(self):
        vf = self.get_file()
        # add a base to get included
        vf.add_lines(b'base', [], [b'base\n'])
        # add an ancestor to be included on one side
        vf.add_lines(b'lancestor', [], [b'lancestor\n'])
        # add an ancestor to be included on the other side
        vf.add_lines(b'rancestor', [b'base'], [b'rancestor\n'])
        # add a child of rancestor with no eofile-nl
        vf.add_lines(b'child', [b'rancestor'], [b'base\n', b'child\n'])
        # add a child of lancestor and base to join the two roots
        vf.add_lines(b'otherchild',
                     [b'lancestor', b'base'],
                     [b'base\n', b'lancestor\n', b'otherchild\n'])

        def iter_with_versions(versions, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in vf.iter_lines_added_or_present_in_versions(
                    versions, pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions([b'child', b'otherchild'],
                                   [('Walking content', 0, 2),
                                    ('Walking content', 1, 2),
                                    ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[(b'child\n', b'child')] > 0)
        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
        # we don't care if we got more than that.

    def test_add_lines_with_ghosts(self):
        vf = self.get_file()
        parent_id_unicode = u'b\xbfse'
        parent_id_utf8 = parent_id_unicode.encode('utf8')
        try:
            vf.add_lines_with_ghosts(b'notbxbfse', [parent_id_utf8], [])
        except NotImplementedError:
            # check the other ghost apis are also not implemented
            self.assertRaises(NotImplementedError,
                              vf.get_ancestry_with_ghosts, [b'foo'])
            self.assertRaises(NotImplementedError,
                              vf.get_parents_with_ghosts, b'foo')
            return
        vf = self.reopen_file()
        # test key graph related apis: get_ancestry, _graph, get_parents
        # - these are ghost unaware and must not reflect ghosts
        self.assertEqual([b'notbxbfse'], vf.get_ancestry(b'notbxbfse'))
        self.assertFalse(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, b'notbxbfse'],
                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
        self.assertEqual([parent_id_utf8],
                         vf.get_parents_with_ghosts(b'notbxbfse'))
        # if we add something that is a ghost of another, it should correct
        # the results of the prior apis
        vf.add_lines(parent_id_utf8, [], [])
        self.assertEqual([parent_id_utf8, b'notbxbfse'],
                         vf.get_ancestry([b'notbxbfse']))
        self.assertEqual({b'notbxbfse': (parent_id_utf8,)},
                         vf.get_parent_map([b'notbxbfse']))
        self.assertTrue(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, b'notbxbfse'],
                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
        self.assertEqual([parent_id_utf8],
                         vf.get_parents_with_ghosts(b'notbxbfse'))

    def test_add_lines_with_ghosts_after_normal_revs(self):
        # some versioned file formats allow lines to be added with parent
        # information that is > than that in the format. Formats that do
        # not support this need to raise NotImplementedError on the
        # add_lines_with_ghosts api.

    def test_sha1s(self):
        # check the sha1 data is available
        vf = self.get_file()
        vf.add_lines(b'a', [], [b'a\n'])
        # the same file, different metadata
        vf.add_lines(b'b', [b'a'], [b'a\n'])
        # a file differing only in last newline.
        vf.add_lines(b'c', [], [b'a'])
        self.assertEqual({
            b'a': b'3f786850e387550fdab836ed7e6dc881de23001b',
            b'c': b'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
            b'b': b'3f786850e387550fdab836ed7e6dc881de23001b',
            },
            vf.get_sha1s([b'a', b'c', b'b']))


class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo'):
        return WeaveFile(name, self.get_transport(),
                         create=True,
                         get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        w = WeaveFile('foo', self.get_transport(),
                      create=True,
                      get_scope=self.get_transaction)
        w.add_lines(b'v1', [], [b'hello\n'])
        w.add_lines(b'v2', [b'v1'], [b'hello\n', b'there\n'])

        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        self.assertEqual([(b'{', 0), b'hello\n', (b'}', None),
                          (b'{', 1), b'there\n', (b'}', None),
                          ], w._weave)

        self.assertEqual([b'f572d396fae9206628714fb2ce00f72e94f2258f',
                          b'90f265c6e75f1c8f9ab76dcf85528352c5f215ef',
                          ], w._sha1s)
        w.check()

        # Corrupted
        w._weave[4] = b'There\n'
        return w
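    # The _weave list asserted above is the raw weave representation: a
    # (b'{', version_index) tuple opens the block of lines inserted by that
    # version, (b'}', None) closes it, and plain bytes entries are the text
    # lines themselves.  _sha1s keeps one hex digest per version, which is
    # why changing w._weave[4] corrupts b'v2' without touching its recorded
    # sha1.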

    def get_file_corrupted_checksum(self):
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = b'there\n'
        self.assertEqual(b'hello\nthere\n', w.get_text(b'v2'))

        # Invalid checksum, first digit changed
        w._sha1s[1] = b'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, self.get_transport(),
                         create=create,
                         get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          self.get_transport(),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile

        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])

    def test_add_lines(self):
        self.plan_merge_vf.add_lines((b'root', b'a:'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
                          (b'root', b'a'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
                          (b'root', b'a:'), None, [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
                          (b'root', b'a:'), [], None)

    def setup_abcde(self):
        self.vf1.add_lines((b'root', b'A'), [], [b'a'])
        self.vf1.add_lines((b'root', b'B'), [(b'root', b'A')], [b'b'])
        self.vf2.add_lines((b'root', b'C'), [], [b'c'])
        self.vf2.add_lines((b'root', b'D'), [(b'root', b'C')], [b'd'])
        self.plan_merge_vf.add_lines((b'root', b'E:'),
                                     [(b'root', b'B'), (b'root', b'D')],
                                     [b'e'])

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({(b'root', b'B'): ((b'root', b'A'),)},
                         self.plan_merge_vf.get_parent_map([(b'root', b'B')]))
        self.assertEqual({(b'root', b'D'): ((b'root', b'C'),)},
                         self.plan_merge_vf.get_parent_map([(b'root', b'D')]))
        self.assertEqual({(b'root', b'E:'): ((b'root', b'B'), (b'root', b'D'))},
                         self.plan_merge_vf.get_parent_map([(b'root', b'E:')]))
        self.assertEqual({},
                         self.plan_merge_vf.get_parent_map([(b'root', b'F')]))
        self.assertEqual({
            (b'root', b'B'): ((b'root', b'A'),),
            (b'root', b'D'): ((b'root', b'C'),),
            (b'root', b'E:'): ((b'root', b'B'), (b'root', b'D')),
            },
            self.plan_merge_vf.get_parent_map(
                [(b'root', b'B'), (b'root', b'D'),
                 (b'root', b'E:'), (b'root', b'F')]))

    def test_get_record_stream(self):
        self.setup_abcde()

        def get_record(suffix):
            return next(self.plan_merge_vf.get_record_stream(
                [(b'root', suffix)], 'unordered', True))
        self.assertEqual(b'a', get_record(b'A').get_bytes_as('fulltext'))
        self.assertEqual(b'a', b''.join(
            get_record(b'A').iter_bytes_as('chunked')))
        self.assertEqual(b'c', get_record(b'C').get_bytes_as('fulltext'))
        self.assertEqual(b'e', get_record(b'E:').get_bytes_as('fulltext'))
        self.assertEqual('absent', get_record(b'F').storage_kind)


class MergeCasesMixin(object):

    def doMerge(self, base, a, b, mp):

        def addcrlf(x):
            return x + b'\n'

        w = self.get_file()
        w.add_lines(b'text0', [], list(map(addcrlf, base)))
        w.add_lines(b'text1', [b'text0'], list(map(addcrlf, a)))
        w.add_lines(b'text2', [b'text0'], list(map(addcrlf, b)))

        self.log_contents(w)

        self.log('merge plan:')
        p = list(w.plan_merge(b'text1', b'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))

        self.log('merge:')
        mt = BytesIO()
        mt.writelines(w.weave_merge(p))
        mt.seek(0)
        self.log(mt.getvalue())

        mp = list(map(addcrlf, mp))
        self.assertEqual(mt.readlines(), mp)
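    # doMerge drives the whole pipeline: the three texts are stored, a merge
    # plan is computed for text1/text2 against their common base text0, and
    # weave_merge renders that plan, emitting b'<<<<<<< '/b'======='/
    # b'>>>>>>> ' markers for regions the plan leaves in conflict (see
    # testClashReplace below).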

    def testOneInsert(self):
        self.doMerge([],
                     [b'aa'],
                     [],
                     [b'aa'])

    def testSeparateInserts(self):
        self.doMerge([b'aaa', b'bbb', b'ccc'],
                     [b'aaa', b'xxx', b'bbb', b'ccc'],
                     [b'aaa', b'bbb', b'yyy', b'ccc'],
                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])

    def testSameInsert(self):
        self.doMerge([b'aaa', b'bbb', b'ccc'],
                     [b'aaa', b'xxx', b'bbb', b'ccc'],
                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'],
                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])

    overlappedInsertExpected = [b'aaa', b'xxx', b'yyy', b'bbb']

    def testOverlappedInsert(self):
        self.doMerge([b'aaa', b'bbb'],
                     [b'aaa', b'xxx', b'yyy', b'bbb'],
                     [b'aaa', b'xxx', b'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # [b'aaa', b'xxx', b'yyy', b'bbb']

    def testClashReplace(self):
        self.doMerge([b'aaa'],
                     [b'xxx'],
                     [b'yyy', b'zzz'],
                     [b'<<<<<<< ', b'xxx', b'=======', b'yyy', b'zzz',
                      b'>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge([b'aaa'],
                     [b'xxx', b'aaa'],
                     [b'yyy', b'zzz'],
                     [b'<<<<<<< ', b'xxx', b'aaa', b'=======', b'yyy', b'zzz',
                      b'>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge([b'aaa'],
                     [b'aaa'],
                     [b'yyy', b'zzz'],
                     [b'yyy', b'zzz'])
def testDeleteAndModify(self):
        """Clashing delete and modification.

        Not currently supported; this test is reserved for that case."""

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(
            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            b'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            b'origin\n'
            b'end origin\n',
            GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
        self.assertEqual(
            b'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            b'1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(
            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            b'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            b'origin\n'
            b'end origin\n',
            GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
        self.assertEqual(
            b'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            b'2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())
def test_annotated_to_fulltext_no_eol(self):
"""Test adapting annotated knits to full texts (for -> weaves)."""


class TestKeyMapper(TestCaseWithMemoryTransport):

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map((b'foo@ar',)))
        self.assertEqual("inventory", mapper.map((b'quux',)))

    def test_prefix_mapper(self):
        # format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map((b"file-id", b"revision-id")))
        self.assertEqual("new-id", mapper.map((b"new-id", b"revision-id")))
        self.assertEqual((b'file-id',), mapper.unmap("file-id"))
        self.assertEqual((b'new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        # format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual(
            "9b/file-id", mapper.map((b"file-id", b"revision-id")))
        self.assertEqual("45/new-id", mapper.map((b"new-id", b"revision-id")))
        self.assertEqual((b'file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual((b'new-id',), mapper.unmap("45/new-id"))
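    # The escaped variant below additionally quotes characters that are risky
    # on case-insensitive filesystems, and it quotes them twice: b'E' (0x45)
    # becomes '%45', and the '%' itself is stored as '%25', giving the
    # 'fil%2545-%2549d' shape asserted in the next test.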

    def test_hash_escaped_mapper(self):
        # knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((b" ", b"revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map((b"filE-Id",
                                                           b"revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map((b"neW-Id",
                                                          b"revision-id")))
        self.assertEqual((b'filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual((b'neW-Id',), mapper.unmap("88/ne%2557-%2549d"))


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #                   as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_scenarios = [
        ('weave-named', {
            'cleanup': None,
            'factory': make_versioned_files_factory(WeaveFile,
                                                    ConstantMapper('inventory')),
            'graph': True,
            'key_length': 1,
            'support_partial_insertion': False,
            }),
        ('named-knit', {
            'cleanup': None,
            'factory': make_file_factory(False, ConstantMapper('revisions')),
            'graph': True,
            'key_length': 1,
            'support_partial_insertion': False,
            }),
        ('named-nograph-nodelta-knit-pack', {
            'cleanup': cleanup_pack_knit,
            'factory': make_pack_factory(False, False, 1),
            'graph': False,
            'key_length': 1,
            'support_partial_insertion': False,
            }),
        ('named-graph-knit-pack', {
            'cleanup': cleanup_pack_knit,
            'factory': make_pack_factory(True, True, 1),
            'graph': True,
            'key_length': 1,
            'support_partial_insertion': True,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup': cleanup_pack_knit,
            'factory': make_pack_factory(True, False, 1),
            'graph': True,
            'key_length': 1,
            'support_partial_insertion': False,
            }),
        ('groupcompress-nograph', {
            'cleanup': groupcompress.cleanup_pack_group,
            'factory': groupcompress.make_pack_factory(False, False, 1),
            'graph': False,
            'key_length': 1,
            'support_partial_insertion': False,
            }),
        ]
    len_two_scenarios = [
        ('weave-prefix', {
            'cleanup': None,
            'factory': make_versioned_files_factory(WeaveFile,
                                                    PrefixMapper()),
            'graph': True,
            'key_length': 2,
            'support_partial_insertion': False,
            }),
        ('annotated-knit-escape', {
            'cleanup': None,
            'factory': make_file_factory(True, HashEscapedPrefixMapper()),
            'graph': True,
            'key_length': 2,
            'support_partial_insertion': False,
            }),
        ('plain-knit-pack', {
            'cleanup': cleanup_pack_knit,
            'factory': make_pack_factory(True, True, 2),
            'graph': True,
            'key_length': 2,
            'support_partial_insertion': True,
            }),
        ('groupcompress', {
            'cleanup': groupcompress.cleanup_pack_group,
            'factory': groupcompress.make_pack_factory(True, False, 1),
            'graph': True,
            'key_length': 1,
            'support_partial_insertion': False,
            }),
        ]

    scenarios = len_one_scenarios + len_two_scenarios
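    # Each scenario dict above is applied as instance attributes by the
    # scenario runner, so the tests below can rely on self.factory,
    # self.cleanup, self.graph, self.key_length and
    # self.support_partial_insertion being set per implementation.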
def get_versionedfiles(self, relpath='files'):
transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(self.cleanup, files)
        return files
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)

    def test_add_chunks(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key(b'r0')
        key1 = self.get_simple_key(b'r1')
        key2 = self.get_simple_key(b'r2')
        keyf = self.get_simple_key(b'foo')

        def add_chunks(key, parents, chunks):
            factory = ChunkedContentFactory(
                key, parents, osutils.sha_strings(chunks), chunks)
            return f.add_content(factory)

        add_chunks(key0, [], [b'a', b'\nb\n'])
        if self.graph:
            add_chunks(key1, [key0], [b'b', b'\n', b'c\n'])
        else:
            add_chunks(key1, [], [b'b\n', b'c\n'])
        keys = f.keys()
        self.assertIn(key0, keys)
        self.assertIn(key1, keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = (b'FileA',)
        # introduced full text
        origins = files.annotate(prefix + (b'origin',))
        self.assertEqual([
            (prefix + (b'origin',), b'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + (b'base',))
        self.assertEqual([
            (prefix + (b'base',), b'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + (b'merged',))
        if self.graph:
            self.assertEqual([
                (prefix + (b'base',), b'base\n'),
                (prefix + (b'left',), b'left\n'),
                (prefix + (b'right',), b'right\n'),
                (prefix + (b'merged',), b'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + (b'merged',), b'base\n'),
                (prefix + (b'merged',), b'left\n'),
                (prefix + (b'merged',), b'right\n'),
                (prefix + (b'merged',), b'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
                          files.annotate, prefix + (b'missing-key',))
def test_check_no_parameters(self):
files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
                          nokeys=False):
        return get_diamond_files(files, self.key_length,
                                 trailing_eol=trailing_eol,
                                 nograph=not self.graph,
                                 left_only=left_only, nokeys=nokeys)

    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_versionedfiles()
        empty_text = (b'a', [])
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                     lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + b"2")
            self.assertRaises(errors.ExistingContent,
                              vf.add_lines, new_key, [], lines,
                              nostore_sha=sha)
            # and no new version should have been added.
            record = next(vf.get_record_stream([new_key], 'unordered', True))
            self.assertEqual('absent', record.storage_kind)

    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual({
                (b'sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                (b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                (b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                (b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                (b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                },
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual({
                (b'FileA', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                (b'FileA', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                (b'FileA', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                (b'FileA', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                (b'FileA', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                (b'FileB', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                (b'FileB', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                (b'FileB', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                (b'FileB', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                (b'FileB', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                },
                files.keys())

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key(b'a')
        f.add_lines(key_a, [], [])
        self.assertEqual(b'', next(f.get_record_stream(
            [key_a], 'unordered', True)).get_bytes_as('fulltext'))
        key_b = self.get_simple_key(b'b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual(b'', next(f.get_record_stream(
            [key_b], 'unordered', True)).get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key(b'a')
        f.add_lines(key_a, [], [b'\n'])
        self.assertEqual(b'\n', next(f.get_record_stream(
            [key_a], 'unordered', True)).get_bytes_as('fulltext'))
        key_b = self.get_simple_key(b'b')
        f.add_lines(key_b, self.get_parents([key_a]), [b'\n'])
        self.assertEqual(b'\n', next(f.get_record_stream(
            [key_b], 'unordered', True)).get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key(b'a')
        key_b = self.get_simple_key(b'b')
        key_c = self.get_simple_key(b'c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], [b'\n'])
        f.add_lines(key_b, [key_a], [b'\n'])
        f.add_lines(key_c, [key_a, key_b], [b'\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
def get_keys_and_sort_order(self):
1874
1829
"""Get diamond test keys list, and their sort ordering."""
1875
1830
if self.key_length == 1:
1876
keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
1877
sort_order = {(b'merged',): 2, (b'left',): 1,
1878
(b'right',): 1, (b'base',): 0}
1831
keys = [('merged',), ('left',), ('right',), ('base',)]
1832
sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
1881
(b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
1882
(b'FileA', b'base'),
1883
(b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
1884
(b'FileB', b'base'),
1835
('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
1837
('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
1887
(b'FileA', b'merged'): 2, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
1888
(b'FileA', b'base'): 0,
1889
(b'FileB', b'merged'): 2, (b'FileB', b'left'): 1, (b'FileB', b'right'): 1,
1890
(b'FileB', b'base'): 0,
1841
('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
1842
('FileA', 'base'):0,
1843
('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
1844
('FileB', 'base'):0,
1892
1846
return keys, sort_order
1894
1848
def get_keys_and_groupcompress_sort_order(self):
1895
1849
"""Get diamond test keys list, and their groupcompress sort ordering."""
1896
1850
if self.key_length == 1:
1897
keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
1898
sort_order = {(b'merged',): 0, (b'left',): 1,
1899
(b'right',): 1, (b'base',): 2}
1851
keys = [('merged',), ('left',), ('right',), ('base',)]
1852
sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
        else:
            keys = [
                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
                (b'FileA', b'base'),
                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
                (b'FileB', b'base'),
                ]
            sort_order = {
                (b'FileA', b'merged'): 0, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
                (b'FileA', b'base'): 2,
                (b'FileB', b'merged'): 3, (b'FileB', b'left'): 4, (b'FileB', b'right'): 4,
                (b'FileB', b'base'): 5,
                }
        return keys, sort_order
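
    # Groupcompress ordering is the reverse of topological order within each
    # group (newest text first, so merged=0 ... base=2), and in the multi-key
    # case the FileB group simply follows the FileA group (values 3..5).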
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                              factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                                  bytes)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [(b'merged',), (b'left',), (b'right',),
                    (b'absent',), (b'base',)]
        else:
            keys = [
                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
                (b'FileA', b'absent'), (b'FileA', b'base'),
                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
                (b'FileB', b'absent'), (b'FileB', b'base'),
                (b'absent', b'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertRaises(RevisionNotPresent,
                          files.get_annotator().annotate, self.get_simple_key(b'missing-key'))

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                ((b'r0',), self.get_parents(())),
                ((b'r1',), self.get_parents(((b'r0',),))),
                ((b'r2',), self.get_parents(())),
                ((b'r3',), self.get_parents(())),
                ((b'm',), self.get_parents(((b'r0',), (b'r1',), (b'r2',), (b'r3',)))),
                ]
        else:
            parent_details = [
                ((b'FileA', b'r0'), self.get_parents(())),
                ((b'FileA', b'r1'), self.get_parents(((b'FileA', b'r0'),))),
                ((b'FileA', b'r2'), self.get_parents(())),
                ((b'FileA', b'r3'), self.get_parents(())),
                ((b'FileA', b'm'), self.get_parents(((b'FileA', b'r0'),
                    (b'FileA', b'r1'), (b'FileA', b'r2'), (b'FileA', b'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key: parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = list(all_parents.keys())
        if self.key_length == 1:
            keys.insert(1, (b'missing',))
        else:
            keys.insert(1, (b'missing', b'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))
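        # For example, with a graph-capable store the contract exercised
        # above behaves as:
        #   files.get_parent_map([(b'r0',), (b'missing',)])
        #       => {(b'r0',): ()}
        # i.e. the absent key is silently dropped rather than raising.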
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [(b'origin',)]
            end_keys = [(b'merged',), (b'left',)]
            start_keys = [(b'right',), (b'base',)]
        else:
            origin_keys = [(b'FileA', b'origin'), (b'FileB', b'origin')]
            end_keys = [(b'FileA', b'merged',), (b'FileA', b'left',),
                        (b'FileB', b'merged',), (b'FileB', b'left',)]
            start_keys = [(b'FileA', b'right',), (b'FileA', b'base',),
                          (b'FileB', b'right',), (b'FileB', b'base',)]
        origin_entries = source.get_record_stream(
            origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(
            start_keys, 'topological', False)
        entries = itertools.chain(origin_entries, end_entries, start_entries)
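        # Note the deliberate mis-ordering: end_keys (the children) are
        # streamed before start_keys (their parents), so a store that cannot
        # buffer out-of-order records may instead raise RevisionNotPresent,
        # which is handled below.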
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key(b'base'), (), [b'base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key(
            b'lancestor'), (), [b'lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key(b'rancestor'),
                        self.get_parents([self.get_simple_key(b'base')]), [b'rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key(b'child'),
                        self.get_parents([self.get_simple_key(b'rancestor')]),
                        [b'base\n', b'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key(b'otherchild'),
                        self.get_parents([self.get_simple_key(b'lancestor'),
                                          self.get_simple_key(b'base')]),
                        [b'base\n', b'lancestor\n', b'otherchild\n'])
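        # The store now holds two roots, joined at 'otherchild':
        #
        #      base      lancestor
        #      /   \        /
        #  rancestor   otherchild
        #      |
        #    child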

        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                                                                  pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_keys(
            [self.get_simple_key(b'child'),
             self.get_simple_key(b'otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
        self.assertTrue(
            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
                               [('Walking content', 0, 5),
                                ('Walking content', 1, 5),
                                ('Walking content', 2, 5),
                                ('Walking content', 3, 5),
                                ('Walking content', 4, 5),
                                ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[(b'base\n', self.get_simple_key(b'base'))] > 0)
        self.assertTrue(
            lines[(b'lancestor\n', self.get_simple_key(b'lancestor'))] > 0)
        self.assertTrue(
            lines[(b'rancestor\n', self.get_simple_key(b'rancestor'))] > 0)
        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
        self.assertTrue(
            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from breezy import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 26 insertions
        files.add_lines(self.get_simple_key(b'base'), [], [b'line\n'])
        files.add_lines(self.get_simple_key(b'noeol'),
                        self.get_parents([self.get_simple_key(b'base')]), [b'line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key(b'noeolsecond'),
                        self.get_parents([self.get_simple_key(b'noeol')]),
                        [b'line\n', b'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key(b'noeolnotshared'),
                        self.get_parents(
                            [self.get_simple_key(b'noeolsecond')]),
                        [b'line\n', b'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key(b'eol'),
                        self.get_parents([self.get_simple_key(b'noeol')]), [b'phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key(b'eolline'),
                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key(b'noeolbase'), [], [b'line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key(b'eolbeforefirstparent'),
                        self.get_parents([self.get_simple_key(b'noeolbase'),
                                          self.get_simple_key(b'noeol')]),
                        [b'line'])
        # two identical eol texts
        files.add_lines(self.get_simple_key(b'noeoldup'),
                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line'])
        next_parent = self.get_simple_key(b'base')
        text_name = b'chain1-'
        text = [b'line\n']
        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
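        # Each sha1s entry is the expected SHA-1 of the accumulated text at
        # that chain depth (the text grows by one b'line\n' per iteration of
        # the loop below); the values are presumably checked against the
        # store later in the test.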
        for depth in range(26):
            new_version = self.get_simple_key(text_name + b'%d' % depth)
            text = text + [b'line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key(b'base')
        text_name = b'chain2-'
        text = [b'line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + b'%d' % depth)
            text = text + [b'line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')

    def test_get_sha1s_nonexistent(self):
        self.assertEqual({}, self.texts.get_sha1s([(b"NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines[b"key"] = [b"dataline1", b"dataline2"]
        self.assertEqual({(b"key",): osutils.sha_strings(self._lines[b"key"])},
                         self.texts.get_sha1s([(b"key",)]))

    def test_get_parent_map(self):
        self._parent_map = {b"G": (b"A", b"B")}
        self.assertEqual({(b"G",): ((b"A",), (b"B",))},
                         self.texts.get_parent_map([(b"G",), (b"L",)]))
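        # Note the translation the assertion relies on: the virtual store
        # exposes plain revision ids (b"G") as length-1 key tuples ((b"G",)),
        # and the absent key (b"L") is dropped from the result.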

    def test_get_record_stream(self):
        self._lines[b"A"] = [b"FOO", b"BAR"]
        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
        record = next(it)
        self.assertEqual("chunked", record.storage_kind)
        self.assertEqual(b"FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEqual([b"FOO", b"BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
        record = next(it)
        self.assertEqual("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines[b"A"] = [b"FOO", b"BAR"]
        self._lines[b"B"] = [b"HEY"]
        self._lines[b"C"] = [b"Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([(b"A",), (b"B",)])
        self.assertEqual(sorted([(b"FOO", b"A"), (b"BAR", b"A"), (b"HEY", b"B")]),
                         sorted(list(it)))


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf(
            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([(b'A',), (b'B',), (b'C',), (b'D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
                           False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf(
            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([(b'D',), (b'B',), (b'A',), (b'C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({(b'B',): 2, (b'D',): 1})
        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([(b'A',), (b'C',), (b'D',), (b'B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)
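
        # A sketch of the ordering the three tests above imply (hypothetical,
        # not the decorator's actual code): keys absent from the priority map
        # default to 0, with the key itself as tie-breaker:
        #
        #     sorted(request_keys, key=lambda k: (priority.get(k, 0), k))
        #
        # which yields D, B, A, C for {A: 3, B: 2, C: 4, D: 1}, and
        # A, C, D, B for {B: 2, D: 1}.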