# Copyright (C) 2005 Canonical Ltd
#
# Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA


# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.
from itertools import chain
from StringIO import StringIO

from bzrlib import (
    errors,
    osutils,
    progress,
    )
from bzrlib.errors import (
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from bzrlib import knit as _mod_knit
from bzrlib.knit import (
    cleanup_pack_knit,
    make_file_factory,
    make_pack_factory,
    )
from bzrlib.symbol_versioning import one_four, one_five
from bzrlib.tests import (
    TestCaseWithMemoryTransport,
    TestScenarioApplier,
    TestSkipped,
    condition_isinstance,
    iter_suite_tests,
    split_suite_by_condition,
    )
from bzrlib.tests.http_utils import TestCaseWithWebserver
from bzrlib.trace import mutter
from bzrlib.transport import get_transport
from bzrlib.transport.memory import MemoryTransport
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile
import bzrlib.versionedfile as versionedfile
from bzrlib.versionedfile import (
    ConstantMapper,
    HashEscapedPrefixMapper,
    PrefixMapper,
    make_versioned_files_factory,
    )
from bzrlib.weave import WeaveFile
from bzrlib.weavefile import read_weave, write_weave

def load_tests(standard_tests, module, loader):
    """Parameterize VersionedFiles tests for different implementations."""
    to_adapt, result = split_suite_by_condition(
        standard_tests, condition_isinstance(TestVersionedFiles))
    len_one_adapter = TestScenarioApplier()
    len_two_adapter = TestScenarioApplier()
    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #     as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_adapter.scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            }),
        ('named-nograph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            }),
        ]
    len_two_adapter.scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            }),
        ]
    for test in iter_suite_tests(to_adapt):
        result.addTests(len_one_adapter.adapt(test))
        result.addTests(len_two_adapter.adapt(test))
    return result

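# A note on the machinery above (an assumption about TestScenarioApplier, not
# spelled out in this module): each scenario name is appended to the adapted
# test's id, and the scenario dict's entries ('cleanup', 'factory', 'graph',
# 'key_length') become attributes on the test instance, which
# TestVersionedFiles reads back as self.cleanup, self.factory, self.graph and
# self.key_length.
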
def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True end the last line with \n.
    """
    parents = {
        'origin': (),
        'base': (('origin',),),
        'left': (('base',),),
        'right': (('base',),),
        'merged': (('left',), ('right',)),
        }
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    f.add_lines('origin', [], ['origin' + last_char])
    f.add_lines('base', ['origin'], ['base' + last_char])
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
    if not left_only:
        f.add_lines('right', ['base'],
            ['base\n', 'right' + last_char])
        f.add_lines('merged', ['left', 'right'],
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
    return f, parents

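# For reference, the ancestry graph built by get_diamond_vf:
#
#            origin
#              |
#            base
#            /    \
#         left   right
#            \    /
#            merged
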
def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
    nograph=False):
    """Get a diamond graph to exercise deltas and merges.

    This creates a 5-node graph in files. If files supports 2-length keys two
    graphs are made to exercise the support for multiple ids.

    :param trailing_eol: If True end the last line with \n.
    :param key_length: The length of keys in files. Currently supports length
        1 and 2 keys.
    :param left_only: If True do not add the right and merged nodes.
    :param nograph: If True, do not provide parents to the add_lines calls;
        this is useful for tests that need inserted data but have graphless
        storage.
    :return: The results of the add_lines calls.
    """
    if key_length == 1:
        prefixes = [()]
    else:
        prefixes = [('FileA',), ('FileB',)]
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    result = []
    def get_parents(suffix_list):
        if nograph:
            return ()
        else:
            result = [prefix + suffix for suffix in suffix_list]
            return result
    # we loop over each key because that spreads the inserts across prefixes,
    # which is how commit operates.
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('origin',), (),
            ['origin' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('base',),
            get_parents([('origin',)]), ['base' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('left',),
            get_parents([('base',)]),
            ['base\n', 'left' + last_char]))
    if not left_only:
        for prefix in prefixes:
            result.append(files.add_lines(prefix + ('right',),
                get_parents([('base',)]),
                ['base\n', 'right' + last_char]))
        for prefix in prefixes:
            result.append(files.add_lines(prefix + ('merged',),
                get_parents([('left',), ('right',)]),
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
    return result

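# A minimal usage sketch for the helper above ('transport' is a placeholder;
# any of the factories wired up in load_tests would do):
#
#   files = make_file_factory(True, ConstantMapper('knit'))(transport)
#   results = get_diamond_files(files, key_length=1)
#   # each entry in results is the tuple returned by add_lines for one node
#   # of the diamond, starting with its sha1 and text length.
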
class VersionedFileTestMixIn(object):
    """A mixin test class for testing VersionedFiles.

    This is not an adaptor-style test at this point because
    there's no dynamic substitution of versioned file implementations,
    they are strictly controlled by their owning repositories.
    """

    def get_transaction(self):
        if not hasattr(self, '_transaction'):
            self._transaction = None
        return self._transaction

    def test_add(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_text('r0'), 'a\nb\n')
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEqual(2, len(f))
            self.assertEqual(2, f.num_versions())
        verify_file(f)
        self.assertRaises(RevisionNotPresent,
            f.add_lines, 'r2', ['foo'], [])
        self.assertRaises(RevisionAlreadyPresent,
            f.add_lines, 'r1', [], [])
        # this checks that reopen with create=True does not break anything.
        f = self.reopen_file(create=True)
        verify_file(f)

    def test_adds_with_parent_texts(self):
        f = self.get_file()
        parent_texts = {}
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
        try:
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
        except NotImplementedError:
            # if the format doesn't support ghosts, just add normally.
            _, _, parent_texts['r1'] = f.add_lines('r1',
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
        self.assertNotEqual(None, parent_texts['r0'])
        self.assertNotEqual(None, parent_texts['r1'])
        versions = f.versions()
        self.assertTrue('r0' in versions)
        self.assertTrue('r1' in versions)
        self.assertTrue('r2' in versions)
        self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
        self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
        self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
        self.assertEqual(3, f.num_versions())
        origins = f.annotate('r1')
        self.assertEquals(origins[0][0], 'r0')
        self.assertEquals(origins[1][0], 'r1')
        origins = f.annotate('r2')
        self.assertEquals(origins[0][0], 'r1')
        self.assertEquals(origins[1][0], 'r2')

        f = self.reopen_file()

    def test_add_unicode_content(self):
        # unicode content is not permitted in versioned files.
        # versioned files version sequences of bytes only.
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterUnicode,
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
        self.assertRaises(
            (errors.BzrBadParameterUnicode, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])

    def test_add_follows_left_matching_blocks(self):
        """If we change left_matching_blocks, delta changes.

        Note: There are multiple correct deltas in this case, because
        we start with 1 "a" and we get 3.
        """
        vf = self.get_file()
        if isinstance(vf, WeaveFile):
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))

    def test_inline_newline_throws(self):
        # newline characters are not permitted within lines being added
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterContainsNewline,
            vf.add_lines, 'a', [], ['a\n\n'])
        self.assertRaises(
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
        # but inline CRs are allowed
        vf.add_lines('a', [], ['a\r\n'])
        try:
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
        except NotImplementedError:
            pass

    def test_add_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId,
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])

    def test_add_lines_nostoresha(self):
        """When nostore_sha matches existing content, adding raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_with_ghosts_nostoresha(self):
        """When nostore_sha matches existing content, adding raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        # is the test applicable to this vf implementation?
        try:
            vf.add_lines_with_ghosts('d', [], [])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines_with_ghosts, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_return_value(self):
        # add_lines should return the sha1 and the text size.
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        # check results for the three cases:
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            # the first two elements are the same for all versioned files:
            # - the digest and the size of the text. For some versioned files
            #   additional data is returned in additional tuple elements.
            result = vf.add_lines(version, [], lines)
            self.assertEqual(3, len(result))
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
                result[0:2])
        # parents should not affect the result:
        lines = sample_text_nl[1]
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
            vf.add_lines('d', ['b', 'c'], lines)[0:2])

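    # For example, sample_text_nl above yields
    # (osutils.sha_strings(["foo\n", "bar\n"]), 8): two lines of four bytes
    # each, and neither value can be changed by supplying parents.
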
    def test_get_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        vf = self.get_file('foo')
        sha1s = self._setup_for_deltas(vf)
        new_vf = self.get_file('bar')
        for version in multiparent.topo_iter(vf):
            mpdiff = vf.make_mpdiffs([version])[0]
            new_vf.add_mpdiffs([(version, vf.get_parent_map([version])[version],
                                 vf.get_sha1s([version])[0], mpdiff)])
            self.assertEqualDiff(vf.get_text(version),
                                 new_vf.get_text(version))

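    # make_mpdiffs/add_mpdiffs round-trip texts through multiparent diffs;
    # multiparent.topo_iter yields versions parents-first, so every diff's
    # parent texts are already present in new_vf by the time it is added.
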
    def _setup_for_deltas(self, f):
        self.assertFalse(f.has_version('base'))
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 26 insertions
        f.add_lines('base', [], ['line\n'])
        f.add_lines('noeol', ['base'], ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
        # differing last line with parent, both no-eol
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        f.add_lines('eol', ['noeol'], ['phone\n'])
        # add eol following a noneol parent, no change content
        f.add_lines('eolline', ['noeol'], ['line\n'])
        # noeol with no parents:
        f.add_lines('noeolbase', [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
        # two identical no-eol texts
        f.add_lines('noeoldup', ['noeol'], ['line'])
        next_parent = 'base'
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        next_parent = 'base'
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        return sha1s

    def test_ancestry(self):
        f = self.get_file()
        self.assertEqual([], f.get_ancestry([]))
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
        self.assertEqual([], f.get_ancestry([]))
        versions = f.get_ancestry(['rM'])
        # there are some possibilities:
        # so we check indexes
        r0 = versions.index('r0')
        r1 = versions.index('r1')
        r2 = versions.index('r2')
        self.assertFalse('r3' in versions)
        rM = versions.index('rM')
        self.assertTrue(r0 < r1)
        self.assertTrue(r0 < r2)
        self.assertTrue(r1 < rM)
        self.assertTrue(r2 < rM)

        self.assertRaises(RevisionNotPresent,
            f.get_ancestry, ['rM', 'rX'])

        self.assertEqual(set(f.get_ancestry('rM')),
            set(f.get_ancestry('rM', topo_sorted=False)))

    def test_mutate_after_finish(self):
        self._transaction = 'before'
        f = self.get_file()
        self._transaction = 'after'
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])

    def test_copy_to(self):
        f = self.get_file()
        f.add_lines('0', [], ['a\n'])
        t = MemoryTransport()
        f.copy_to('foo', t)
        for suffix in self.get_factory().get_suffixes():
            self.assertTrue(t.has('foo' + suffix))

    def test_get_suffixes(self):
        # get_suffixes should return a list
        self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))

    def test_get_parent_map(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        self.assertEqual(
            {'r0':()}, f.get_parent_map(['r0']))
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
        self.assertEqual(
            {'r1':('r0',)}, f.get_parent_map(['r1']))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'r1']))
        f.add_lines('r2', [], ['a\n', 'b\n'])
        f.add_lines('r3', [], ['a\n', 'b\n'])
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
        self.assertEqual(
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
        self.assertEqual({}, f.get_parent_map(['y']))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'y', 'r1']))

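    # Note that get_parent_map simply omits unknown versions ('y' above)
    # from its result instead of raising, so callers can probe for many
    # versions in a single call.
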
    def test_annotate(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
        origins = f.annotate('r1')
        self.assertEquals(origins[0][0], 'r1')
        self.assertEquals(origins[1][0], 'r0')

        self.assertRaises(RevisionNotPresent,
            f.annotate, 'foo')

    def test_detection(self):
        # Test weaves detect corruption.
        #
        # Weaves contain a checksum of their texts.
        # When a text is extracted, this checksum should be
        # verified.
        w = self.get_file_corrupted_text()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

        w = self.get_file_corrupted_checksum()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

    def get_file_corrupted_text(self):
        """Return a versioned file with corrupt text but valid metadata."""
        raise NotImplementedError(self.get_file_corrupted_text)

    def reopen_file(self, name='foo'):
        """Open the versioned file from disk again."""
        raise NotImplementedError(self.reopen_file)

    def test_iter_lines_added_or_present_in_versions(self):
        # test that we get at least an equal set of the lines added by
        # versions in the weave
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        vf = self.get_file()
        # add a base to get included
        vf.add_lines('base', [], ['base\n'])
        # add an ancestor to be included on one side
        vf.add_lines('lancestor', [], ['lancestor\n'])
        # add an ancestor to be included on the other side
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        vf.add_lines('otherchild',
                     ['lancestor', 'base'],
                     ['base\n', 'lancestor\n', 'otherchild\n'])
        def iter_with_versions(versions, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in vf.iter_lines_added_or_present_in_versions(versions,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
                                   [('Walking content.', 0, 2),
                                    ('Walking content.', 1, 2),
                                    ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        lines = iter_with_versions(None, [('Walking content.', 0, 5),
                                          ('Walking content.', 1, 5),
                                          ('Walking content.', 2, 5),
                                          ('Walking content.', 3, 5),
                                          ('Walking content.', 4, 5),
                                          ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)

    def test_add_lines_with_ghosts(self):
        # some versioned file formats allow lines to be added with parent
        # information that is > than that in the format. Formats that do
        # not support this need to raise NotImplementedError on the
        # add_lines_with_ghosts api.
        vf = self.get_file()
        # add a revision with ghost parents
        # The preferred form is utf8, but we should translate when needed
        parent_id_unicode = u'b\xbfse'
        parent_id_utf8 = parent_id_unicode.encode('utf8')
        try:
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
        except NotImplementedError:
            # check the other ghost apis are also not implemented
            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
            return
        vf = self.reopen_file()
        # test key graph related apis: get_ancestry, _graph, get_parents
        # - these are ghost unaware and must not reflect ghosts
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
        self.assertFalse(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
        # if we add something that is a ghost of another, it should correct the
        # results of the prior apis
        vf.add_lines(parent_id_utf8, [], [])
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
            vf.get_parent_map(['notbxbfse']))
        self.assertTrue(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))

    def test_add_lines_with_ghosts_after_normal_revs(self):
        # some versioned file formats allow lines to be added with parent
        # information that is > than that in the format. Formats that do
        # not support this need to raise NotImplementedError on the
        # add_lines_with_ghosts api.
        vf = self.get_file()
        # probe for ghost support
        try:
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        vf.add_lines_with_ghosts('references_ghost',
                                 ['base', 'a_ghost'],
                                 ['line\n', 'line_b\n', 'line_c\n'])
        origins = vf.annotate('references_ghost')
        self.assertEquals(('base', 'line\n'), origins[0])
        self.assertEquals(('base', 'line_b\n'), origins[1])
        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])

    def test_readonly_mode(self):
        transport = get_transport(self.get_url('.'))
        factory = self.get_factory()
        vf = factory('id', transport, 0777, create=True, access_mode='w')
        vf = factory('id', transport, access_mode='r')
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
        self.assertRaises(errors.ReadOnlyError,
                          vf.add_lines_with_ghosts,
                          'base',
                          [],
                          [])

    def test_get_sha1s(self):
        # check the sha1 data is available
        vf = self.get_file()
        vf.add_lines('a', [], ['a\n'])
        # the same file, different metadata
        vf.add_lines('b', ['a'], ['a\n'])
        # a file differing only in last newline.
        vf.add_lines('c', [], ['a'])
        self.assertEqual(['3f786850e387550fdab836ed7e6dc881de23001b',
                          '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
                          '3f786850e387550fdab836ed7e6dc881de23001b'],
                         vf.get_sha1s(['a', 'c', 'b']))

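    # 3f786850... is sha1('a\n') and 86f7e437... is sha1('a'): texts that
    # differ only in a trailing newline must still hash differently.
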
class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)
        w.add_lines('v1', [], ['hello\n'])
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])

        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        self.assertEqual([('{', 0)
                        , 'hello\n'
                        , ('}', None)
                        , ('{', 1)
                        , 'there\n'
                        , ('}', None)
                        ], w._weave)

        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
                        ], w._sha1s)
        w.check()

        # Corrupted
        w._weave[4] = 'There\n'
        return w

    def get_file_corrupted_checksum(self):
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = 'there\n'
        self.assertEqual('hello\nthere\n', w.get_text('v2'))

        # Invalid checksum, first digit changed
        w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
            get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          get_transport(self.get_url('.')),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile

class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):

    def setUp(self):
        TestCaseWithMemoryTransport.setUp(self)
        mapper = PrefixMapper()
        factory = make_file_factory(True, mapper)
        self.vf1 = factory(self.get_transport('root-1'))
        self.vf2 = factory(self.get_transport('root-2'))
        self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root')
        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])

    def test_add_lines(self):
        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
                          ('root', 'a'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
                          ('root', 'a:'), None, [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
                          ('root', 'a:'), [], None)

    def setup_abcde(self):
        self.vf1.add_lines(('root', 'A'), [], ['a'])
        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
        self.vf2.add_lines(('root', 'C'), [], ['c'])
        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
        self.plan_merge_vf.add_lines(('root', 'E:'),
            [('root', 'B'), ('root', 'D')], ['e'])

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({('root', 'B'):(('root', 'A'),)},
            self.plan_merge_vf.get_parent_map([('root', 'B')]))
        self.assertEqual({('root', 'D'):(('root', 'C'),)},
            self.plan_merge_vf.get_parent_map([('root', 'D')]))
        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
        self.assertEqual({},
            self.plan_merge_vf.get_parent_map([('root', 'F')]))
        self.assertEqual({
                ('root', 'B'):(('root', 'A'),),
                ('root', 'D'):(('root', 'C'),),
                ('root', 'E:'):(('root', 'B'),('root', 'D')),
                },
            self.plan_merge_vf.get_parent_map(
                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))

    def test_get_record_stream(self):
        self.setup_abcde()
        def get_record(suffix):
            return self.plan_merge_vf.get_record_stream(
                [('root', suffix)], 'unordered', True).next()
        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
        self.assertEqual('absent', get_record('F').storage_kind)

class TestReadonlyHttpMixin(object):

    def get_transaction(self):
        return 1

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())
        # now with feeling.
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)

class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):

    def get_file(self):
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile

class MergeCasesMixin(object):

    def doMerge(self, base, a, b, mp):
        from cStringIO import StringIO
        from textwrap import dedent

        def addcrlf(x):
            return x + '\n'

        w = self.get_file()
        w.add_lines('text0', [], map(addcrlf, base))
        w.add_lines('text1', ['text0'], map(addcrlf, a))
        w.add_lines('text2', ['text0'], map(addcrlf, b))

        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))

        mt = StringIO()
        mt.writelines(w.weave_merge(p))
        mt.seek(0)
        self.log(mt.getvalue())

        mp = map(addcrlf, mp)
        self.assertEqual(mt.readlines(), mp)

    def testOneInsert(self):
        self.doMerge([],
                     ['aa'],
                     [],
                     ['aa'])

    def testSeparateInserts(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    def testSameInsert(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']

    def testOverlappedInsert(self):
        self.doMerge(['aaa', 'bbb'],
                     ['aaa', 'xxx', 'yyy', 'bbb'],
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # ['aaa', 'xxx', 'yyy', 'bbb']

    def testClashReplace(self):
        self.doMerge(['aaa'],
                     ['xxx',],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge(['aaa'],
                     ['xxx', 'aaa'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge(['aaa'],
                     ['aaa'],
                     ['yyy', 'zzz'],
                     ['yyy', 'zzz'])

    def testDeleteAndModify(self):
        """Clashing delete and modification.

        If one side modifies a region and the other deletes it then
        there should be a conflict with one side blank.
        """

        #######################################
        # skipped, not working yet
        return

        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'ddd', 'ccc'],
                     ['aaa', 'ccc'],
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])

    def _test_merge_from_strings(self, base, a, b, expected):
        w = self.get_file()
        w.add_lines('text0', [], base.splitlines(True))
        w.add_lines('text1', ['text0'], a.splitlines(True))
        w.add_lines('text2', ['text0'], b.splitlines(True))
        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))
        self.log('merge result:')
        result_text = ''.join(w.weave_merge(p))
        self.log(result_text)
        self.assertEqualDiff(result_text, expected)

    def test_weave_merge_conflicts(self):
        # does weave merge properly handle plans that end with unchanged?
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
        self.assertEqual(result, 'hello\n')

    def test_deletion_extended(self):
        """One side deletes, the other deletes more.
        """
        self._test_merge_from_strings(base, a, b, result)

    def test_deletion_overlap(self):
        """Delete overlapping regions with no other conflict.

        Arguably it'd be better to treat these as agreement, rather than
        conflict, but for now conflict is safer.
        """
        self._test_merge_from_strings(base, a, b, result)

    def test_agreement_deletion(self):
        """Agree to delete some lines, without conflicts."""
        self._test_merge_from_strings(base, a, b, result)

    def test_sync_on_deletion(self):
        """Specific case of merge where we can synchronize incorrectly.

        A previous version of the weave merge concluded that the two versions
        agreed on deleting line 2, and this could be a synchronization point.
        Line 1 was then considered in isolation, and thought to be deleted on
        both sides.

        It's better to consider the whole thing as a disagreement region.
        """
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            a's replacement line 2
            end context
            """
        b = """\
            start context
            b replaces
            both lines
            end context
            """
        result = """\
            start context
<<<<<<< 
            base line 1
            a's replacement line 2
=======
            b replaces
            both lines
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']

class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)

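    # A sketch of direct registry use (the 'record' here would come from a
    # get_record_stream call, as in helpGetBytes below):
    #
    #   adapter_factory = versionedfile.adapter_registry.get(
    #       ('knit-ft-gz', 'fulltext'))
    #   adapter = adapter_factory(None)
    #   text = adapter.get_bytes(record,
    #       record.get_bytes_as(record.storage_kind))
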
    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the interested adapted texts for tests."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base, base.get_bytes_as(base.storage_kind))
        # merged is both a delta and multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged,
            merged.get_bytes_as(merged.storage_kind))
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

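    # The decompressed bytes checked above follow the knit record layout: a
    # 'version <name> <line-count> <sha1>' header, the stored lines (delta
    # records carry 'start,end,count' range headers instead of full
    # content), and an 'end <name>' trailer.
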
    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        # format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        # knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))

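    # The escaped form quotes characters that are unsafe in file names:
    # ' ' becomes '%20' and an uppercase letter becomes '%<hex>', after
    # which the '%' itself is escaped, giving the '%2520' and '%2545'
    # expectations above.
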
class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(lambda: self.cleanup(files))
        return files

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'fulltext', 'knit-annotated-ft-gz',
             'knit-annotated-delta-gz', 'knit-ft-gz', 'knit-delta-gz'])

    def capture_stream(self, f, entries, on_seen, parents):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(f.get_sha1s([factory.key])[0], factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)

    def test_get_record_stream_interface(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def test_get_record_stream_interface_ordered(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1], [None, files.get_sha1s([factory.key])[0]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            self.assertIsInstance(factory.get_bytes_as('fulltext'), str)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(files.get_sha1s([factory.key])[0], factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                self.assertEqual(files.get_sha1s([factory.key])[0], factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
            parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return ()

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
                ]
        else:
            parent_details = [
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key:parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual(['51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            '00e364d235126be43292ab09cb4686cf703ddc17',
            'a8478686da38e370e32e42e8a0c220e33ee9132f',
            'ed8bce375198ea62444dc71952b22cfc2b09226d',
            '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'],
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

def test_insert_record_stream_fulltexts(self):
1747
"""Any file should accept a stream of fulltexts."""
1748
files = self.get_versionedfiles()
1749
mapper = self.get_mapper()
1750
source_transport = self.get_transport('source')
1751
source_transport.mkdir('.')
1752
# weaves always output fulltexts.
1753
source = make_versioned_files_factory(WeaveFile, mapper)(
1755
self.get_diamond_files(source, trailing_eol=False)
1756
stream = source.get_record_stream(source.keys(), 'topological',
1758
files.insert_record_stream(stream)
1759
self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts with no eol."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)
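        # Unlike test_insert_record_stream_fulltexts, the source texts here
        # have no trailing eol, exercising the no-eol handling of the insert
        # path as well.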

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits with no eol."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits with no eol."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)
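        # Together with the three tests above this forms a 2x2 matrix of
        # annotated/plain sources with and without trailing eols, so each
        # knit storage kind is pushed through the record adapters.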

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into files.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
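        # ('missing',) * self.key_length builds an absent key of the right
        # width for the scenario: ('missing',) or ('missing', 'missing').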
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)
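        # The stream above deliberately yields children (end_keys) before
        # their parents (start_keys); an implementation may buffer and
        # succeed or raise RevisionNotPresent, but either way the store must
        # remain usable.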

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included aborts safely."""
        # We use a knit always here to be sure we are getting a binary delta.
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        entries = source.get_record_stream(
            [self.get_simple_key('origin'), self.get_simple_key('merged')],
            'unordered', False)
        files = self.get_versionedfiles()
        self.assertRaises(RevisionNotPresent, files.insert_record_stream,
            entries)
        files.check()
        self.assertEqual({}, files.get_parent_map([]))
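        # check() plus a successful query shows the aborted insertion left
        # the store consistent rather than partially written.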

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])
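        # The ancestry built above:
        #
        #   lancestor    base
        #        \      /    \
        #      otherchild   rancestor
        #                       |
        #                     child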
        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
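        # Progress reporting is optional: implementations that never update
        # the progress bar leave updates empty, so the expected updates are
        # only checked when some were recorded.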
        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content.', 0, 2),
             ('Walking content.', 1, 2),
             ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content.', 0, 5),
             ('Walking content.', 1, 5),
             ('Walking content.', 2, 5),
             ('Walking content.', 3, 5),
             ('Walking content.', 4, 5),
             ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a no-eol parent, changing content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a no-eol parent, without changing content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical no-eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
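        # Note: sha1s is not consulted below as written; it documents the
        # expected digest of each 'chain1-<depth>' text, apparently retained
        # from a knit-specific version of this test.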
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[0], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'))
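        # Round-trip property: regenerating each text from its mpdiff in
        # topological order must reproduce the source fulltext exactly.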

    def test_keys(self):
        # While use is discouraged, keys() is still needed by aspects of bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))