# Copyright (C) 2005 Canonical Ltd
#
# Authors:
#   Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA


# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.

from itertools import chain
from StringIO import StringIO

from bzrlib import (
    errors,
    osutils,
    progress,
    )
from bzrlib.errors import (
    RevisionNotPresent,
    RevisionAlreadyPresent,
    WeaveParentMismatch,
    )
from bzrlib import knit as _mod_knit
from bzrlib.knit import (
    cleanup_pack_knit,
    make_file_factory,
    make_pack_factory,
    KnitAnnotateFactory,
    KnitPlainFactory,
    )
from bzrlib.symbol_versioning import one_four, one_five
from bzrlib.tests import (
    TestCaseWithMemoryTransport,
    TestScenarioApplier,
    TestSkipped,
    condition_isinstance,
    split_suite_by_condition,
    iter_suite_tests,
    )
from bzrlib.tests.http_utils import TestCaseWithWebserver
from bzrlib.trace import mutter
from bzrlib.transport import get_transport
from bzrlib.transport.memory import MemoryTransport
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile
import bzrlib.versionedfile as versionedfile
from bzrlib.versionedfile import (
    ConstantMapper,
    HashEscapedPrefixMapper,
    PrefixMapper,
    make_versioned_files_factory,
    )
from bzrlib.weave import WeaveFile
from bzrlib.weavefile import read_weave, write_weave

def load_tests(standard_tests, module, loader):
    """Parameterize VersionedFiles tests for different implementations."""
    to_adapt, result = split_suite_by_condition(
        standard_tests, condition_isinstance(TestVersionedFiles))
    len_one_adapter = TestScenarioApplier()
    len_two_adapter = TestScenarioApplier()
    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #                   as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_adapter.scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            }),
        ('named-nograph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            }),
        ]
    len_two_adapter.scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            }),
        ]
    for test in iter_suite_tests(to_adapt):
        result.addTests(len_one_adapter.adapt(test))
        result.addTests(len_two_adapter.adapt(test))
    return result

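# Each scenario above is a (name, attributes) pair; TestScenarioApplier
# copies the test once per scenario and sets the attributes on the copy,
# which is how the tests below come to read self.factory, self.cleanup,
# self.graph and self.key_length. Illustrative sketch only (not part of
# the suite):
#
#   factory = make_pack_factory(True, True, 1)   # graph, delta, key length
#   files = factory(transport)                   # -> a VersionedFiles store
#   files.add_lines(('rev-1',), (), ['hello\n'])
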
def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True end the last line with \n.
    """
    parents = {
        'origin': (),
        'base': (('origin',),),
        'left': (('base',),),
        'right': (('base',),),
        'merged': (('left',), ('right',)),
        }
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    f.add_lines('origin', [], ['origin' + last_char])
    f.add_lines('base', ['origin'], ['base' + last_char])
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
    if not left_only:
        f.add_lines('right', ['base'],
            ['base\n', 'right' + last_char])
        f.add_lines('merged', ['left', 'right'],
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
    return f, parents

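# The diamond graph built above (and by get_diamond_files below), with
# ancestry flowing downward:
#
#           origin
#             |
#            base
#            /  \
#        left    right
#            \  /
#           merged
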
def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
    nograph=False):
    """Get a diamond graph to exercise deltas and merges.

    This creates a 5-node graph in files. If files supports 2-length keys two
    graphs are made to exercise the support for multiple ids.

    :param trailing_eol: If True end the last line with \n.
    :param key_length: The length of keys in files. Currently supports length 1
        and 2 keys.
    :param left_only: If True do not add the right and merged nodes.
    :param nograph: If True, do not provide parents to the add_lines calls;
        this is useful for tests that need inserted data but have graphless
        stores.
    :return: The results of the add_lines calls.
    """
    if key_length == 1:
        prefixes = [()]
    else:
        prefixes = [('FileA',), ('FileB',)]
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    result = []
    def get_parents(suffix_list):
        if nograph:
            return ()
        else:
            result = [prefix + suffix for suffix in suffix_list]
            return result
    # we loop over each key because that spreads the inserts across prefixes,
    # which is how commit operates.
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('origin',), (),
            ['origin' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('base',),
            get_parents([('origin',)]), ['base' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('left',),
            get_parents([('base',)]),
            ['base\n', 'left' + last_char]))
    if not left_only:
        for prefix in prefixes:
            result.append(files.add_lines(prefix + ('right',),
                get_parents([('base',)]),
                ['base\n', 'right' + last_char]))
        for prefix in prefixes:
            result.append(files.add_lines(prefix + ('merged',),
                get_parents([('left',), ('right',)]),
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
    return result

class VersionedFileTestMixIn(object):
    """A mixin test class for testing VersionedFiles.

    This is not an adaptor-style test at this point because
    there's no dynamic substitution of versioned file implementations,
    they are strictly controlled by their owning repositories.
    """

    def get_transaction(self):
        if not hasattr(self, '_transaction'):
            self._transaction = None
        return self._transaction

    def test_add(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_text('r0'), 'a\nb\n')
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEqual(2, len(f))
            self.assertEqual(2, f.num_versions())

            self.assertRaises(RevisionNotPresent,
                f.add_lines, 'r2', ['foo'], [])
            self.assertRaises(RevisionAlreadyPresent,
                f.add_lines, 'r1', [], [])
        verify_file(f)
        # this checks that reopen with create=True does not break anything.
        f = self.reopen_file(create=True)
        verify_file(f)

    def test_adds_with_parent_texts(self):
        f = self.get_file()
        parent_texts = {}
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
        try:
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
        except NotImplementedError:
            # if the format doesn't support ghosts, just add normally.
            _, _, parent_texts['r1'] = f.add_lines('r1',
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
        self.assertNotEqual(None, parent_texts['r0'])
        self.assertNotEqual(None, parent_texts['r1'])
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertTrue('r2' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
            self.assertEqual(3, f.num_versions())
            origins = f.annotate('r1')
            self.assertEquals(origins[0][0], 'r0')
            self.assertEquals(origins[1][0], 'r1')
            origins = f.annotate('r2')
            self.assertEquals(origins[0][0], 'r1')
            self.assertEquals(origins[1][0], 'r2')
        verify_file(f)
        f = self.reopen_file()
        verify_file(f)

    def test_add_unicode_content(self):
        # unicode content is not permitted in versioned files.
        # versioned files version sequences of bytes only.
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterUnicode,
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
        self.assertRaises(
            (errors.BzrBadParameterUnicode, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])

    def test_add_follows_left_matching_blocks(self):
        """If we change left_matching_blocks, the delta changes.

        Note: There are multiple correct deltas in this case, because
        we start with 1 "a" and we get 3.
        """
        vf = self.get_file()
        if isinstance(vf, WeaveFile):
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))

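    # A note on left_matching_blocks, used above and in several tests below:
    # each (i, j, n) tuple appears to follow the difflib matching-block
    # convention - n lines starting at index i of the left (parent) text
    # match n lines starting at index j of the new text - so (0, 2, 1) maps
    # parent line 0 to new line 2. This reading is inferred from the
    # assertions here; it is an internal api, not a documented contract.
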
    def test_inline_newline_throws(self):
        # newline characters embedded within a line are not permitted
        # in lines being added
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterContainsNewline,
            vf.add_lines, 'a', [], ['a\n\n'])
        self.assertRaises(
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
        # but inline CR's are allowed
        vf.add_lines('a', [], ['a\r\n'])
        try:
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
        except NotImplementedError:
            pass

    def test_add_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId,
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])

    def test_add_lines_nostoresha(self):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_with_ghosts_nostoresha(self):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        # is the test applicable to this vf implementation?
        try:
            vf.add_lines_with_ghosts('d', [], [])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines_with_ghosts, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_return_value(self):
        # add_lines should return the sha1 and the text size.
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        # check results for the three cases:
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            # the first two elements are the same for all versioned files:
            # - the digest and the size of the text. For some versioned files
            #   additional data is returned in additional tuple elements.
            result = vf.add_lines(version, [], lines)
            self.assertEqual(3, len(result))
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
                result[0:2])
        # parents should not affect the result:
        lines = sample_text_nl[1]
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
            vf.add_lines('d', ['b', 'c'], lines)[0:2])

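    # To restate the contract checked above: add_lines returns a 3-tuple
    # whose first two elements are always (sha1_digest, text_length); the
    # third element is implementation specific (for example, state that can
    # be fed back in via the parent_texts parameter, as in
    # test_adds_with_parent_texts above).
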
    def test_get_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')

    def test_add_unchanged_last_line_noeol_snapshot(self):
        """Add a text with an unchanged last line with no eol should work."""
        # Test adding this in a number of chain lengths; because the interface
        # for VersionedFile does not allow forcing a specific chain length, we
        # just use a small base to get the first snapshot, then a much longer
        # first line for the next add (which will make the third add snapshot)
        # and so on. 20 has been chosen as an arbitrary figure - knits use 200
        # as a capped delta length, but ideally we would have some way of
        # tuning the test to the store (e.g. keep going until a snapshot
        # happens).
        for length in range(20):
            version_lines = {}
            vf = self.get_file('case-%d' % length)
            prefix = 'step-%d'
            parents = []
            for step in range(length):
                version = prefix % step
                lines = (['prelude \n'] * step) + ['line']
                vf.add_lines(version, parents, lines)
                version_lines[version] = lines
                parents = [version]
            vf.add_lines('no-eol', parents, ['line'])
            vf.get_texts(version_lines.keys())
            self.assertEqualDiff('line', vf.get_text('no-eol'))

    def test_get_texts_eol_variation(self):
        # similar to the failure in <http://bugs.launchpad.net/234748>
        vf = self.get_file()
        sample_text_nl = ["line\n"]
        sample_text_no_nl = ["line"]
        versions = []
        version_lines = {}
        parents = []
        for i in range(4):
            version = 'v%d' % i
            if i % 2:
                lines = sample_text_nl
            else:
                lines = sample_text_no_nl
            # left_matching blocks is an internal api; it operates on the
            # *internal* representation for a knit, which is with *all* lines
            # being normalised to end with \n - even the final line in a no_nl
            # file. Using it here ensures that a broken internal implementation
            # (which is what this test tests) will generate a correct line
            # delta (which is to say, an empty delta).
            vf.add_lines(version, parents, lines,
                left_matching_blocks=[(0, 0, 1)])
            parents = [version]
            versions.append(version)
            version_lines[version] = lines
        vf.check()
        vf.get_texts(versions)
        vf.get_texts(reversed(versions))

    def test_add_lines_with_matching_blocks_noeol_last_line(self):
        """Add a text with an unchanged last line with no eol should work."""
        from bzrlib import multiparent
        # Hand verified sha1 of the text we're adding.
        sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
        # Create a mpdiff which adds a new line before the trailing line, and
        # reuse the last line unaltered (which can cause annotation reuse).
        # Test adding this in two situations:
        # On top of a new insertion
        vf = self.get_file('fulltext')
        vf.add_lines('noeol', [], ['line'])
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
            left_matching_blocks=[(0, 1, 1)])
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
        # On top of a delta
        vf = self.get_file('delta')
        vf.add_lines('base', [], ['line'])
        vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
            left_matching_blocks=[(1, 1, 1)])
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        vf = self.get_file('foo')
        sha1s = self._setup_for_deltas(vf)
        new_vf = self.get_file('bar')
        for version in multiparent.topo_iter(vf):
            mpdiff = vf.make_mpdiffs([version])[0]
            new_vf.add_mpdiffs([(version, vf.get_parent_map([version])[version],
                                 vf.get_sha1s([version])[version], mpdiff)])
            self.assertEqualDiff(vf.get_text(version),
                                 new_vf.get_text(version))

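    # The loop above demonstrates the mpdiff round-trip contract:
    # make_mpdiffs(versions) produces one multi-parent diff per requested
    # version, and add_mpdiffs() consumes (version, parents, expected_sha1,
    # mpdiff) tuples, reproducing byte-identical texts in the target file.
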
    def test_make_mpdiffs_with_ghosts(self):
        vf = self.get_file('foo')
        try:
            vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
        except NotImplementedError:
            # old Weave formats do not allow ghosts
            return
        self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])

    def _setup_for_deltas(self, f):
        self.assertFalse(f.has_version('base'))
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        f.add_lines('base', [], ['line\n'])
        f.add_lines('noeol', ['base'], ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
        # differing last line with parent, both no-eol
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        f.add_lines('eol', ['noeol'], ['phone\n'])
        # add eol following a noneol parent, no change content
        f.add_lines('eolline', ['noeol'], ['line\n'])
        # noeol with no parents:
        f.add_lines('noeolbase', [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
        # two identical no-eol texts
        f.add_lines('noeoldup', ['noeol'], ['line'])
        next_parent = 'base'
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        next_parent = 'base'
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        return sha1s

    def test_ancestry(self):
        f = self.get_file()
        self.assertEqual([], f.get_ancestry([]))
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
        self.assertEqual([], f.get_ancestry([]))
        versions = f.get_ancestry(['rM'])
        # there are some possibilities:
        # r0 r1 r2 rM
        # r0 r2 r1 rM
        # etc
        # so we check indexes
        r0 = versions.index('r0')
        r1 = versions.index('r1')
        r2 = versions.index('r2')
        self.assertFalse('r3' in versions)
        rM = versions.index('rM')
        self.assertTrue(r0 < r1)
        self.assertTrue(r0 < r2)
        self.assertTrue(r1 < rM)
        self.assertTrue(r2 < rM)

        self.assertRaises(RevisionNotPresent,
            f.get_ancestry, ['rM', 'rX'])

        self.assertEqual(set(f.get_ancestry('rM')),
            set(f.get_ancestry('rM', topo_sorted=False)))

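    # For reference, the graph built in test_ancestry is:
    #
    #         r0
    #        /  \
    #      r1    r2
    #       \   /  \
    #        rM     r3
    #
    # rM merges r1 and r2, while r3 descends from r2 but lies outside rM's
    # ancestry - which is why it must not appear in get_ancestry(['rM']).
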
    def test_mutate_after_finish(self):
        self._transaction = 'before'
        f = self.get_file()
        self._transaction = 'after'
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])

    def test_copy_to(self):
        f = self.get_file()
        f.add_lines('0', [], ['a\n'])
        t = MemoryTransport()
        f.copy_to('foo', t)
        for suffix in self.get_factory().get_suffixes():
            self.assertTrue(t.has('foo' + suffix))

    def test_get_suffixes(self):
        f = self.get_file()
        # get_suffixes should return a list
        self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))

    def test_get_parent_map(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        self.assertEqual(
            {'r0':()}, f.get_parent_map(['r0']))
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
        self.assertEqual(
            {'r1':('r0',)}, f.get_parent_map(['r1']))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'r1']))
        f.add_lines('r2', [], ['a\n', 'b\n'])
        f.add_lines('r3', [], ['a\n', 'b\n'])
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
        self.assertEqual(
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
        self.assertEqual({}, f.get_parent_map('y'))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'y', 'r1']))

    def test_annotate(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
        origins = f.annotate('r1')
        self.assertEquals(origins[0][0], 'r1')
        self.assertEquals(origins[1][0], 'r0')

        self.assertRaises(RevisionNotPresent,
            f.annotate, 'foo')

    def test_detection(self):
        # Test weaves detect corruption.
        #
        # Weaves contain a checksum of their texts.
        # When a text is extracted, this checksum should be
        # verified.

        w = self.get_file_corrupted_text()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

        w = self.get_file_corrupted_checksum()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

    def get_file_corrupted_text(self):
        """Return a versioned file with corrupt text but valid metadata."""
        raise NotImplementedError(self.get_file_corrupted_text)

    def reopen_file(self, name='foo'):
        """Open the versioned file from disk again."""
        raise NotImplementedError(self.reopen_file)

    def test_iter_lines_added_or_present_in_versions(self):
        # test that we get at least an equal set of the lines added by
        # versions in the weave
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        vf = self.get_file()
        # add a base to get included
        vf.add_lines('base', [], ['base\n'])
        # add an ancestor to be included on one side
        vf.add_lines('lancestor', [], ['lancestor\n'])
        # add an ancestor to be included on the other side
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        vf.add_lines('otherchild',
                     ['lancestor', 'base'],
                     ['base\n', 'lancestor\n', 'otherchild\n'])
        def iter_with_versions(versions, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in vf.iter_lines_added_or_present_in_versions(versions,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
                                   [('Walking content.', 0, 2),
                                    ('Walking content.', 1, 2),
                                    ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_versions(None, [('Walking content.', 0, 5),
                                          ('Walking content.', 1, 5),
                                          ('Walking content.', 2, 5),
                                          ('Walking content.', 3, 5),
                                          ('Walking content.', 4, 5),
                                          ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)

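    # Note: iter_with_versions returns a dict mapping each yielded
    # (line_bytes, version_id) tuple to the number of times it was seen,
    # which is why the assertions above index it with pairs such as
    # ('child\n', 'child').
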
    def test_add_lines_with_ghosts(self):
        # some versioned file formats allow lines to be added with parent
        # information that references revisions not yet present in the file
        # (ghosts). Formats that do not support this need to raise
        # NotImplementedError on the add_lines_with_ghosts api.
        vf = self.get_file()
        # add a revision with ghost parents
        # The preferred form is utf8, but we should translate when needed
        parent_id_unicode = u'b\xbfse'
        parent_id_utf8 = parent_id_unicode.encode('utf8')
        try:
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
        except NotImplementedError:
            # check the other ghost apis are also not implemented
            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
            return
        vf = self.reopen_file()
        # test key graph related apis: get_ancestry, _graph, get_parents,
        # has_version
        # - these are ghost unaware and must not reflect ghosts
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
        self.assertFalse(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
        # if we add something that is a ghost of another, it should correct the
        # results of the prior apis
        vf.add_lines(parent_id_utf8, [], [])
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
            vf.get_parent_map(['notbxbfse']))
        self.assertTrue(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))

    def test_add_lines_with_ghosts_after_normal_revs(self):
        # some versioned file formats allow lines to be added with parent
        # information that references revisions not yet present in the file
        # (ghosts). Formats that do not support this need to raise
        # NotImplementedError on the add_lines_with_ghosts api.
        vf = self.get_file()
        # probe for ghost support
        try:
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
        except NotImplementedError:
            return
        vf.add_lines_with_ghosts('references_ghost',
                                 ['base', 'a_ghost'],
                                 ['line\n', 'line_b\n', 'line_c\n'])
        origins = vf.annotate('references_ghost')
        self.assertEquals(('base', 'line\n'), origins[0])
        self.assertEquals(('base', 'line_b\n'), origins[1])
        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])

    def test_readonly_mode(self):
        transport = get_transport(self.get_url('.'))
        factory = self.get_factory()
        vf = factory('id', transport, 0777, create=True, access_mode='w')
        vf = factory('id', transport, access_mode='r')
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
        self.assertRaises(errors.ReadOnlyError,
                          vf.add_lines_with_ghosts,
                          'base',
                          [],
                          [])

    def test_get_sha1s(self):
        # check the sha1 data is available
        vf = self.get_file()
        # a simple file
        vf.add_lines('a', [], ['a\n'])
        # the same file, different metadata
        vf.add_lines('b', ['a'], ['a\n'])
        # a file differing only in last newline.
        vf.add_lines('c', [], ['a'])
        self.assertEqual({
            'a': '3f786850e387550fdab836ed7e6dc881de23001b',
            'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
            'b': '3f786850e387550fdab836ed7e6dc881de23001b',
            },
            vf.get_sha1s(['a', 'c', 'b']))


class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)
        w.add_lines('v1', [], ['hello\n'])
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])

        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        self.assertEqual([('{', 0)
                        , 'hello\n'
                        , ('}', None)
                        , ('{', 1)
                        , 'there\n'
                        , ('}', None)
                        ], w._weave)

        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
                        ], w._sha1s)
        w.check()

        # Corrupted
        w._weave[4] = 'There\n'
        return w

    def get_file_corrupted_checksum(self):
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = 'there\n'
        self.assertEqual('hello\nthere\n', w.get_text('v2'))

        # Invalid checksum, first digit changed
        w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
            get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          get_transport(self.get_url('.')),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):

    def setUp(self):
        TestCaseWithMemoryTransport.setUp(self)
        mapper = PrefixMapper()
        factory = make_file_factory(True, mapper)
        self.vf1 = factory(self.get_transport('root-1'))
        self.vf2 = factory(self.get_transport('root-2'))
        self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root')
        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])

    def test_add_lines(self):
        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a:'), None, [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a:'), [], None)

    def setup_abcde(self):
        self.vf1.add_lines(('root', 'A'), [], ['a'])
        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
        self.vf2.add_lines(('root', 'C'), [], ['c'])
        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
        self.plan_merge_vf.add_lines(('root', 'E:'),
            [('root', 'B'), ('root', 'D')], ['e'])

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({('root', 'B'):(('root', 'A'),)},
            self.plan_merge_vf.get_parent_map([('root', 'B')]))
        self.assertEqual({('root', 'D'):(('root', 'C'),)},
            self.plan_merge_vf.get_parent_map([('root', 'D')]))
        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
        self.assertEqual({},
            self.plan_merge_vf.get_parent_map([('root', 'F')]))
        self.assertEqual({
                ('root', 'B'):(('root', 'A'),),
                ('root', 'D'):(('root', 'C'),),
                ('root', 'E:'):(('root', 'B'),('root', 'D')),
                },
            self.plan_merge_vf.get_parent_map(
                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))

    def test_get_record_stream(self):
        self.setup_abcde()
        def get_record(suffix):
            return self.plan_merge_vf.get_record_stream(
                [('root', suffix)], 'unordered', True).next()
        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
        self.assertEqual('absent', get_record('F').storage_kind)


class TestReadonlyHttpMixin(object):

    def get_transaction(self):
        return 1

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())
        # now with feeling.
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)


class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):

    def get_file(self):
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class MergeCasesMixin(object):

    def doMerge(self, base, a, b, mp):
        from cStringIO import StringIO
        from textwrap import dedent

        def addcrlf(x):
            return x + '\n'

        w = self.get_file()
        w.add_lines('text0', [], map(addcrlf, base))
        w.add_lines('text1', ['text0'], map(addcrlf, a))
        w.add_lines('text2', ['text0'], map(addcrlf, b))

        self.log_contents(w)

        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))

        self.log('merge:')
        mt = StringIO()
        mt.writelines(w.weave_merge(p))
        mt.seek(0)
        self.log(mt.getvalue())

        mp = map(addcrlf, mp)
        self.assertEqual(mt.readlines(), mp)

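    # plan_merge (used by doMerge above) yields (state, line) tuples. The
    # states relied on by these tests include at least 'unchanged' and
    # 'new-a' (see test_weave_merge_conflicts below); the weave merge
    # implementation also has states for lines new to or killed by either
    # side, but the exact vocabulary is not asserted here.
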
    def testOneInsert(self):
        self.doMerge([],
                     ['aa'],
                     [],
                     ['aa'])

    def testSeparateInserts(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    def testSameInsert(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']

    def testOverlappedInsert(self):
        self.doMerge(['aaa', 'bbb'],
                     ['aaa', 'xxx', 'yyy', 'bbb'],
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # ['aaa', 'xxx', 'yyy', 'bbb']

    def testClashReplace(self):
        self.doMerge(['aaa'],
                     ['xxx'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge(['aaa'],
                     ['xxx', 'aaa'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge(['aaa'],
                     ['aaa'],
                     ['yyy', 'zzz'],
                     ['yyy', 'zzz'])

    def testDeleteAndModify(self):
        """Clashing delete and modification.

        If one side modifies a region and the other deletes it then
        there should be a conflict with one side blank.
        """

        #######################################
        # skipped, not working yet
        return

        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'ddd', 'ccc'],
                     ['aaa', 'ccc'],
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])

    def _test_merge_from_strings(self, base, a, b, expected):
        w = self.get_file()
        w.add_lines('text0', [], base.splitlines(True))
        w.add_lines('text1', ['text0'], a.splitlines(True))
        w.add_lines('text2', ['text0'], b.splitlines(True))
        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))
        self.log('merge result:')
        result_text = ''.join(w.weave_merge(p))
        self.log(result_text)
        self.assertEqualDiff(result_text, expected)

    def test_weave_merge_conflicts(self):
        # does weave merge properly handle plans that end with unchanged?
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
        self.assertEqual(result, 'hello\n')

    def test_deletion_extended(self):
        """One side deletes, the other deletes more."""
        base = """\
            line 1
            line 2
            line 3
            """
        a = """\
            line 1
            line 2
            """
        b = """\
            line 1
            """
        result = """\
            line 1
            line 2
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_deletion_overlap(self):
        """Delete overlapping regions with no other conflict.

        Arguably it'd be better to treat these as agreement, rather than
        conflict, but for now conflict is safer.
        """
        base = """\
            start context
            int a() {}
            int b() {}
            int c() {}
            end context
            """
        a = """\
            start context
            int a() {}
            end context
            """
        b = """\
            start context
            int c() {}
            end context
            """
        result = """\
            start context
<<<<<<< 
            int a() {}
======= 
            int c() {}
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_agreement_deletion(self):
        """Agree to delete some lines, without conflicts."""
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            end context
            """
        b = """\
            start context
            base line 1
            end context
            """
        result = """\
            start context
            base line 1
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_sync_on_deletion(self):
        """Specific case of merge where we can synchronize incorrectly.

        A previous version of the weave merge concluded that the two versions
        agreed on deleting line 2, and this could be a synchronization point.
        Line 1 was then considered in isolation, and thought to be deleted on
        both sides.

        It's better to consider the whole thing as a disagreement region.
        """
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            a's replacement line 2
            end context
            """
        b = """\
            start context
            b replaces
            both lines
            end context
            """
        result = """\
            start context
<<<<<<< 
            base line 1
            a's replacement line 2
======= 
            b replaces
            both lines
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)


class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)

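    # An illustrative use of the registry exercised above: look up an
    # adapter factory by (source_kind, target_kind) and convert a record's
    # serialised bytes (helpGetBytes below uses the same pattern):
    #
    #   factory = versionedfile.adapter_registry.get(
    #       ('knit-annotated-ft-gz', 'fulltext'))
    #   adapter = factory(None)  # some adapters need a backing VF instead
    #   text = adapter.get_bytes(record,
    #       record.get_bytes_as(record.storage_kind))
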
    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the adapted texts we are interested in for the tests."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base, base.get_bytes_as(base.storage_kind))
        # merged is both a delta and multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged,
            merged.get_bytes_as(merged.storage_kind))
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        #format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        #format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        #knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(lambda:self.cleanup(files))
        return files

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'fulltext', 'knit-annotated-ft-gz',
             'knit-annotated-delta-gz', 'knit-ft-gz', 'knit-delta-gz'])

    def capture_stream(self, f, entries, on_seen, parents):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)

    def test_get_record_stream_interface(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

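    # sort_order maps each key to its depth in the diamond graph (base=0,
    # left/right=1, merged=2); assertStreamOrder below checks that a
    # 'topological' stream never yields a key whose depth is lower than one
    # already seen for the same prefix.
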
    def test_get_record_stream_interface_ordered(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            self.assertIsInstance(factory.get_bytes_as('fulltext'), str)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
            parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return ()

def test_get_parent_map(self):
1760
files = self.get_versionedfiles()
1761
if self.key_length == 1:
1763
(('r0',), self.get_parents(())),
1764
(('r1',), self.get_parents((('r0',),))),
1765
(('r2',), self.get_parents(())),
1766
(('r3',), self.get_parents(())),
1767
(('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
1771
(('FileA', 'r0'), self.get_parents(())),
1772
(('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
1773
(('FileA', 'r2'), self.get_parents(())),
1774
(('FileA', 'r3'), self.get_parents(())),
1775
(('FileA', 'm'), self.get_parents((('FileA', 'r0'),
1776
('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
1778
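        # With graph-less implementations get_parents() collapses all of
        # these to None, so the expected values adjust automatically.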
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key: parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
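        # These should be the sha1s of the fulltexts written by
        # get_diamond_files; the fixture contents are fixed, so the hashes
        # are stable across implementations.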
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
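        # Compare fulltexts key by key: the two files may use different
        # storage formats, so only the reconstructed texts need to match.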
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)
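
    # The *_noeol variants repeat these stream-insertion tests with texts
    # that lack a trailing newline, exercising the storage's end-of-line
    # handling.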
    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts with no eol."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits with no eol."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits with no eol."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some of the keys into files first.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
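        # Implementations may buffer out-of-order records until their parents
        # arrive, or raise RevisionNotPresent; either is acceptable, but the
        # file must not be corrupted in the error case.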
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included aborts safely."""
        # We use a knit always here to be sure we are getting a binary delta.
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
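        # Request only 'origin' and 'merged': the delta record for 'merged'
        # then arrives without its basis text in the stream, so the insert
        # must abort without corrupting the target.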
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        self.assertRaises(RevisionNotPresent, files.insert_record_stream,
            entries)
        files.check()
        self.assertEqual({}, files.get_parent_map([]))

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))
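
        # The instrumented progress object records every update() call, so
        # the test can check how the 'Walking content.' progress is reported
        # when the implementation reports progress at all.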
        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])

        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content.', 0, 2),
             ('Walking content.', 1, 2),
             ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content.', 0, 5),
             ('Walking content.', 1, 5),
             ('Walking content.', 2, 5),
             ('Walking content.', 3, 5),
             ('Walking content.', 4, 5),
             ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 26 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical no-eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
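        # Expected sha1s of the successive chain texts, indexed by depth.
        # The chain contents are deterministic, so these values are stable;
        # they are kept as reference data and are not asserted below.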
        sha1s = {0: 'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
            1: '45e21ea146a81ea44a821737acdb4f9791c8abe7',
            2: 'e1f11570edf3e2a070052366c582837a4fe4e9fa',
            3: '26b4b8626da827088c514b8f9bbe4ebf181edda1',
            4: 'e28a5510be25ba84d31121cff00956f9970ae6f6',
            5: 'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
            6: '2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
            7: '95c14da9cafbf828e3e74a6f016d87926ba234ab',
            8: '779e9a0b28f9f832528d4b21e17e168c67697272',
            9: '1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
            10: '131a2ae712cf51ed62f143e3fbac3d4206c25a05',
            11: 'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
            12: '31a2286267f24d8bedaa43355f8ad7129509ea85',
            13: 'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
            14: '2c4b1736566b8ca6051e668de68650686a3922f2',
            15: '5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
            16: 'b0d2e18d3559a00580f6b49804c23fea500feab3',
            17: '8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
            18: '5cf64a3459ae28efa60239e44b20312d25b253f3',
            19: '1ebed371807ba5935958ad0884595126e8c4e823',
            20: '2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
            21: '01edc447978004f6e4e962b417a4ae1955b6fe5d',
            22: 'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
            23: 'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
            24: 'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
            25: 'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
            }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
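        # Round-trip every key: build an mpdiff from the source, apply it to
        # an empty target via add_mpdiffs, then check that the reconstructed
        # fulltext matches the source fulltext exactly.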
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext')
                )

    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))