# Copyright (C) 2005, 2009 Canonical Ltd
#
# Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.

from itertools import chain, izip
from StringIO import StringIO

from bzrlib import (
    errors,
    graph as _mod_graph,
    groupcompress,
    knit as _mod_knit,
    osutils,
    progress,
    ui,
    )
from bzrlib.errors import (
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from bzrlib.knit import (
    cleanup_pack_knit,
    make_file_factory,
    make_pack_factory,
    )
from bzrlib.tests import (
    TestCaseWithMemoryTransport,
    TestNotApplicable,
    TestSkipped,
    condition_isinstance,
    multiply_tests,
    split_suite_by_condition,
    )
from bzrlib.tests.http_utils import TestCaseWithWebserver
from bzrlib.trace import mutter
from bzrlib.transport import get_transport
from bzrlib.transport.memory import MemoryTransport
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile
import bzrlib.versionedfile as versionedfile
from bzrlib.versionedfile import (
    ConstantMapper,
    HashEscapedPrefixMapper,
    PrefixMapper,
    VirtualVersionedFiles,
    make_versioned_files_factory,
    )
from bzrlib.weave import WeaveFile
from bzrlib.weavefile import read_weave, write_weave


def load_tests(standard_tests, module, loader):
    """Parameterize VersionedFiles tests for different implementations."""
    to_adapt, result = split_suite_by_condition(
        standard_tests, condition_isinstance(TestVersionedFiles))
    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #     as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-nograph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': True,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('groupcompress-nograph', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    len_two_scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': True,
            }),
        ('groupcompress', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    scenarios = len_one_scenarios + len_two_scenarios
    return multiply_tests(to_adapt, scenarios, result)
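

# A minimal usage sketch (hypothetical, not part of the suite): each
# scenario's 'factory' above takes a transport and returns a store, so one
# test body can run unchanged against every implementation.
#
#   factory = make_file_factory(False, ConstantMapper('revisions'))
#   vf = factory(get_transport('.'))
#   vf.add_lines(('rev-1',), [], ['line\n'])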


def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True end the last line with \n.
    """
    parents = {
        'origin': (),
        'base': (('origin',),),
        'left': (('base',),),
        'right': (('base',),),
        'merged': (('left',), ('right',)),
        }
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    f.add_lines('origin', [], ['origin' + last_char])
    f.add_lines('base', ['origin'], ['base' + last_char])
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
    if not left_only:
        f.add_lines('right', ['base'],
            ['base\n', 'right' + last_char])
        f.add_lines('merged', ['left', 'right'],
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
    return f, parents
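
# The five-node graph built above (and by get_diamond_files below):
#
#          origin
#            |
#          base
#          /  \
#       left  right
#          \  /
#         merged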


def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
    nograph=False, nokeys=False):
    """Get a diamond graph to exercise deltas and merges.

    This creates a 5-node graph in files. If files supports 2-length keys two
    graphs are made to exercise the support for multiple ids.

    :param trailing_eol: If True end the last line with \n.
    :param key_length: The length of keys in files. Currently supports length 1
        and 2 keys.
    :param left_only: If True do not add the right and merged nodes.
    :param nograph: If True, do not provide parents to the add_lines calls;
        this is useful for tests that need inserted data but have graphless
        stores.
    :param nokeys: If True, pass None as the key for all insertions.
        Currently implies nograph.
    :return: The results of the add_lines calls.
    """
    if nokeys:
        nograph = True
    if key_length == 1:
        prefixes = [()]
    else:
        prefixes = [('FileA',), ('FileB',)]
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    result = []
    def get_parents(suffix_list):
        if nograph:
            return ()
        else:
            result = [prefix + suffix for suffix in suffix_list]
            return result
    def get_key(suffix):
        if nokeys:
            return (None,)
        else:
            return (suffix,)
    # we loop over each key because that spreads the inserts across prefixes,
    # which is how commit operates.
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('origin'), (),
            ['origin' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('base'),
            get_parents([('origin',)]), ['base' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('left'),
            get_parents([('base',)]),
            ['base\n', 'left' + last_char]))
    if not left_only:
        for prefix in prefixes:
            result.append(files.add_lines(prefix + get_key('right'),
                get_parents([('base',)]),
                ['base\n', 'right' + last_char]))
        for prefix in prefixes:
            result.append(files.add_lines(prefix + get_key('merged'),
                get_parents([('left',), ('right',)]),
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
    return result
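
# A minimal sketch (hypothetical transport variable) of reading one of the
# texts inserted by get_diamond_files back out of a 2-length-key store:
#
#   files = make_file_factory(True, HashEscapedPrefixMapper())(transport)
#   get_diamond_files(files, key_length=2)
#   record = files.get_record_stream([('FileA', 'merged')],
#       'unordered', True).next()
#   text = record.get_bytes_as('fulltext')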


class VersionedFileTestMixIn(object):
    """A mixin test class for testing VersionedFiles.

    This is not an adaptor-style test at this point because
    there's no dynamic substitution of versioned file implementations;
    they are strictly controlled by their owning repositories.
    """

    def get_transaction(self):
        if not hasattr(self, '_transaction'):
            self._transaction = None
        return self._transaction

    def test_add(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])

        versions = f.versions()
        self.assertTrue('r0' in versions)
        self.assertTrue('r1' in versions)
        self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
        self.assertEquals(f.get_text('r0'), 'a\nb\n')
        self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
        self.assertEqual(2, len(f))
        self.assertEqual(2, f.num_versions())

        self.assertRaises(RevisionNotPresent,
            f.add_lines, 'r2', ['foo'], [])
        self.assertRaises(RevisionAlreadyPresent,
            f.add_lines, 'r1', [], [])
        # this checks that reopen with create=True does not break anything.
        f = self.reopen_file(create=True)

    def test_adds_with_parent_texts(self):
        f = self.get_file()
        parent_texts = {}
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
        try:
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
        except NotImplementedError:
            # if the format doesn't support ghosts, just add normally.
            _, _, parent_texts['r1'] = f.add_lines('r1',
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
        self.assertNotEqual(None, parent_texts['r0'])
        self.assertNotEqual(None, parent_texts['r1'])
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertTrue('r2' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
            self.assertEqual(3, f.num_versions())
            origins = f.annotate('r1')
            self.assertEquals(origins[0][0], 'r0')
            self.assertEquals(origins[1][0], 'r1')
            origins = f.annotate('r2')
            self.assertEquals(origins[0][0], 'r1')
            self.assertEquals(origins[1][0], 'r2')
        verify_file(f)
        f = self.reopen_file()
        verify_file(f)

    def test_add_unicode_content(self):
        # unicode content is not permitted in versioned files.
        # versioned files version sequences of bytes only.
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterUnicode,
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
        self.assertRaises(
            (errors.BzrBadParameterUnicode, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])

    def test_add_follows_left_matching_blocks(self):
        """If we change left_matching_blocks, the delta changes.

        Note: There are multiple correct deltas in this case, because
        we start with 1 "a" and we get 3.
        """
        vf = self.get_file()
        if isinstance(vf, WeaveFile):
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))

    def test_inline_newline_throws(self):
        # inline \n characters are not permitted in lines being added
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterContainsNewline,
            vf.add_lines, 'a', [], ['a\n\n'])
        self.assertRaises(
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
        # but inline CR's are allowed
        vf.add_lines('a', [], ['a\r\n'])
        try:
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
        except NotImplementedError:
            pass

    def test_add_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId,
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])

    def test_add_lines_nostoresha(self):
        """When nostore_sha is supplied, adding old content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_with_ghosts_nostoresha(self):
        """When nostore_sha is supplied, adding old content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        # is the test applicable to this vf implementation?
        try:
            vf.add_lines_with_ghosts('d', [], [])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines_with_ghosts, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_return_value(self):
        # add_lines should return the sha1 and the text size.
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        # check results for the three cases:
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            # the first two elements are the same for all versioned files:
            # - the digest and the size of the text. For some versioned files
            #   additional data is returned in additional tuple elements.
            result = vf.add_lines(version, [], lines)
            self.assertEqual(3, len(result))
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
                result[0:2])
        # parents should not affect the result:
        lines = sample_text_nl[1]
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
            vf.add_lines('d', ['b', 'c'], lines)[0:2])
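
    # A sketch of the invariant asserted above, using only the stdlib:
    # osutils.sha_strings(lines) is the sha1 of the concatenated lines, and
    # the reported size is the total byte length.
    #
    #   import hashlib
    #   lines = ["foo\n", "bar\n"]
    #   digest = hashlib.sha1(''.join(lines)).hexdigest()
    #   size = sum(len(line) for line in lines)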

    def test_get_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')

    def test_add_unchanged_last_line_noeol_snapshot(self):
        """Adding a text whose unchanged last line has no eol should work."""
        # Test adding this in a number of chain lengths; because the interface
        # for VersionedFile does not allow forcing a specific chain length, we
        # just use a small base to get the first snapshot, then a much longer
        # first line for the next add (which will make the third add snapshot)
        # and so on. 20 has been chosen as an arbitrary figure - knits use 200
        # as a capped delta length, but ideally we would have some way of
        # tuning the test to the store (e.g. keep going until a snapshot
        # happens).
        for length in range(20):
            version_lines = {}
            vf = self.get_file('case-%d' % length)
            prefix = 'step-%d'
            parents = []
            for step in range(length):
                version = prefix % step
                lines = (['prelude \n'] * step) + ['line']
                vf.add_lines(version, parents, lines)
                version_lines[version] = lines
                parents = [version]
            vf.add_lines('no-eol', parents, ['line'])
            vf.get_texts(version_lines.keys())
            self.assertEqualDiff('line', vf.get_text('no-eol'))

    def test_get_texts_eol_variation(self):
        # similar to the failure in <http://bugs.launchpad.net/234748>
        vf = self.get_file()
        sample_text_nl = ["line\n"]
        sample_text_no_nl = ["line"]
        versions = []
        version_lines = {}
        parents = []
        for i in range(4):
            version = 'v%d' % i
            if i % 2:
                lines = sample_text_nl
            else:
                lines = sample_text_no_nl
            # left_matching blocks is an internal api; it operates on the
            # *internal* representation for a knit, which is with *all* lines
            # being normalised to end with \n - even the final line in a no_nl
            # file. Using it here ensures that a broken internal implementation
            # (which is what this test tests) will generate a correct line
            # delta (which is to say, an empty delta).
            vf.add_lines(version, parents, lines,
                left_matching_blocks=[(0, 0, 1)])
            parents = [version]
            versions.append(version)
            version_lines[version] = lines
        vf.check()
        vf.get_texts(versions)
        vf.get_texts(reversed(versions))

    def test_add_lines_with_matching_blocks_noeol_last_line(self):
        """Adding a text whose unchanged last line has no eol should work."""
        from bzrlib import multiparent
        # Hand verified sha1 of the text we're adding.
        sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
        # Create a mpdiff which adds a new line before the trailing line, and
        # reuse the last line unaltered (which can cause annotation reuse).
        # Test adding this in two situations:
        # On top of a new insertion
        vf = self.get_file('fulltext')
        vf.add_lines('noeol', [], ['line'])
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
            left_matching_blocks=[(0, 1, 1)])
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
        # On top of a delta
        vf = self.get_file('delta')
        vf.add_lines('base', [], ['line'])
        vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
            left_matching_blocks=[(1, 1, 1)])
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        vf = self.get_file('foo')
        sha1s = self._setup_for_deltas(vf)
        new_vf = self.get_file('bar')
        for version in multiparent.topo_iter(vf):
            mpdiff = vf.make_mpdiffs([version])[0]
            new_vf.add_mpdiffs([(version, vf.get_parent_map([version])[version],
                                 vf.get_sha1s([version])[version], mpdiff)])
            self.assertEqualDiff(vf.get_text(version),
                                 new_vf.get_text(version))
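
    # A minimal sketch (hypothetical names) of the round-trip exercised
    # above: a multi-parent diff, plus the parents and the expected sha1,
    # is all another store needs to recreate the identical text.
    #
    #   mpdiff = vf.make_mpdiffs([version])[0]
    #   new_vf.add_mpdiffs([(version, parents, expected_sha1, mpdiff)])
    #   assert new_vf.get_text(version) == vf.get_text(version)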

    def test_make_mpdiffs_with_ghosts(self):
        vf = self.get_file('foo')
        try:
            vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
        except NotImplementedError:
            # old Weave formats do not allow ghosts
            return
        self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])

    def _setup_for_deltas(self, f):
        self.assertFalse(f.has_version('base'))
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        f.add_lines('base', [], ['line\n'])
        f.add_lines('noeol', ['base'], ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
        # differing last line with parent, both no-eol
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        f.add_lines('eol', ['noeol'], ['phone\n'])
        # add eol following a noneol parent, no change content
        f.add_lines('eolline', ['noeol'], ['line\n'])
        # noeol with no parents:
        f.add_lines('noeolbase', [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
        # two identical eol texts
        f.add_lines('noeoldup', ['noeol'], ['line'])
        next_parent = 'base'
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        next_parent = 'base'
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        return sha1s

    def test_ancestry(self):
        f = self.get_file()
        self.assertEqual([], f.get_ancestry([]))
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
        self.assertEqual([], f.get_ancestry([]))
        versions = f.get_ancestry(['rM'])
        # there are some possibilities:
        # so we check indexes
        r0 = versions.index('r0')
        r1 = versions.index('r1')
        r2 = versions.index('r2')
        self.assertFalse('r3' in versions)
        rM = versions.index('rM')
        self.assertTrue(r0 < r1)
        self.assertTrue(r0 < r2)
        self.assertTrue(r1 < rM)
        self.assertTrue(r2 < rM)

        self.assertRaises(RevisionNotPresent,
            f.get_ancestry, ['rM', 'rX'])

        self.assertEqual(set(f.get_ancestry('rM')),
            set(f.get_ancestry('rM', topo_sorted=False)))

    def test_mutate_after_finish(self):
        self._transaction = 'before'
        f = self.get_file()
        self._transaction = 'after'
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts,
            '', [], [])

    def test_copy_to(self):
        f = self.get_file()
        f.add_lines('0', [], ['a\n'])
        t = MemoryTransport()
        f.copy_to('foo', t)
        for suffix in self.get_factory().get_suffixes():
            self.assertTrue(t.has('foo' + suffix))

    def test_get_suffixes(self):
        f = self.get_file()
        # the suffixes returned should be a list
        self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))

    def test_get_parent_map(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        self.assertEqual(
            {'r0':()}, f.get_parent_map(['r0']))
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
        self.assertEqual(
            {'r1':('r0',)}, f.get_parent_map(['r1']))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'r1']))
        f.add_lines('r2', [], ['a\n', 'b\n'])
        f.add_lines('r3', [], ['a\n', 'b\n'])
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
        self.assertEqual(
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
        self.assertEqual({}, f.get_parent_map('y'))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'y', 'r1']))

    def test_annotate(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
        origins = f.annotate('r1')
        self.assertEquals(origins[0][0], 'r1')
        self.assertEquals(origins[1][0], 'r0')

        self.assertRaises(RevisionNotPresent,
            f.annotate, 'foo')

    def test_detection(self):
        # Test weaves detect corruption.
        #
        # Weaves contain a checksum of their texts.
        # When a text is extracted, this checksum should be
        # verified.
        w = self.get_file_corrupted_text()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

        w = self.get_file_corrupted_checksum()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

    def get_file_corrupted_text(self):
        """Return a versioned file with corrupt text but valid metadata."""
        raise NotImplementedError(self.get_file_corrupted_text)

    def reopen_file(self, name='foo'):
        """Open the versioned file from disk again."""
        raise NotImplementedError(self.reopen_file)

    def test_iter_lines_added_or_present_in_versions(self):
        # test that we get at least an equal set of the lines added by
        # versions in the weave
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        vf = self.get_file()
        # add a base to get included
        vf.add_lines('base', [], ['base\n'])
        # add an ancestor to be included on one side
        vf.add_lines('lancestor', [], ['lancestor\n'])
        # add an ancestor to be included on the other side
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        vf.add_lines('otherchild',
                     ['lancestor', 'base'],
                     ['base\n', 'lancestor\n', 'otherchild\n'])
        def iter_with_versions(versions, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in vf.iter_lines_added_or_present_in_versions(versions,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
                                   [('Walking content', 0, 2),
                                    ('Walking content', 1, 2),
                                    ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_versions(None, [('Walking content', 0, 5),
                                          ('Walking content', 1, 5),
                                          ('Walking content', 2, 5),
                                          ('Walking content', 3, 5),
                                          ('Walking content', 4, 5),
                                          ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)

    def test_add_lines_with_ghosts(self):
        # some versioned file formats allow lines to be added with parent
        # information that is richer than what the format itself stores.
        # Formats that do not support this need to raise NotImplementedError
        # on the add_lines_with_ghosts api.
        vf = self.get_file()
        # add a revision with ghost parents
        # The preferred form is utf8, but we should translate when needed
        parent_id_unicode = u'b\xbfse'
        parent_id_utf8 = parent_id_unicode.encode('utf8')
        try:
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
        except NotImplementedError:
            # check the other ghost apis are also not implemented
            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
            return
        vf = self.reopen_file()
        # test key graph related apis: get_ancestry, _graph, get_parents
        # - these are ghost unaware and must not reflect ghosts
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
        self.assertFalse(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
        # if we add something that is a ghost of another, it should correct the
        # results of the prior apis
        vf.add_lines(parent_id_utf8, [], [])
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
            vf.get_parent_map(['notbxbfse']))
        self.assertTrue(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))

    def test_add_lines_with_ghosts_after_normal_revs(self):
        # some versioned file formats allow lines to be added with parent
        # information that is richer than what the format itself stores.
        # Formats that do not support this need to raise NotImplementedError
        # on the add_lines_with_ghosts api.
        vf = self.get_file()
        # probe for ghost support
        try:
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
        except NotImplementedError:
            return
        vf.add_lines_with_ghosts('references_ghost',
                                 ['base', 'a_ghost'],
                                 ['line\n', 'line_b\n', 'line_c\n'])
        origins = vf.annotate('references_ghost')
        self.assertEquals(('base', 'line\n'), origins[0])
        self.assertEquals(('base', 'line_b\n'), origins[1])
        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])

    def test_readonly_mode(self):
        transport = get_transport(self.get_url('.'))
        factory = self.get_factory()
        vf = factory('id', transport, 0777, create=True, access_mode='w')
        vf = factory('id', transport, access_mode='r')
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
        self.assertRaises(errors.ReadOnlyError,
                          vf.add_lines_with_ghosts,
                          'base',
                          [],
                          [])

    def test_get_sha1s(self):
        # check the sha1 data is available
        vf = self.get_file()
        # a simple file
        vf.add_lines('a', [], ['a\n'])
        # the same file, different metadata
        vf.add_lines('b', ['a'], ['a\n'])
        # a file differing only in last newline.
        vf.add_lines('c', [], ['a'])
        self.assertEqual({
            'a': '3f786850e387550fdab836ed7e6dc881de23001b',
            'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
            'b': '3f786850e387550fdab836ed7e6dc881de23001b',
            },
            vf.get_sha1s(['a', 'c', 'b']))


class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)
        w.add_lines('v1', [], ['hello\n'])
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])

        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        self.assertEqual([('{', 0),
                          'hello\n',
                          ('}', None),
                          ('{', 1),
                          'there\n',
                          ('}', None),
                          ], w._weave)

        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f',
                          '90f265c6e75f1c8f9ab76dcf85528352c5f215ef',
                          ], w._sha1s)
        w.check()

        # Corrupted
        w._weave[4] = 'There\n'
        return w

    def get_file_corrupted_checksum(self):
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = 'there\n'
        self.assertEqual('hello\nthere\n', w.get_text('v2'))

        # Invalid checksum, first digit changed
        w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
            get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          get_transport(self.get_url('.')),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):

    def setUp(self):
        TestCaseWithMemoryTransport.setUp(self)
        mapper = PrefixMapper()
        factory = make_file_factory(True, mapper)
        self.vf1 = factory(self.get_transport('root-1'))
        self.vf2 = factory(self.get_transport('root-2'))
        self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root')
        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])

    def test_add_lines(self):
        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a:'), None, [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a:'), [], None)

    def setup_abcde(self):
        self.vf1.add_lines(('root', 'A'), [], ['a'])
        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
        self.vf2.add_lines(('root', 'C'), [], ['c'])
        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
        self.plan_merge_vf.add_lines(('root', 'E:'),
            [('root', 'B'), ('root', 'D')], ['e'])

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({('root', 'B'):(('root', 'A'),)},
            self.plan_merge_vf.get_parent_map([('root', 'B')]))
        self.assertEqual({('root', 'D'):(('root', 'C'),)},
            self.plan_merge_vf.get_parent_map([('root', 'D')]))
        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
        self.assertEqual({},
            self.plan_merge_vf.get_parent_map([('root', 'F')]))
        self.assertEqual({
                ('root', 'B'):(('root', 'A'),),
                ('root', 'D'):(('root', 'C'),),
                ('root', 'E:'):(('root', 'B'),('root', 'D')),
                },
            self.plan_merge_vf.get_parent_map(
                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))

    def test_get_record_stream(self):
        self.setup_abcde()
        def get_record(suffix):
            return self.plan_merge_vf.get_record_stream(
                [('root', suffix)], 'unordered', True).next()
        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
        self.assertEqual('absent', get_record('F').storage_kind)
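
    # A minimal sketch (hypothetical) of the get_record_stream contract the
    # assertions above rely on: missing keys still yield a record factory,
    # whose storage_kind is 'absent', rather than raising an exception.
    #
    #   for record in vf.get_record_stream(keys, 'unordered', True):
    #       if record.storage_kind == 'absent':
    #           continue  # key not present in this store
    #       text = record.get_bytes_as('fulltext')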


class TestReadonlyHttpMixin(object):

    def get_transaction(self):
        return 1

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())

        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)


class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):

    def get_file(self):
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class MergeCasesMixin(object):

    def doMerge(self, base, a, b, mp):
        from cStringIO import StringIO
        from textwrap import dedent

        def addcrlf(x):
            return x + '\n'

        w = self.get_file()
        w.add_lines('text0', [], map(addcrlf, base))
        w.add_lines('text1', ['text0'], map(addcrlf, a))
        w.add_lines('text2', ['text0'], map(addcrlf, b))

        self.log_contents(w)

        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))

        self.log('merge:')
        mt = StringIO()
        mt.writelines(w.weave_merge(p))
        mt.seek(0)
        self.log(mt.getvalue())

        mp = map(addcrlf, mp)
        self.assertEqual(mt.readlines(), mp)

    def testOneInsert(self):
        self.doMerge([],
                     ['aa'],
                     [],
                     ['aa'])

    def testSeparateInserts(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    def testSameInsert(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']

    def testOverlappedInsert(self):
        self.doMerge(['aaa', 'bbb'],
                     ['aaa', 'xxx', 'yyy', 'bbb'],
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # ['aaa', 'xxx', 'yyy', 'bbb']

    def testClashReplace(self):
        self.doMerge(['aaa'],
                     ['xxx'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge(['aaa'],
                     ['xxx', 'aaa'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge(['aaa'],
                     ['aaa'],
                     ['yyy', 'zzz'],
                     ['yyy', 'zzz'])

    def testDeleteAndModify(self):
        """Clashing delete and modification.

        If one side modifies a region and the other deletes it then
        there should be a conflict with one side blank.
        """

        #######################################
        # skipped, not working yet
        return

        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'ddd', 'ccc'],
                     ['aaa', 'ccc'],
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])

    def _test_merge_from_strings(self, base, a, b, expected):
        w = self.get_file()
        w.add_lines('text0', [], base.splitlines(True))
        w.add_lines('text1', ['text0'], a.splitlines(True))
        w.add_lines('text2', ['text0'], b.splitlines(True))
        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))
        self.log('merge result:')
        result_text = ''.join(w.weave_merge(p))
        self.log(result_text)
        self.assertEqualDiff(result_text, expected)

    def test_weave_merge_conflicts(self):
        # does weave merge properly handle plans that end with unchanged?
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
        self.assertEqual(result, 'hello\n')
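
    # A minimal sketch (hypothetical plan) of the input weave_merge consumes:
    # an iterable of (state, line) pairs, where states such as 'unchanged',
    # 'new-a' and 'new-b' drive the conflict markup in the merged output.
    #
    #   plan = [('unchanged', 'aaa\n'), ('new-a', 'xxx\n')]
    #   merged = ''.join(self.get_file().weave_merge(plan))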

    def test_deletion_extended(self):
        """One side deletes, the other deletes more.
        """
        self._test_merge_from_strings(base, a, b, result)

    def test_deletion_overlap(self):
        """Delete overlapping regions with no other conflict.

        Arguably it'd be better to treat these as agreement, rather than
        conflict, but for now conflict is safer.
        """
        base = """\
            start context
            int a() {}
            int b() {}
            int c() {}
            end context
            """
        a = """\
            start context
            int a() {}
            end context
            """
        b = """\
            start context
            int c() {}
            end context
            """
        result = """\
            start context
<<<<<<< 
            int a() {}
=======
            int c() {}
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_agreement_deletion(self):
        """Agree to delete some lines, without conflicts."""
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            end context
            """
        b = """\
            start context
            base line 1
            end context
            """
        result = """\
            start context
            base line 1
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_sync_on_deletion(self):
        """Specific case of merge where we can synchronize incorrectly.

        A previous version of the weave merge concluded that the two versions
        agreed on deleting line 2, and this could be a synchronization point.
        Line 1 was then considered in isolation, and thought to be deleted on
        both sides.

        It's better to consider the whole thing as a disagreement region.
        """
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            a's replacement line 2
            end context
            """
        b = """\
            start context
            b replaces
            both lines
            end context
            """
        result = """\
            start context
<<<<<<< 
            base line 1
            a's replacement line 2
=======
            b replaces
            both lines
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)


class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)
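
    # A minimal sketch (hypothetical record variable) of using a looked-up
    # adapter: convert a record from its stored kind to the kind a consumer
    # wants, e.g. a gzipped knit fulltext down to a plain fulltext.
    #
    #   factory = versionedfile.adapter_registry.get(
    #       ('knit-ft-gz', 'fulltext'))
    #   adapter = factory(None)
    #   text = adapter.get_bytes(record)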

    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the interesting adapted texts for tests."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base)
        # merged is both a delta and multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged)
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        #format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        #format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        #knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
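
    # A minimal sketch of what these mappers are for (paths below are the
    # ones asserted above): a mapper turns the first element of a key into
    # the relative path used to store that file's texts, and unmap inverts
    # the mapping.
    #
    #   mapper = versionedfile.HashPrefixMapper()
    #   path = mapper.map(("file-id", "revision-id"))   # '9b/file-id'
    #   key_prefix = mapper.unmap(path)                 # ('file-id',)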


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(lambda:self.cleanup(files))
        return files

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()
        files.check()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)

    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied, adding old content raises."""
        vf = self.get_versionedfiles()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            if add_lines:
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                         lines)
            else:
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
                                         ''.join(lines))
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + "2")
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, new_key, [], lines,
                nostore_sha=sha)
            self.assertRaises(errors.ExistingContent,
                vf._add_text, new_key, [], ''.join(lines),
                nostore_sha=sha)
            # and no new version should have been added.
            record = vf.get_record_stream([new_key], 'unordered', True).next()
            self.assertEqual('absent', record.storage_kind)

    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test__add_text_nostoresha(self):
        self._add_content_nostoresha(add_lines=False)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])
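
    # A minimal sketch (hypothetical stream variable) of consuming records
    # while honouring storage_kind: readable kinds can be decoded directly,
    # while the wire-format kinds above are passed through to an adapter.
    #
    #   for factory in stream:
    #       if factory.storage_kind in ('fulltext', 'chunked'):
    #           chunks = factory.get_bytes_as('chunked')
    #       else:
    #           raw = factory.get_bytes_as(factory.storage_kind)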

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)
1802
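    # The expected orderings below rank the diamond keys: in 'topological'
    # order a parent must be seen before its children, so 'base' ranks lowest
    # and 'merged' highest; 'groupcompress' order is roughly the reverse
    # (newest first, grouped by key prefix), hence the inverted ranks and the
    # separate 3-5 rank group for the FileB prefix.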
    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def get_keys_and_groupcompress_sort_order(self):
        """Get diamond test keys list, and their groupcompress sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):2,
                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
                ('FileB', 'base'):5,
                }
        return keys, sort_order

    def test_get_record_stream_interface_ordered(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """Each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))

        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_groupcompress(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_groupcompress_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'groupcompress', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

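    # assertStreamOrder() checks ranks rather than an exact sequence: 'lows'
    # remembers, per key prefix, the highest rank seen so far, and a stream
    # is accepted as ordered when ranks never decrease within a prefix.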
    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try a storage
        # kind, have it fail, and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that record's content is the byte string bytes."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

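    # The wire-readiness tests round-trip records by serialising each one
    # with get_bytes_as(record.storage_kind), feeding the bytes to a
    # NetworkRecordStream, and checking that the reconstructed records (and,
    # where applicable, the content inserted into a target) are unchanged.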
    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use, not
            # wire serialisation: check they can be used directly. To send
            # such records over the wire a translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))

    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record

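    # A note on wire-readiness: 'chunked' and 'fulltext' records have no
    # network serialisation of their own, so the helper below skips them
    # (counting each skip) and only yields the byte form of kinds that a
    # NetworkRecordStream can reconstruct on the far side.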
    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
            stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire, thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been copied.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire, thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been copied.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # consume the stream from the network, checking the content was
        # carried across correctly without doing delta inspection.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire, thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                if factory.sha1 is not None:
                    sha1 = files.get_sha1s([factory.key])[factory.key]
                    self.assertEqual(sha1, factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't
        # ask for just absent keys, to ensure that content before and after
        # the absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries),
            seen.add, parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return None

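    # annotate() returns two parallel lists: for each line of the requested
    # text, a tuple of the key(s) that introduced that line, and the line
    # itself.  With no graph there is no ancestry to consult, so every line
    # is attributed to the requested version.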
    def test_get_annotator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        origin_key = self.get_simple_key('origin')
        base_key = self.get_simple_key('base')
        left_key = self.get_simple_key('left')
        right_key = self.get_simple_key('right')
        merged_key = self.get_simple_key('merged')
        # introduced full text
        origins, lines = files.get_annotator().annotate(origin_key)
        self.assertEqual([(origin_key,)], origins)
        self.assertEqual(['origin\n'], lines)
        # a delta
        origins, lines = files.get_annotator().annotate(base_key)
        self.assertEqual([(base_key,)], origins)
        self.assertEqual(['base\n'], lines)
        origins, lines = files.get_annotator().annotate(merged_key)
        if self.graph:
            self.assertEqual([
                (base_key,),
                (left_key,),
                (right_key,),
                (merged_key,),
                ], origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (merged_key,),
                (merged_key,),
                (merged_key,),
                (merged_key,),
                ], origins)
        self.assertRaises(RevisionNotPresent,
            files.get_annotator().annotate, self.get_simple_key('missing-key'))

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = (
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',), ('r1',), ('r2',), ('r3',)))),
                )
        else:
            parent_details = (
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                )
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key:parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that expected and actual have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents,
                expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts with no trailing EOL."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits with no EOL."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits with no EOL."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into files.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

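    # Knit deltas are stored against a compression parent (the 'left' text in
    # the diamond data), so a stream pulled from the source below can contain
    # 'knit-delta' records whose basis is not in the stream; the following
    # tests exercise how implementations cope with such missing bases.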
    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis.  In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)
            files.check()

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis.  That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream.  The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        files.check()

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])

        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                    pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines

        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

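    # make_mpdiffs() produces multi-parent diffs (the representation used by
    # bundles); the round-trip below regenerates each text from its mpdiff
    # via add_mpdiffs() in topological order and checks that the resulting
    # fulltexts match the originals.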
    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 26 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol, with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext')
                )

    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))


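# VirtualVersionedFiles adapts a plain parent-map callback and a line-fetching
# callback into the read-only parts of the VersionedFiles API; write
# operations are expected to raise NotImplementedError, which is what the
# tests below pin down.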
class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
                                           self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_mpdiffs, [])

    def test_check_noerrors(self):
        self.texts.check()

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
                          [])

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
                          self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",), ("B",))},
                          self.texts.get_parent_map([("G",), ("L",)]))

    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
                          sorted(list(it)))


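# OrderingVersionedFilesDecorator wraps another VersionedFiles object and
# re-orders 'unordered' record streams according to a caller-supplied
# key->priority map (lower numbers first, unknown keys sorted to the front),
# while recording the calls it passes through for inspection.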
class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)

    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
                           False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',