# Copyright (C) 2005 Canonical Ltd
#
# Authors:
#   Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA


# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.

from StringIO import StringIO

from bzrlib import (
    errors,
    osutils,
    progress,
    )
from bzrlib.errors import (
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from bzrlib import knit as _mod_knit
from bzrlib.knit import (
    KnitAnnotateFactory,
    KnitPlainFactory,
    make_file_knit,
    )
from bzrlib.symbol_versioning import one_four
from bzrlib.tests import TestCaseWithMemoryTransport, TestSkipped
from bzrlib.tests.http_utils import TestCaseWithWebserver
from bzrlib.trace import mutter
from bzrlib.transport import get_transport
from bzrlib.transport.memory import MemoryTransport
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile
import bzrlib.versionedfile as versionedfile
from bzrlib.weave import WeaveFile
from bzrlib.weavefile import read_weave, write_weave


def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True end the last line with \n.
    :param left_only: If True do not add the 'right' and 'merged' texts.
    """
    parents = {
        'origin': (),
        'base': (('origin',),),
        'left': (('base',),),
        'right': (('base',),),
        'merged': (('left',), ('right',)),
        }
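    # A sketch of the graph the add_lines calls below build, drawn
    # parent -> child:
    #
    #     origin
    #        |
    #      base
    #      /  \
    #   left  right
    #      \  /
    #     merged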
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    f.add_lines('origin', [], ['origin' + last_char])
    f.add_lines('base', ['origin'], ['base' + last_char])
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
    if not left_only:
        f.add_lines('right', ['base'],
            ['base\n', 'right' + last_char])
        f.add_lines('merged', ['left', 'right'],
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
    return f, parents


class VersionedFileTestMixIn(object):
    """A mixin test class for testing VersionedFiles.

    This is not an adaptor-style test at this point because
    there's no dynamic substitution of versioned file implementations,
    they are strictly controlled by their owning repositories.
    """

    def get_transaction(self):
        if not hasattr(self, '_transaction'):
            self._transaction = None
        return self._transaction
    def test_add(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        versions = f.versions()
        self.assertTrue('r0' in versions)
        self.assertTrue('r1' in versions)
        self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
        self.assertEquals(f.get_text('r0'), 'a\nb\n')
        self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
        self.assertEqual(2, len(f))
        self.assertEqual(2, f.num_versions())
        self.assertRaises(RevisionNotPresent,
            f.add_lines, 'r2', ['foo'], [])
        self.assertRaises(RevisionAlreadyPresent,
            f.add_lines, 'r1', [], [])
        # this checks that reopen with create=True does not break anything.
        f = self.reopen_file(create=True)
    def test_get_record_stream_empty(self):
        """get_record_stream is a replacement for get_data_stream."""
        f = self.get_file()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))
    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'fulltext', 'knit-annotated-ft-gz',
             'knit-annotated-delta-gz', 'knit-ft-gz', 'knit-delta-gz'])
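        # Reading the names: 'ft' records carry a full text and 'delta'
        # records a line delta against a parent; 'annotated' variants carry
        # per-line origin information; and the '-gz' variants are the
        # gzipped wire forms (see the GzipFile round-trips further down).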

    def capture_stream(self, f, entries, on_seen, parents):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
            self.assertEqual(parents[factory.key[0]], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""
        f, parents = get_diamond_vf(self.get_file())
        entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
            'unordered', False)
        seen = set()
        self.capture_stream(f, entries, seen.add, parents)
        self.assertEqual(set([('base',), ('left',), ('right',), ('merged',)]),
            seen)

    def test_get_record_stream_interface_ordered(self):
        """Each item in a stream has to provide a regular interface."""
        f, parents = get_diamond_vf(self.get_file())
        entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
            'topological', False)
        seen = []
        self.capture_stream(f, entries, seen.append, parents)
        self.assertSubset([tuple(seen)],
            (
             (('base',), ('left',), ('right',), ('merged',)),
             (('base',), ('right',), ('left',), ('merged',)),
            ))

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """Each item in a stream has to provide a regular interface."""
        f, parents = get_diamond_vf(self.get_file())
        entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
            'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
            self.assertEqual(parents[factory.key[0]], factory.parents)
            self.assertEqual(f.get_text(factory.key[0]),
                factory.get_bytes_as('fulltext'))
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertSubset([tuple(seen)],
            (
             (('base',), ('left',), ('right',), ('merged',)),
             (('base',), ('right',), ('left',), ('merged',)),
            ))

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        f, parents = get_diamond_vf(self.get_file())
        entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
            'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
            self.assertEqual(parents[factory.key[0]], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set([('base',), ('left',), ('right',), ('merged',)]),
            seen)

    def test_get_record_stream_missing_records_are_absent(self):
        f, parents = get_diamond_vf(self.get_file())
        entries = f.get_record_stream(['merged', 'left', 'right', 'or', 'base'],
            'unordered', False)
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key == ('or',):
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
                self.assertEqual(parents[factory.key[0]], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(
            set([('base',), ('left',), ('right',), ('merged',), ('or',)]),
            seen)

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        f = self.get_file()
        f.insert_record_stream([])

    def assertIdenticalVersionedFile(self, left, right):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(left.versions()), set(right.versions()))
        self.assertEqual(left.get_parent_map(left.versions()),
            right.get_parent_map(right.versions()))
        for v in left.versions():
            self.assertEqual(left.get_text(v), right.get_text(v))

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        f = self.get_file()
        weave_vf = WeaveFile('source', get_transport(self.get_url('.')),
            create=True, get_scope=self.get_transaction)
        source, _ = get_diamond_vf(weave_vf)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts."""
        f = self.get_file()
        weave_vf = WeaveFile('source', get_transport(self.get_url('.')),
            create=True, get_scope=self.get_transaction)
        source, _ = get_diamond_vf(weave_vf, trailing_eol=False)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        f = self.get_file()
        source = make_file_knit('source', get_transport(self.get_url('.')),
            create=True)
        get_diamond_vf(source)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits."""
        f = self.get_file()
        source = make_file_knit('source', get_transport(self.get_url('.')),
            create=True)
        get_diamond_vf(source, trailing_eol=False)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        f = self.get_file()
        source = make_file_knit('source', get_transport(self.get_url('.')),
            create=True, factory=KnitPlainFactory())
        get_diamond_vf(source)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits."""
        f = self.get_file()
        source = make_file_knit('source', get_transport(self.get_url('.')),
            create=True, factory=KnitPlainFactory())
        get_diamond_vf(source, trailing_eol=False)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        f = self.get_file()
        source = make_file_knit('source', get_transport(self.get_url('.')),
            create=True, factory=KnitPlainFactory())
        get_diamond_vf(source)
        # insert some keys into f.
        get_diamond_vf(f, left_only=True)
        stream = source.get_record_stream(source.versions(), 'topological',
            False)
        f.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(f, source)

    def test_adds_with_parent_texts(self):
        f = self.get_file()
        parent_texts = {}
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
        try:
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
        except NotImplementedError:
            # if the format doesn't support ghosts, just add normally.
            _, _, parent_texts['r1'] = f.add_lines('r1',
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
        self.assertNotEqual(None, parent_texts['r0'])
        self.assertNotEqual(None, parent_texts['r1'])
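        # parent_texts is an optimisation: add_lines returns an opaque
        # content object for the new text, and passing those objects back
        # in lets the next add delta against its parents without
        # re-extracting them from storage.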
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertTrue('r2' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
            self.assertEqual(3, f.num_versions())
            origins = f.annotate('r1')
            self.assertEquals(origins[0][0], 'r0')
            self.assertEquals(origins[1][0], 'r1')
            origins = f.annotate('r2')
            self.assertEquals(origins[0][0], 'r1')
            self.assertEquals(origins[1][0], 'r2')
        verify_file(f)
        f = self.reopen_file()
        verify_file(f)

    def test_add_unicode_content(self):
        # unicode content is not permitted in versioned files.
        # versioned files version sequences of bytes only.
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterUnicode,
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
        self.assertRaises(
            (errors.BzrBadParameterUnicode, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])

    def test_add_follows_left_matching_blocks(self):
        """If we change left_matching_blocks, delta changes.

        Note: There are multiple correct deltas in this case, because
        we start with 1 "a" and we get 3.
        """
        vf = self.get_file()
        if isinstance(vf, WeaveFile):
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
        vf.add_lines('1', [], ['a\n'])
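        # Each matching block is a (left_start, new_start, length) triple,
        # the same shape difflib.SequenceMatcher.get_matching_blocks
        # produces: (0, 0, 1) below keeps line 0 of version '1' matched to
        # line 0 of version '2', and the (n, m, 0) sentinel ends the list.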
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))

    def test_inline_newline_throws(self):
        # \n characters embedded inside a line are not permitted
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterContainsNewline,
            vf.add_lines, 'a', [], ['a\n\n'])
        self.assertRaises(
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
        # but inline CRs are allowed
        vf.add_lines('a', [], ['a\r\n'])
        try:
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
        except NotImplementedError:
            pass

    def test_add_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId,
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])

    def test_add_lines_nostoresha(self):
        """When nostore_sha is supplied, adding old content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_with_ghosts_nostoresha(self):
        """When nostore_sha is supplied, adding old content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        # is the test applicable to this vf implementation?
        try:
            vf.add_lines_with_ghosts('d', [], [])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines_with_ghosts, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_return_value(self):
        # add_lines should return the sha1 and the text size.
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        # check results for the three cases:
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            # the first two elements are the same for all versioned files:
            # - the digest and the size of the text. For some versioned files
            #   additional data is returned in additional tuple elements.
            result = vf.add_lines(version, [], lines)
            self.assertEqual(3, len(result))
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
                result[0:2])
        # parents should not affect the result:
        lines = sample_text_nl[1]
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
            vf.add_lines('d', ['b', 'c'], lines)[0:2])

    def test_get_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
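        # A multi-parent diff (mpdiff) describes a text as hunks: ranges
        # copied from any of its parents plus ranges of newly introduced
        # lines, so a text can be rebuilt from its parents, its sha1 and
        # the diff alone - which is what the loop below verifies.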
        vf = self.get_file('foo')
        sha1s = self._setup_for_deltas(vf)
        new_vf = self.get_file('bar')
        for version in multiparent.topo_iter(vf):
            mpdiff = vf.make_mpdiffs([version])[0]
            new_vf.add_mpdiffs([(version, vf.get_parent_map([version])[version],
                                 vf.get_sha1s([version])[0], mpdiff)])
            self.assertEqualDiff(vf.get_text(version),
                                 new_vf.get_text(version))

    def _setup_for_deltas(self, f):
        self.assertFalse(f.has_version('base'))
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 26 insertions
        f.add_lines('base', [], ['line\n'])
        f.add_lines('noeol', ['base'], ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
        # differing last line with parent, both no-eol
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        f.add_lines('eol', ['noeol'], ['phone\n'])
        # add eol following a noneol parent, no change content
        f.add_lines('eolline', ['noeol'], ['line\n'])
        # noeol with no parents:
        f.add_lines('noeolbase', [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol, with the
        # later-inserted parent the leftmost.
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
        # two identical no-eol texts
        f.add_lines('noeoldup', ['noeol'], ['line'])
        next_parent = 'base'
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0: 'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1: '45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2: 'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3: '26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4: 'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5: 'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6: '2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7: '95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8: '779e9a0b28f9f832528d4b21e17e168c67697272',
                 9: '1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10: '131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11: 'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12: '31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13: 'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14: '2c4b1736566b8ca6051e668de68650686a3922f2',
                 15: '5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16: 'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17: '8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18: '5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19: '1ebed371807ba5935958ad0884595126e8c4e823',
                 20: '2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21: '01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22: 'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23: 'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24: 'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25: 'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        next_parent = 'base'
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        return sha1s

    def test_ancestry(self):
        f = self.get_file()
        self.assertEqual([], f.get_ancestry([]))
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
        self.assertEqual([], f.get_ancestry([]))
        versions = f.get_ancestry(['rM'])
        # there are some possibilities:
        # r0 r1 r2 rM
        # r0 r2 r1 rM
        # so we check indexes
        r0 = versions.index('r0')
        r1 = versions.index('r1')
        r2 = versions.index('r2')
        self.assertFalse('r3' in versions)
        rM = versions.index('rM')
        self.assertTrue(r0 < r1)
        self.assertTrue(r0 < r2)
        self.assertTrue(r1 < rM)
        self.assertTrue(r2 < rM)

        self.assertRaises(RevisionNotPresent,
            f.get_ancestry, ['rM', 'rX'])

        self.assertEqual(set(f.get_ancestry('rM')),
            set(f.get_ancestry('rM', topo_sorted=False)))

    def test_mutate_after_finish(self):
        self._transaction = 'before'
        f = self.get_file()
        self._transaction = 'after'
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
        self.assertRaises(errors.OutSideTransaction,
            f.add_lines_with_ghosts, '', [], [])
        self.assertRaises(errors.OutSideTransaction, f.join, '')

    def test_clone_text(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        self.applyDeprecated(one_four, f.clone_text, 'r1', 'r0', ['r0'])
        def verify_file(f):
            self.assertEquals(f.get_lines('r1'), f.get_lines('r0'))
            self.assertEquals(f.get_lines('r1'), ['a\n', 'b\n'])
            self.assertEqual({'r1':('r0',)}, f.get_parent_map(['r1']))
            self.assertRaises(RevisionNotPresent,
                self.applyDeprecated, one_four, f.clone_text, 'r2', 'rX', [])
            self.assertRaises(RevisionAlreadyPresent,
                self.applyDeprecated, one_four, f.clone_text, 'r1', 'r0', [])
        verify_file(f)
        verify_file(self.reopen_file())

    def test_copy_to(self):
        f = self.get_file()
        f.add_lines('0', [], ['a\n'])
        t = MemoryTransport()
        f.copy_to('foo', t)
        for suffix in self.get_factory().get_suffixes():
            self.assertTrue(t.has('foo' + suffix))

    def test_get_suffixes(self):
        f = self.get_file()
        # the suffixes should be a list
        self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))

    def build_graph(self, file, graph):
        for node in topo_sort(graph.items()):
            file.add_lines(node, graph[node], [])

    def test_get_graph(self):
        f = self.get_file()
        graph = {
            'v1': (),
            'v2': ('v1', ),
            'v3': ('v2', )}
        self.build_graph(f, graph)
        self.assertEqual(graph, self.applyDeprecated(one_four, f.get_graph))

    def test_get_graph_partial(self):
        f = self.get_file()
        complex_graph = {}
        simple_a = {
            'c': (),
            'b': ('c', ),
            'a': ('b', ),
            }
        complex_graph.update(simple_a)
        simple_b = {
            'c': (),
            'b': ('c', ),
            }
        complex_graph.update(simple_b)
        simple_gam = {
            'c': (),
            'oo': (),
            'bar': ('oo', 'c'),
            'gam': ('bar', ),
            }
        complex_graph.update(simple_gam)
        simple_b_gam = {}
        simple_b_gam.update(simple_gam)
        simple_b_gam.update(simple_b)
        self.build_graph(f, complex_graph)
        self.assertEqual(simple_a, self.applyDeprecated(one_four, f.get_graph,
            ['a']))
        self.assertEqual(simple_b, self.applyDeprecated(one_four, f.get_graph,
            ['b']))
        self.assertEqual(simple_gam, self.applyDeprecated(one_four,
            f.get_graph, ['gam']))
        self.assertEqual(simple_b_gam, self.applyDeprecated(one_four,
            f.get_graph, ['b', 'gam']))

    def test_get_parents(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', [], ['a\n', 'b\n'])
        f.add_lines('r2', [], ['a\n', 'b\n'])
        f.add_lines('r3', [], ['a\n', 'b\n'])
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
        self.assertEqual(['r0', 'r1', 'r2', 'r3'],
            self.applyDeprecated(one_four, f.get_parents, 'm'))
        self.assertRaises(RevisionNotPresent,
            self.applyDeprecated, one_four, f.get_parents, 'y')

    def test_get_parent_map(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        self.assertEqual(
            {'r0':()}, f.get_parent_map(['r0']))
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
        self.assertEqual(
            {'r1':('r0',)}, f.get_parent_map(['r1']))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'r1']))
        f.add_lines('r2', [], ['a\n', 'b\n'])
        f.add_lines('r3', [], ['a\n', 'b\n'])
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
        self.assertEqual(
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
        self.assertEqual({}, f.get_parent_map('y'))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'y', 'r1']))

    def test_annotate(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
        origins = f.annotate('r1')
        self.assertEquals(origins[0][0], 'r1')
        self.assertEquals(origins[1][0], 'r0')

        self.assertRaises(RevisionNotPresent,
            f.annotate, 'foo')

    def test_detection(self):
        # Test weaves detect corruption.
        #
        # Weaves contain a checksum of their texts.
        # When a text is extracted, this checksum should be
        # verified.

        w = self.get_file_corrupted_text()
        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

        w = self.get_file_corrupted_checksum()
        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

    def get_file_corrupted_text(self):
        """Return a versioned file with corrupt text but valid metadata."""
        raise NotImplementedError(self.get_file_corrupted_text)

    def reopen_file(self, name='foo', create=False):
        """Open the versioned file from disk again."""
        raise NotImplementedError(self.reopen_file)

    def test_iter_parents(self):
        """iter_parents returns the parents for many nodes."""
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
        f.add_lines('r2', ['r1', 'r0'], ['a\n', 'b\n'])
        # cases: each sample data individually:
        self.assertEqual(set([('r0', ())]),
            set(self.applyDeprecated(one_four, f.iter_parents, ['r0'])))
        self.assertEqual(set([('r1', ('r0', ))]),
            set(self.applyDeprecated(one_four, f.iter_parents, ['r1'])))
        self.assertEqual(set([('r2', ('r1', 'r0'))]),
            set(self.applyDeprecated(one_four, f.iter_parents, ['r2'])))
        # no nodes returned for a missing node
        self.assertEqual(set(),
            set(self.applyDeprecated(one_four, f.iter_parents, ['missing'])))
        # 1 node returned with missing nodes skipped
        self.assertEqual(set([('r1', ('r0', ))]),
            set(self.applyDeprecated(one_four, f.iter_parents, ['ghost1', 'r1',
                'ghost2'])))
        # 2 nodes returned
        self.assertEqual(set([('r0', ()), ('r1', ('r0', ))]),
            set(self.applyDeprecated(one_four, f.iter_parents, ['r0', 'r1'])))
        # 2 nodes returned, missing skipped
        self.assertEqual(set([('r0', ()), ('r1', ('r0', ))]),
            set(self.applyDeprecated(one_four, f.iter_parents,
                ['a', 'r0', 'b', 'r1', 'c'])))

    def test_iter_lines_added_or_present_in_versions(self):
        # test that we get at least an equal set of the lines added by
        # versions in the weave
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        vf = self.get_file()
        # add a base to get included
        vf.add_lines('base', [], ['base\n'])
        # add an ancestor to be included on one side
        vf.add_lines('lancestor', [], ['lancestor\n'])
        # add an ancestor to be included on the other side
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        vf.add_lines('otherchild',
                     ['lancestor', 'base'],
                     ['base\n', 'lancestor\n', 'otherchild\n'])

        def iter_with_versions(versions, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in vf.iter_lines_added_or_present_in_versions(versions,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
                                   [('Walking content.', 0, 2),
                                    ('Walking content.', 1, 2),
                                    ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_versions(None, [('Walking content.', 0, 5),
                                          ('Walking content.', 1, 5),
                                          ('Walking content.', 2, 5),
                                          ('Walking content.', 3, 5),
                                          ('Walking content.', 4, 5),
                                          ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)

    def test_add_lines_with_ghosts(self):
        # some versioned file formats allow lines to be added with parent
        # information that is > than that in the format. Formats that do
        # not support this need to raise NotImplementedError on the
        # add_lines_with_ghosts api.
        vf = self.get_file()
        # add a revision with ghost parents
        # The preferred form is utf8, but we should translate when needed
        parent_id_unicode = u'b\xbfse'
        parent_id_utf8 = parent_id_unicode.encode('utf8')
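        # u'\xbf' is INVERTED QUESTION MARK (U+00BF), so the utf8 form used
        # as the revision id below is the byte string 'b\xc2\xbfse'.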
        try:
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
        except NotImplementedError:
            # check the other ghost apis are also not implemented
            self.assertRaises(NotImplementedError,
                vf.get_ancestry_with_ghosts, ['foo'])
            self.assertRaises(NotImplementedError,
                vf.get_parents_with_ghosts, 'foo')
            return
        vf = self.reopen_file()
        # test key graph related apis: get_ancestry, get_graph, get_parents
        # - these are ghost unaware and must not reflect ghosts
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
        self.assertEqual([],
            self.applyDeprecated(one_four, vf.get_parents, 'notbxbfse'))
        self.assertEqual({'notbxbfse':()}, self.applyDeprecated(one_four,
            vf.get_graph))
        self.assertFalse(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8],
            vf.get_parents_with_ghosts('notbxbfse'))
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
            self.applyDeprecated(one_four, vf.get_graph_with_ghosts))
        self.assertTrue(self.applyDeprecated(one_four, vf.has_ghost,
            parent_id_utf8))
        # if we add something that is a ghost of another, it should correct the
        # results of the prior apis
        vf.add_lines(parent_id_utf8, [], [])
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry(['notbxbfse']))
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
            vf.get_parent_map(['notbxbfse']))
        self.assertEqual({parent_id_utf8:(),
                          'notbxbfse':(parent_id_utf8, ),
                          },
            self.applyDeprecated(one_four, vf.get_graph))
        self.assertTrue(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8],
            vf.get_parents_with_ghosts('notbxbfse'))
        self.assertEqual({parent_id_utf8:(),
                          'notbxbfse':(parent_id_utf8,),
                          },
            self.applyDeprecated(one_four, vf.get_graph_with_ghosts))
        self.assertFalse(self.applyDeprecated(one_four, vf.has_ghost,
            parent_id_utf8))

    def test_add_lines_with_ghosts_after_normal_revs(self):
        # some versioned file formats allow lines to be added with parent
        # information that is > than that in the format. Formats that do
        # not support this need to raise NotImplementedError on the
        # add_lines_with_ghosts api.
        vf = self.get_file()
        # probe for ghost support
        try:
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        vf.add_lines_with_ghosts('references_ghost',
                                 ['base', 'a_ghost'],
                                 ['line\n', 'line_b\n', 'line_c\n'])
        origins = vf.annotate('references_ghost')
        self.assertEquals(('base', 'line\n'), origins[0])
        self.assertEquals(('base', 'line_b\n'), origins[1])
        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])

    def test_readonly_mode(self):
        transport = get_transport(self.get_url('.'))
        factory = self.get_factory()
        vf = factory('id', transport, 0777, create=True, access_mode='w')
        vf = factory('id', transport, access_mode='r')
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
        self.assertRaises(errors.ReadOnlyError,
                          vf.add_lines_with_ghosts,
                          'base',
                          [],
                          [])
        self.assertRaises(errors.ReadOnlyError, vf.join, 'base')

    def test_get_sha1s(self):
        # check the sha1 data is available
        vf = self.get_file()
        vf.add_lines('a', [], ['a\n'])
        # the same file, different metadata
        vf.add_lines('b', ['a'], ['a\n'])
        # a file differing only in last newline.
        vf.add_lines('c', [], ['a'])
        # Deprecated single-version API.
        self.assertEqual(
            '3f786850e387550fdab836ed7e6dc881de23001b',
            self.applyDeprecated(one_four, vf.get_sha1, 'a'))
        self.assertEqual(
            '3f786850e387550fdab836ed7e6dc881de23001b',
            self.applyDeprecated(one_four, vf.get_sha1, 'b'))
        self.assertEqual(
            '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
            self.applyDeprecated(one_four, vf.get_sha1, 'c'))
        self.assertEqual(['3f786850e387550fdab836ed7e6dc881de23001b',
                          '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
                          '3f786850e387550fdab836ed7e6dc881de23001b'],
                         vf.get_sha1s(['a', 'c', 'b']))


class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)
        w.add_lines('v1', [], ['hello\n'])
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])

        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        self.assertEqual([('{', 0)
                         , 'hello\n'
                         , ('}', None)
                         , ('{', 1)
                         , 'there\n'
                         , ('}', None)
                         ], w._weave)

        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
                         , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
                         ], w._sha1s)
        w.check()

        # Corrupted
        w._weave[4] = 'There\n'
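        # index 4 is the 'there\n' body line of v2 in the list above;
        # changing its case leaves the recorded sha1 stale, so extracting
        # v2 now trips the checksum verification exercised by
        # test_detection.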
        return w

    def get_file_corrupted_checksum(self):
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = 'there\n'
        self.assertEqual('hello\nthere\n', w.get_text('v2'))

        # Invalid checksum, first digit changed
        w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
            get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          get_transport(self.get_url('.')),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class TestKnit(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo', create=True):
        return make_file_knit(name, get_transport(self.get_url('.')),
            delta=True, create=True, get_scope=self.get_transaction)

    def get_factory(self):
        return make_file_knit

    def get_file_corrupted_text(self):
        knit = self.get_file()
        knit.add_lines('v1', [], ['hello\n'])
        knit.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
        return knit

    def reopen_file(self, name='foo', create=False):
        return self.get_file(name, create)

    def test_detection(self):
        knit = self.get_file()
        knit.check()

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile, self.get_factory(), 'foo',
                          get_transport(self.get_url('.')))


class TestPlaintextKnit(TestKnit):
    """Test a knit with no cached annotations"""

    def get_file(self, name='foo', create=True):
        return make_file_knit(name, get_transport(self.get_url('.')),
            delta=True, create=create, get_scope=self.get_transaction,
            factory=_mod_knit.KnitPlainFactory())


class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):

    def setUp(self):
        TestCaseWithMemoryTransport.setUp(self)
        self.vf1 = make_file_knit('root', self.get_transport(), create=True)
        self.vf2 = make_file_knit('root', self.get_transport(), create=True)
        self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root',
            [self.vf1, self.vf2])

    def test_add_lines(self):
        self.plan_merge_vf.add_lines('a:', [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines, 'a', [],
                          [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines, 'a:', None,
                          [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines, 'a:', [],
                          None)

    def test_ancestry(self):
        self.vf1.add_lines('A', [], [])
        self.vf1.add_lines('B', ['A'], [])
        self.plan_merge_vf.add_lines('C:', ['B'], [])
        self.plan_merge_vf.add_lines('D:', ['C:'], [])
        self.assertEqual(set(['A', 'B', 'C:', 'D:']),
            self.plan_merge_vf.get_ancestry('D:', topo_sorted=False))

    def setup_abcde(self):
        self.vf1.add_lines('A', [], ['a'])
        self.vf1.add_lines('B', ['A'], ['b'])
        self.vf2.add_lines('C', [], ['c'])
        self.vf2.add_lines('D', ['C'], ['d'])
        self.plan_merge_vf.add_lines('E:', ['B', 'D'], ['e'])

    def test_ancestry_uses_all_versionedfiles(self):
        self.setup_abcde()
        self.assertEqual(set(['A', 'B', 'C', 'D', 'E:']),
            self.plan_merge_vf.get_ancestry('E:', topo_sorted=False))

    def test_ancestry_raises_revision_not_present(self):
        error = self.assertRaises(errors.RevisionNotPresent,
                                  self.plan_merge_vf.get_ancestry, 'E:', False)
        self.assertContainsRe(str(error), '{E:} not present in "root"')

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({'B':('A',)}, self.plan_merge_vf.get_parent_map(['B']))
        self.assertEqual({'D':('C',)}, self.plan_merge_vf.get_parent_map(['D']))
        self.assertEqual({'E:':('B', 'D')},
            self.plan_merge_vf.get_parent_map(['E:']))
        self.assertEqual({}, self.plan_merge_vf.get_parent_map(['F']))
        self.assertEqual({
                'B':('A',),
                'D':('C',),
                'E:':('B', 'D'),
                }, self.plan_merge_vf.get_parent_map(['B', 'D', 'E:', 'F']))

    def test_get_lines(self):
        self.setup_abcde()
        self.assertEqual(['a'], self.plan_merge_vf.get_lines('A'))
        self.assertEqual(['c'], self.plan_merge_vf.get_lines('C'))
        self.assertEqual(['e'], self.plan_merge_vf.get_lines('E:'))
        error = self.assertRaises(errors.RevisionNotPresent,
                                  self.plan_merge_vf.get_lines, 'F')
        self.assertContainsRe(str(error), '{F} not present in "root"')


class InterString(versionedfile.InterVersionedFile):
    """An inter-versionedfile optimised code path for strings.

    This is for use during testing where we use strings as versionedfiles
    so that none of the default registered interversionedfile classes will
    match - which lets us test the match logic.
    """

    @staticmethod
    def is_compatible(source, target):
        """InterString is compatible with strings-as-versionedfiles."""
        return isinstance(source, str) and isinstance(target, str)


# TODO this and the InterRepository core logic should be consolidatable
# if we make the registry a separate class though we still need to
# test the behaviour in the active registry to catch failure-to-handle-


class TestInterVersionedFile(TestCaseWithMemoryTransport):

    def test_get_default_inter_versionedfile(self):
        # test that the InterVersionedFile.get(a, b) probes
        # for a class where is_compatible(a, b) returns
        # true and returns a default interversionedfile otherwise.
        # This also tests that the default registered optimised interversionedfile
        # classes do not barf inappropriately when a surprising versionedfile type
        # is handed to them.
        dummy_a = "VersionedFile 1."
        dummy_b = "VersionedFile 2."
        self.assertGetsDefaultInterVersionedFile(dummy_a, dummy_b)

    def assertGetsDefaultInterVersionedFile(self, a, b):
        """Asserts that InterVersionedFile.get(a, b) -> the default."""
        inter = versionedfile.InterVersionedFile.get(a, b)
        self.assertEqual(versionedfile.InterVersionedFile,
                         inter.__class__)
        self.assertEqual(a, inter.source)
        self.assertEqual(b, inter.target)

    def test_register_inter_versionedfile_class(self):
        # test that an optimised code path provider - an
        # InterVersionedFile subclass can be registered and unregistered
        # and that it is correctly selected when given a versionedfile
        # pair that it returns true on for the is_compatible static method
        dummy_a = "VersionedFile 1."
        dummy_b = "VersionedFile 2."
        versionedfile.InterVersionedFile.register_optimiser(InterString)
        try:
            # we should get the default for something InterString returns False
            # to
            self.assertFalse(InterString.is_compatible(dummy_a, None))
            self.assertGetsDefaultInterVersionedFile(dummy_a, None)
            # and we should get an InterString for a pair it 'likes'
            self.assertTrue(InterString.is_compatible(dummy_a, dummy_b))
            inter = versionedfile.InterVersionedFile.get(dummy_a, dummy_b)
            self.assertEqual(InterString, inter.__class__)
            self.assertEqual(dummy_a, inter.source)
            self.assertEqual(dummy_b, inter.target)
        finally:
            versionedfile.InterVersionedFile.unregister_optimiser(InterString)
        # now we should get the default InterVersionedFile object again.
        self.assertGetsDefaultInterVersionedFile(dummy_a, dummy_b)


class TestReadonlyHttpMixin(object):

    def get_transaction(self):
        return 1

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())
        # now with feeling.
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)


class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):

    def get_file(self):
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class TestKnitHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):

    def get_file(self):
        return make_file_knit('foo', get_transport(self.get_url('.')),
            delta=True, create=True, get_scope=self.get_transaction)

    def get_factory(self):
        return make_file_knit


class MergeCasesMixin(object):

    def doMerge(self, base, a, b, mp):
        from cStringIO import StringIO
        from textwrap import dedent

        def addcrlf(x):
            return x + '\n'

        w = self.get_file()
        w.add_lines('text0', [], map(addcrlf, base))
        w.add_lines('text1', ['text0'], map(addcrlf, a))
        w.add_lines('text2', ['text0'], map(addcrlf, b))

        self.log_contents(w)

        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
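        # plan_merge yields (state, line) pairs; the states are labels such
        # as 'unchanged', 'new-a', 'new-b', 'killed-a' and 'killed-b'
        # (test_weave_merge_conflicts below feeds a literal 'new-a' plan
        # element straight to weave_merge).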
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))

        self.log('merge:')
        mt = StringIO()
        mt.writelines(w.weave_merge(p))
        mt.seek(0)
        self.log(mt.getvalue())

        mp = map(addcrlf, mp)
        self.assertEqual(mt.readlines(), mp)

    def testOneInsert(self):
        self.doMerge([],
                     ['aa'],
                     [],
                     ['aa'])

    def testSeparateInserts(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    def testSameInsert(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']

    def testOverlappedInsert(self):
        self.doMerge(['aaa', 'bbb'],
                     ['aaa', 'xxx', 'yyy', 'bbb'],
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # ['aaa', 'xxx', 'yyy', 'bbb']

    def testClashReplace(self):
        self.doMerge(['aaa'],
                     ['xxx'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge(['aaa'],
                     ['xxx', 'aaa'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge(['aaa'],
                     ['aaa'],
                     ['yyy', 'zzz'],
                     ['yyy', 'zzz'])

    def testDeleteAndModify(self):
        """Clashing delete and modification.

        If one side modifies a region and the other deletes it then
        there should be a conflict with one side blank.
        """
        #######################################
        # skipped, not working yet
        return

        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'ddd', 'ccc'],
                     ['aaa', 'ccc'],
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])

    def _test_merge_from_strings(self, base, a, b, expected):
        w = self.get_file()
        w.add_lines('text0', [], base.splitlines(True))
        w.add_lines('text1', ['text0'], a.splitlines(True))
        w.add_lines('text2', ['text0'], b.splitlines(True))
        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))
        self.log('merge result:')
        result_text = ''.join(w.weave_merge(p))
        self.log(result_text)
        self.assertEqualDiff(result_text, expected)

    def test_weave_merge_conflicts(self):
        # does weave merge properly handle plans that end with unchanged?
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
        self.assertEqual(result, 'hello\n')

    def test_deletion_extended(self):
        """One side deletes, the other deletes more.
        """
        base = """\
            line 1
            line 2
            line 3
            """
        a = """\
            line 1
            line 2
            """
        b = """\
            line 1
            """
        result = """\
            line 1
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_deletion_overlap(self):
        """Delete overlapping regions with no other conflict.

        Arguably it'd be better to treat these as agreement, rather than
        conflict, but for now conflict is safer.
        """
        base = """\
            start context
            int a() {}
            int b() {}
            int c() {}
            end context
            """
        a = """\
            start context
            int a() {}
            end context
            """
        b = """\
            start context
            int c() {}
            end context
            """
        result = """\
            start context
<<<<<<< 
            int a() {}
=======
            int c() {}
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_agreement_deletion(self):
        """Agree to delete some lines, without conflicts."""
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            end context
            """
        b = """\
            start context
            base line 1
            end context
            """
        result = """\
            start context
            base line 1
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_sync_on_deletion(self):
        """Specific case of merge where we can synchronize incorrectly.

        A previous version of the weave merge concluded that the two versions
        agreed on deleting line 2, and this could be a synchronization point.
        Line 1 was then considered in isolation, and thought to be deleted on
        both sides.

        It's better to consider the whole thing as a disagreement region.
        """
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            a's replacement line 2
            end context
            """
        b = """\
            start context
            b replaces
            both lines
            end context
            """
        result = """\
            start context
<<<<<<< 
            base line 1
            a's replacement line 2
=======
            b replaces
            both lines
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)


class TestKnitMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

    def get_file(self, name='foo'):
        return make_file_knit(name, get_transport(self.get_url('.')),
            delta=True, create=True)

    def log_contents(self, w):
        pass


class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
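            # adapter factories take an optional backing versioned file
            # (the delta adapters use it to fetch basis texts); None is
            # enough here since the adapters are only instantiated, not
            # exercised.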
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)

    def get_knit(self, annotated=True):
        if annotated:
            factory = KnitAnnotateFactory()
        else:
            factory = KnitPlainFactory()
        return make_file_knit('knit', self.get_transport('.'), delta=True,
            create=True, factory=factory)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the interesting adapted texts for tests."""
        # origin is a fulltext
        entries = f.get_record_stream(['origin'], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base, base.get_bytes_as(base.storage_kind))
        # merged is both a delta and multiple parents.
        entries = f.get_record_stream(['merged'], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged,
            merged.get_bytes_as(merged.storage_kind))
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(), trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit())
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(), trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
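        # RecordingVersionedFileDecorator logs ('get_lines', version)
        # tuples in .calls, so the assertions below can check that the
        # delta adapter pulled exactly its basis text, 'left', from the
        # backing file.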
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit())
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(annotated=False))
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(annotated=False),
            trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)